/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
	struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
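
/*
 * Quick reference for the ISP23xx host_status decode above (derived from
 * the switch cases; the firmware interface documentation is authoritative):
 *
 *   0x01, 0x02, 0x10, 0x11 -- mailbox command completion, mb0 in MSW(stat)
 *   0x12                   -- asynchronous event, mb1-mb3 in the mailboxes
 *   0x13                   -- response queue update
 *   0x15                   -- fast-post, one 16-bit handle in MSW(stat)
 *   0x16                   -- fast-post, 32-bit handle (mb2 holds high word)
 */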
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	wptr = (uint16_t __iomem *)&reg24->mailbox1;
	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);

	/* Acknowledgement needed? [Notify && non-zero timeout]. */
	timeout = (descr >> 8) & 0xf;
	if (aen != MBA_IDC_NOTIFY || !timeout)
		return;

	ql_dbg(ql_dbg_async, vha, 0x5022,
	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
	    vha->host_no, event[aen & 0xff], timeout);

	rval = qla2x00_post_idc_ack_work(vha, mb);
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0x5023,
		    "IDC failed to post ACK.\n");
}

#define LS_UNKNOWN	2
char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha)
{
	static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
	char *link_speed;
	int fw_speed = ha->link_data_rate;

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		link_speed = link_speeds[0];
	else if (fw_speed == 0x13)
		link_speed = link_speeds[6];
	else {
		link_speed = link_speeds[LS_UNKNOWN];
		if (fw_speed < 6)
			link_speed =
			    link_speeds[fw_speed];
	}

	return link_speed;
}
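
/*
 * Example (derived from the table above): firmware reports link_data_rate
 * as a small index -- 0 = 1 Gbps, 1 = 2 Gbps, 3 = 4 Gbps, 4 = 8 Gbps and
 * 5 = 16 Gbps -- while 0x13 is the CNA 10 Gbps encoding; any other value
 * prints as "?".
 */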
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	unsigned long	flags;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
		    RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->flags.port0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES.\n");

		break;
	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_dbg(ql_dbg_async, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = IS_QLA82XX(ha) ?
		    RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
		ql_dbg(ql_dbg_async, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *         OR 0xffff for global event
		 * mb[2] = New login state
		 *         7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *           vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		/* Global event -- port logout or port unavailable. */
		if (mb[1] == 0xffff && mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port unavailable %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			ql_log(ql_log_warn, vha, 0x505e,
			    "Link is offline.\n");

			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);

			qlt_async_event(mb[0], vha, mb);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		ql_log(ql_log_warn, vha, 0x505f,
		    "Link is operational (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
			set_bit(SCR_PENDING, &vha->dpc_flags);

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);

		qlt_async_event(mb[0], vha, mb);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
				| vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_COMPLETE:
	case MBA_IDC_NOTIFY:
	case MBA_IDC_TIME_EXT:
		qla81xx_idc_event(vha, mb[0], mb[1]);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(ha, sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

static srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(vha, sp, 0);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	bsg_job = sp->u.bsg_job;

	type = "ct pass-through";

	comp_status = le16_to_cpu(pkt->comp_status);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}
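
/*
 * Note: as with qla2x00_ct_entry() above, qla24xx_els_ct_entry() reports
 * FC_CTELS_STATUS_OK and leaves decoding of the CT/ELS payload to the bsg
 * caller; the 24xx variant additionally copies the firmware completion
 * status and error subcodes just past the fc_bsg_reply so the caller can
 * inspect them.
 */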
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(vha, sp, 0);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "Async-%s error - hdl=%x no response info(%x).\n",
		    type, sp->handle, sts->scsi_status);
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		ql_log(ql_log_warn, fcport->vha, 0x503b,
		    "Async-%s error - hdl=%x not enough response(%d).\n",
		    type, sp->handle, sts->rsp_data_len);
	} else if (sts->data[3]) {
		ql_log(ql_log_warn, fcport->vha, 0x503c,
		    "Async-%s error - hdl=%x response(%x).\n",
		    type, sp->handle, sts->data[3]);
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
	}

	sp->done(vha, sp, 0);
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t        handle_cnt;
	uint16_t        cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
	uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA.  In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
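/*
 * Worked example (assuming 512-byte sectors guarded by 8-byte DIF tuples):
 * a 4-block write at LBA 0x1000 carries expected ref tags 0x1000..0x1003.
 * If the error IOCB reports e_ref_tag = 0x1002 with an all-ones app tag,
 * the sector is ignorable: blocks_done = 0x1002 - 0x1000 + 1 = 3 and the
 * residual becomes scsi_bufflen(cmd) - 3 * 512.
 */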
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct sd_dif_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t	ox_id;
	uint8_t		lscsi_status;
	int32_t		resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t		*rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	/* Validate handle. */
	if (handle < MAX_OUTSTANDING_COMMANDS) {
		sp = req->outstanding_cmds[handle];
		req->outstanding_cmds[handle] = NULL;
	} else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle (0x%x).\n", sts->handle);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * A SCSI status of Task Set Full or Busy means the
			 * task was not completed; any other status here
			 * implies dropped frames.
			 */

			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero.  If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		break;
	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) "
		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
		    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len);

	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}
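
/*
 * Note: when sense data does not fit in a single status IOCB,
 * qla2x00_handle_sense() parks the srb in rsp->status_srb and the command
 * is not completed above; the remaining bytes arrive in one or more
 * status-continuation entries (handled below) and the command finishes
 * only once the tracked sense length reaches zero.
 */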
/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
	    sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "type of error status in response: 0x%x\n", pkt->entry_status);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue.\n");

	if (IS_QLA82XX(ha))
		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
	else
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
1996  * @vha: SCSI driver HA context
1997  */
1998 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1999 	struct rsp_que *rsp)
2000 {
2001 	struct sts_entry_24xx *pkt;
2002 	struct qla_hw_data *ha = vha->hw;
2003 
2004 	if (!vha->flags.online)
2005 		return;
2006 
2007 	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2008 		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2009 
2010 		rsp->ring_index++;
2011 		if (rsp->ring_index == rsp->length) {
2012 			rsp->ring_index = 0;
2013 			rsp->ring_ptr = rsp->ring;
2014 		} else {
2015 			rsp->ring_ptr++;
2016 		}
2017 
2018 		if (pkt->entry_status != 0) {
2019 			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2020 
2021 			(void)qlt_24xx_process_response_error(vha, pkt);
2022 
2023 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2024 			wmb();
2025 			continue;
2026 		}
2027 
2028 		switch (pkt->entry_type) {
2029 		case STATUS_TYPE:
2030 			qla2x00_status_entry(vha, rsp, pkt);
2031 			break;
2032 		case STATUS_CONT_TYPE:
2033 			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2034 			break;
2035 		case VP_RPT_ID_IOCB_TYPE:
2036 			qla24xx_report_id_acquisition(vha,
2037 			    (struct vp_rpt_id_entry_24xx *)pkt);
2038 			break;
2039 		case LOGINOUT_PORT_IOCB_TYPE:
2040 			qla24xx_logio_entry(vha, rsp->req,
2041 			    (struct logio_entry_24xx *)pkt);
2042 			break;
2043 		case TSK_MGMT_IOCB_TYPE:
2044 			qla24xx_tm_iocb_entry(vha, rsp->req,
2045 			    (struct tsk_mgmt_entry *)pkt);
2046 			break;
2047 		case CT_IOCB_TYPE:
2048 			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2049 			break;
2050 		case ELS_IOCB_TYPE:
2051 			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2052 			break;
2053 		case ABTS_RECV_24XX:
2054 			/* Ensure that the ATIO queue is empty, then fall through. */
2055 			qlt_24xx_process_atio_queue(vha);
2056 		case ABTS_RESP_24XX:
2057 		case CTIO_TYPE7:
2058 		case NOTIFY_ACK_TYPE:
2059 			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2060 			break;
2061 		case MARKER_TYPE:
2062 			/* Do nothing in this case, this check is to prevent it
2063 			 * from falling into default case
2064 			 */
2065 			break;
2066 		default:
2067 			/* Type Not Supported. */
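			/*
			 * Unknown entry types are logged and dropped; the
			 * entry is still marked RESPONSE_PROCESSED below so
			 * the ring pointer keeps advancing.
			 */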
2068 			ql_dbg(ql_dbg_async, vha, 0x5042,
2069 			    "Received unknown response pkt type %x "
2070 			    "entry status=%x.\n",
2071 			    pkt->entry_type, pkt->entry_status);
2072 			break;
2073 		}
2074 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2075 		wmb();
2076 	}
2077 
2078 	/* Adjust ring index */
2079 	if (IS_QLA82XX(ha)) {
2080 		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2081 		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2082 	} else
2083 		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2084 }
2085 
2086 static void
2087 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2088 {
2089 	int rval;
2090 	uint32_t cnt;
2091 	struct qla_hw_data *ha = vha->hw;
2092 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2093 
2094 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
2095 		return;
2096 
2097 	rval = QLA_SUCCESS;
2098 	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2099 	RD_REG_DWORD(&reg->iobase_addr);
2100 	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2101 	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2102 	    rval == QLA_SUCCESS; cnt--) {
2103 		if (cnt) {
2104 			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2105 			udelay(10);
2106 		} else
2107 			rval = QLA_FUNCTION_TIMEOUT;
2108 	}
2109 	if (rval == QLA_SUCCESS)
2110 		goto next_test;
2111 
2112 	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2113 	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2114 	    rval == QLA_SUCCESS; cnt--) {
2115 		if (cnt) {
2116 			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2117 			udelay(10);
2118 		} else
2119 			rval = QLA_FUNCTION_TIMEOUT;
2120 	}
2121 	if (rval != QLA_SUCCESS)
2122 		goto done;
2123 
2124 next_test:
2125 	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2126 		ql_log(ql_log_info, vha, 0x504c,
2127 		    "Additional code -- 0x55AA.\n");
2128 
2129 done:
2130 	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2131 	RD_REG_DWORD(&reg->iobase_window);
2132 }
2133 
2134 /**
2135  * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
2136  * @irq:
2137  * @dev_id: SCSI driver HA context
2138  *
2139  * Called by system whenever the host adapter generates an interrupt.
2140  *
2141  * Returns handled flag.
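 *
 * Services mailbox completions, asynchronous events, response-queue
 * entries and ATIO-queue updates until the RISC deasserts its
 * interrupt, or for at most 50 iterations.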
2142  */
2143 irqreturn_t
2144 qla24xx_intr_handler(int irq, void *dev_id)
2145 {
2146 	scsi_qla_host_t *vha;
2147 	struct qla_hw_data *ha;
2148 	struct device_reg_24xx __iomem *reg;
2149 	int status;
2150 	unsigned long iter;
2151 	uint32_t stat;
2152 	uint32_t hccr;
2153 	uint16_t mb[4];
2154 	struct rsp_que *rsp;
2155 	unsigned long flags;
2156 
2157 	rsp = (struct rsp_que *) dev_id;
2158 	if (!rsp) {
2159 		ql_log(ql_log_info, NULL, 0x5059,
2160 		    "%s: NULL response queue pointer.\n", __func__);
2161 		return IRQ_NONE;
2162 	}
2163 
2164 	ha = rsp->hw;
2165 	reg = &ha->iobase->isp24;
2166 	status = 0;
2167 
2168 	if (unlikely(pci_channel_offline(ha->pdev)))
2169 		return IRQ_HANDLED;
2170 
2171 	spin_lock_irqsave(&ha->hardware_lock, flags);
2172 	vha = pci_get_drvdata(ha->pdev);
2173 	for (iter = 50; iter--; ) {
2174 		stat = RD_REG_DWORD(&reg->host_status);
2175 		if (stat & HSRX_RISC_PAUSED) {
2176 			if (unlikely(pci_channel_offline(ha->pdev)))
2177 				break;
2178 
2179 			hccr = RD_REG_DWORD(&reg->hccr);
2180 
2181 			ql_log(ql_log_warn, vha, 0x504b,
2182 			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
2183 			    hccr);
2184 
2185 			qla2xxx_check_risc_status(vha);
2186 
2187 			ha->isp_ops->fw_dump(vha, 1);
2188 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2189 			break;
2190 		} else if ((stat & HSRX_RISC_INT) == 0)
2191 			break;
2192 
2193 		switch (stat & 0xff) {
2194 		case 0x1:
2195 		case 0x2:
2196 		case 0x10:
2197 		case 0x11:
2198 			qla24xx_mbx_completion(vha, MSW(stat));
2199 			status |= MBX_INTERRUPT;
2200 
2201 			break;
2202 		case 0x12:
2203 			mb[0] = MSW(stat);
2204 			mb[1] = RD_REG_WORD(&reg->mailbox1);
2205 			mb[2] = RD_REG_WORD(&reg->mailbox2);
2206 			mb[3] = RD_REG_WORD(&reg->mailbox3);
2207 			qla2x00_async_event(vha, rsp, mb);
2208 			break;
2209 		case 0x13:
2210 		case 0x14:
2211 			qla24xx_process_response_queue(vha, rsp);
2212 			break;
2213 		case 0x1C: /* ATIO queue updated */
2214 			qlt_24xx_process_atio_queue(vha);
2215 			break;
2216 		case 0x1D: /* ATIO and response queues updated */
2217 			qlt_24xx_process_atio_queue(vha);
2218 			qla24xx_process_response_queue(vha, rsp);
2219 			break;
2220 		default:
2221 			ql_dbg(ql_dbg_async, vha, 0x504f,
2222 			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
2223 			break;
2224 		}
2225 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2226 		RD_REG_DWORD_RELAXED(&reg->hccr);
2227 	}
2228 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2229 
2230 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2231 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2232 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2233 		complete(&ha->mbx_intr_comp);
2234 	}
2235 
2236 	return IRQ_HANDLED;
2237 }
2238 
2239 static irqreturn_t
2240 qla24xx_msix_rsp_q(int irq, void *dev_id)
2241 {
2242 	struct qla_hw_data *ha;
2243 	struct rsp_que *rsp;
2244 	struct device_reg_24xx __iomem *reg;
2245 	struct scsi_qla_host *vha;
2246 	unsigned long flags;
2247 
2248 	rsp = (struct rsp_que *) dev_id;
2249 	if (!rsp) {
2250 		ql_log(ql_log_info, NULL, 0x505a,
2251 		    "%s: NULL response queue pointer.\n", __func__);
2252 		return IRQ_NONE;
2253 	}
2254 	ha = rsp->hw;
2255 	reg = &ha->iobase->isp24;
2256 
2257 	spin_lock_irqsave(&ha->hardware_lock, flags);
2258 
2259 	vha = pci_get_drvdata(ha->pdev);
2260 	qla24xx_process_response_queue(vha, rsp);
2261 	if (!ha->flags.disable_msix_handshake) {
2262 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2263 		RD_REG_DWORD_RELAXED(&reg->hccr);
2264 	}
2265 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2266 
2267 	return IRQ_HANDLED;
2268 }
2269 
2270 static irqreturn_t
2271 qla25xx_msix_rsp_q(int irq, void *dev_id)
2272 {
2273 	struct qla_hw_data *ha;
2274 	struct rsp_que *rsp;
2275 	struct device_reg_24xx __iomem *reg;
2276 	unsigned long flags;
2277 
2278 	rsp = (struct rsp_que *) dev_id;
2279 	if (!rsp) {
2280 		ql_log(ql_log_info, NULL, 0x505b,
2281 		    "%s: NULL response queue pointer.\n", __func__);
2282 		return IRQ_NONE;
2283 	}
2284 	ha = rsp->hw;
2285 
2286 	/* Clear the interrupt, if enabled, for this response queue */
2287 	if (!ha->flags.disable_msix_handshake) {
2288 		reg = &ha->iobase->isp24;
2289 		spin_lock_irqsave(&ha->hardware_lock, flags);
2290 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2291 		RD_REG_DWORD_RELAXED(&reg->hccr);
2292 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2293 	}
2294 	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2295 
2296 	return IRQ_HANDLED;
2297 }
2298 
2299 static irqreturn_t
2300 qla24xx_msix_default(int irq, void *dev_id)
2301 {
2302 	scsi_qla_host_t *vha;
2303 	struct qla_hw_data *ha;
2304 	struct rsp_que *rsp;
2305 	struct device_reg_24xx __iomem *reg;
2306 	int status;
2307 	uint32_t stat;
2308 	uint32_t hccr;
2309 	uint16_t mb[4];
2310 	unsigned long flags;
2311 
2312 	rsp = (struct rsp_que *) dev_id;
2313 	if (!rsp) {
2314 		ql_log(ql_log_info, NULL, 0x505c,
2315 		    "%s: NULL response queue pointer.\n", __func__);
2316 		return IRQ_NONE;
2317 	}
2318 	ha = rsp->hw;
2319 	reg = &ha->iobase->isp24;
2320 	status = 0;
2321 
2322 	spin_lock_irqsave(&ha->hardware_lock, flags);
2323 	vha = pci_get_drvdata(ha->pdev);
2324 	do {
2325 		stat = RD_REG_DWORD(&reg->host_status);
2326 		if (stat & HSRX_RISC_PAUSED) {
2327 			if (unlikely(pci_channel_offline(ha->pdev)))
2328 				break;
2329 
2330 			hccr = RD_REG_DWORD(&reg->hccr);
2331 
2332 			ql_log(ql_log_info, vha, 0x5050,
2333 			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
2334 			    hccr);
2335 
2336 			qla2xxx_check_risc_status(vha);
2337 
2338 			ha->isp_ops->fw_dump(vha, 1);
2339 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2340 			break;
2341 		} else if ((stat & HSRX_RISC_INT) == 0)
2342 			break;
2343 
2344 		switch (stat & 0xff) {
2345 		case 0x1:
2346 		case 0x2:
2347 		case 0x10:
2348 		case 0x11:
2349 			qla24xx_mbx_completion(vha, MSW(stat));
2350 			status |= MBX_INTERRUPT;
2351 
2352 			break;
2353 		case 0x12:
2354 			mb[0] = MSW(stat);
2355 			mb[1] = RD_REG_WORD(&reg->mailbox1);
2356 			mb[2] = RD_REG_WORD(&reg->mailbox2);
2357 			mb[3] = RD_REG_WORD(&reg->mailbox3);
2358 			qla2x00_async_event(vha, rsp, mb);
2359 			break;
2360 		case 0x13:
2361 		case 0x14:
2362 			qla24xx_process_response_queue(vha, rsp);
2363 			break;
2364 		case 0x1C: /* ATIO queue updated */
2365 			qlt_24xx_process_atio_queue(vha);
2366 			break;
2367 		case 0x1D: /* ATIO and response queues updated */
2368 			qlt_24xx_process_atio_queue(vha);
2369 			qla24xx_process_response_queue(vha, rsp);
2370 			break;
2371 		default:
2372 			ql_dbg(ql_dbg_async, vha, 0x5051,
2373 			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
2374 			break;
2375 		}
2376 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2377 	} while (0);
2378 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2379 
2380 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2381 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2382 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2383 		complete(&ha->mbx_intr_comp);
2384 	}
2385 	return IRQ_HANDLED;
2386 }
2387 
2388 /* Interrupt handling helpers. */
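/*
 * Three MSI-X vectors are used when available: a default vector for
 * mailbox and asynchronous-event handling, one for the base response
 * queue, and one per additional response queue on multiqueue-capable
 * adapters.
 */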
2389 
2390 struct qla_init_msix_entry {
2391 	const char *name;
2392 	irq_handler_t handler;
2393 };
2394 
2395 static struct qla_init_msix_entry msix_entries[3] = {
2396 	{ "qla2xxx (default)", qla24xx_msix_default },
2397 	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2398 	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2399 };
2400 
2401 static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2402 	{ "qla2xxx (default)", qla82xx_msix_default },
2403 	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2404 };
2405 
2406 static void
2407 qla24xx_disable_msix(struct qla_hw_data *ha)
2408 {
2409 	int i;
2410 	struct qla_msix_entry *qentry;
2411 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2412 
2413 	for (i = 0; i < ha->msix_count; i++) {
2414 		qentry = &ha->msix_entries[i];
2415 		if (qentry->have_irq)
2416 			free_irq(qentry->vector, qentry->rsp);
2417 	}
2418 	pci_disable_msix(ha->pdev);
2419 	kfree(ha->msix_entries);
2420 	ha->msix_entries = NULL;
2421 	ha->flags.msix_enabled = 0;
2422 	ql_dbg(ql_dbg_init, vha, 0x0042,
2423 	    "Disabled MSI-X.\n");
2424 }
2425 
2426 static int
2427 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2428 {
2429 #define MIN_MSIX_COUNT	2
2430 	int i, ret;
2431 	struct msix_entry *entries;
2432 	struct qla_msix_entry *qentry;
2433 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2434 
2435 	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2436 			GFP_KERNEL);
2437 	if (!entries) {
2438 		ql_log(ql_log_warn, vha, 0x00bc,
2439 		    "Failed to allocate memory for msix_entry.\n");
2440 		return -ENOMEM;
2441 	}
2442 
2443 	for (i = 0; i < ha->msix_count; i++)
2444 		entries[i].entry = i;
2445 
2446 	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2447 	if (ret) {
2448 		if (ret < MIN_MSIX_COUNT)
2449 			goto msix_failed;
2450 
2451 		ql_log(ql_log_warn, vha, 0x00c6,
2452 		    "MSI-X: Failed to enable support -- %d/%d. "
2453 		    "Retrying with %d vectors.\n",
2454 		    ha->msix_count, ret, ret);
2455 		ha->msix_count = ret;
2456 		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2457 		if (ret) {
2458 msix_failed:
2459 			ql_log(ql_log_fatal, vha, 0x00c7,
2460 			    "MSI-X: Failed to enable support, "
2461 			    "giving up -- %d/%d.\n",
2462 			    ha->msix_count, ret);
2463 			goto msix_out;
2464 		}
2465 		ha->max_rsp_queues = ha->msix_count - 1;
2466 	}
2467 	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2468 				ha->msix_count, GFP_KERNEL);
2469 	if (!ha->msix_entries) {
2470 		ql_log(ql_log_fatal, vha, 0x00c8,
2471 		    "Failed to allocate memory for ha->msix_entries.\n");
2472 		ret = -ENOMEM;
2473 		goto msix_out;
2474 	}
2475 	ha->flags.msix_enabled = 1;
2476 
2477 	for (i = 0; i < ha->msix_count; i++) {
2478 		qentry = &ha->msix_entries[i];
2479 		qentry->vector = entries[i].vector;
2480 		qentry->entry = entries[i].entry;
2481 		qentry->have_irq = 0;
2482 		qentry->rsp = NULL;
2483 	}
2484 
2485 	/* Enable MSI-X vectors for the base queue */
2486 	for (i = 0; i < 2; i++) {
2487 		qentry = &ha->msix_entries[i];
2488 		if (IS_QLA82XX(ha)) {
2489 			ret = request_irq(qentry->vector,
2490 			    qla82xx_msix_entries[i].handler,
2491 			    0, qla82xx_msix_entries[i].name, rsp);
2492 		} else {
2493 			ret = request_irq(qentry->vector,
2494 			    msix_entries[i].handler,
2495 			    0, msix_entries[i].name, rsp);
2496 		}
2497 		if (ret) {
2498 			ql_log(ql_log_fatal, vha, 0x00cb,
2499 			    "MSI-X: unable to register handler -- %x/%d.\n",
2500 			    qentry->vector, ret);
2501 			qla24xx_disable_msix(ha);
2502 			ha->mqenable = 0;
2503 			goto msix_out;
2504 		}
2505 		qentry->have_irq = 1;
2506 		qentry->rsp = rsp;
2507 		rsp->msix = qentry;
2508 	}
2509 
2510 	/* Enable MSI-X vector for response queue update for queue 0 */
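	/*
	 * Multiqueue is enabled only when the MQ I/O base is mapped and
	 * more than one request or response queue was negotiated; ISP83xx
	 * additionally requires a valid MSI-X base.
	 */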
2511 	if (IS_QLA83XX(ha)) {
2512 		if (ha->msixbase && ha->mqiobase &&
2513 		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2514 			ha->mqenable = 1;
2515 	} else
2516 		if (ha->mqiobase
2517 		    && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2518 			ha->mqenable = 1;
2519 	ql_dbg(ql_dbg_multiq, vha, 0xc005,
2520 	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2521 	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2522 	ql_dbg(ql_dbg_init, vha, 0x0055,
2523 	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2524 	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2525 
2526 msix_out:
2527 	kfree(entries);
2528 	return ret;
2529 }
2530 
2531 int
2532 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2533 {
2534 	int ret;
2535 	device_reg_t __iomem *reg = ha->iobase;
2536 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2537 
2538 	/* If possible, enable MSI-X. */
2539 	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2540 	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
2541 		goto skip_msi;
2542 
2543 	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2544 	    (ha->pdev->subsystem_device == 0x7040 ||
2545 	     ha->pdev->subsystem_device == 0x7041 ||
2546 	     ha->pdev->subsystem_device == 0x1705)) {
2547 		ql_log(ql_log_warn, vha, 0x0034,
2548 		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
2549 		    ha->pdev->subsystem_vendor,
2550 		    ha->pdev->subsystem_device);
2551 		goto skip_msi;
2552 	}
2553 
2554 	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
2555 		ql_log(ql_log_warn, vha, 0x0035,
2556 		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
2557 		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
2558 		goto skip_msix;
2559 	}
2560 
2561 	ret = qla24xx_enable_msix(ha, rsp);
2562 	if (!ret) {
2563 		ql_dbg(ql_dbg_init, vha, 0x0036,
2564 		    "MSI-X: Enabled (0x%X, 0x%X).\n",
2565 		    ha->chip_revision, ha->fw_attributes);
2566 		goto clear_risc_ints;
2567 	}
2568 	ql_log(ql_log_info, vha, 0x0037,
2569 	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);
2570 skip_msix:
2571 
2572 	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2573 	    !IS_QLA8001(ha))
2574 		goto skip_msi;
2575 
2576 	ret = pci_enable_msi(ha->pdev);
2577 	if (!ret) {
2578 		ql_dbg(ql_dbg_init, vha, 0x0038,
2579 		    "MSI: Enabled.\n");
2580 		ha->flags.msi_enabled = 1;
2581 	} else
2582 		ql_log(ql_log_warn, vha, 0x0039,
2583 		    "MSI: Falling back to INTa mode -- %d.\n", ret);
2584 skip_msi:
2585 
2586 	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2587 	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2588 	    QLA2XXX_DRIVER_NAME, rsp);
2589 	if (ret) {
2590 		ql_log(ql_log_warn, vha, 0x003a,
2591 		    "Failed to reserve interrupt %d; already in use.\n",
2592 		    ha->pdev->irq);
2593 		goto fail;
2594 	}
2595 
2596 clear_risc_ints:
2597 
2598 	/*
2599 	 * FIXME: Noted that 8014s were being dropped during NK testing.
2600 	 * Timing deltas during MSI-X/INTa transitions?
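	 * Note: the RISC interrupt-clearing writes below are skipped on
	 * ISP81xx/82xx/83xx parts.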
2601 	 */
2602 	if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
2603 		goto fail;
2604 	spin_lock_irq(&ha->hardware_lock);
2605 	if (IS_FWI2_CAPABLE(ha)) {
2606 		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2607 		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2608 	} else {
2609 		WRT_REG_WORD(&reg->isp.semaphore, 0);
2610 		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2611 		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2612 	}
2613 	spin_unlock_irq(&ha->hardware_lock);
2614 
2615 fail:
2616 	return ret;
2617 }
2618 
2619 void
2620 qla2x00_free_irqs(scsi_qla_host_t *vha)
2621 {
2622 	struct qla_hw_data *ha = vha->hw;
2623 	struct rsp_que *rsp;
2624 
2625 	/*
2626 	 * We need to check that ha->rsp_q_map is valid in case we are called
2627 	 * from a probe failure context.
2628 	 */
2629 	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
2630 		return;
2631 	rsp = ha->rsp_q_map[0];
2632 
2633 	if (ha->flags.msix_enabled)
2634 		qla24xx_disable_msix(ha);
2635 	else if (ha->flags.msi_enabled) {
2636 		free_irq(ha->pdev->irq, rsp);
2637 		pci_disable_msi(ha->pdev);
2638 	} else
2639 		free_irq(ha->pdev->irq, rsp);
2640 }
2641 
2642 
2643 int qla25xx_request_irq(struct rsp_que *rsp)
2644 {
2645 	struct qla_hw_data *ha = rsp->hw;
2646 	struct qla_init_msix_entry *intr = &msix_entries[2];
2647 	struct qla_msix_entry *msix = rsp->msix;
2648 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2649 	int ret;
2650 
2651 	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2652 	if (ret) {
2653 		ql_log(ql_log_fatal, vha, 0x00e6,
2654 		    "MSI-X: Unable to register handler -- %x/%d.\n",
2655 		    msix->vector, ret);
2656 		return ret;
2657 	}
2658 	msix->have_irq = 1;
2659 	msix->rsp = rsp;
2660 	return ret;
2661 }
2662