1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2011 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 9 #include <linux/delay.h> 10 #include <linux/slab.h> 11 #include <scsi/scsi_tcq.h> 12 #include <scsi/scsi_bsg_fc.h> 13 #include <scsi/scsi_eh.h> 14 15 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 16 static void qla2x00_process_completed_request(struct scsi_qla_host *, 17 struct req_que *, uint32_t); 18 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 19 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 20 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 21 sts_entry_t *); 22 23 /** 24 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 25 * @irq: 26 * @dev_id: SCSI driver HA context 27 * 28 * Called by system whenever the host adapter generates an interrupt. 29 * 30 * Returns handled flag. 31 */ 32 irqreturn_t 33 qla2100_intr_handler(int irq, void *dev_id) 34 { 35 scsi_qla_host_t *vha; 36 struct qla_hw_data *ha; 37 struct device_reg_2xxx __iomem *reg; 38 int status; 39 unsigned long iter; 40 uint16_t hccr; 41 uint16_t mb[4]; 42 struct rsp_que *rsp; 43 unsigned long flags; 44 45 rsp = (struct rsp_que *) dev_id; 46 if (!rsp) { 47 printk(KERN_INFO 48 "%s(): NULL response queue pointer.\n", __func__); 49 return (IRQ_NONE); 50 } 51 52 ha = rsp->hw; 53 reg = &ha->iobase->isp; 54 status = 0; 55 56 spin_lock_irqsave(&ha->hardware_lock, flags); 57 vha = pci_get_drvdata(ha->pdev); 58 for (iter = 50; iter--; ) { 59 hccr = RD_REG_WORD(®->hccr); 60 if (hccr & HCCR_RISC_PAUSE) { 61 if (pci_channel_offline(ha->pdev)) 62 break; 63 64 /* 65 * Issue a "HARD" reset in order for the RISC interrupt 66 * bit to be cleared. Schedule a big hammer to get 67 * out of the RISC PAUSED state. 68 */ 69 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 70 RD_REG_WORD(®->hccr); 71 72 ha->isp_ops->fw_dump(vha, 1); 73 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 74 break; 75 } else if ((RD_REG_WORD(®->istatus) & ISR_RISC_INT) == 0) 76 break; 77 78 if (RD_REG_WORD(®->semaphore) & BIT_0) { 79 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 80 RD_REG_WORD(®->hccr); 81 82 /* Get mailbox data. */ 83 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 84 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 85 qla2x00_mbx_completion(vha, mb[0]); 86 status |= MBX_INTERRUPT; 87 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 88 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 89 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 90 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 91 qla2x00_async_event(vha, rsp, mb); 92 } else { 93 /*EMPTY*/ 94 ql_dbg(ql_dbg_async, vha, 0x5025, 95 "Unrecognized interrupt type (%d).\n", 96 mb[0]); 97 } 98 /* Release mailbox registers. */ 99 WRT_REG_WORD(®->semaphore, 0); 100 RD_REG_WORD(®->semaphore); 101 } else { 102 qla2x00_process_response_queue(rsp); 103 104 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 105 RD_REG_WORD(®->hccr); 106 } 107 } 108 spin_unlock_irqrestore(&ha->hardware_lock, flags); 109 110 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 111 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 112 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 113 complete(&ha->mbx_intr_comp); 114 } 115 116 return (IRQ_HANDLED); 117 } 118 119 /** 120 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 121 * @irq: 122 * @dev_id: SCSI driver HA context 123 * 124 * Called by system whenever the host adapter generates an interrupt. 125 * 126 * Returns handled flag. 127 */ 128 irqreturn_t 129 qla2300_intr_handler(int irq, void *dev_id) 130 { 131 scsi_qla_host_t *vha; 132 struct device_reg_2xxx __iomem *reg; 133 int status; 134 unsigned long iter; 135 uint32_t stat; 136 uint16_t hccr; 137 uint16_t mb[4]; 138 struct rsp_que *rsp; 139 struct qla_hw_data *ha; 140 unsigned long flags; 141 142 rsp = (struct rsp_que *) dev_id; 143 if (!rsp) { 144 printk(KERN_INFO 145 "%s(): NULL response queue pointer.\n", __func__); 146 return (IRQ_NONE); 147 } 148 149 ha = rsp->hw; 150 reg = &ha->iobase->isp; 151 status = 0; 152 153 spin_lock_irqsave(&ha->hardware_lock, flags); 154 vha = pci_get_drvdata(ha->pdev); 155 for (iter = 50; iter--; ) { 156 stat = RD_REG_DWORD(®->u.isp2300.host_status); 157 if (stat & HSR_RISC_PAUSED) { 158 if (unlikely(pci_channel_offline(ha->pdev))) 159 break; 160 161 hccr = RD_REG_WORD(®->hccr); 162 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 163 ql_log(ql_log_warn, vha, 0x5026, 164 "Parity error -- HCCR=%x, Dumping " 165 "firmware.\n", hccr); 166 else 167 ql_log(ql_log_warn, vha, 0x5027, 168 "RISC paused -- HCCR=%x, Dumping " 169 "firmware.\n", hccr); 170 171 /* 172 * Issue a "HARD" reset in order for the RISC 173 * interrupt bit to be cleared. Schedule a big 174 * hammer to get out of the RISC PAUSED state. 175 */ 176 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 177 RD_REG_WORD(®->hccr); 178 179 ha->isp_ops->fw_dump(vha, 1); 180 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 181 break; 182 } else if ((stat & HSR_RISC_INT) == 0) 183 break; 184 185 switch (stat & 0xff) { 186 case 0x1: 187 case 0x2: 188 case 0x10: 189 case 0x11: 190 qla2x00_mbx_completion(vha, MSW(stat)); 191 status |= MBX_INTERRUPT; 192 193 /* Release mailbox registers. */ 194 WRT_REG_WORD(®->semaphore, 0); 195 break; 196 case 0x12: 197 mb[0] = MSW(stat); 198 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 199 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 200 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 201 qla2x00_async_event(vha, rsp, mb); 202 break; 203 case 0x13: 204 qla2x00_process_response_queue(rsp); 205 break; 206 case 0x15: 207 mb[0] = MBA_CMPLT_1_16BIT; 208 mb[1] = MSW(stat); 209 qla2x00_async_event(vha, rsp, mb); 210 break; 211 case 0x16: 212 mb[0] = MBA_SCSI_COMPLETION; 213 mb[1] = MSW(stat); 214 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 215 qla2x00_async_event(vha, rsp, mb); 216 break; 217 default: 218 ql_dbg(ql_dbg_async, vha, 0x5028, 219 "Unrecognized interrupt type (%d).\n", stat & 0xff); 220 break; 221 } 222 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 223 RD_REG_WORD_RELAXED(®->hccr); 224 } 225 spin_unlock_irqrestore(&ha->hardware_lock, flags); 226 227 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 228 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 229 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 230 complete(&ha->mbx_intr_comp); 231 } 232 233 return (IRQ_HANDLED); 234 } 235 236 /** 237 * qla2x00_mbx_completion() - Process mailbox command completions. 238 * @ha: SCSI driver HA context 239 * @mb0: Mailbox0 register 240 */ 241 static void 242 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 243 { 244 uint16_t cnt; 245 uint32_t mboxes; 246 uint16_t __iomem *wptr; 247 struct qla_hw_data *ha = vha->hw; 248 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 249 250 /* Read all mbox registers? */ 251 mboxes = (1 << ha->mbx_count) - 1; 252 if (!ha->mcp) 253 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERRROR.\n"); 254 else 255 mboxes = ha->mcp->in_mb; 256 257 /* Load return mailbox registers. */ 258 ha->flags.mbox_int = 1; 259 ha->mailbox_out[0] = mb0; 260 mboxes >>= 1; 261 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); 262 263 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 264 if (IS_QLA2200(ha) && cnt == 8) 265 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); 266 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) 267 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 268 else if (mboxes & BIT_0) 269 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 270 271 wptr++; 272 mboxes >>= 1; 273 } 274 } 275 276 static void 277 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) 278 { 279 static char *event[] = 280 { "Complete", "Request Notification", "Time Extension" }; 281 int rval; 282 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; 283 uint16_t __iomem *wptr; 284 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; 285 286 /* Seed data -- mailbox1 -> mailbox7. */ 287 wptr = (uint16_t __iomem *)®24->mailbox1; 288 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 289 mb[cnt] = RD_REG_WORD(wptr); 290 291 ql_dbg(ql_dbg_async, vha, 0x5021, 292 "Inter-Driver Commucation %s -- " 293 "%04x %04x %04x %04x %04x %04x %04x.\n", 294 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 295 mb[4], mb[5], mb[6]); 296 297 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 298 timeout = (descr >> 8) & 0xf; 299 if (aen != MBA_IDC_NOTIFY || !timeout) 300 return; 301 302 ql_dbg(ql_dbg_async, vha, 0x5022, 303 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", 304 vha->host_no, event[aen & 0xff], timeout); 305 306 rval = qla2x00_post_idc_ack_work(vha, mb); 307 if (rval != QLA_SUCCESS) 308 ql_log(ql_log_warn, vha, 0x5023, 309 "IDC failed to post ACK.\n"); 310 } 311 312 /** 313 * qla2x00_async_event() - Process aynchronous events. 314 * @ha: SCSI driver HA context 315 * @mb: Mailbox registers (0 - 3) 316 */ 317 void 318 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 319 { 320 #define LS_UNKNOWN 2 321 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; 322 char *link_speed; 323 uint16_t handle_cnt; 324 uint16_t cnt, mbx; 325 uint32_t handles[5]; 326 struct qla_hw_data *ha = vha->hw; 327 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 328 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 329 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 330 uint32_t rscn_entry, host_pid; 331 uint8_t rscn_queue_index; 332 unsigned long flags; 333 334 /* Setup to process RIO completion. */ 335 handle_cnt = 0; 336 if (IS_QLA8XXX_TYPE(ha)) 337 goto skip_rio; 338 switch (mb[0]) { 339 case MBA_SCSI_COMPLETION: 340 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 341 handle_cnt = 1; 342 break; 343 case MBA_CMPLT_1_16BIT: 344 handles[0] = mb[1]; 345 handle_cnt = 1; 346 mb[0] = MBA_SCSI_COMPLETION; 347 break; 348 case MBA_CMPLT_2_16BIT: 349 handles[0] = mb[1]; 350 handles[1] = mb[2]; 351 handle_cnt = 2; 352 mb[0] = MBA_SCSI_COMPLETION; 353 break; 354 case MBA_CMPLT_3_16BIT: 355 handles[0] = mb[1]; 356 handles[1] = mb[2]; 357 handles[2] = mb[3]; 358 handle_cnt = 3; 359 mb[0] = MBA_SCSI_COMPLETION; 360 break; 361 case MBA_CMPLT_4_16BIT: 362 handles[0] = mb[1]; 363 handles[1] = mb[2]; 364 handles[2] = mb[3]; 365 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 366 handle_cnt = 4; 367 mb[0] = MBA_SCSI_COMPLETION; 368 break; 369 case MBA_CMPLT_5_16BIT: 370 handles[0] = mb[1]; 371 handles[1] = mb[2]; 372 handles[2] = mb[3]; 373 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 374 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 375 handle_cnt = 5; 376 mb[0] = MBA_SCSI_COMPLETION; 377 break; 378 case MBA_CMPLT_2_32BIT: 379 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 380 handles[1] = le32_to_cpu( 381 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | 382 RD_MAILBOX_REG(ha, reg, 6)); 383 handle_cnt = 2; 384 mb[0] = MBA_SCSI_COMPLETION; 385 break; 386 default: 387 break; 388 } 389 skip_rio: 390 switch (mb[0]) { 391 case MBA_SCSI_COMPLETION: /* Fast Post */ 392 if (!vha->flags.online) 393 break; 394 395 for (cnt = 0; cnt < handle_cnt; cnt++) 396 qla2x00_process_completed_request(vha, rsp->req, 397 handles[cnt]); 398 break; 399 400 case MBA_RESET: /* Reset */ 401 ql_dbg(ql_dbg_async, vha, 0x5002, 402 "Asynchronous RESET.\n"); 403 404 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 405 break; 406 407 case MBA_SYSTEM_ERR: /* System Error */ 408 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(®24->mailbox7) : 0; 409 ql_log(ql_log_warn, vha, 0x5003, 410 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 411 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 412 413 ha->isp_ops->fw_dump(vha, 1); 414 415 if (IS_FWI2_CAPABLE(ha)) { 416 if (mb[1] == 0 && mb[2] == 0) { 417 ql_log(ql_log_fatal, vha, 0x5004, 418 "Unrecoverable Hardware Error: adapter " 419 "marked OFFLINE!\n"); 420 vha->flags.online = 0; 421 } else { 422 /* Check to see if MPI timeout occurred */ 423 if ((mbx & MBX_3) && (ha->flags.port0)) 424 set_bit(MPI_RESET_NEEDED, 425 &vha->dpc_flags); 426 427 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 428 } 429 } else if (mb[1] == 0) { 430 ql_log(ql_log_fatal, vha, 0x5005, 431 "Unrecoverable Hardware Error: adapter marked " 432 "OFFLINE!\n"); 433 vha->flags.online = 0; 434 } else 435 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 436 break; 437 438 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 439 ql_log(ql_log_warn, vha, 0x5006, 440 "ISP Request Transfer Error (%x).\n", mb[1]); 441 442 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 443 break; 444 445 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 446 ql_log(ql_log_warn, vha, 0x5007, 447 "ISP Response Transfer Error.\n"); 448 449 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 450 break; 451 452 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 453 ql_dbg(ql_dbg_async, vha, 0x5008, 454 "Asynchronous WAKEUP_THRES.\n"); 455 break; 456 457 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 458 ql_dbg(ql_dbg_async, vha, 0x5009, 459 "LIP occurred (%x).\n", mb[1]); 460 461 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 462 atomic_set(&vha->loop_state, LOOP_DOWN); 463 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 464 qla2x00_mark_all_devices_lost(vha, 1); 465 } 466 467 if (vha->vp_idx) { 468 atomic_set(&vha->vp_state, VP_FAILED); 469 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 470 } 471 472 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 473 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 474 475 vha->flags.management_server_logged_in = 0; 476 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 477 break; 478 479 case MBA_LOOP_UP: /* Loop Up Event */ 480 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 481 link_speed = link_speeds[0]; 482 ha->link_data_rate = PORT_SPEED_1GB; 483 } else { 484 link_speed = link_speeds[LS_UNKNOWN]; 485 if (mb[1] < 5) 486 link_speed = link_speeds[mb[1]]; 487 else if (mb[1] == 0x13) 488 link_speed = link_speeds[5]; 489 ha->link_data_rate = mb[1]; 490 } 491 492 ql_dbg(ql_dbg_async, vha, 0x500a, 493 "LOOP UP detected (%s Gbps).\n", link_speed); 494 495 vha->flags.management_server_logged_in = 0; 496 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 497 break; 498 499 case MBA_LOOP_DOWN: /* Loop Down Event */ 500 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(®24->mailbox4) : 0; 501 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(®82->mailbox_out[4]) : mbx; 502 ql_dbg(ql_dbg_async, vha, 0x500b, 503 "LOOP DOWN detected (%x %x %x %x).\n", 504 mb[1], mb[2], mb[3], mbx); 505 506 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 507 atomic_set(&vha->loop_state, LOOP_DOWN); 508 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 509 vha->device_flags |= DFLG_NO_CABLE; 510 qla2x00_mark_all_devices_lost(vha, 1); 511 } 512 513 if (vha->vp_idx) { 514 atomic_set(&vha->vp_state, VP_FAILED); 515 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 516 } 517 518 vha->flags.management_server_logged_in = 0; 519 ha->link_data_rate = PORT_SPEED_UNKNOWN; 520 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 521 break; 522 523 case MBA_LIP_RESET: /* LIP reset occurred */ 524 ql_dbg(ql_dbg_async, vha, 0x500c, 525 "LIP reset occurred (%x).\n", mb[1]); 526 527 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 528 atomic_set(&vha->loop_state, LOOP_DOWN); 529 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 530 qla2x00_mark_all_devices_lost(vha, 1); 531 } 532 533 if (vha->vp_idx) { 534 atomic_set(&vha->vp_state, VP_FAILED); 535 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 536 } 537 538 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 539 540 ha->operating_mode = LOOP; 541 vha->flags.management_server_logged_in = 0; 542 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 543 break; 544 545 /* case MBA_DCBX_COMPLETE: */ 546 case MBA_POINT_TO_POINT: /* Point-to-Point */ 547 if (IS_QLA2100(ha)) 548 break; 549 550 if (IS_QLA8XXX_TYPE(ha)) { 551 ql_dbg(ql_dbg_async, vha, 0x500d, 552 "DCBX Completed -- %04x %04x %04x.\n", 553 mb[1], mb[2], mb[3]); 554 if (ha->notify_dcbx_comp) 555 complete(&ha->dcbx_comp); 556 557 } else 558 ql_dbg(ql_dbg_async, vha, 0x500e, 559 "Asynchronous P2P MODE received.\n"); 560 561 /* 562 * Until there's a transition from loop down to loop up, treat 563 * this as loop down only. 564 */ 565 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 566 atomic_set(&vha->loop_state, LOOP_DOWN); 567 if (!atomic_read(&vha->loop_down_timer)) 568 atomic_set(&vha->loop_down_timer, 569 LOOP_DOWN_TIME); 570 qla2x00_mark_all_devices_lost(vha, 1); 571 } 572 573 if (vha->vp_idx) { 574 atomic_set(&vha->vp_state, VP_FAILED); 575 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 576 } 577 578 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 579 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 580 581 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 582 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 583 584 ha->flags.gpsc_supported = 1; 585 vha->flags.management_server_logged_in = 0; 586 break; 587 588 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 589 if (IS_QLA2100(ha)) 590 break; 591 592 ql_dbg(ql_dbg_async, vha, 0x500f, 593 "Configuration change detected: value=%x.\n", mb[1]); 594 595 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 596 atomic_set(&vha->loop_state, LOOP_DOWN); 597 if (!atomic_read(&vha->loop_down_timer)) 598 atomic_set(&vha->loop_down_timer, 599 LOOP_DOWN_TIME); 600 qla2x00_mark_all_devices_lost(vha, 1); 601 } 602 603 if (vha->vp_idx) { 604 atomic_set(&vha->vp_state, VP_FAILED); 605 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 606 } 607 608 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 609 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 610 break; 611 612 case MBA_PORT_UPDATE: /* Port database update */ 613 /* 614 * Handle only global and vn-port update events 615 * 616 * Relevant inputs: 617 * mb[1] = N_Port handle of changed port 618 * OR 0xffff for global event 619 * mb[2] = New login state 620 * 7 = Port logged out 621 * mb[3] = LSB is vp_idx, 0xff = all vps 622 * 623 * Skip processing if: 624 * Event is global, vp_idx is NOT all vps, 625 * vp_idx does not match 626 * Event is not global, vp_idx does not match 627 */ 628 if (IS_QLA2XXX_MIDTYPE(ha) && 629 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 630 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 631 break; 632 633 /* Global event -- port logout or port unavailable. */ 634 if (mb[1] == 0xffff && mb[2] == 0x7) { 635 ql_dbg(ql_dbg_async, vha, 0x5010, 636 "Port unavailable %04x %04x %04x.\n", 637 mb[1], mb[2], mb[3]); 638 639 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 640 atomic_set(&vha->loop_state, LOOP_DOWN); 641 atomic_set(&vha->loop_down_timer, 642 LOOP_DOWN_TIME); 643 vha->device_flags |= DFLG_NO_CABLE; 644 qla2x00_mark_all_devices_lost(vha, 1); 645 } 646 647 if (vha->vp_idx) { 648 atomic_set(&vha->vp_state, VP_FAILED); 649 fc_vport_set_state(vha->fc_vport, 650 FC_VPORT_FAILED); 651 qla2x00_mark_all_devices_lost(vha, 1); 652 } 653 654 vha->flags.management_server_logged_in = 0; 655 ha->link_data_rate = PORT_SPEED_UNKNOWN; 656 break; 657 } 658 659 /* 660 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 661 * event etc. earlier indicating loop is down) then process 662 * it. Otherwise ignore it and Wait for RSCN to come in. 663 */ 664 atomic_set(&vha->loop_down_timer, 0); 665 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 666 atomic_read(&vha->loop_state) != LOOP_DEAD) { 667 ql_dbg(ql_dbg_async, vha, 0x5011, 668 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 669 mb[1], mb[2], mb[3]); 670 break; 671 } 672 673 ql_dbg(ql_dbg_async, vha, 0x5012, 674 "Port database changed %04x %04x %04x.\n", 675 mb[1], mb[2], mb[3]); 676 677 /* 678 * Mark all devices as missing so we will login again. 679 */ 680 atomic_set(&vha->loop_state, LOOP_UP); 681 682 qla2x00_mark_all_devices_lost(vha, 1); 683 684 vha->flags.rscn_queue_overflow = 1; 685 686 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 687 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 688 break; 689 690 case MBA_RSCN_UPDATE: /* State Change Registration */ 691 /* Check if the Vport has issued a SCR */ 692 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 693 break; 694 /* Only handle SCNs for our Vport index. */ 695 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 696 break; 697 698 ql_dbg(ql_dbg_async, vha, 0x5013, 699 "RSCN database changed -- %04x %04x %04x.\n", 700 mb[1], mb[2], mb[3]); 701 702 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 703 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 704 | vha->d_id.b.al_pa; 705 if (rscn_entry == host_pid) { 706 ql_dbg(ql_dbg_async, vha, 0x5014, 707 "Ignoring RSCN update to local host " 708 "port ID (%06x).\n", host_pid); 709 break; 710 } 711 712 /* Ignore reserved bits from RSCN-payload. */ 713 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 714 rscn_queue_index = vha->rscn_in_ptr + 1; 715 if (rscn_queue_index == MAX_RSCN_COUNT) 716 rscn_queue_index = 0; 717 if (rscn_queue_index != vha->rscn_out_ptr) { 718 vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry; 719 vha->rscn_in_ptr = rscn_queue_index; 720 } else { 721 vha->flags.rscn_queue_overflow = 1; 722 } 723 724 atomic_set(&vha->loop_down_timer, 0); 725 vha->flags.management_server_logged_in = 0; 726 727 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 728 set_bit(RSCN_UPDATE, &vha->dpc_flags); 729 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 730 break; 731 732 /* case MBA_RIO_RESPONSE: */ 733 case MBA_ZIO_RESPONSE: 734 ql_dbg(ql_dbg_async, vha, 0x5015, 735 "[R|Z]IO update completion.\n"); 736 737 if (IS_FWI2_CAPABLE(ha)) 738 qla24xx_process_response_queue(vha, rsp); 739 else 740 qla2x00_process_response_queue(rsp); 741 break; 742 743 case MBA_DISCARD_RND_FRAME: 744 ql_dbg(ql_dbg_async, vha, 0x5016, 745 "Discard RND Frame -- %04x %04x %04x.\n", 746 mb[1], mb[2], mb[3]); 747 break; 748 749 case MBA_TRACE_NOTIFICATION: 750 ql_dbg(ql_dbg_async, vha, 0x5017, 751 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 752 break; 753 754 case MBA_ISP84XX_ALERT: 755 ql_dbg(ql_dbg_async, vha, 0x5018, 756 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 757 mb[1], mb[2], mb[3]); 758 759 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 760 switch (mb[1]) { 761 case A84_PANIC_RECOVERY: 762 ql_log(ql_log_info, vha, 0x5019, 763 "Alert 84XX: panic recovery %04x %04x.\n", 764 mb[2], mb[3]); 765 break; 766 case A84_OP_LOGIN_COMPLETE: 767 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 768 ql_log(ql_log_info, vha, 0x501a, 769 "Alert 84XX: firmware version %x.\n", 770 ha->cs84xx->op_fw_version); 771 break; 772 case A84_DIAG_LOGIN_COMPLETE: 773 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 774 ql_log(ql_log_info, vha, 0x501b, 775 "Alert 84XX: diagnostic firmware version %x.\n", 776 ha->cs84xx->diag_fw_version); 777 break; 778 case A84_GOLD_LOGIN_COMPLETE: 779 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 780 ha->cs84xx->fw_update = 1; 781 ql_log(ql_log_info, vha, 0x501c, 782 "Alert 84XX: gold firmware version %x.\n", 783 ha->cs84xx->gold_fw_version); 784 break; 785 default: 786 ql_log(ql_log_warn, vha, 0x501d, 787 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 788 mb[1], mb[2], mb[3]); 789 } 790 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 791 break; 792 case MBA_DCBX_START: 793 ql_dbg(ql_dbg_async, vha, 0x501e, 794 "DCBX Started -- %04x %04x %04x.\n", 795 mb[1], mb[2], mb[3]); 796 break; 797 case MBA_DCBX_PARAM_UPDATE: 798 ql_dbg(ql_dbg_async, vha, 0x501f, 799 "DCBX Parameters Updated -- %04x %04x %04x.\n", 800 mb[1], mb[2], mb[3]); 801 break; 802 case MBA_FCF_CONF_ERR: 803 ql_dbg(ql_dbg_async, vha, 0x5020, 804 "FCF Configuration Error -- %04x %04x %04x.\n", 805 mb[1], mb[2], mb[3]); 806 break; 807 case MBA_IDC_COMPLETE: 808 case MBA_IDC_NOTIFY: 809 case MBA_IDC_TIME_EXT: 810 qla81xx_idc_event(vha, mb[0], mb[1]); 811 break; 812 } 813 814 if (!vha->vp_idx && ha->num_vhosts) 815 qla2x00_alert_all_vps(rsp, mb); 816 } 817 818 /** 819 * qla2x00_process_completed_request() - Process a Fast Post response. 820 * @ha: SCSI driver HA context 821 * @index: SRB index 822 */ 823 static void 824 qla2x00_process_completed_request(struct scsi_qla_host *vha, 825 struct req_que *req, uint32_t index) 826 { 827 srb_t *sp; 828 struct qla_hw_data *ha = vha->hw; 829 830 /* Validate handle. */ 831 if (index >= MAX_OUTSTANDING_COMMANDS) { 832 ql_log(ql_log_warn, vha, 0x3014, 833 "Invalid SCSI command index (%x).\n", index); 834 835 if (IS_QLA82XX(ha)) 836 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 837 else 838 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 839 return; 840 } 841 842 sp = req->outstanding_cmds[index]; 843 if (sp) { 844 /* Free outstanding command slot. */ 845 req->outstanding_cmds[index] = NULL; 846 847 /* Save ISP completion status */ 848 sp->cmd->result = DID_OK << 16; 849 qla2x00_sp_compl(ha, sp); 850 } else { 851 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 852 853 if (IS_QLA82XX(ha)) 854 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 855 else 856 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 857 } 858 } 859 860 static srb_t * 861 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 862 struct req_que *req, void *iocb) 863 { 864 struct qla_hw_data *ha = vha->hw; 865 sts_entry_t *pkt = iocb; 866 srb_t *sp = NULL; 867 uint16_t index; 868 869 index = LSW(pkt->handle); 870 if (index >= MAX_OUTSTANDING_COMMANDS) { 871 ql_log(ql_log_warn, vha, 0x5031, 872 "Invalid command index (%x).\n", index); 873 if (IS_QLA82XX(ha)) 874 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 875 else 876 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 877 goto done; 878 } 879 sp = req->outstanding_cmds[index]; 880 if (!sp) { 881 ql_log(ql_log_warn, vha, 0x5032, 882 "Invalid completion handle (%x) -- timed-out.\n", index); 883 return sp; 884 } 885 if (sp->handle != index) { 886 ql_log(ql_log_warn, vha, 0x5033, 887 "SRB handle (%x) mismatch %x.\n", sp->handle, index); 888 return NULL; 889 } 890 891 req->outstanding_cmds[index] = NULL; 892 893 done: 894 return sp; 895 } 896 897 static void 898 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 899 struct mbx_entry *mbx) 900 { 901 const char func[] = "MBX-IOCB"; 902 const char *type; 903 fc_port_t *fcport; 904 srb_t *sp; 905 struct srb_iocb *lio; 906 struct srb_ctx *ctx; 907 uint16_t *data; 908 uint16_t status; 909 910 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 911 if (!sp) 912 return; 913 914 ctx = sp->ctx; 915 lio = ctx->u.iocb_cmd; 916 type = ctx->name; 917 fcport = sp->fcport; 918 data = lio->u.logio.data; 919 920 data[0] = MBS_COMMAND_ERROR; 921 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 922 QLA_LOGIO_LOGIN_RETRIED : 0; 923 if (mbx->entry_status) { 924 ql_dbg(ql_dbg_async, vha, 0x5043, 925 "Async-%s error entry - hdl=%x portid=%02x%02x%02x " 926 "entry-status=%x status=%x state-flag=%x " 927 "status-flags=%x.\n", type, sp->handle, 928 fcport->d_id.b.domain, fcport->d_id.b.area, 929 fcport->d_id.b.al_pa, mbx->entry_status, 930 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 931 le16_to_cpu(mbx->status_flags)); 932 933 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, 934 (uint8_t *)mbx, sizeof(*mbx)); 935 936 goto logio_done; 937 } 938 939 status = le16_to_cpu(mbx->status); 940 if (status == 0x30 && ctx->type == SRB_LOGIN_CMD && 941 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 942 status = 0; 943 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 944 ql_dbg(ql_dbg_async, vha, 0x5045, 945 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", 946 type, sp->handle, fcport->d_id.b.domain, 947 fcport->d_id.b.area, fcport->d_id.b.al_pa, 948 le16_to_cpu(mbx->mb1)); 949 950 data[0] = MBS_COMMAND_COMPLETE; 951 if (ctx->type == SRB_LOGIN_CMD) { 952 fcport->port_type = FCT_TARGET; 953 if (le16_to_cpu(mbx->mb1) & BIT_0) 954 fcport->port_type = FCT_INITIATOR; 955 else if (le16_to_cpu(mbx->mb1) & BIT_1) 956 fcport->flags |= FCF_FCP2_DEVICE; 957 } 958 goto logio_done; 959 } 960 961 data[0] = le16_to_cpu(mbx->mb0); 962 switch (data[0]) { 963 case MBS_PORT_ID_USED: 964 data[1] = le16_to_cpu(mbx->mb1); 965 break; 966 case MBS_LOOP_ID_USED: 967 break; 968 default: 969 data[0] = MBS_COMMAND_ERROR; 970 break; 971 } 972 973 ql_log(ql_log_warn, vha, 0x5046, 974 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " 975 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, 976 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 977 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 978 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 979 le16_to_cpu(mbx->mb7)); 980 981 logio_done: 982 lio->done(sp); 983 } 984 985 static void 986 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 987 sts_entry_t *pkt, int iocb_type) 988 { 989 const char func[] = "CT_IOCB"; 990 const char *type; 991 struct qla_hw_data *ha = vha->hw; 992 srb_t *sp; 993 struct srb_ctx *sp_bsg; 994 struct fc_bsg_job *bsg_job; 995 uint16_t comp_status; 996 997 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 998 if (!sp) 999 return; 1000 1001 sp_bsg = sp->ctx; 1002 bsg_job = sp_bsg->u.bsg_job; 1003 1004 type = NULL; 1005 switch (sp_bsg->type) { 1006 case SRB_CT_CMD: 1007 type = "ct pass-through"; 1008 break; 1009 default: 1010 ql_log(ql_log_warn, vha, 0x5047, 1011 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type); 1012 return; 1013 } 1014 1015 comp_status = le16_to_cpu(pkt->comp_status); 1016 1017 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1018 * fc payload to the caller 1019 */ 1020 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1021 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1022 1023 if (comp_status != CS_COMPLETE) { 1024 if (comp_status == CS_DATA_UNDERRUN) { 1025 bsg_job->reply->result = DID_OK << 16; 1026 bsg_job->reply->reply_payload_rcv_len = 1027 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1028 1029 ql_log(ql_log_warn, vha, 0x5048, 1030 "CT pass-through-%s error " 1031 "comp_status-status=0x%x total_byte = 0x%x.\n", 1032 type, comp_status, 1033 bsg_job->reply->reply_payload_rcv_len); 1034 } else { 1035 ql_log(ql_log_warn, vha, 0x5049, 1036 "CT pass-through-%s error " 1037 "comp_status-status=0x%x.\n", type, comp_status); 1038 bsg_job->reply->result = DID_ERROR << 16; 1039 bsg_job->reply->reply_payload_rcv_len = 0; 1040 } 1041 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1042 (uint8_t *)pkt, sizeof(*pkt)); 1043 } else { 1044 bsg_job->reply->result = DID_OK << 16; 1045 bsg_job->reply->reply_payload_rcv_len = 1046 bsg_job->reply_payload.payload_len; 1047 bsg_job->reply_len = 0; 1048 } 1049 1050 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 1051 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1052 1053 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 1054 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1055 1056 if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD) 1057 kfree(sp->fcport); 1058 1059 kfree(sp->ctx); 1060 mempool_free(sp, ha->srb_mempool); 1061 bsg_job->job_done(bsg_job); 1062 } 1063 1064 static void 1065 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1066 struct sts_entry_24xx *pkt, int iocb_type) 1067 { 1068 const char func[] = "ELS_CT_IOCB"; 1069 const char *type; 1070 struct qla_hw_data *ha = vha->hw; 1071 srb_t *sp; 1072 struct srb_ctx *sp_bsg; 1073 struct fc_bsg_job *bsg_job; 1074 uint16_t comp_status; 1075 uint32_t fw_status[3]; 1076 uint8_t* fw_sts_ptr; 1077 1078 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1079 if (!sp) 1080 return; 1081 sp_bsg = sp->ctx; 1082 bsg_job = sp_bsg->u.bsg_job; 1083 1084 type = NULL; 1085 switch (sp_bsg->type) { 1086 case SRB_ELS_CMD_RPT: 1087 case SRB_ELS_CMD_HST: 1088 type = "els"; 1089 break; 1090 case SRB_CT_CMD: 1091 type = "ct pass-through"; 1092 break; 1093 default: 1094 ql_log(ql_log_warn, vha, 0x503e, 1095 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type); 1096 return; 1097 } 1098 1099 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 1100 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1); 1101 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2); 1102 1103 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1104 * fc payload to the caller 1105 */ 1106 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1107 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 1108 1109 if (comp_status != CS_COMPLETE) { 1110 if (comp_status == CS_DATA_UNDERRUN) { 1111 bsg_job->reply->result = DID_OK << 16; 1112 bsg_job->reply->reply_payload_rcv_len = 1113 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); 1114 1115 ql_log(ql_log_info, vha, 0x503f, 1116 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1117 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1118 type, sp->handle, comp_status, fw_status[1], fw_status[2], 1119 le16_to_cpu(((struct els_sts_entry_24xx *) 1120 pkt)->total_byte_count)); 1121 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1122 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1123 } 1124 else { 1125 ql_log(ql_log_info, vha, 0x5040, 1126 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1127 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1128 type, sp->handle, comp_status, 1129 le16_to_cpu(((struct els_sts_entry_24xx *) 1130 pkt)->error_subcode_1), 1131 le16_to_cpu(((struct els_sts_entry_24xx *) 1132 pkt)->error_subcode_2)); 1133 bsg_job->reply->result = DID_ERROR << 16; 1134 bsg_job->reply->reply_payload_rcv_len = 0; 1135 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1136 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1137 } 1138 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056, 1139 (uint8_t *)pkt, sizeof(*pkt)); 1140 } 1141 else { 1142 bsg_job->reply->result = DID_OK << 16; 1143 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 1144 bsg_job->reply_len = 0; 1145 } 1146 1147 dma_unmap_sg(&ha->pdev->dev, 1148 bsg_job->request_payload.sg_list, 1149 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1150 dma_unmap_sg(&ha->pdev->dev, 1151 bsg_job->reply_payload.sg_list, 1152 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1153 if ((sp_bsg->type == SRB_ELS_CMD_HST) || 1154 (sp_bsg->type == SRB_CT_CMD)) 1155 kfree(sp->fcport); 1156 kfree(sp->ctx); 1157 mempool_free(sp, ha->srb_mempool); 1158 bsg_job->job_done(bsg_job); 1159 } 1160 1161 static void 1162 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 1163 struct logio_entry_24xx *logio) 1164 { 1165 const char func[] = "LOGIO-IOCB"; 1166 const char *type; 1167 fc_port_t *fcport; 1168 srb_t *sp; 1169 struct srb_iocb *lio; 1170 struct srb_ctx *ctx; 1171 uint16_t *data; 1172 uint32_t iop[2]; 1173 1174 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 1175 if (!sp) 1176 return; 1177 1178 ctx = sp->ctx; 1179 lio = ctx->u.iocb_cmd; 1180 type = ctx->name; 1181 fcport = sp->fcport; 1182 data = lio->u.logio.data; 1183 1184 data[0] = MBS_COMMAND_ERROR; 1185 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1186 QLA_LOGIO_LOGIN_RETRIED : 0; 1187 if (logio->entry_status) { 1188 ql_log(ql_log_warn, vha, 0x5034, 1189 "Async-%s error entry - hdl=%x" 1190 "portid=%02x%02x%02x entry-status=%x.\n", 1191 type, sp->handle, fcport->d_id.b.domain, 1192 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1193 logio->entry_status); 1194 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, 1195 (uint8_t *)logio, sizeof(*logio)); 1196 1197 goto logio_done; 1198 } 1199 1200 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1201 ql_dbg(ql_dbg_async, vha, 0x5036, 1202 "Async-%s complete - hdl=%x portid=%02x%02x%02x " 1203 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1204 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1205 le32_to_cpu(logio->io_parameter[0])); 1206 1207 data[0] = MBS_COMMAND_COMPLETE; 1208 if (ctx->type != SRB_LOGIN_CMD) 1209 goto logio_done; 1210 1211 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1212 if (iop[0] & BIT_4) { 1213 fcport->port_type = FCT_TARGET; 1214 if (iop[0] & BIT_8) 1215 fcport->flags |= FCF_FCP2_DEVICE; 1216 } else if (iop[0] & BIT_5) 1217 fcport->port_type = FCT_INITIATOR; 1218 1219 if (logio->io_parameter[7] || logio->io_parameter[8]) 1220 fcport->supported_classes |= FC_COS_CLASS2; 1221 if (logio->io_parameter[9] || logio->io_parameter[10]) 1222 fcport->supported_classes |= FC_COS_CLASS3; 1223 1224 goto logio_done; 1225 } 1226 1227 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1228 iop[1] = le32_to_cpu(logio->io_parameter[1]); 1229 switch (iop[0]) { 1230 case LSC_SCODE_PORTID_USED: 1231 data[0] = MBS_PORT_ID_USED; 1232 data[1] = LSW(iop[1]); 1233 break; 1234 case LSC_SCODE_NPORT_USED: 1235 data[0] = MBS_LOOP_ID_USED; 1236 break; 1237 default: 1238 data[0] = MBS_COMMAND_ERROR; 1239 break; 1240 } 1241 1242 ql_dbg(ql_dbg_async, vha, 0x5037, 1243 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " 1244 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1245 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1246 le16_to_cpu(logio->comp_status), 1247 le32_to_cpu(logio->io_parameter[0]), 1248 le32_to_cpu(logio->io_parameter[1])); 1249 1250 logio_done: 1251 lio->done(sp); 1252 } 1253 1254 static void 1255 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1256 struct tsk_mgmt_entry *tsk) 1257 { 1258 const char func[] = "TMF-IOCB"; 1259 const char *type; 1260 fc_port_t *fcport; 1261 srb_t *sp; 1262 struct srb_iocb *iocb; 1263 struct srb_ctx *ctx; 1264 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 1265 int error = 1; 1266 1267 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 1268 if (!sp) 1269 return; 1270 1271 ctx = sp->ctx; 1272 iocb = ctx->u.iocb_cmd; 1273 type = ctx->name; 1274 fcport = sp->fcport; 1275 1276 if (sts->entry_status) { 1277 ql_log(ql_log_warn, vha, 0x5038, 1278 "Async-%s error - hdl=%x entry-status(%x).\n", 1279 type, sp->handle, sts->entry_status); 1280 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1281 ql_log(ql_log_warn, vha, 0x5039, 1282 "Async-%s error - hdl=%x completion status(%x).\n", 1283 type, sp->handle, sts->comp_status); 1284 } else if (!(le16_to_cpu(sts->scsi_status) & 1285 SS_RESPONSE_INFO_LEN_VALID)) { 1286 ql_log(ql_log_warn, vha, 0x503a, 1287 "Async-%s error - hdl=%x no response info(%x).\n", 1288 type, sp->handle, sts->scsi_status); 1289 } else if (le32_to_cpu(sts->rsp_data_len) < 4) { 1290 ql_log(ql_log_warn, vha, 0x503b, 1291 "Async-%s error - hdl=%x not enough response(%d).\n", 1292 type, sp->handle, sts->rsp_data_len); 1293 } else if (sts->data[3]) { 1294 ql_log(ql_log_warn, vha, 0x503c, 1295 "Async-%s error - hdl=%x response(%x).\n", 1296 type, sp->handle, sts->data[3]); 1297 } else { 1298 error = 0; 1299 } 1300 1301 if (error) { 1302 iocb->u.tmf.data = error; 1303 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, 1304 (uint8_t *)sts, sizeof(*sts)); 1305 } 1306 1307 iocb->done(sp); 1308 } 1309 1310 /** 1311 * qla2x00_process_response_queue() - Process response queue entries. 1312 * @ha: SCSI driver HA context 1313 */ 1314 void 1315 qla2x00_process_response_queue(struct rsp_que *rsp) 1316 { 1317 struct scsi_qla_host *vha; 1318 struct qla_hw_data *ha = rsp->hw; 1319 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1320 sts_entry_t *pkt; 1321 uint16_t handle_cnt; 1322 uint16_t cnt; 1323 1324 vha = pci_get_drvdata(ha->pdev); 1325 1326 if (!vha->flags.online) 1327 return; 1328 1329 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 1330 pkt = (sts_entry_t *)rsp->ring_ptr; 1331 1332 rsp->ring_index++; 1333 if (rsp->ring_index == rsp->length) { 1334 rsp->ring_index = 0; 1335 rsp->ring_ptr = rsp->ring; 1336 } else { 1337 rsp->ring_ptr++; 1338 } 1339 1340 if (pkt->entry_status != 0) { 1341 qla2x00_error_entry(vha, rsp, pkt); 1342 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1343 wmb(); 1344 continue; 1345 } 1346 1347 switch (pkt->entry_type) { 1348 case STATUS_TYPE: 1349 qla2x00_status_entry(vha, rsp, pkt); 1350 break; 1351 case STATUS_TYPE_21: 1352 handle_cnt = ((sts21_entry_t *)pkt)->handle_count; 1353 for (cnt = 0; cnt < handle_cnt; cnt++) { 1354 qla2x00_process_completed_request(vha, rsp->req, 1355 ((sts21_entry_t *)pkt)->handle[cnt]); 1356 } 1357 break; 1358 case STATUS_TYPE_22: 1359 handle_cnt = ((sts22_entry_t *)pkt)->handle_count; 1360 for (cnt = 0; cnt < handle_cnt; cnt++) { 1361 qla2x00_process_completed_request(vha, rsp->req, 1362 ((sts22_entry_t *)pkt)->handle[cnt]); 1363 } 1364 break; 1365 case STATUS_CONT_TYPE: 1366 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 1367 break; 1368 case MBX_IOCB_TYPE: 1369 qla2x00_mbx_iocb_entry(vha, rsp->req, 1370 (struct mbx_entry *)pkt); 1371 break; 1372 case CT_IOCB_TYPE: 1373 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 1374 break; 1375 default: 1376 /* Type Not Supported. */ 1377 ql_log(ql_log_warn, vha, 0x504a, 1378 "Received unknown response pkt type %x " 1379 "entry status=%x.\n", 1380 pkt->entry_type, pkt->entry_status); 1381 break; 1382 } 1383 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1384 wmb(); 1385 } 1386 1387 /* Adjust ring index */ 1388 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 1389 } 1390 1391 static inline void 1392 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1393 uint32_t sense_len, struct rsp_que *rsp) 1394 { 1395 struct scsi_qla_host *vha = sp->fcport->vha; 1396 struct scsi_cmnd *cp = sp->cmd; 1397 1398 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1399 sense_len = SCSI_SENSE_BUFFERSIZE; 1400 1401 sp->request_sense_length = sense_len; 1402 sp->request_sense_ptr = cp->sense_buffer; 1403 if (sp->request_sense_length > par_sense_len) 1404 sense_len = par_sense_len; 1405 1406 memcpy(cp->sense_buffer, sense_data, sense_len); 1407 1408 sp->request_sense_ptr += sense_len; 1409 sp->request_sense_length -= sense_len; 1410 if (sp->request_sense_length != 0) 1411 rsp->status_srb = sp; 1412 1413 if (sense_len) { 1414 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 1415 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n", 1416 sp->fcport->vha->host_no, cp->device->id, cp->device->lun, 1417 cp); 1418 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 1419 cp->sense_buffer, sense_len); 1420 } 1421 } 1422 1423 struct scsi_dif_tuple { 1424 __be16 guard; /* Checksum */ 1425 __be16 app_tag; /* APPL identifer */ 1426 __be32 ref_tag; /* Target LBA or indirect LBA */ 1427 }; 1428 1429 /* 1430 * Checks the guard or meta-data for the type of error 1431 * detected by the HBA. In case of errors, we set the 1432 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 1433 * to indicate to the kernel that the HBA detected error. 1434 */ 1435 static inline int 1436 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1437 { 1438 struct scsi_qla_host *vha = sp->fcport->vha; 1439 struct scsi_cmnd *cmd = sp->cmd; 1440 uint8_t *ap = &sts24->data[12]; 1441 uint8_t *ep = &sts24->data[20]; 1442 uint32_t e_ref_tag, a_ref_tag; 1443 uint16_t e_app_tag, a_app_tag; 1444 uint16_t e_guard, a_guard; 1445 1446 /* 1447 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 1448 * would make guard field appear at offset 2 1449 */ 1450 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); 1451 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); 1452 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); 1453 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); 1454 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); 1455 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); 1456 1457 ql_dbg(ql_dbg_io, vha, 0x3023, 1458 "iocb(s) %p Returned STATUS.\n", sts24); 1459 1460 ql_dbg(ql_dbg_io, vha, 0x3024, 1461 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 1462 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 1463 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 1464 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1465 a_app_tag, e_app_tag, a_guard, e_guard); 1466 1467 /* 1468 * Ignore sector if: 1469 * For type 3: ref & app tag is all 'f's 1470 * For type 0,1,2: app tag is all 'f's 1471 */ 1472 if ((a_app_tag == 0xffff) && 1473 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || 1474 (a_ref_tag == 0xffffffff))) { 1475 uint32_t blocks_done, resid; 1476 sector_t lba_s = scsi_get_lba(cmd); 1477 1478 /* 2TB boundary case covered automatically with this */ 1479 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 1480 1481 resid = scsi_bufflen(cmd) - (blocks_done * 1482 cmd->device->sector_size); 1483 1484 scsi_set_resid(cmd, resid); 1485 cmd->result = DID_OK << 16; 1486 1487 /* Update protection tag */ 1488 if (scsi_prot_sg_count(cmd)) { 1489 uint32_t i, j = 0, k = 0, num_ent; 1490 struct scatterlist *sg; 1491 struct sd_dif_tuple *spt; 1492 1493 /* Patch the corresponding protection tags */ 1494 scsi_for_each_prot_sg(cmd, sg, 1495 scsi_prot_sg_count(cmd), i) { 1496 num_ent = sg_dma_len(sg) / 8; 1497 if (k + num_ent < blocks_done) { 1498 k += num_ent; 1499 continue; 1500 } 1501 j = blocks_done - k - 1; 1502 k = blocks_done; 1503 break; 1504 } 1505 1506 if (k != blocks_done) { 1507 ql_log(ql_log_warn, vha, 0x302f, 1508 "unexpected tag values tag:lba=%x:%llx)\n", 1509 e_ref_tag, (unsigned long long)lba_s); 1510 return 1; 1511 } 1512 1513 spt = page_address(sg_page(sg)) + sg->offset; 1514 spt += j; 1515 1516 spt->app_tag = 0xffff; 1517 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 1518 spt->ref_tag = 0xffffffff; 1519 } 1520 1521 return 0; 1522 } 1523 1524 /* check guard */ 1525 if (e_guard != a_guard) { 1526 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1527 0x10, 0x1); 1528 set_driver_byte(cmd, DRIVER_SENSE); 1529 set_host_byte(cmd, DID_ABORT); 1530 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1531 return 1; 1532 } 1533 1534 /* check ref tag */ 1535 if (e_ref_tag != a_ref_tag) { 1536 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1537 0x10, 0x3); 1538 set_driver_byte(cmd, DRIVER_SENSE); 1539 set_host_byte(cmd, DID_ABORT); 1540 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1541 return 1; 1542 } 1543 1544 /* check appl tag */ 1545 if (e_app_tag != a_app_tag) { 1546 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1547 0x10, 0x2); 1548 set_driver_byte(cmd, DRIVER_SENSE); 1549 set_host_byte(cmd, DID_ABORT); 1550 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1551 return 1; 1552 } 1553 1554 return 1; 1555 } 1556 1557 /** 1558 * qla2x00_status_entry() - Process a Status IOCB entry. 1559 * @ha: SCSI driver HA context 1560 * @pkt: Entry pointer 1561 */ 1562 static void 1563 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 1564 { 1565 srb_t *sp; 1566 fc_port_t *fcport; 1567 struct scsi_cmnd *cp; 1568 sts_entry_t *sts; 1569 struct sts_entry_24xx *sts24; 1570 uint16_t comp_status; 1571 uint16_t scsi_status; 1572 uint16_t ox_id; 1573 uint8_t lscsi_status; 1574 int32_t resid; 1575 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 1576 fw_resid_len; 1577 uint8_t *rsp_info, *sense_data; 1578 struct qla_hw_data *ha = vha->hw; 1579 uint32_t handle; 1580 uint16_t que; 1581 struct req_que *req; 1582 int logit = 1; 1583 1584 sts = (sts_entry_t *) pkt; 1585 sts24 = (struct sts_entry_24xx *) pkt; 1586 if (IS_FWI2_CAPABLE(ha)) { 1587 comp_status = le16_to_cpu(sts24->comp_status); 1588 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 1589 } else { 1590 comp_status = le16_to_cpu(sts->comp_status); 1591 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1592 } 1593 handle = (uint32_t) LSW(sts->handle); 1594 que = MSW(sts->handle); 1595 req = ha->req_q_map[que]; 1596 1597 /* Fast path completion. */ 1598 if (comp_status == CS_COMPLETE && scsi_status == 0) { 1599 qla2x00_process_completed_request(vha, req, handle); 1600 1601 return; 1602 } 1603 1604 /* Validate handle. */ 1605 if (handle < MAX_OUTSTANDING_COMMANDS) { 1606 sp = req->outstanding_cmds[handle]; 1607 req->outstanding_cmds[handle] = NULL; 1608 } else 1609 sp = NULL; 1610 1611 if (sp == NULL) { 1612 ql_dbg(ql_dbg_io, vha, 0x3017, 1613 "Invalid status handle (0x%x).\n", sts->handle); 1614 1615 if (IS_QLA82XX(ha)) 1616 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1617 else 1618 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1619 qla2xxx_wake_dpc(vha); 1620 return; 1621 } 1622 cp = sp->cmd; 1623 if (cp == NULL) { 1624 ql_dbg(ql_dbg_io, vha, 0x3018, 1625 "Command already returned (0x%x/%p).\n", 1626 sts->handle, sp); 1627 1628 return; 1629 } 1630 1631 lscsi_status = scsi_status & STATUS_MASK; 1632 1633 fcport = sp->fcport; 1634 1635 ox_id = 0; 1636 sense_len = par_sense_len = rsp_info_len = resid_len = 1637 fw_resid_len = 0; 1638 if (IS_FWI2_CAPABLE(ha)) { 1639 if (scsi_status & SS_SENSE_LEN_VALID) 1640 sense_len = le32_to_cpu(sts24->sense_len); 1641 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 1642 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 1643 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 1644 resid_len = le32_to_cpu(sts24->rsp_residual_count); 1645 if (comp_status == CS_DATA_UNDERRUN) 1646 fw_resid_len = le32_to_cpu(sts24->residual_len); 1647 rsp_info = sts24->data; 1648 sense_data = sts24->data; 1649 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 1650 ox_id = le16_to_cpu(sts24->ox_id); 1651 par_sense_len = sizeof(sts24->data); 1652 } else { 1653 if (scsi_status & SS_SENSE_LEN_VALID) 1654 sense_len = le16_to_cpu(sts->req_sense_length); 1655 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 1656 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 1657 resid_len = le32_to_cpu(sts->residual_length); 1658 rsp_info = sts->rsp_info; 1659 sense_data = sts->req_sense_data; 1660 par_sense_len = sizeof(sts->req_sense_data); 1661 } 1662 1663 /* Check for any FCP transport errors. */ 1664 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 1665 /* Sense data lies beyond any FCP RESPONSE data. */ 1666 if (IS_FWI2_CAPABLE(ha)) { 1667 sense_data += rsp_info_len; 1668 par_sense_len -= rsp_info_len; 1669 } 1670 if (rsp_info_len > 3 && rsp_info[3]) { 1671 ql_dbg(ql_dbg_io, vha, 0x3019, 1672 "FCP I/O protocol failure (0x%x/0x%x).\n", 1673 rsp_info_len, rsp_info[3]); 1674 1675 cp->result = DID_BUS_BUSY << 16; 1676 goto out; 1677 } 1678 } 1679 1680 /* Check for overrun. */ 1681 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 1682 scsi_status & SS_RESIDUAL_OVER) 1683 comp_status = CS_DATA_OVERRUN; 1684 1685 /* 1686 * Based on Host and scsi status generate status code for Linux 1687 */ 1688 switch (comp_status) { 1689 case CS_COMPLETE: 1690 case CS_QUEUE_FULL: 1691 if (scsi_status == 0) { 1692 cp->result = DID_OK << 16; 1693 break; 1694 } 1695 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 1696 resid = resid_len; 1697 scsi_set_resid(cp, resid); 1698 1699 if (!lscsi_status && 1700 ((unsigned)(scsi_bufflen(cp) - resid) < 1701 cp->underflow)) { 1702 ql_dbg(ql_dbg_io, vha, 0x301a, 1703 "Mid-layer underflow " 1704 "detected (0x%x of 0x%x bytes).\n", 1705 resid, scsi_bufflen(cp)); 1706 1707 cp->result = DID_ERROR << 16; 1708 break; 1709 } 1710 } 1711 cp->result = DID_OK << 16 | lscsi_status; 1712 1713 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1714 ql_dbg(ql_dbg_io, vha, 0x301b, 1715 "QUEUE FULL detected.\n"); 1716 break; 1717 } 1718 logit = 0; 1719 if (lscsi_status != SS_CHECK_CONDITION) 1720 break; 1721 1722 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1723 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1724 break; 1725 1726 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 1727 rsp); 1728 break; 1729 1730 case CS_DATA_UNDERRUN: 1731 /* Use F/W calculated residual length. */ 1732 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 1733 scsi_set_resid(cp, resid); 1734 if (scsi_status & SS_RESIDUAL_UNDER) { 1735 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 1736 ql_dbg(ql_dbg_io, vha, 0x301d, 1737 "Dropped frame(s) detected " 1738 "(0x%x of 0x%x bytes).\n", 1739 resid, scsi_bufflen(cp)); 1740 1741 cp->result = DID_ERROR << 16 | lscsi_status; 1742 goto check_scsi_status; 1743 } 1744 1745 if (!lscsi_status && 1746 ((unsigned)(scsi_bufflen(cp) - resid) < 1747 cp->underflow)) { 1748 ql_dbg(ql_dbg_io, vha, 0x301e, 1749 "Mid-layer underflow " 1750 "detected (0x%x of 0x%x bytes).\n", 1751 resid, scsi_bufflen(cp)); 1752 1753 cp->result = DID_ERROR << 16; 1754 break; 1755 } 1756 } else { 1757 ql_dbg(ql_dbg_io, vha, 0x301f, 1758 "Dropped frame(s) detected (0x%x " 1759 "of 0x%x bytes).\n", resid, scsi_bufflen(cp)); 1760 1761 cp->result = DID_ERROR << 16 | lscsi_status; 1762 goto check_scsi_status; 1763 } 1764 1765 cp->result = DID_OK << 16 | lscsi_status; 1766 logit = 0; 1767 1768 check_scsi_status: 1769 /* 1770 * Check to see if SCSI Status is non zero. If so report SCSI 1771 * Status. 1772 */ 1773 if (lscsi_status != 0) { 1774 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1775 ql_dbg(ql_dbg_io, vha, 0x3020, 1776 "QUEUE FULL detected.\n"); 1777 logit = 1; 1778 break; 1779 } 1780 if (lscsi_status != SS_CHECK_CONDITION) 1781 break; 1782 1783 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1784 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1785 break; 1786 1787 qla2x00_handle_sense(sp, sense_data, par_sense_len, 1788 sense_len, rsp); 1789 } 1790 break; 1791 1792 case CS_PORT_LOGGED_OUT: 1793 case CS_PORT_CONFIG_CHG: 1794 case CS_PORT_BUSY: 1795 case CS_INCOMPLETE: 1796 case CS_PORT_UNAVAILABLE: 1797 case CS_TIMEOUT: 1798 case CS_RESET: 1799 1800 /* 1801 * We are going to have the fc class block the rport 1802 * while we try to recover so instruct the mid layer 1803 * to requeue until the class decides how to handle this. 1804 */ 1805 cp->result = DID_TRANSPORT_DISRUPTED << 16; 1806 1807 if (comp_status == CS_TIMEOUT) { 1808 if (IS_FWI2_CAPABLE(ha)) 1809 break; 1810 else if ((le16_to_cpu(sts->status_flags) & 1811 SF_LOGOUT_SENT) == 0) 1812 break; 1813 } 1814 1815 ql_dbg(ql_dbg_io, vha, 0x3021, 1816 "Port down status: port-state=0x%x.\n", 1817 atomic_read(&fcport->state)); 1818 1819 if (atomic_read(&fcport->state) == FCS_ONLINE) 1820 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1821 break; 1822 1823 case CS_ABORTED: 1824 cp->result = DID_RESET << 16; 1825 break; 1826 1827 case CS_DIF_ERROR: 1828 logit = qla2x00_handle_dif_error(sp, sts24); 1829 break; 1830 default: 1831 cp->result = DID_ERROR << 16; 1832 break; 1833 } 1834 1835 out: 1836 if (logit) 1837 ql_dbg(ql_dbg_io, vha, 0x3022, 1838 "FCP command status: 0x%x-0x%x (0x%x) " 1839 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x " 1840 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x " 1841 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", 1842 comp_status, scsi_status, cp->result, vha->host_no, 1843 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 1844 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 1845 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3], 1846 cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7], 1847 cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len, 1848 resid_len, fw_resid_len); 1849 1850 if (rsp->status_srb == NULL) 1851 qla2x00_sp_compl(ha, sp); 1852 } 1853 1854 /** 1855 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 1856 * @ha: SCSI driver HA context 1857 * @pkt: Entry pointer 1858 * 1859 * Extended sense data. 1860 */ 1861 static void 1862 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 1863 { 1864 uint8_t sense_sz = 0; 1865 struct qla_hw_data *ha = rsp->hw; 1866 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 1867 srb_t *sp = rsp->status_srb; 1868 struct scsi_cmnd *cp; 1869 1870 if (sp != NULL && sp->request_sense_length != 0) { 1871 cp = sp->cmd; 1872 if (cp == NULL) { 1873 ql_log(ql_log_warn, vha, 0x3025, 1874 "cmd is NULL: already returned to OS (sp=%p).\n", 1875 sp); 1876 1877 rsp->status_srb = NULL; 1878 return; 1879 } 1880 1881 if (sp->request_sense_length > sizeof(pkt->data)) { 1882 sense_sz = sizeof(pkt->data); 1883 } else { 1884 sense_sz = sp->request_sense_length; 1885 } 1886 1887 /* Move sense data. */ 1888 if (IS_FWI2_CAPABLE(ha)) 1889 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 1890 memcpy(sp->request_sense_ptr, pkt->data, sense_sz); 1891 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 1892 sp->request_sense_ptr, sense_sz); 1893 1894 sp->request_sense_ptr += sense_sz; 1895 sp->request_sense_length -= sense_sz; 1896 1897 /* Place command on done queue. */ 1898 if (sp->request_sense_length == 0) { 1899 rsp->status_srb = NULL; 1900 qla2x00_sp_compl(ha, sp); 1901 } 1902 } 1903 } 1904 1905 static int 1906 qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp) 1907 { 1908 struct qla_hw_data *ha = vha->hw; 1909 struct srb_ctx *ctx; 1910 1911 if (!sp->ctx) 1912 return 1; 1913 1914 ctx = sp->ctx; 1915 1916 if (ctx->type == SRB_LOGIN_CMD || 1917 ctx->type == SRB_LOGOUT_CMD || 1918 ctx->type == SRB_TM_CMD) { 1919 ctx->u.iocb_cmd->done(sp); 1920 return 0; 1921 } else if (ctx->type == SRB_ADISC_CMD) { 1922 ctx->u.iocb_cmd->free(sp); 1923 return 0; 1924 } else { 1925 struct fc_bsg_job *bsg_job; 1926 1927 bsg_job = ctx->u.bsg_job; 1928 if (ctx->type == SRB_ELS_CMD_HST || 1929 ctx->type == SRB_CT_CMD) 1930 kfree(sp->fcport); 1931 1932 bsg_job->reply->reply_data.ctels_reply.status = 1933 FC_CTELS_STATUS_OK; 1934 bsg_job->reply->result = DID_ERROR << 16; 1935 bsg_job->reply->reply_payload_rcv_len = 0; 1936 kfree(sp->ctx); 1937 mempool_free(sp, ha->srb_mempool); 1938 bsg_job->job_done(bsg_job); 1939 return 0; 1940 } 1941 return 1; 1942 } 1943 1944 /** 1945 * qla2x00_error_entry() - Process an error entry. 1946 * @ha: SCSI driver HA context 1947 * @pkt: Entry pointer 1948 */ 1949 static void 1950 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 1951 { 1952 srb_t *sp; 1953 struct qla_hw_data *ha = vha->hw; 1954 const char func[] = "ERROR-IOCB"; 1955 uint16_t que = MSW(pkt->handle); 1956 struct req_que *req = ha->req_q_map[que]; 1957 1958 if (pkt->entry_status & RF_INV_E_ORDER) 1959 ql_dbg(ql_dbg_async, vha, 0x502a, 1960 "Invalid Entry Order.\n"); 1961 else if (pkt->entry_status & RF_INV_E_COUNT) 1962 ql_dbg(ql_dbg_async, vha, 0x502b, 1963 "Invalid Entry Count.\n"); 1964 else if (pkt->entry_status & RF_INV_E_PARAM) 1965 ql_dbg(ql_dbg_async, vha, 0x502c, 1966 "Invalid Entry Parameter.\n"); 1967 else if (pkt->entry_status & RF_INV_E_TYPE) 1968 ql_dbg(ql_dbg_async, vha, 0x502d, 1969 "Invalid Entry Type.\n"); 1970 else if (pkt->entry_status & RF_BUSY) 1971 ql_dbg(ql_dbg_async, vha, 0x502e, 1972 "Busy.\n"); 1973 else 1974 ql_dbg(ql_dbg_async, vha, 0x502f, 1975 "UNKNOWN flag error.\n"); 1976 1977 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1978 if (sp) { 1979 if (qla2x00_free_sp_ctx(vha, sp)) { 1980 if (pkt->entry_status & 1981 (RF_INV_E_ORDER | RF_INV_E_COUNT | 1982 RF_INV_E_PARAM | RF_INV_E_TYPE)) { 1983 sp->cmd->result = DID_ERROR << 16; 1984 } else if (pkt->entry_status & RF_BUSY) { 1985 sp->cmd->result = DID_BUS_BUSY << 16; 1986 } else { 1987 sp->cmd->result = DID_ERROR << 16; 1988 } 1989 qla2x00_sp_compl(ha, sp); 1990 } 1991 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1992 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7 1993 || pkt->entry_type == COMMAND_TYPE_6) { 1994 ql_log(ql_log_warn, vha, 0x5030, 1995 "Error entry - invalid handle.\n"); 1996 1997 if (IS_QLA82XX(ha)) 1998 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1999 else 2000 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2001 qla2xxx_wake_dpc(vha); 2002 } 2003 } 2004 2005 /** 2006 * qla24xx_mbx_completion() - Process mailbox command completions. 2007 * @ha: SCSI driver HA context 2008 * @mb0: Mailbox0 register 2009 */ 2010 static void 2011 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 2012 { 2013 uint16_t cnt; 2014 uint32_t mboxes; 2015 uint16_t __iomem *wptr; 2016 struct qla_hw_data *ha = vha->hw; 2017 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2018 2019 /* Read all mbox registers? */ 2020 mboxes = (1 << ha->mbx_count) - 1; 2021 if (!ha->mcp) 2022 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERRROR.\n"); 2023 else 2024 mboxes = ha->mcp->in_mb; 2025 2026 /* Load return mailbox registers. */ 2027 ha->flags.mbox_int = 1; 2028 ha->mailbox_out[0] = mb0; 2029 mboxes >>= 1; 2030 wptr = (uint16_t __iomem *)®->mailbox1; 2031 2032 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 2033 if (mboxes & BIT_0) 2034 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 2035 2036 mboxes >>= 1; 2037 wptr++; 2038 } 2039 } 2040 2041 /** 2042 * qla24xx_process_response_queue() - Process response queue entries. 2043 * @ha: SCSI driver HA context 2044 */ 2045 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 2046 struct rsp_que *rsp) 2047 { 2048 struct sts_entry_24xx *pkt; 2049 struct qla_hw_data *ha = vha->hw; 2050 2051 if (!vha->flags.online) 2052 return; 2053 2054 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2055 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 2056 2057 rsp->ring_index++; 2058 if (rsp->ring_index == rsp->length) { 2059 rsp->ring_index = 0; 2060 rsp->ring_ptr = rsp->ring; 2061 } else { 2062 rsp->ring_ptr++; 2063 } 2064 2065 if (pkt->entry_status != 0) { 2066 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2067 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2068 wmb(); 2069 continue; 2070 } 2071 2072 switch (pkt->entry_type) { 2073 case STATUS_TYPE: 2074 qla2x00_status_entry(vha, rsp, pkt); 2075 break; 2076 case STATUS_CONT_TYPE: 2077 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2078 break; 2079 case VP_RPT_ID_IOCB_TYPE: 2080 qla24xx_report_id_acquisition(vha, 2081 (struct vp_rpt_id_entry_24xx *)pkt); 2082 break; 2083 case LOGINOUT_PORT_IOCB_TYPE: 2084 qla24xx_logio_entry(vha, rsp->req, 2085 (struct logio_entry_24xx *)pkt); 2086 break; 2087 case TSK_MGMT_IOCB_TYPE: 2088 qla24xx_tm_iocb_entry(vha, rsp->req, 2089 (struct tsk_mgmt_entry *)pkt); 2090 break; 2091 case CT_IOCB_TYPE: 2092 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2093 clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags); 2094 break; 2095 case ELS_IOCB_TYPE: 2096 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2097 break; 2098 case MARKER_TYPE: 2099 /* Do nothing in this case, this check is to prevent it 2100 * from falling into default case 2101 */ 2102 break; 2103 default: 2104 /* Type Not Supported. */ 2105 ql_dbg(ql_dbg_async, vha, 0x5042, 2106 "Received unknown response pkt type %x " 2107 "entry status=%x.\n", 2108 pkt->entry_type, pkt->entry_status); 2109 break; 2110 } 2111 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2112 wmb(); 2113 } 2114 2115 /* Adjust ring index */ 2116 if (IS_QLA82XX(ha)) { 2117 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 2118 WRT_REG_DWORD(®->rsp_q_out[0], rsp->ring_index); 2119 } else 2120 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); 2121 } 2122 2123 static void 2124 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 2125 { 2126 int rval; 2127 uint32_t cnt; 2128 struct qla_hw_data *ha = vha->hw; 2129 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2130 2131 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) 2132 return; 2133 2134 rval = QLA_SUCCESS; 2135 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 2136 RD_REG_DWORD(®->iobase_addr); 2137 WRT_REG_DWORD(®->iobase_window, 0x0001); 2138 for (cnt = 10000; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2139 rval == QLA_SUCCESS; cnt--) { 2140 if (cnt) { 2141 WRT_REG_DWORD(®->iobase_window, 0x0001); 2142 udelay(10); 2143 } else 2144 rval = QLA_FUNCTION_TIMEOUT; 2145 } 2146 if (rval == QLA_SUCCESS) 2147 goto next_test; 2148 2149 WRT_REG_DWORD(®->iobase_window, 0x0003); 2150 for (cnt = 100; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2151 rval == QLA_SUCCESS; cnt--) { 2152 if (cnt) { 2153 WRT_REG_DWORD(®->iobase_window, 0x0003); 2154 udelay(10); 2155 } else 2156 rval = QLA_FUNCTION_TIMEOUT; 2157 } 2158 if (rval != QLA_SUCCESS) 2159 goto done; 2160 2161 next_test: 2162 if (RD_REG_DWORD(®->iobase_c8) & BIT_3) 2163 ql_log(ql_log_info, vha, 0x504c, 2164 "Additional code -- 0x55AA.\n"); 2165 2166 done: 2167 WRT_REG_DWORD(®->iobase_window, 0x0000); 2168 RD_REG_DWORD(®->iobase_window); 2169 } 2170 2171 /** 2172 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 2173 * @irq: 2174 * @dev_id: SCSI driver HA context 2175 * 2176 * Called by system whenever the host adapter generates an interrupt. 2177 * 2178 * Returns handled flag. 2179 */ 2180 irqreturn_t 2181 qla24xx_intr_handler(int irq, void *dev_id) 2182 { 2183 scsi_qla_host_t *vha; 2184 struct qla_hw_data *ha; 2185 struct device_reg_24xx __iomem *reg; 2186 int status; 2187 unsigned long iter; 2188 uint32_t stat; 2189 uint32_t hccr; 2190 uint16_t mb[4]; 2191 struct rsp_que *rsp; 2192 unsigned long flags; 2193 2194 rsp = (struct rsp_que *) dev_id; 2195 if (!rsp) { 2196 printk(KERN_INFO 2197 "%s(): NULL response queue pointer.\n", __func__); 2198 return IRQ_NONE; 2199 } 2200 2201 ha = rsp->hw; 2202 reg = &ha->iobase->isp24; 2203 status = 0; 2204 2205 if (unlikely(pci_channel_offline(ha->pdev))) 2206 return IRQ_HANDLED; 2207 2208 spin_lock_irqsave(&ha->hardware_lock, flags); 2209 vha = pci_get_drvdata(ha->pdev); 2210 for (iter = 50; iter--; ) { 2211 stat = RD_REG_DWORD(®->host_status); 2212 if (stat & HSRX_RISC_PAUSED) { 2213 if (unlikely(pci_channel_offline(ha->pdev))) 2214 break; 2215 2216 hccr = RD_REG_DWORD(®->hccr); 2217 2218 ql_log(ql_log_warn, vha, 0x504b, 2219 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2220 hccr); 2221 2222 qla2xxx_check_risc_status(vha); 2223 2224 ha->isp_ops->fw_dump(vha, 1); 2225 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2226 break; 2227 } else if ((stat & HSRX_RISC_INT) == 0) 2228 break; 2229 2230 switch (stat & 0xff) { 2231 case 0x1: 2232 case 0x2: 2233 case 0x10: 2234 case 0x11: 2235 qla24xx_mbx_completion(vha, MSW(stat)); 2236 status |= MBX_INTERRUPT; 2237 2238 break; 2239 case 0x12: 2240 mb[0] = MSW(stat); 2241 mb[1] = RD_REG_WORD(®->mailbox1); 2242 mb[2] = RD_REG_WORD(®->mailbox2); 2243 mb[3] = RD_REG_WORD(®->mailbox3); 2244 qla2x00_async_event(vha, rsp, mb); 2245 break; 2246 case 0x13: 2247 case 0x14: 2248 qla24xx_process_response_queue(vha, rsp); 2249 break; 2250 default: 2251 ql_dbg(ql_dbg_async, vha, 0x504f, 2252 "Unrecognized interrupt type (%d).\n", stat * 0xff); 2253 break; 2254 } 2255 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2256 RD_REG_DWORD_RELAXED(®->hccr); 2257 } 2258 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2259 2260 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2261 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2262 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2263 complete(&ha->mbx_intr_comp); 2264 } 2265 2266 return IRQ_HANDLED; 2267 } 2268 2269 static irqreturn_t 2270 qla24xx_msix_rsp_q(int irq, void *dev_id) 2271 { 2272 struct qla_hw_data *ha; 2273 struct rsp_que *rsp; 2274 struct device_reg_24xx __iomem *reg; 2275 struct scsi_qla_host *vha; 2276 unsigned long flags; 2277 2278 rsp = (struct rsp_que *) dev_id; 2279 if (!rsp) { 2280 printk(KERN_INFO 2281 "%s(): NULL response queue pointer.\n", __func__); 2282 return IRQ_NONE; 2283 } 2284 ha = rsp->hw; 2285 reg = &ha->iobase->isp24; 2286 2287 spin_lock_irqsave(&ha->hardware_lock, flags); 2288 2289 vha = pci_get_drvdata(ha->pdev); 2290 qla24xx_process_response_queue(vha, rsp); 2291 if (!ha->flags.disable_msix_handshake) { 2292 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2293 RD_REG_DWORD_RELAXED(®->hccr); 2294 } 2295 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2296 2297 return IRQ_HANDLED; 2298 } 2299 2300 static irqreturn_t 2301 qla25xx_msix_rsp_q(int irq, void *dev_id) 2302 { 2303 struct qla_hw_data *ha; 2304 struct rsp_que *rsp; 2305 struct device_reg_24xx __iomem *reg; 2306 unsigned long flags; 2307 2308 rsp = (struct rsp_que *) dev_id; 2309 if (!rsp) { 2310 printk(KERN_INFO 2311 "%s(): NULL response queue pointer.\n", __func__); 2312 return IRQ_NONE; 2313 } 2314 ha = rsp->hw; 2315 2316 /* Clear the interrupt, if enabled, for this response queue */ 2317 if (!ha->flags.disable_msix_handshake) { 2318 reg = &ha->iobase->isp24; 2319 spin_lock_irqsave(&ha->hardware_lock, flags); 2320 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2321 RD_REG_DWORD_RELAXED(®->hccr); 2322 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2323 } 2324 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 2325 2326 return IRQ_HANDLED; 2327 } 2328 2329 static irqreturn_t 2330 qla24xx_msix_default(int irq, void *dev_id) 2331 { 2332 scsi_qla_host_t *vha; 2333 struct qla_hw_data *ha; 2334 struct rsp_que *rsp; 2335 struct device_reg_24xx __iomem *reg; 2336 int status; 2337 uint32_t stat; 2338 uint32_t hccr; 2339 uint16_t mb[4]; 2340 unsigned long flags; 2341 2342 rsp = (struct rsp_que *) dev_id; 2343 if (!rsp) { 2344 printk(KERN_INFO 2345 "%s(): NULL response queue pointer.\n", __func__); 2346 return IRQ_NONE; 2347 } 2348 ha = rsp->hw; 2349 reg = &ha->iobase->isp24; 2350 status = 0; 2351 2352 spin_lock_irqsave(&ha->hardware_lock, flags); 2353 vha = pci_get_drvdata(ha->pdev); 2354 do { 2355 stat = RD_REG_DWORD(®->host_status); 2356 if (stat & HSRX_RISC_PAUSED) { 2357 if (unlikely(pci_channel_offline(ha->pdev))) 2358 break; 2359 2360 hccr = RD_REG_DWORD(®->hccr); 2361 2362 ql_log(ql_log_info, vha, 0x5050, 2363 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2364 hccr); 2365 2366 qla2xxx_check_risc_status(vha); 2367 2368 ha->isp_ops->fw_dump(vha, 1); 2369 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2370 break; 2371 } else if ((stat & HSRX_RISC_INT) == 0) 2372 break; 2373 2374 switch (stat & 0xff) { 2375 case 0x1: 2376 case 0x2: 2377 case 0x10: 2378 case 0x11: 2379 qla24xx_mbx_completion(vha, MSW(stat)); 2380 status |= MBX_INTERRUPT; 2381 2382 break; 2383 case 0x12: 2384 mb[0] = MSW(stat); 2385 mb[1] = RD_REG_WORD(®->mailbox1); 2386 mb[2] = RD_REG_WORD(®->mailbox2); 2387 mb[3] = RD_REG_WORD(®->mailbox3); 2388 qla2x00_async_event(vha, rsp, mb); 2389 break; 2390 case 0x13: 2391 case 0x14: 2392 qla24xx_process_response_queue(vha, rsp); 2393 break; 2394 default: 2395 ql_dbg(ql_dbg_async, vha, 0x5051, 2396 "Unrecognized interrupt type (%d).\n", stat & 0xff); 2397 break; 2398 } 2399 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2400 } while (0); 2401 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2402 2403 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2404 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2405 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2406 complete(&ha->mbx_intr_comp); 2407 } 2408 return IRQ_HANDLED; 2409 } 2410 2411 /* Interrupt handling helpers. */ 2412 2413 struct qla_init_msix_entry { 2414 const char *name; 2415 irq_handler_t handler; 2416 }; 2417 2418 static struct qla_init_msix_entry msix_entries[3] = { 2419 { "qla2xxx (default)", qla24xx_msix_default }, 2420 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 2421 { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, 2422 }; 2423 2424 static struct qla_init_msix_entry qla82xx_msix_entries[2] = { 2425 { "qla2xxx (default)", qla82xx_msix_default }, 2426 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 2427 }; 2428 2429 static void 2430 qla24xx_disable_msix(struct qla_hw_data *ha) 2431 { 2432 int i; 2433 struct qla_msix_entry *qentry; 2434 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2435 2436 for (i = 0; i < ha->msix_count; i++) { 2437 qentry = &ha->msix_entries[i]; 2438 if (qentry->have_irq) 2439 free_irq(qentry->vector, qentry->rsp); 2440 } 2441 pci_disable_msix(ha->pdev); 2442 kfree(ha->msix_entries); 2443 ha->msix_entries = NULL; 2444 ha->flags.msix_enabled = 0; 2445 ql_dbg(ql_dbg_init, vha, 0x0042, 2446 "Disabled the MSI.\n"); 2447 } 2448 2449 static int 2450 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 2451 { 2452 #define MIN_MSIX_COUNT 2 2453 int i, ret; 2454 struct msix_entry *entries; 2455 struct qla_msix_entry *qentry; 2456 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2457 2458 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 2459 GFP_KERNEL); 2460 if (!entries) { 2461 ql_log(ql_log_warn, vha, 0x00bc, 2462 "Failed to allocate memory for msix_entry.\n"); 2463 return -ENOMEM; 2464 } 2465 2466 for (i = 0; i < ha->msix_count; i++) 2467 entries[i].entry = i; 2468 2469 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2470 if (ret) { 2471 if (ret < MIN_MSIX_COUNT) 2472 goto msix_failed; 2473 2474 ql_log(ql_log_warn, vha, 0x00c6, 2475 "MSI-X: Failed to enable support " 2476 "-- %d/%d\n Retry with %d vectors.\n", 2477 ha->msix_count, ret, ret); 2478 ha->msix_count = ret; 2479 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2480 if (ret) { 2481 msix_failed: 2482 ql_log(ql_log_fatal, vha, 0x00c7, 2483 "MSI-X: Failed to enable support, " 2484 "giving up -- %d/%d.\n", 2485 ha->msix_count, ret); 2486 goto msix_out; 2487 } 2488 ha->max_rsp_queues = ha->msix_count - 1; 2489 } 2490 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 2491 ha->msix_count, GFP_KERNEL); 2492 if (!ha->msix_entries) { 2493 ql_log(ql_log_fatal, vha, 0x00c8, 2494 "Failed to allocate memory for ha->msix_entries.\n"); 2495 ret = -ENOMEM; 2496 goto msix_out; 2497 } 2498 ha->flags.msix_enabled = 1; 2499 2500 for (i = 0; i < ha->msix_count; i++) { 2501 qentry = &ha->msix_entries[i]; 2502 qentry->vector = entries[i].vector; 2503 qentry->entry = entries[i].entry; 2504 qentry->have_irq = 0; 2505 qentry->rsp = NULL; 2506 } 2507 2508 /* Enable MSI-X vectors for the base queue */ 2509 for (i = 0; i < 2; i++) { 2510 qentry = &ha->msix_entries[i]; 2511 if (IS_QLA82XX(ha)) { 2512 ret = request_irq(qentry->vector, 2513 qla82xx_msix_entries[i].handler, 2514 0, qla82xx_msix_entries[i].name, rsp); 2515 } else { 2516 ret = request_irq(qentry->vector, 2517 msix_entries[i].handler, 2518 0, msix_entries[i].name, rsp); 2519 } 2520 if (ret) { 2521 ql_log(ql_log_fatal, vha, 0x00cb, 2522 "MSI-X: unable to register handler -- %x/%d.\n", 2523 qentry->vector, ret); 2524 qla24xx_disable_msix(ha); 2525 ha->mqenable = 0; 2526 goto msix_out; 2527 } 2528 qentry->have_irq = 1; 2529 qentry->rsp = rsp; 2530 rsp->msix = qentry; 2531 } 2532 2533 /* Enable MSI-X vector for response queue update for queue 0 */ 2534 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 2535 ha->mqenable = 1; 2536 ql_dbg(ql_dbg_multiq, vha, 0xc005, 2537 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 2538 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 2539 ql_dbg(ql_dbg_init, vha, 0x0055, 2540 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 2541 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 2542 2543 msix_out: 2544 kfree(entries); 2545 return ret; 2546 } 2547 2548 int 2549 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 2550 { 2551 int ret; 2552 device_reg_t __iomem *reg = ha->iobase; 2553 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2554 2555 /* If possible, enable MSI-X. */ 2556 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 2557 !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha)) 2558 goto skip_msi; 2559 2560 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 2561 (ha->pdev->subsystem_device == 0x7040 || 2562 ha->pdev->subsystem_device == 0x7041 || 2563 ha->pdev->subsystem_device == 0x1705)) { 2564 ql_log(ql_log_warn, vha, 0x0034, 2565 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 2566 ha->pdev->subsystem_vendor, 2567 ha->pdev->subsystem_device); 2568 goto skip_msi; 2569 } 2570 2571 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 2572 ql_log(ql_log_warn, vha, 0x0035, 2573 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 2574 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 2575 goto skip_msix; 2576 } 2577 2578 ret = qla24xx_enable_msix(ha, rsp); 2579 if (!ret) { 2580 ql_dbg(ql_dbg_init, vha, 0x0036, 2581 "MSI-X: Enabled (0x%X, 0x%X).\n", 2582 ha->chip_revision, ha->fw_attributes); 2583 goto clear_risc_ints; 2584 } 2585 ql_log(ql_log_info, vha, 0x0037, 2586 "MSI-X Falling back-to MSI mode -%d.\n", ret); 2587 skip_msix: 2588 2589 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2590 !IS_QLA8001(ha)) 2591 goto skip_msi; 2592 2593 ret = pci_enable_msi(ha->pdev); 2594 if (!ret) { 2595 ql_dbg(ql_dbg_init, vha, 0x0038, 2596 "MSI: Enabled.\n"); 2597 ha->flags.msi_enabled = 1; 2598 } else 2599 ql_log(ql_log_warn, vha, 0x0039, 2600 "MSI-X; Falling back-to INTa mode -- %d.\n", ret); 2601 skip_msi: 2602 2603 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 2604 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 2605 QLA2XXX_DRIVER_NAME, rsp); 2606 if (ret) { 2607 ql_log(ql_log_warn, vha, 0x003a, 2608 "Failed to reserve interrupt %d already in use.\n", 2609 ha->pdev->irq); 2610 goto fail; 2611 } 2612 2613 clear_risc_ints: 2614 2615 /* 2616 * FIXME: Noted that 8014s were being dropped during NK testing. 2617 * Timing deltas during MSI-X/INTa transitions? 2618 */ 2619 if (IS_QLA81XX(ha) || IS_QLA82XX(ha)) 2620 goto fail; 2621 spin_lock_irq(&ha->hardware_lock); 2622 if (IS_FWI2_CAPABLE(ha)) { 2623 WRT_REG_DWORD(®->isp24.hccr, HCCRX_CLR_HOST_INT); 2624 WRT_REG_DWORD(®->isp24.hccr, HCCRX_CLR_RISC_INT); 2625 } else { 2626 WRT_REG_WORD(®->isp.semaphore, 0); 2627 WRT_REG_WORD(®->isp.hccr, HCCR_CLR_RISC_INT); 2628 WRT_REG_WORD(®->isp.hccr, HCCR_CLR_HOST_INT); 2629 } 2630 spin_unlock_irq(&ha->hardware_lock); 2631 2632 fail: 2633 return ret; 2634 } 2635 2636 void 2637 qla2x00_free_irqs(scsi_qla_host_t *vha) 2638 { 2639 struct qla_hw_data *ha = vha->hw; 2640 struct rsp_que *rsp = ha->rsp_q_map[0]; 2641 2642 if (ha->flags.msix_enabled) 2643 qla24xx_disable_msix(ha); 2644 else if (ha->flags.msi_enabled) { 2645 free_irq(ha->pdev->irq, rsp); 2646 pci_disable_msi(ha->pdev); 2647 } else 2648 free_irq(ha->pdev->irq, rsp); 2649 } 2650 2651 2652 int qla25xx_request_irq(struct rsp_que *rsp) 2653 { 2654 struct qla_hw_data *ha = rsp->hw; 2655 struct qla_init_msix_entry *intr = &msix_entries[2]; 2656 struct qla_msix_entry *msix = rsp->msix; 2657 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2658 int ret; 2659 2660 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); 2661 if (ret) { 2662 ql_log(ql_log_fatal, vha, 0x00e6, 2663 "MSI-X: Unable to register handler -- %x/%d.\n", 2664 msix->vector, ret); 2665 return ret; 2666 } 2667 msix->have_irq = 1; 2668 msix->rsp = rsp; 2669 return ret; 2670 } 2671