1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2011 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 9 #include <linux/delay.h> 10 #include <linux/slab.h> 11 #include <scsi/scsi_tcq.h> 12 #include <scsi/scsi_bsg_fc.h> 13 #include <scsi/scsi_eh.h> 14 15 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 16 static void qla2x00_process_completed_request(struct scsi_qla_host *, 17 struct req_que *, uint32_t); 18 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 19 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 20 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 21 sts_entry_t *); 22 23 /** 24 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 25 * @irq: 26 * @dev_id: SCSI driver HA context 27 * 28 * Called by system whenever the host adapter generates an interrupt. 29 * 30 * Returns handled flag. 31 */ 32 irqreturn_t 33 qla2100_intr_handler(int irq, void *dev_id) 34 { 35 scsi_qla_host_t *vha; 36 struct qla_hw_data *ha; 37 struct device_reg_2xxx __iomem *reg; 38 int status; 39 unsigned long iter; 40 uint16_t hccr; 41 uint16_t mb[4]; 42 struct rsp_que *rsp; 43 unsigned long flags; 44 45 rsp = (struct rsp_que *) dev_id; 46 if (!rsp) { 47 printk(KERN_INFO 48 "%s(): NULL response queue pointer.\n", __func__); 49 return (IRQ_NONE); 50 } 51 52 ha = rsp->hw; 53 reg = &ha->iobase->isp; 54 status = 0; 55 56 spin_lock_irqsave(&ha->hardware_lock, flags); 57 vha = pci_get_drvdata(ha->pdev); 58 for (iter = 50; iter--; ) { 59 hccr = RD_REG_WORD(®->hccr); 60 if (hccr & HCCR_RISC_PAUSE) { 61 if (pci_channel_offline(ha->pdev)) 62 break; 63 64 /* 65 * Issue a "HARD" reset in order for the RISC interrupt 66 * bit to be cleared. Schedule a big hammer to get 67 * out of the RISC PAUSED state. 68 */ 69 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 70 RD_REG_WORD(®->hccr); 71 72 ha->isp_ops->fw_dump(vha, 1); 73 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 74 break; 75 } else if ((RD_REG_WORD(®->istatus) & ISR_RISC_INT) == 0) 76 break; 77 78 if (RD_REG_WORD(®->semaphore) & BIT_0) { 79 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 80 RD_REG_WORD(®->hccr); 81 82 /* Get mailbox data. */ 83 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 84 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 85 qla2x00_mbx_completion(vha, mb[0]); 86 status |= MBX_INTERRUPT; 87 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 88 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 89 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 90 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 91 qla2x00_async_event(vha, rsp, mb); 92 } else { 93 /*EMPTY*/ 94 ql_dbg(ql_dbg_async, vha, 0x5025, 95 "Unrecognized interrupt type (%d).\n", 96 mb[0]); 97 } 98 /* Release mailbox registers. */ 99 WRT_REG_WORD(®->semaphore, 0); 100 RD_REG_WORD(®->semaphore); 101 } else { 102 qla2x00_process_response_queue(rsp); 103 104 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 105 RD_REG_WORD(®->hccr); 106 } 107 } 108 spin_unlock_irqrestore(&ha->hardware_lock, flags); 109 110 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 111 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 112 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 113 complete(&ha->mbx_intr_comp); 114 } 115 116 return (IRQ_HANDLED); 117 } 118 119 /** 120 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 121 * @irq: 122 * @dev_id: SCSI driver HA context 123 * 124 * Called by system whenever the host adapter generates an interrupt. 125 * 126 * Returns handled flag. 127 */ 128 irqreturn_t 129 qla2300_intr_handler(int irq, void *dev_id) 130 { 131 scsi_qla_host_t *vha; 132 struct device_reg_2xxx __iomem *reg; 133 int status; 134 unsigned long iter; 135 uint32_t stat; 136 uint16_t hccr; 137 uint16_t mb[4]; 138 struct rsp_que *rsp; 139 struct qla_hw_data *ha; 140 unsigned long flags; 141 142 rsp = (struct rsp_que *) dev_id; 143 if (!rsp) { 144 printk(KERN_INFO 145 "%s(): NULL response queue pointer.\n", __func__); 146 return (IRQ_NONE); 147 } 148 149 ha = rsp->hw; 150 reg = &ha->iobase->isp; 151 status = 0; 152 153 spin_lock_irqsave(&ha->hardware_lock, flags); 154 vha = pci_get_drvdata(ha->pdev); 155 for (iter = 50; iter--; ) { 156 stat = RD_REG_DWORD(®->u.isp2300.host_status); 157 if (stat & HSR_RISC_PAUSED) { 158 if (unlikely(pci_channel_offline(ha->pdev))) 159 break; 160 161 hccr = RD_REG_WORD(®->hccr); 162 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 163 ql_log(ql_log_warn, vha, 0x5026, 164 "Parity error -- HCCR=%x, Dumping " 165 "firmware.\n", hccr); 166 else 167 ql_log(ql_log_warn, vha, 0x5027, 168 "RISC paused -- HCCR=%x, Dumping " 169 "firmware.\n", hccr); 170 171 /* 172 * Issue a "HARD" reset in order for the RISC 173 * interrupt bit to be cleared. Schedule a big 174 * hammer to get out of the RISC PAUSED state. 175 */ 176 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 177 RD_REG_WORD(®->hccr); 178 179 ha->isp_ops->fw_dump(vha, 1); 180 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 181 break; 182 } else if ((stat & HSR_RISC_INT) == 0) 183 break; 184 185 switch (stat & 0xff) { 186 case 0x1: 187 case 0x2: 188 case 0x10: 189 case 0x11: 190 qla2x00_mbx_completion(vha, MSW(stat)); 191 status |= MBX_INTERRUPT; 192 193 /* Release mailbox registers. */ 194 WRT_REG_WORD(®->semaphore, 0); 195 break; 196 case 0x12: 197 mb[0] = MSW(stat); 198 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 199 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 200 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 201 qla2x00_async_event(vha, rsp, mb); 202 break; 203 case 0x13: 204 qla2x00_process_response_queue(rsp); 205 break; 206 case 0x15: 207 mb[0] = MBA_CMPLT_1_16BIT; 208 mb[1] = MSW(stat); 209 qla2x00_async_event(vha, rsp, mb); 210 break; 211 case 0x16: 212 mb[0] = MBA_SCSI_COMPLETION; 213 mb[1] = MSW(stat); 214 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 215 qla2x00_async_event(vha, rsp, mb); 216 break; 217 default: 218 ql_dbg(ql_dbg_async, vha, 0x5028, 219 "Unrecognized interrupt type (%d).\n", stat & 0xff); 220 break; 221 } 222 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 223 RD_REG_WORD_RELAXED(®->hccr); 224 } 225 spin_unlock_irqrestore(&ha->hardware_lock, flags); 226 227 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 228 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 229 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 230 complete(&ha->mbx_intr_comp); 231 } 232 233 return (IRQ_HANDLED); 234 } 235 236 /** 237 * qla2x00_mbx_completion() - Process mailbox command completions. 238 * @ha: SCSI driver HA context 239 * @mb0: Mailbox0 register 240 */ 241 static void 242 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 243 { 244 uint16_t cnt; 245 uint16_t __iomem *wptr; 246 struct qla_hw_data *ha = vha->hw; 247 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 248 249 /* Load return mailbox registers. */ 250 ha->flags.mbox_int = 1; 251 ha->mailbox_out[0] = mb0; 252 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); 253 254 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 255 if (IS_QLA2200(ha) && cnt == 8) 256 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); 257 if (cnt == 4 || cnt == 5) 258 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 259 else 260 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 261 262 wptr++; 263 } 264 265 if (ha->mcp) { 266 ql_dbg(ql_dbg_async, vha, 0x5000, 267 "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]); 268 } else { 269 ql_dbg(ql_dbg_async, vha, 0x5001, 270 "MBX pointer ERROR.\n"); 271 } 272 } 273 274 static void 275 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) 276 { 277 static char *event[] = 278 { "Complete", "Request Notification", "Time Extension" }; 279 int rval; 280 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; 281 uint16_t __iomem *wptr; 282 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; 283 284 /* Seed data -- mailbox1 -> mailbox7. */ 285 wptr = (uint16_t __iomem *)®24->mailbox1; 286 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 287 mb[cnt] = RD_REG_WORD(wptr); 288 289 ql_dbg(ql_dbg_async, vha, 0x5021, 290 "Inter-Driver Commucation %s -- " 291 "%04x %04x %04x %04x %04x %04x %04x.\n", 292 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 293 mb[4], mb[5], mb[6]); 294 295 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 296 timeout = (descr >> 8) & 0xf; 297 if (aen != MBA_IDC_NOTIFY || !timeout) 298 return; 299 300 ql_dbg(ql_dbg_async, vha, 0x5022, 301 "Inter-Driver Commucation %s -- ACK timeout=%d.\n", 302 vha->host_no, event[aen & 0xff], timeout); 303 304 rval = qla2x00_post_idc_ack_work(vha, mb); 305 if (rval != QLA_SUCCESS) 306 ql_log(ql_log_warn, vha, 0x5023, 307 "IDC failed to post ACK.\n"); 308 } 309 310 /** 311 * qla2x00_async_event() - Process aynchronous events. 312 * @ha: SCSI driver HA context 313 * @mb: Mailbox registers (0 - 3) 314 */ 315 void 316 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 317 { 318 #define LS_UNKNOWN 2 319 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; 320 char *link_speed; 321 uint16_t handle_cnt; 322 uint16_t cnt, mbx; 323 uint32_t handles[5]; 324 struct qla_hw_data *ha = vha->hw; 325 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 326 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 327 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 328 uint32_t rscn_entry, host_pid; 329 uint8_t rscn_queue_index; 330 unsigned long flags; 331 332 /* Setup to process RIO completion. */ 333 handle_cnt = 0; 334 if (IS_QLA8XXX_TYPE(ha)) 335 goto skip_rio; 336 switch (mb[0]) { 337 case MBA_SCSI_COMPLETION: 338 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 339 handle_cnt = 1; 340 break; 341 case MBA_CMPLT_1_16BIT: 342 handles[0] = mb[1]; 343 handle_cnt = 1; 344 mb[0] = MBA_SCSI_COMPLETION; 345 break; 346 case MBA_CMPLT_2_16BIT: 347 handles[0] = mb[1]; 348 handles[1] = mb[2]; 349 handle_cnt = 2; 350 mb[0] = MBA_SCSI_COMPLETION; 351 break; 352 case MBA_CMPLT_3_16BIT: 353 handles[0] = mb[1]; 354 handles[1] = mb[2]; 355 handles[2] = mb[3]; 356 handle_cnt = 3; 357 mb[0] = MBA_SCSI_COMPLETION; 358 break; 359 case MBA_CMPLT_4_16BIT: 360 handles[0] = mb[1]; 361 handles[1] = mb[2]; 362 handles[2] = mb[3]; 363 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 364 handle_cnt = 4; 365 mb[0] = MBA_SCSI_COMPLETION; 366 break; 367 case MBA_CMPLT_5_16BIT: 368 handles[0] = mb[1]; 369 handles[1] = mb[2]; 370 handles[2] = mb[3]; 371 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 372 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 373 handle_cnt = 5; 374 mb[0] = MBA_SCSI_COMPLETION; 375 break; 376 case MBA_CMPLT_2_32BIT: 377 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 378 handles[1] = le32_to_cpu( 379 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | 380 RD_MAILBOX_REG(ha, reg, 6)); 381 handle_cnt = 2; 382 mb[0] = MBA_SCSI_COMPLETION; 383 break; 384 default: 385 break; 386 } 387 skip_rio: 388 switch (mb[0]) { 389 case MBA_SCSI_COMPLETION: /* Fast Post */ 390 if (!vha->flags.online) 391 break; 392 393 for (cnt = 0; cnt < handle_cnt; cnt++) 394 qla2x00_process_completed_request(vha, rsp->req, 395 handles[cnt]); 396 break; 397 398 case MBA_RESET: /* Reset */ 399 ql_dbg(ql_dbg_async, vha, 0x5002, 400 "Asynchronous RESET.\n"); 401 402 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 403 break; 404 405 case MBA_SYSTEM_ERR: /* System Error */ 406 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(®24->mailbox7) : 0; 407 ql_log(ql_log_warn, vha, 0x5003, 408 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 409 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 410 411 ha->isp_ops->fw_dump(vha, 1); 412 413 if (IS_FWI2_CAPABLE(ha)) { 414 if (mb[1] == 0 && mb[2] == 0) { 415 ql_log(ql_log_fatal, vha, 0x5004, 416 "Unrecoverable Hardware Error: adapter " 417 "marked OFFLINE!\n"); 418 vha->flags.online = 0; 419 } else { 420 /* Check to see if MPI timeout occurred */ 421 if ((mbx & MBX_3) && (ha->flags.port0)) 422 set_bit(MPI_RESET_NEEDED, 423 &vha->dpc_flags); 424 425 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 426 } 427 } else if (mb[1] == 0) { 428 ql_log(ql_log_fatal, vha, 0x5005, 429 "Unrecoverable Hardware Error: adapter marked " 430 "OFFLINE!\n"); 431 vha->flags.online = 0; 432 } else 433 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 434 break; 435 436 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 437 ql_log(ql_log_warn, vha, 0x5006, 438 "ISP Request Transfer Error (%x).\n", mb[1]); 439 440 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 441 break; 442 443 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 444 ql_log(ql_log_warn, vha, 0x5007, 445 "ISP Response Transfer Error.\n"); 446 447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 448 break; 449 450 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 451 ql_dbg(ql_dbg_async, vha, 0x5008, 452 "Asynchronous WAKEUP_THRES.\n"); 453 break; 454 455 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 456 ql_log(ql_log_info, vha, 0x5009, 457 "LIP occurred (%x).\n", mb[1]); 458 459 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 460 atomic_set(&vha->loop_state, LOOP_DOWN); 461 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 462 qla2x00_mark_all_devices_lost(vha, 1); 463 } 464 465 if (vha->vp_idx) { 466 atomic_set(&vha->vp_state, VP_FAILED); 467 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 468 } 469 470 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 471 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 472 473 vha->flags.management_server_logged_in = 0; 474 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 475 break; 476 477 case MBA_LOOP_UP: /* Loop Up Event */ 478 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 479 link_speed = link_speeds[0]; 480 ha->link_data_rate = PORT_SPEED_1GB; 481 } else { 482 link_speed = link_speeds[LS_UNKNOWN]; 483 if (mb[1] < 5) 484 link_speed = link_speeds[mb[1]]; 485 else if (mb[1] == 0x13) 486 link_speed = link_speeds[5]; 487 ha->link_data_rate = mb[1]; 488 } 489 490 ql_log(ql_log_info, vha, 0x500a, 491 "LOOP UP detected (%s Gbps).\n", link_speed); 492 493 vha->flags.management_server_logged_in = 0; 494 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 495 break; 496 497 case MBA_LOOP_DOWN: /* Loop Down Event */ 498 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(®24->mailbox4) : 0; 499 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(®82->mailbox_out[4]) : mbx; 500 ql_log(ql_log_info, vha, 0x500b, 501 "LOOP DOWN detected (%x %x %x %x).\n", 502 mb[1], mb[2], mb[3], mbx); 503 504 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 505 atomic_set(&vha->loop_state, LOOP_DOWN); 506 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 507 vha->device_flags |= DFLG_NO_CABLE; 508 qla2x00_mark_all_devices_lost(vha, 1); 509 } 510 511 if (vha->vp_idx) { 512 atomic_set(&vha->vp_state, VP_FAILED); 513 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 514 } 515 516 vha->flags.management_server_logged_in = 0; 517 ha->link_data_rate = PORT_SPEED_UNKNOWN; 518 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 519 break; 520 521 case MBA_LIP_RESET: /* LIP reset occurred */ 522 ql_log(ql_log_info, vha, 0x500c, 523 "LIP reset occurred (%x).\n", mb[1]); 524 525 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 526 atomic_set(&vha->loop_state, LOOP_DOWN); 527 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 528 qla2x00_mark_all_devices_lost(vha, 1); 529 } 530 531 if (vha->vp_idx) { 532 atomic_set(&vha->vp_state, VP_FAILED); 533 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 534 } 535 536 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 537 538 ha->operating_mode = LOOP; 539 vha->flags.management_server_logged_in = 0; 540 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 541 break; 542 543 /* case MBA_DCBX_COMPLETE: */ 544 case MBA_POINT_TO_POINT: /* Point-to-Point */ 545 if (IS_QLA2100(ha)) 546 break; 547 548 if (IS_QLA8XXX_TYPE(ha)) { 549 ql_dbg(ql_dbg_async, vha, 0x500d, 550 "DCBX Completed -- %04x %04x %04x.\n", 551 mb[1], mb[2], mb[3]); 552 if (ha->notify_dcbx_comp) 553 complete(&ha->dcbx_comp); 554 555 } else 556 ql_dbg(ql_dbg_async, vha, 0x500e, 557 "Asynchronous P2P MODE received.\n"); 558 559 /* 560 * Until there's a transition from loop down to loop up, treat 561 * this as loop down only. 562 */ 563 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 564 atomic_set(&vha->loop_state, LOOP_DOWN); 565 if (!atomic_read(&vha->loop_down_timer)) 566 atomic_set(&vha->loop_down_timer, 567 LOOP_DOWN_TIME); 568 qla2x00_mark_all_devices_lost(vha, 1); 569 } 570 571 if (vha->vp_idx) { 572 atomic_set(&vha->vp_state, VP_FAILED); 573 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 574 } 575 576 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 577 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 578 579 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 580 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 581 582 ha->flags.gpsc_supported = 1; 583 vha->flags.management_server_logged_in = 0; 584 break; 585 586 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 587 if (IS_QLA2100(ha)) 588 break; 589 590 ql_log(ql_log_info, vha, 0x500f, 591 "Configuration change detected: value=%x.\n", mb[1]); 592 593 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 594 atomic_set(&vha->loop_state, LOOP_DOWN); 595 if (!atomic_read(&vha->loop_down_timer)) 596 atomic_set(&vha->loop_down_timer, 597 LOOP_DOWN_TIME); 598 qla2x00_mark_all_devices_lost(vha, 1); 599 } 600 601 if (vha->vp_idx) { 602 atomic_set(&vha->vp_state, VP_FAILED); 603 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 604 } 605 606 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 607 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 608 break; 609 610 case MBA_PORT_UPDATE: /* Port database update */ 611 /* 612 * Handle only global and vn-port update events 613 * 614 * Relevant inputs: 615 * mb[1] = N_Port handle of changed port 616 * OR 0xffff for global event 617 * mb[2] = New login state 618 * 7 = Port logged out 619 * mb[3] = LSB is vp_idx, 0xff = all vps 620 * 621 * Skip processing if: 622 * Event is global, vp_idx is NOT all vps, 623 * vp_idx does not match 624 * Event is not global, vp_idx does not match 625 */ 626 if (IS_QLA2XXX_MIDTYPE(ha) && 627 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 628 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 629 break; 630 631 /* Global event -- port logout or port unavailable. */ 632 if (mb[1] == 0xffff && mb[2] == 0x7) { 633 ql_dbg(ql_dbg_async, vha, 0x5010, 634 "Port unavailable %04x %04x %04x.\n", 635 mb[1], mb[2], mb[3]); 636 637 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 638 atomic_set(&vha->loop_state, LOOP_DOWN); 639 atomic_set(&vha->loop_down_timer, 640 LOOP_DOWN_TIME); 641 vha->device_flags |= DFLG_NO_CABLE; 642 qla2x00_mark_all_devices_lost(vha, 1); 643 } 644 645 if (vha->vp_idx) { 646 atomic_set(&vha->vp_state, VP_FAILED); 647 fc_vport_set_state(vha->fc_vport, 648 FC_VPORT_FAILED); 649 qla2x00_mark_all_devices_lost(vha, 1); 650 } 651 652 vha->flags.management_server_logged_in = 0; 653 ha->link_data_rate = PORT_SPEED_UNKNOWN; 654 break; 655 } 656 657 /* 658 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 659 * event etc. earlier indicating loop is down) then process 660 * it. Otherwise ignore it and Wait for RSCN to come in. 661 */ 662 atomic_set(&vha->loop_down_timer, 0); 663 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 664 atomic_read(&vha->loop_state) != LOOP_DEAD) { 665 ql_dbg(ql_dbg_async, vha, 0x5011, 666 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 667 mb[1], mb[2], mb[3]); 668 break; 669 } 670 671 ql_dbg(ql_dbg_async, vha, 0x5012, 672 "Port database changed %04x %04x %04x.\n", 673 mb[1], mb[2], mb[3]); 674 675 /* 676 * Mark all devices as missing so we will login again. 677 */ 678 atomic_set(&vha->loop_state, LOOP_UP); 679 680 qla2x00_mark_all_devices_lost(vha, 1); 681 682 vha->flags.rscn_queue_overflow = 1; 683 684 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 685 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 686 break; 687 688 case MBA_RSCN_UPDATE: /* State Change Registration */ 689 /* Check if the Vport has issued a SCR */ 690 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 691 break; 692 /* Only handle SCNs for our Vport index. */ 693 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 694 break; 695 696 ql_dbg(ql_dbg_async, vha, 0x5013, 697 "RSCN database changed -- %04x %04x %04x.\n", 698 mb[1], mb[2], mb[3]); 699 700 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 701 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 702 | vha->d_id.b.al_pa; 703 if (rscn_entry == host_pid) { 704 ql_dbg(ql_dbg_async, vha, 0x5014, 705 "Ignoring RSCN update to local host " 706 "port ID (%06x).\n", host_pid); 707 break; 708 } 709 710 /* Ignore reserved bits from RSCN-payload. */ 711 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 712 rscn_queue_index = vha->rscn_in_ptr + 1; 713 if (rscn_queue_index == MAX_RSCN_COUNT) 714 rscn_queue_index = 0; 715 if (rscn_queue_index != vha->rscn_out_ptr) { 716 vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry; 717 vha->rscn_in_ptr = rscn_queue_index; 718 } else { 719 vha->flags.rscn_queue_overflow = 1; 720 } 721 722 atomic_set(&vha->loop_down_timer, 0); 723 vha->flags.management_server_logged_in = 0; 724 725 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 726 set_bit(RSCN_UPDATE, &vha->dpc_flags); 727 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 728 break; 729 730 /* case MBA_RIO_RESPONSE: */ 731 case MBA_ZIO_RESPONSE: 732 ql_dbg(ql_dbg_async, vha, 0x5015, 733 "[R|Z]IO update completion.\n"); 734 735 if (IS_FWI2_CAPABLE(ha)) 736 qla24xx_process_response_queue(vha, rsp); 737 else 738 qla2x00_process_response_queue(rsp); 739 break; 740 741 case MBA_DISCARD_RND_FRAME: 742 ql_dbg(ql_dbg_async, vha, 0x5016, 743 "Discard RND Frame -- %04x %04x %04x.\n", 744 mb[1], mb[2], mb[3]); 745 break; 746 747 case MBA_TRACE_NOTIFICATION: 748 ql_dbg(ql_dbg_async, vha, 0x5017, 749 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 750 break; 751 752 case MBA_ISP84XX_ALERT: 753 ql_dbg(ql_dbg_async, vha, 0x5018, 754 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 755 mb[1], mb[2], mb[3]); 756 757 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 758 switch (mb[1]) { 759 case A84_PANIC_RECOVERY: 760 ql_log(ql_log_info, vha, 0x5019, 761 "Alert 84XX: panic recovery %04x %04x.\n", 762 mb[2], mb[3]); 763 break; 764 case A84_OP_LOGIN_COMPLETE: 765 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 766 ql_log(ql_log_info, vha, 0x501a, 767 "Alert 84XX: firmware version %x.\n", 768 ha->cs84xx->op_fw_version); 769 break; 770 case A84_DIAG_LOGIN_COMPLETE: 771 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 772 ql_log(ql_log_info, vha, 0x501b, 773 "Alert 84XX: diagnostic firmware version %x.\n", 774 ha->cs84xx->diag_fw_version); 775 break; 776 case A84_GOLD_LOGIN_COMPLETE: 777 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 778 ha->cs84xx->fw_update = 1; 779 ql_log(ql_log_info, vha, 0x501c, 780 "Alert 84XX: gold firmware version %x.\n", 781 ha->cs84xx->gold_fw_version); 782 break; 783 default: 784 ql_log(ql_log_warn, vha, 0x501d, 785 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 786 mb[1], mb[2], mb[3]); 787 } 788 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 789 break; 790 case MBA_DCBX_START: 791 ql_dbg(ql_dbg_async, vha, 0x501e, 792 "DCBX Started -- %04x %04x %04x.\n", 793 mb[1], mb[2], mb[3]); 794 break; 795 case MBA_DCBX_PARAM_UPDATE: 796 ql_dbg(ql_dbg_async, vha, 0x501f, 797 "DCBX Parameters Updated -- %04x %04x %04x.\n", 798 mb[1], mb[2], mb[3]); 799 break; 800 case MBA_FCF_CONF_ERR: 801 ql_dbg(ql_dbg_async, vha, 0x5020, 802 "FCF Configuration Error -- %04x %04x %04x.\n", 803 mb[1], mb[2], mb[3]); 804 break; 805 case MBA_IDC_COMPLETE: 806 case MBA_IDC_NOTIFY: 807 case MBA_IDC_TIME_EXT: 808 qla81xx_idc_event(vha, mb[0], mb[1]); 809 break; 810 } 811 812 if (!vha->vp_idx && ha->num_vhosts) 813 qla2x00_alert_all_vps(rsp, mb); 814 } 815 816 /** 817 * qla2x00_process_completed_request() - Process a Fast Post response. 818 * @ha: SCSI driver HA context 819 * @index: SRB index 820 */ 821 static void 822 qla2x00_process_completed_request(struct scsi_qla_host *vha, 823 struct req_que *req, uint32_t index) 824 { 825 srb_t *sp; 826 struct qla_hw_data *ha = vha->hw; 827 828 /* Validate handle. */ 829 if (index >= MAX_OUTSTANDING_COMMANDS) { 830 ql_log(ql_log_warn, vha, 0x3014, 831 "Invalid SCSI command index (%x).\n", index); 832 833 if (IS_QLA82XX(ha)) 834 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 835 else 836 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 837 return; 838 } 839 840 sp = req->outstanding_cmds[index]; 841 if (sp) { 842 /* Free outstanding command slot. */ 843 req->outstanding_cmds[index] = NULL; 844 845 /* Save ISP completion status */ 846 sp->cmd->result = DID_OK << 16; 847 qla2x00_sp_compl(ha, sp); 848 } else { 849 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 850 851 if (IS_QLA82XX(ha)) 852 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 853 else 854 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 855 } 856 } 857 858 static srb_t * 859 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 860 struct req_que *req, void *iocb) 861 { 862 struct qla_hw_data *ha = vha->hw; 863 sts_entry_t *pkt = iocb; 864 srb_t *sp = NULL; 865 uint16_t index; 866 867 index = LSW(pkt->handle); 868 if (index >= MAX_OUTSTANDING_COMMANDS) { 869 ql_log(ql_log_warn, vha, 0x5031, 870 "Invalid command index (%x).\n", index); 871 if (IS_QLA82XX(ha)) 872 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 873 else 874 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 875 goto done; 876 } 877 sp = req->outstanding_cmds[index]; 878 if (!sp) { 879 ql_log(ql_log_warn, vha, 0x5032, 880 "Invalid completion handle (%x) -- timed-out.\n", index); 881 return sp; 882 } 883 if (sp->handle != index) { 884 ql_log(ql_log_warn, vha, 0x5033, 885 "SRB handle (%x) mismatch %x.\n", sp->handle, index); 886 return NULL; 887 } 888 889 req->outstanding_cmds[index] = NULL; 890 891 done: 892 return sp; 893 } 894 895 static void 896 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 897 struct mbx_entry *mbx) 898 { 899 const char func[] = "MBX-IOCB"; 900 const char *type; 901 fc_port_t *fcport; 902 srb_t *sp; 903 struct srb_iocb *lio; 904 struct srb_ctx *ctx; 905 uint16_t *data; 906 uint16_t status; 907 908 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 909 if (!sp) 910 return; 911 912 ctx = sp->ctx; 913 lio = ctx->u.iocb_cmd; 914 type = ctx->name; 915 fcport = sp->fcport; 916 data = lio->u.logio.data; 917 918 data[0] = MBS_COMMAND_ERROR; 919 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 920 QLA_LOGIO_LOGIN_RETRIED : 0; 921 if (mbx->entry_status) { 922 ql_dbg(ql_dbg_async, vha, 0x5043, 923 "Async-%s error entry - portid=%02x%02x%02x " 924 "entry-status=%x status=%x state-flag=%x " 925 "status-flags=%x.\n", 926 type, fcport->d_id.b.domain, fcport->d_id.b.area, 927 fcport->d_id.b.al_pa, mbx->entry_status, 928 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 929 le16_to_cpu(mbx->status_flags)); 930 931 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057, 932 (uint8_t *)mbx, sizeof(*mbx)); 933 934 goto logio_done; 935 } 936 937 status = le16_to_cpu(mbx->status); 938 if (status == 0x30 && ctx->type == SRB_LOGIN_CMD && 939 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 940 status = 0; 941 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 942 ql_dbg(ql_dbg_async, vha, 0x5045, 943 "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n", 944 type, fcport->d_id.b.domain, fcport->d_id.b.area, 945 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)); 946 947 data[0] = MBS_COMMAND_COMPLETE; 948 if (ctx->type == SRB_LOGIN_CMD) { 949 fcport->port_type = FCT_TARGET; 950 if (le16_to_cpu(mbx->mb1) & BIT_0) 951 fcport->port_type = FCT_INITIATOR; 952 else if (le16_to_cpu(mbx->mb1) & BIT_1) 953 fcport->flags |= FCF_FCP2_DEVICE; 954 } 955 goto logio_done; 956 } 957 958 data[0] = le16_to_cpu(mbx->mb0); 959 switch (data[0]) { 960 case MBS_PORT_ID_USED: 961 data[1] = le16_to_cpu(mbx->mb1); 962 break; 963 case MBS_LOOP_ID_USED: 964 break; 965 default: 966 data[0] = MBS_COMMAND_ERROR; 967 break; 968 } 969 970 ql_log(ql_log_warn, vha, 0x5046, 971 "Async-%s failed - portid=%02x%02x%02x status=%x " 972 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", 973 type, fcport->d_id.b.domain, 974 fcport->d_id.b.area, fcport->d_id.b.al_pa, status, 975 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 976 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 977 le16_to_cpu(mbx->mb7)); 978 979 logio_done: 980 lio->done(sp); 981 } 982 983 static void 984 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 985 sts_entry_t *pkt, int iocb_type) 986 { 987 const char func[] = "CT_IOCB"; 988 const char *type; 989 struct qla_hw_data *ha = vha->hw; 990 srb_t *sp; 991 struct srb_ctx *sp_bsg; 992 struct fc_bsg_job *bsg_job; 993 uint16_t comp_status; 994 995 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 996 if (!sp) 997 return; 998 999 sp_bsg = sp->ctx; 1000 bsg_job = sp_bsg->u.bsg_job; 1001 1002 type = NULL; 1003 switch (sp_bsg->type) { 1004 case SRB_CT_CMD: 1005 type = "ct pass-through"; 1006 break; 1007 default: 1008 ql_log(ql_log_warn, vha, 0x5047, 1009 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type); 1010 return; 1011 } 1012 1013 comp_status = le16_to_cpu(pkt->comp_status); 1014 1015 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1016 * fc payload to the caller 1017 */ 1018 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1019 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1020 1021 if (comp_status != CS_COMPLETE) { 1022 if (comp_status == CS_DATA_UNDERRUN) { 1023 bsg_job->reply->result = DID_OK << 16; 1024 bsg_job->reply->reply_payload_rcv_len = 1025 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1026 1027 ql_log(ql_log_warn, vha, 0x5048, 1028 "CT pass-through-%s error " 1029 "comp_status-status=0x%x total_byte = 0x%x.\n", 1030 type, comp_status, 1031 bsg_job->reply->reply_payload_rcv_len); 1032 } else { 1033 ql_log(ql_log_warn, vha, 0x5049, 1034 "CT pass-through-%s error " 1035 "comp_status-status=0x%x.\n", type, comp_status); 1036 bsg_job->reply->result = DID_ERROR << 16; 1037 bsg_job->reply->reply_payload_rcv_len = 0; 1038 } 1039 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058, 1040 (uint8_t *)pkt, sizeof(*pkt)); 1041 } else { 1042 bsg_job->reply->result = DID_OK << 16; 1043 bsg_job->reply->reply_payload_rcv_len = 1044 bsg_job->reply_payload.payload_len; 1045 bsg_job->reply_len = 0; 1046 } 1047 1048 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 1049 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1050 1051 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 1052 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1053 1054 if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD) 1055 kfree(sp->fcport); 1056 1057 kfree(sp->ctx); 1058 mempool_free(sp, ha->srb_mempool); 1059 bsg_job->job_done(bsg_job); 1060 } 1061 1062 static void 1063 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1064 struct sts_entry_24xx *pkt, int iocb_type) 1065 { 1066 const char func[] = "ELS_CT_IOCB"; 1067 const char *type; 1068 struct qla_hw_data *ha = vha->hw; 1069 srb_t *sp; 1070 struct srb_ctx *sp_bsg; 1071 struct fc_bsg_job *bsg_job; 1072 uint16_t comp_status; 1073 uint32_t fw_status[3]; 1074 uint8_t* fw_sts_ptr; 1075 1076 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1077 if (!sp) 1078 return; 1079 sp_bsg = sp->ctx; 1080 bsg_job = sp_bsg->u.bsg_job; 1081 1082 type = NULL; 1083 switch (sp_bsg->type) { 1084 case SRB_ELS_CMD_RPT: 1085 case SRB_ELS_CMD_HST: 1086 type = "els"; 1087 break; 1088 case SRB_CT_CMD: 1089 type = "ct pass-through"; 1090 break; 1091 default: 1092 ql_log(ql_log_warn, vha, 0x503e, 1093 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type); 1094 return; 1095 } 1096 1097 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 1098 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1); 1099 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2); 1100 1101 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1102 * fc payload to the caller 1103 */ 1104 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1105 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 1106 1107 if (comp_status != CS_COMPLETE) { 1108 if (comp_status == CS_DATA_UNDERRUN) { 1109 bsg_job->reply->result = DID_OK << 16; 1110 bsg_job->reply->reply_payload_rcv_len = 1111 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); 1112 1113 ql_log(ql_log_info, vha, 0x503f, 1114 "ELS-CT pass-through-%s error comp_status-status=0x%x " 1115 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1116 type, comp_status, fw_status[1], fw_status[2], 1117 le16_to_cpu(((struct els_sts_entry_24xx *) 1118 pkt)->total_byte_count)); 1119 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1120 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1121 } 1122 else { 1123 ql_log(ql_log_info, vha, 0x5040, 1124 "ELS-CT pass-through-%s error comp_status-status=0x%x " 1125 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1126 type, comp_status, 1127 le16_to_cpu(((struct els_sts_entry_24xx *) 1128 pkt)->error_subcode_1), 1129 le16_to_cpu(((struct els_sts_entry_24xx *) 1130 pkt)->error_subcode_2)); 1131 bsg_job->reply->result = DID_ERROR << 16; 1132 bsg_job->reply->reply_payload_rcv_len = 0; 1133 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1134 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1135 } 1136 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056, 1137 (uint8_t *)pkt, sizeof(*pkt)); 1138 } 1139 else { 1140 bsg_job->reply->result = DID_OK << 16; 1141 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 1142 bsg_job->reply_len = 0; 1143 } 1144 1145 dma_unmap_sg(&ha->pdev->dev, 1146 bsg_job->request_payload.sg_list, 1147 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1148 dma_unmap_sg(&ha->pdev->dev, 1149 bsg_job->reply_payload.sg_list, 1150 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1151 if ((sp_bsg->type == SRB_ELS_CMD_HST) || 1152 (sp_bsg->type == SRB_CT_CMD)) 1153 kfree(sp->fcport); 1154 kfree(sp->ctx); 1155 mempool_free(sp, ha->srb_mempool); 1156 bsg_job->job_done(bsg_job); 1157 } 1158 1159 static void 1160 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 1161 struct logio_entry_24xx *logio) 1162 { 1163 const char func[] = "LOGIO-IOCB"; 1164 const char *type; 1165 fc_port_t *fcport; 1166 srb_t *sp; 1167 struct srb_iocb *lio; 1168 struct srb_ctx *ctx; 1169 uint16_t *data; 1170 uint32_t iop[2]; 1171 1172 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 1173 if (!sp) 1174 return; 1175 1176 ctx = sp->ctx; 1177 lio = ctx->u.iocb_cmd; 1178 type = ctx->name; 1179 fcport = sp->fcport; 1180 data = lio->u.logio.data; 1181 1182 data[0] = MBS_COMMAND_ERROR; 1183 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1184 QLA_LOGIO_LOGIN_RETRIED : 0; 1185 if (logio->entry_status) { 1186 ql_log(ql_log_warn, vha, 0x5034, 1187 "Async-%s error entry - " 1188 "portid=%02x%02x%02x entry-status=%x.\n", 1189 type, fcport->d_id.b.domain, fcport->d_id.b.area, 1190 fcport->d_id.b.al_pa, logio->entry_status); 1191 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059, 1192 (uint8_t *)logio, sizeof(*logio)); 1193 1194 goto logio_done; 1195 } 1196 1197 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1198 ql_dbg(ql_dbg_async, vha, 0x5036, 1199 "Async-%s complete - portid=%02x%02x%02x " 1200 "iop0=%x.\n", 1201 type, fcport->d_id.b.domain, fcport->d_id.b.area, 1202 fcport->d_id.b.al_pa, 1203 le32_to_cpu(logio->io_parameter[0])); 1204 1205 data[0] = MBS_COMMAND_COMPLETE; 1206 if (ctx->type != SRB_LOGIN_CMD) 1207 goto logio_done; 1208 1209 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1210 if (iop[0] & BIT_4) { 1211 fcport->port_type = FCT_TARGET; 1212 if (iop[0] & BIT_8) 1213 fcport->flags |= FCF_FCP2_DEVICE; 1214 } else if (iop[0] & BIT_5) 1215 fcport->port_type = FCT_INITIATOR; 1216 1217 if (logio->io_parameter[7] || logio->io_parameter[8]) 1218 fcport->supported_classes |= FC_COS_CLASS2; 1219 if (logio->io_parameter[9] || logio->io_parameter[10]) 1220 fcport->supported_classes |= FC_COS_CLASS3; 1221 1222 goto logio_done; 1223 } 1224 1225 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1226 iop[1] = le32_to_cpu(logio->io_parameter[1]); 1227 switch (iop[0]) { 1228 case LSC_SCODE_PORTID_USED: 1229 data[0] = MBS_PORT_ID_USED; 1230 data[1] = LSW(iop[1]); 1231 break; 1232 case LSC_SCODE_NPORT_USED: 1233 data[0] = MBS_LOOP_ID_USED; 1234 break; 1235 default: 1236 data[0] = MBS_COMMAND_ERROR; 1237 break; 1238 } 1239 1240 ql_dbg(ql_dbg_async, vha, 0x5037, 1241 "Async-%s failed - portid=%02x%02x%02x comp=%x " 1242 "iop0=%x iop1=%x.\n", 1243 type, fcport->d_id.b.domain, 1244 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1245 le16_to_cpu(logio->comp_status), 1246 le32_to_cpu(logio->io_parameter[0]), 1247 le32_to_cpu(logio->io_parameter[1])); 1248 1249 logio_done: 1250 lio->done(sp); 1251 } 1252 1253 static void 1254 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1255 struct tsk_mgmt_entry *tsk) 1256 { 1257 const char func[] = "TMF-IOCB"; 1258 const char *type; 1259 fc_port_t *fcport; 1260 srb_t *sp; 1261 struct srb_iocb *iocb; 1262 struct srb_ctx *ctx; 1263 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 1264 int error = 1; 1265 1266 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 1267 if (!sp) 1268 return; 1269 1270 ctx = sp->ctx; 1271 iocb = ctx->u.iocb_cmd; 1272 type = ctx->name; 1273 fcport = sp->fcport; 1274 1275 if (sts->entry_status) { 1276 ql_log(ql_log_warn, vha, 0x5038, 1277 "Async-%s error - entry-status(%x).\n", 1278 type, sts->entry_status); 1279 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1280 ql_log(ql_log_warn, vha, 0x5039, 1281 "Async-%s error - completion status(%x).\n", 1282 type, sts->comp_status); 1283 } else if (!(le16_to_cpu(sts->scsi_status) & 1284 SS_RESPONSE_INFO_LEN_VALID)) { 1285 ql_log(ql_log_warn, vha, 0x503a, 1286 "Async-%s error - no response info(%x).\n", 1287 type, sts->scsi_status); 1288 } else if (le32_to_cpu(sts->rsp_data_len) < 4) { 1289 ql_log(ql_log_warn, vha, 0x503b, 1290 "Async-%s error - not enough response(%d).\n", 1291 type, sts->rsp_data_len); 1292 } else if (sts->data[3]) { 1293 ql_log(ql_log_warn, vha, 0x503c, 1294 "Async-%s error - response(%x).\n", 1295 type, sts->data[3]); 1296 } else { 1297 error = 0; 1298 } 1299 1300 if (error) { 1301 iocb->u.tmf.data = error; 1302 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, 1303 (uint8_t *)sts, sizeof(*sts)); 1304 } 1305 1306 iocb->done(sp); 1307 } 1308 1309 /** 1310 * qla2x00_process_response_queue() - Process response queue entries. 1311 * @ha: SCSI driver HA context 1312 */ 1313 void 1314 qla2x00_process_response_queue(struct rsp_que *rsp) 1315 { 1316 struct scsi_qla_host *vha; 1317 struct qla_hw_data *ha = rsp->hw; 1318 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1319 sts_entry_t *pkt; 1320 uint16_t handle_cnt; 1321 uint16_t cnt; 1322 1323 vha = pci_get_drvdata(ha->pdev); 1324 1325 if (!vha->flags.online) 1326 return; 1327 1328 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 1329 pkt = (sts_entry_t *)rsp->ring_ptr; 1330 1331 rsp->ring_index++; 1332 if (rsp->ring_index == rsp->length) { 1333 rsp->ring_index = 0; 1334 rsp->ring_ptr = rsp->ring; 1335 } else { 1336 rsp->ring_ptr++; 1337 } 1338 1339 if (pkt->entry_status != 0) { 1340 ql_log(ql_log_warn, vha, 0x5035, 1341 "Process error entry.\n"); 1342 1343 qla2x00_error_entry(vha, rsp, pkt); 1344 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1345 wmb(); 1346 continue; 1347 } 1348 1349 switch (pkt->entry_type) { 1350 case STATUS_TYPE: 1351 qla2x00_status_entry(vha, rsp, pkt); 1352 break; 1353 case STATUS_TYPE_21: 1354 handle_cnt = ((sts21_entry_t *)pkt)->handle_count; 1355 for (cnt = 0; cnt < handle_cnt; cnt++) { 1356 qla2x00_process_completed_request(vha, rsp->req, 1357 ((sts21_entry_t *)pkt)->handle[cnt]); 1358 } 1359 break; 1360 case STATUS_TYPE_22: 1361 handle_cnt = ((sts22_entry_t *)pkt)->handle_count; 1362 for (cnt = 0; cnt < handle_cnt; cnt++) { 1363 qla2x00_process_completed_request(vha, rsp->req, 1364 ((sts22_entry_t *)pkt)->handle[cnt]); 1365 } 1366 break; 1367 case STATUS_CONT_TYPE: 1368 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 1369 break; 1370 case MBX_IOCB_TYPE: 1371 qla2x00_mbx_iocb_entry(vha, rsp->req, 1372 (struct mbx_entry *)pkt); 1373 break; 1374 case CT_IOCB_TYPE: 1375 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 1376 break; 1377 default: 1378 /* Type Not Supported. */ 1379 ql_log(ql_log_warn, vha, 0x504a, 1380 "Received unknown response pkt type %x " 1381 "entry status=%x.\n", 1382 pkt->entry_type, pkt->entry_status); 1383 break; 1384 } 1385 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1386 wmb(); 1387 } 1388 1389 /* Adjust ring index */ 1390 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 1391 } 1392 1393 static inline void 1394 1395 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1396 uint32_t sense_len, struct rsp_que *rsp) 1397 { 1398 struct scsi_qla_host *vha = sp->fcport->vha; 1399 struct scsi_cmnd *cp = sp->cmd; 1400 1401 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1402 sense_len = SCSI_SENSE_BUFFERSIZE; 1403 1404 sp->request_sense_length = sense_len; 1405 sp->request_sense_ptr = cp->sense_buffer; 1406 if (sp->request_sense_length > par_sense_len) 1407 sense_len = par_sense_len; 1408 1409 memcpy(cp->sense_buffer, sense_data, sense_len); 1410 1411 sp->request_sense_ptr += sense_len; 1412 sp->request_sense_length -= sense_len; 1413 if (sp->request_sense_length != 0) 1414 rsp->status_srb = sp; 1415 1416 ql_dbg(ql_dbg_io, vha, 0x301c, 1417 "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n", 1418 sp->fcport->vha->host_no, cp->device->channel, cp->device->id, 1419 cp->device->lun, cp); 1420 if (sense_len) 1421 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 1422 cp->sense_buffer, sense_len); 1423 } 1424 1425 struct scsi_dif_tuple { 1426 __be16 guard; /* Checksum */ 1427 __be16 app_tag; /* APPL identifer */ 1428 __be32 ref_tag; /* Target LBA or indirect LBA */ 1429 }; 1430 1431 /* 1432 * Checks the guard or meta-data for the type of error 1433 * detected by the HBA. In case of errors, we set the 1434 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 1435 * to indicate to the kernel that the HBA detected error. 1436 */ 1437 static inline int 1438 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1439 { 1440 struct scsi_qla_host *vha = sp->fcport->vha; 1441 struct scsi_cmnd *cmd = sp->cmd; 1442 uint8_t *ap = &sts24->data[12]; 1443 uint8_t *ep = &sts24->data[20]; 1444 uint32_t e_ref_tag, a_ref_tag; 1445 uint16_t e_app_tag, a_app_tag; 1446 uint16_t e_guard, a_guard; 1447 1448 /* 1449 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 1450 * would make guard field appear at offset 2 1451 */ 1452 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); 1453 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); 1454 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); 1455 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); 1456 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); 1457 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); 1458 1459 ql_dbg(ql_dbg_io, vha, 0x3023, 1460 "iocb(s) %p Returned STATUS.\n", sts24); 1461 1462 ql_dbg(ql_dbg_io, vha, 0x3024, 1463 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 1464 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 1465 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 1466 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1467 a_app_tag, e_app_tag, a_guard, e_guard); 1468 1469 /* 1470 * Ignore sector if: 1471 * For type 3: ref & app tag is all 'f's 1472 * For type 0,1,2: app tag is all 'f's 1473 */ 1474 if ((a_app_tag == 0xffff) && 1475 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || 1476 (a_ref_tag == 0xffffffff))) { 1477 uint32_t blocks_done, resid; 1478 sector_t lba_s = scsi_get_lba(cmd); 1479 1480 /* 2TB boundary case covered automatically with this */ 1481 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 1482 1483 resid = scsi_bufflen(cmd) - (blocks_done * 1484 cmd->device->sector_size); 1485 1486 scsi_set_resid(cmd, resid); 1487 cmd->result = DID_OK << 16; 1488 1489 /* Update protection tag */ 1490 if (scsi_prot_sg_count(cmd)) { 1491 uint32_t i, j = 0, k = 0, num_ent; 1492 struct scatterlist *sg; 1493 struct sd_dif_tuple *spt; 1494 1495 /* Patch the corresponding protection tags */ 1496 scsi_for_each_prot_sg(cmd, sg, 1497 scsi_prot_sg_count(cmd), i) { 1498 num_ent = sg_dma_len(sg) / 8; 1499 if (k + num_ent < blocks_done) { 1500 k += num_ent; 1501 continue; 1502 } 1503 j = blocks_done - k - 1; 1504 k = blocks_done; 1505 break; 1506 } 1507 1508 if (k != blocks_done) { 1509 qla_printk(KERN_WARNING, sp->fcport->vha->hw, 1510 "unexpected tag values tag:lba=%x:%llx)\n", 1511 e_ref_tag, (unsigned long long)lba_s); 1512 return 1; 1513 } 1514 1515 spt = page_address(sg_page(sg)) + sg->offset; 1516 spt += j; 1517 1518 spt->app_tag = 0xffff; 1519 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 1520 spt->ref_tag = 0xffffffff; 1521 } 1522 1523 return 0; 1524 } 1525 1526 /* check guard */ 1527 if (e_guard != a_guard) { 1528 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1529 0x10, 0x1); 1530 set_driver_byte(cmd, DRIVER_SENSE); 1531 set_host_byte(cmd, DID_ABORT); 1532 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1533 return 1; 1534 } 1535 1536 /* check ref tag */ 1537 if (e_ref_tag != a_ref_tag) { 1538 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1539 0x10, 0x3); 1540 set_driver_byte(cmd, DRIVER_SENSE); 1541 set_host_byte(cmd, DID_ABORT); 1542 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1543 return 1; 1544 } 1545 1546 /* check appl tag */ 1547 if (e_app_tag != a_app_tag) { 1548 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1549 0x10, 0x2); 1550 set_driver_byte(cmd, DRIVER_SENSE); 1551 set_host_byte(cmd, DID_ABORT); 1552 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1553 return 1; 1554 } 1555 1556 return 1; 1557 } 1558 1559 /** 1560 * qla2x00_status_entry() - Process a Status IOCB entry. 1561 * @ha: SCSI driver HA context 1562 * @pkt: Entry pointer 1563 */ 1564 static void 1565 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 1566 { 1567 srb_t *sp; 1568 fc_port_t *fcport; 1569 struct scsi_cmnd *cp; 1570 sts_entry_t *sts; 1571 struct sts_entry_24xx *sts24; 1572 uint16_t comp_status; 1573 uint16_t scsi_status; 1574 uint16_t ox_id; 1575 uint8_t lscsi_status; 1576 int32_t resid; 1577 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 1578 fw_resid_len; 1579 uint8_t *rsp_info, *sense_data; 1580 struct qla_hw_data *ha = vha->hw; 1581 uint32_t handle; 1582 uint16_t que; 1583 struct req_que *req; 1584 int logit = 1; 1585 1586 sts = (sts_entry_t *) pkt; 1587 sts24 = (struct sts_entry_24xx *) pkt; 1588 if (IS_FWI2_CAPABLE(ha)) { 1589 comp_status = le16_to_cpu(sts24->comp_status); 1590 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 1591 } else { 1592 comp_status = le16_to_cpu(sts->comp_status); 1593 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1594 } 1595 handle = (uint32_t) LSW(sts->handle); 1596 que = MSW(sts->handle); 1597 req = ha->req_q_map[que]; 1598 1599 /* Fast path completion. */ 1600 if (comp_status == CS_COMPLETE && scsi_status == 0) { 1601 qla2x00_process_completed_request(vha, req, handle); 1602 1603 return; 1604 } 1605 1606 /* Validate handle. */ 1607 if (handle < MAX_OUTSTANDING_COMMANDS) { 1608 sp = req->outstanding_cmds[handle]; 1609 req->outstanding_cmds[handle] = NULL; 1610 } else 1611 sp = NULL; 1612 1613 if (sp == NULL) { 1614 ql_log(ql_log_warn, vha, 0x3017, 1615 "Invalid status handle (0x%x).\n", sts->handle); 1616 1617 if (IS_QLA82XX(ha)) 1618 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1619 else 1620 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1621 qla2xxx_wake_dpc(vha); 1622 return; 1623 } 1624 cp = sp->cmd; 1625 if (cp == NULL) { 1626 ql_log(ql_log_warn, vha, 0x3018, 1627 "Command already returned (0x%x/%p).\n", 1628 sts->handle, sp); 1629 1630 return; 1631 } 1632 1633 lscsi_status = scsi_status & STATUS_MASK; 1634 1635 fcport = sp->fcport; 1636 1637 ox_id = 0; 1638 sense_len = par_sense_len = rsp_info_len = resid_len = 1639 fw_resid_len = 0; 1640 if (IS_FWI2_CAPABLE(ha)) { 1641 if (scsi_status & SS_SENSE_LEN_VALID) 1642 sense_len = le32_to_cpu(sts24->sense_len); 1643 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 1644 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 1645 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 1646 resid_len = le32_to_cpu(sts24->rsp_residual_count); 1647 if (comp_status == CS_DATA_UNDERRUN) 1648 fw_resid_len = le32_to_cpu(sts24->residual_len); 1649 rsp_info = sts24->data; 1650 sense_data = sts24->data; 1651 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 1652 ox_id = le16_to_cpu(sts24->ox_id); 1653 par_sense_len = sizeof(sts24->data); 1654 } else { 1655 if (scsi_status & SS_SENSE_LEN_VALID) 1656 sense_len = le16_to_cpu(sts->req_sense_length); 1657 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 1658 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 1659 resid_len = le32_to_cpu(sts->residual_length); 1660 rsp_info = sts->rsp_info; 1661 sense_data = sts->req_sense_data; 1662 par_sense_len = sizeof(sts->req_sense_data); 1663 } 1664 1665 /* Check for any FCP transport errors. */ 1666 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 1667 /* Sense data lies beyond any FCP RESPONSE data. */ 1668 if (IS_FWI2_CAPABLE(ha)) { 1669 sense_data += rsp_info_len; 1670 par_sense_len -= rsp_info_len; 1671 } 1672 if (rsp_info_len > 3 && rsp_info[3]) { 1673 ql_log(ql_log_warn, vha, 0x3019, 1674 "FCP I/O protocol failure (0x%x/0x%x).\n", 1675 rsp_info_len, rsp_info[3]); 1676 1677 cp->result = DID_BUS_BUSY << 16; 1678 goto out; 1679 } 1680 } 1681 1682 /* Check for overrun. */ 1683 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 1684 scsi_status & SS_RESIDUAL_OVER) 1685 comp_status = CS_DATA_OVERRUN; 1686 1687 /* 1688 * Based on Host and scsi status generate status code for Linux 1689 */ 1690 switch (comp_status) { 1691 case CS_COMPLETE: 1692 case CS_QUEUE_FULL: 1693 if (scsi_status == 0) { 1694 cp->result = DID_OK << 16; 1695 break; 1696 } 1697 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 1698 resid = resid_len; 1699 scsi_set_resid(cp, resid); 1700 1701 if (!lscsi_status && 1702 ((unsigned)(scsi_bufflen(cp) - resid) < 1703 cp->underflow)) { 1704 ql_log(ql_log_warn, vha, 0x301a, 1705 "Mid-layer underflow " 1706 "detected (0x%x of 0x%x bytes).\n", 1707 resid, scsi_bufflen(cp)); 1708 1709 cp->result = DID_ERROR << 16; 1710 break; 1711 } 1712 } 1713 cp->result = DID_OK << 16 | lscsi_status; 1714 1715 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1716 ql_log(ql_log_warn, vha, 0x301b, 1717 "QUEUE FULL detected.\n"); 1718 break; 1719 } 1720 logit = 0; 1721 if (lscsi_status != SS_CHECK_CONDITION) 1722 break; 1723 1724 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1725 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1726 break; 1727 1728 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 1729 rsp); 1730 break; 1731 1732 case CS_DATA_UNDERRUN: 1733 /* Use F/W calculated residual length. */ 1734 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 1735 scsi_set_resid(cp, resid); 1736 if (scsi_status & SS_RESIDUAL_UNDER) { 1737 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 1738 ql_log(ql_log_warn, vha, 0x301d, 1739 "Dropped frame(s) detected " 1740 "(0x%x of 0x%x bytes).\n", 1741 resid, scsi_bufflen(cp)); 1742 1743 cp->result = DID_ERROR << 16 | lscsi_status; 1744 break; 1745 } 1746 1747 if (!lscsi_status && 1748 ((unsigned)(scsi_bufflen(cp) - resid) < 1749 cp->underflow)) { 1750 ql_log(ql_log_warn, vha, 0x301e, 1751 "Mid-layer underflow " 1752 "detected (0x%x of 0x%x bytes).\n", 1753 resid, scsi_bufflen(cp)); 1754 1755 cp->result = DID_ERROR << 16; 1756 break; 1757 } 1758 } else { 1759 ql_log(ql_log_warn, vha, 0x301f, 1760 "Dropped frame(s) detected (0x%x " 1761 "of 0x%x bytes).\n", resid, scsi_bufflen(cp)); 1762 1763 cp->result = DID_ERROR << 16 | lscsi_status; 1764 goto check_scsi_status; 1765 } 1766 1767 cp->result = DID_OK << 16 | lscsi_status; 1768 logit = 0; 1769 1770 check_scsi_status: 1771 /* 1772 * Check to see if SCSI Status is non zero. If so report SCSI 1773 * Status. 1774 */ 1775 if (lscsi_status != 0) { 1776 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1777 ql_log(ql_log_warn, vha, 0x3020, 1778 "QUEUE FULL detected.\n"); 1779 logit = 1; 1780 break; 1781 } 1782 if (lscsi_status != SS_CHECK_CONDITION) 1783 break; 1784 1785 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1786 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1787 break; 1788 1789 qla2x00_handle_sense(sp, sense_data, par_sense_len, 1790 sense_len, rsp); 1791 } 1792 break; 1793 1794 case CS_PORT_LOGGED_OUT: 1795 case CS_PORT_CONFIG_CHG: 1796 case CS_PORT_BUSY: 1797 case CS_INCOMPLETE: 1798 case CS_PORT_UNAVAILABLE: 1799 case CS_TIMEOUT: 1800 case CS_RESET: 1801 1802 /* 1803 * We are going to have the fc class block the rport 1804 * while we try to recover so instruct the mid layer 1805 * to requeue until the class decides how to handle this. 1806 */ 1807 cp->result = DID_TRANSPORT_DISRUPTED << 16; 1808 1809 if (comp_status == CS_TIMEOUT) { 1810 if (IS_FWI2_CAPABLE(ha)) 1811 break; 1812 else if ((le16_to_cpu(sts->status_flags) & 1813 SF_LOGOUT_SENT) == 0) 1814 break; 1815 } 1816 1817 ql_dbg(ql_dbg_io, vha, 0x3021, 1818 "Port down status: port-state=0x%x.\n", 1819 atomic_read(&fcport->state)); 1820 1821 if (atomic_read(&fcport->state) == FCS_ONLINE) 1822 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1823 break; 1824 1825 case CS_ABORTED: 1826 cp->result = DID_RESET << 16; 1827 break; 1828 1829 case CS_DIF_ERROR: 1830 logit = qla2x00_handle_dif_error(sp, sts24); 1831 break; 1832 default: 1833 cp->result = DID_ERROR << 16; 1834 break; 1835 } 1836 1837 out: 1838 if (logit) 1839 ql_dbg(ql_dbg_io, vha, 0x3022, 1840 "FCP command status: 0x%x-0x%x (0x%x) " 1841 "oxid=0x%x cdb=%02x%02x%02x len=0x%x " 1842 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", 1843 comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0], 1844 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, 1845 resid_len, fw_resid_len); 1846 1847 if (rsp->status_srb == NULL) 1848 qla2x00_sp_compl(ha, sp); 1849 } 1850 1851 /** 1852 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 1853 * @ha: SCSI driver HA context 1854 * @pkt: Entry pointer 1855 * 1856 * Extended sense data. 1857 */ 1858 static void 1859 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 1860 { 1861 uint8_t sense_sz = 0; 1862 struct qla_hw_data *ha = rsp->hw; 1863 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 1864 srb_t *sp = rsp->status_srb; 1865 struct scsi_cmnd *cp; 1866 1867 if (sp != NULL && sp->request_sense_length != 0) { 1868 cp = sp->cmd; 1869 if (cp == NULL) { 1870 ql_log(ql_log_warn, vha, 0x3025, 1871 "cmd is NULL: already returned to OS (sp=%p).\n", 1872 sp); 1873 1874 rsp->status_srb = NULL; 1875 return; 1876 } 1877 1878 if (sp->request_sense_length > sizeof(pkt->data)) { 1879 sense_sz = sizeof(pkt->data); 1880 } else { 1881 sense_sz = sp->request_sense_length; 1882 } 1883 1884 /* Move sense data. */ 1885 if (IS_FWI2_CAPABLE(ha)) 1886 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 1887 memcpy(sp->request_sense_ptr, pkt->data, sense_sz); 1888 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 1889 sp->request_sense_ptr, sense_sz); 1890 1891 sp->request_sense_ptr += sense_sz; 1892 sp->request_sense_length -= sense_sz; 1893 1894 /* Place command on done queue. */ 1895 if (sp->request_sense_length == 0) { 1896 rsp->status_srb = NULL; 1897 qla2x00_sp_compl(ha, sp); 1898 } 1899 } 1900 } 1901 1902 /** 1903 * qla2x00_error_entry() - Process an error entry. 1904 * @ha: SCSI driver HA context 1905 * @pkt: Entry pointer 1906 */ 1907 static void 1908 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 1909 { 1910 srb_t *sp; 1911 struct qla_hw_data *ha = vha->hw; 1912 uint32_t handle = LSW(pkt->handle); 1913 uint16_t que = MSW(pkt->handle); 1914 struct req_que *req = ha->req_q_map[que]; 1915 1916 if (pkt->entry_status & RF_INV_E_ORDER) 1917 ql_dbg(ql_dbg_async, vha, 0x502a, 1918 "Invalid Entry Order.\n"); 1919 else if (pkt->entry_status & RF_INV_E_COUNT) 1920 ql_dbg(ql_dbg_async, vha, 0x502b, 1921 "Invalid Entry Count.\n"); 1922 else if (pkt->entry_status & RF_INV_E_PARAM) 1923 ql_dbg(ql_dbg_async, vha, 0x502c, 1924 "Invalid Entry Parameter.\n"); 1925 else if (pkt->entry_status & RF_INV_E_TYPE) 1926 ql_dbg(ql_dbg_async, vha, 0x502d, 1927 "Invalid Entry Type.\n"); 1928 else if (pkt->entry_status & RF_BUSY) 1929 ql_dbg(ql_dbg_async, vha, 0x502e, 1930 "Busy.\n"); 1931 else 1932 ql_dbg(ql_dbg_async, vha, 0x502f, 1933 "UNKNOWN flag error.\n"); 1934 1935 /* Validate handle. */ 1936 if (handle < MAX_OUTSTANDING_COMMANDS) 1937 sp = req->outstanding_cmds[handle]; 1938 else 1939 sp = NULL; 1940 1941 if (sp) { 1942 /* Free outstanding command slot. */ 1943 req->outstanding_cmds[handle] = NULL; 1944 1945 /* Bad payload or header */ 1946 if (pkt->entry_status & 1947 (RF_INV_E_ORDER | RF_INV_E_COUNT | 1948 RF_INV_E_PARAM | RF_INV_E_TYPE)) { 1949 sp->cmd->result = DID_ERROR << 16; 1950 } else if (pkt->entry_status & RF_BUSY) { 1951 sp->cmd->result = DID_BUS_BUSY << 16; 1952 } else { 1953 sp->cmd->result = DID_ERROR << 16; 1954 } 1955 qla2x00_sp_compl(ha, sp); 1956 1957 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1958 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7 1959 || pkt->entry_type == COMMAND_TYPE_6) { 1960 ql_log(ql_log_warn, vha, 0x5030, 1961 "Error entry - invalid handle.\n"); 1962 1963 if (IS_QLA82XX(ha)) 1964 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1965 else 1966 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1967 qla2xxx_wake_dpc(vha); 1968 } 1969 } 1970 1971 /** 1972 * qla24xx_mbx_completion() - Process mailbox command completions. 1973 * @ha: SCSI driver HA context 1974 * @mb0: Mailbox0 register 1975 */ 1976 static void 1977 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 1978 { 1979 uint16_t cnt; 1980 uint16_t __iomem *wptr; 1981 struct qla_hw_data *ha = vha->hw; 1982 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1983 1984 /* Load return mailbox registers. */ 1985 ha->flags.mbox_int = 1; 1986 ha->mailbox_out[0] = mb0; 1987 wptr = (uint16_t __iomem *)®->mailbox1; 1988 1989 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 1990 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 1991 wptr++; 1992 } 1993 1994 if (ha->mcp) { 1995 ql_dbg(ql_dbg_async, vha, 0x504d, 1996 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]); 1997 } else { 1998 ql_dbg(ql_dbg_async, vha, 0x504e, 1999 "MBX pointer ERROR.\n"); 2000 } 2001 } 2002 2003 /** 2004 * qla24xx_process_response_queue() - Process response queue entries. 2005 * @ha: SCSI driver HA context 2006 */ 2007 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 2008 struct rsp_que *rsp) 2009 { 2010 struct sts_entry_24xx *pkt; 2011 struct qla_hw_data *ha = vha->hw; 2012 2013 if (!vha->flags.online) 2014 return; 2015 2016 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2017 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 2018 2019 rsp->ring_index++; 2020 if (rsp->ring_index == rsp->length) { 2021 rsp->ring_index = 0; 2022 rsp->ring_ptr = rsp->ring; 2023 } else { 2024 rsp->ring_ptr++; 2025 } 2026 2027 if (pkt->entry_status != 0) { 2028 ql_dbg(ql_dbg_async, vha, 0x5029, 2029 "Process error entry.\n"); 2030 2031 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2032 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2033 wmb(); 2034 continue; 2035 } 2036 2037 switch (pkt->entry_type) { 2038 case STATUS_TYPE: 2039 qla2x00_status_entry(vha, rsp, pkt); 2040 break; 2041 case STATUS_CONT_TYPE: 2042 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2043 break; 2044 case VP_RPT_ID_IOCB_TYPE: 2045 qla24xx_report_id_acquisition(vha, 2046 (struct vp_rpt_id_entry_24xx *)pkt); 2047 break; 2048 case LOGINOUT_PORT_IOCB_TYPE: 2049 qla24xx_logio_entry(vha, rsp->req, 2050 (struct logio_entry_24xx *)pkt); 2051 break; 2052 case TSK_MGMT_IOCB_TYPE: 2053 qla24xx_tm_iocb_entry(vha, rsp->req, 2054 (struct tsk_mgmt_entry *)pkt); 2055 break; 2056 case CT_IOCB_TYPE: 2057 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2058 clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags); 2059 break; 2060 case ELS_IOCB_TYPE: 2061 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2062 break; 2063 case MARKER_TYPE: 2064 /* Do nothing in this case, this check is to prevent it 2065 * from falling into default case 2066 */ 2067 break; 2068 default: 2069 /* Type Not Supported. */ 2070 ql_dbg(ql_dbg_async, vha, 0x5042, 2071 "Received unknown response pkt type %x " 2072 "entry status=%x.\n", 2073 pkt->entry_type, pkt->entry_status); 2074 break; 2075 } 2076 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2077 wmb(); 2078 } 2079 2080 /* Adjust ring index */ 2081 if (IS_QLA82XX(ha)) { 2082 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 2083 WRT_REG_DWORD(®->rsp_q_out[0], rsp->ring_index); 2084 } else 2085 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); 2086 } 2087 2088 static void 2089 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 2090 { 2091 int rval; 2092 uint32_t cnt; 2093 struct qla_hw_data *ha = vha->hw; 2094 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2095 2096 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) 2097 return; 2098 2099 rval = QLA_SUCCESS; 2100 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 2101 RD_REG_DWORD(®->iobase_addr); 2102 WRT_REG_DWORD(®->iobase_window, 0x0001); 2103 for (cnt = 10000; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2104 rval == QLA_SUCCESS; cnt--) { 2105 if (cnt) { 2106 WRT_REG_DWORD(®->iobase_window, 0x0001); 2107 udelay(10); 2108 } else 2109 rval = QLA_FUNCTION_TIMEOUT; 2110 } 2111 if (rval == QLA_SUCCESS) 2112 goto next_test; 2113 2114 WRT_REG_DWORD(®->iobase_window, 0x0003); 2115 for (cnt = 100; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2116 rval == QLA_SUCCESS; cnt--) { 2117 if (cnt) { 2118 WRT_REG_DWORD(®->iobase_window, 0x0003); 2119 udelay(10); 2120 } else 2121 rval = QLA_FUNCTION_TIMEOUT; 2122 } 2123 if (rval != QLA_SUCCESS) 2124 goto done; 2125 2126 next_test: 2127 if (RD_REG_DWORD(®->iobase_c8) & BIT_3) 2128 ql_log(ql_log_info, vha, 0x504c, 2129 "Additional code -- 0x55AA.\n"); 2130 2131 done: 2132 WRT_REG_DWORD(®->iobase_window, 0x0000); 2133 RD_REG_DWORD(®->iobase_window); 2134 } 2135 2136 /** 2137 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 2138 * @irq: 2139 * @dev_id: SCSI driver HA context 2140 * 2141 * Called by system whenever the host adapter generates an interrupt. 2142 * 2143 * Returns handled flag. 2144 */ 2145 irqreturn_t 2146 qla24xx_intr_handler(int irq, void *dev_id) 2147 { 2148 scsi_qla_host_t *vha; 2149 struct qla_hw_data *ha; 2150 struct device_reg_24xx __iomem *reg; 2151 int status; 2152 unsigned long iter; 2153 uint32_t stat; 2154 uint32_t hccr; 2155 uint16_t mb[4]; 2156 struct rsp_que *rsp; 2157 unsigned long flags; 2158 2159 rsp = (struct rsp_que *) dev_id; 2160 if (!rsp) { 2161 printk(KERN_INFO 2162 "%s(): NULL response queue pointer.\n", __func__); 2163 return IRQ_NONE; 2164 } 2165 2166 ha = rsp->hw; 2167 reg = &ha->iobase->isp24; 2168 status = 0; 2169 2170 if (unlikely(pci_channel_offline(ha->pdev))) 2171 return IRQ_HANDLED; 2172 2173 spin_lock_irqsave(&ha->hardware_lock, flags); 2174 vha = pci_get_drvdata(ha->pdev); 2175 for (iter = 50; iter--; ) { 2176 stat = RD_REG_DWORD(®->host_status); 2177 if (stat & HSRX_RISC_PAUSED) { 2178 if (unlikely(pci_channel_offline(ha->pdev))) 2179 break; 2180 2181 hccr = RD_REG_DWORD(®->hccr); 2182 2183 ql_log(ql_log_warn, vha, 0x504b, 2184 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2185 hccr); 2186 2187 qla2xxx_check_risc_status(vha); 2188 2189 ha->isp_ops->fw_dump(vha, 1); 2190 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2191 break; 2192 } else if ((stat & HSRX_RISC_INT) == 0) 2193 break; 2194 2195 switch (stat & 0xff) { 2196 case 0x1: 2197 case 0x2: 2198 case 0x10: 2199 case 0x11: 2200 qla24xx_mbx_completion(vha, MSW(stat)); 2201 status |= MBX_INTERRUPT; 2202 2203 break; 2204 case 0x12: 2205 mb[0] = MSW(stat); 2206 mb[1] = RD_REG_WORD(®->mailbox1); 2207 mb[2] = RD_REG_WORD(®->mailbox2); 2208 mb[3] = RD_REG_WORD(®->mailbox3); 2209 qla2x00_async_event(vha, rsp, mb); 2210 break; 2211 case 0x13: 2212 case 0x14: 2213 qla24xx_process_response_queue(vha, rsp); 2214 break; 2215 default: 2216 ql_dbg(ql_dbg_async, vha, 0x504f, 2217 "Unrecognized interrupt type (%d).\n", stat * 0xff); 2218 break; 2219 } 2220 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2221 RD_REG_DWORD_RELAXED(®->hccr); 2222 } 2223 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2224 2225 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2226 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2227 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2228 complete(&ha->mbx_intr_comp); 2229 } 2230 2231 return IRQ_HANDLED; 2232 } 2233 2234 static irqreturn_t 2235 qla24xx_msix_rsp_q(int irq, void *dev_id) 2236 { 2237 struct qla_hw_data *ha; 2238 struct rsp_que *rsp; 2239 struct device_reg_24xx __iomem *reg; 2240 struct scsi_qla_host *vha; 2241 unsigned long flags; 2242 2243 rsp = (struct rsp_que *) dev_id; 2244 if (!rsp) { 2245 printk(KERN_INFO 2246 "%s(): NULL response queue pointer.\n", __func__); 2247 return IRQ_NONE; 2248 } 2249 ha = rsp->hw; 2250 reg = &ha->iobase->isp24; 2251 2252 spin_lock_irqsave(&ha->hardware_lock, flags); 2253 2254 vha = pci_get_drvdata(ha->pdev); 2255 qla24xx_process_response_queue(vha, rsp); 2256 if (!ha->flags.disable_msix_handshake) { 2257 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2258 RD_REG_DWORD_RELAXED(®->hccr); 2259 } 2260 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2261 2262 return IRQ_HANDLED; 2263 } 2264 2265 static irqreturn_t 2266 qla25xx_msix_rsp_q(int irq, void *dev_id) 2267 { 2268 struct qla_hw_data *ha; 2269 struct rsp_que *rsp; 2270 struct device_reg_24xx __iomem *reg; 2271 unsigned long flags; 2272 2273 rsp = (struct rsp_que *) dev_id; 2274 if (!rsp) { 2275 printk(KERN_INFO 2276 "%s(): NULL response queue pointer.\n", __func__); 2277 return IRQ_NONE; 2278 } 2279 ha = rsp->hw; 2280 2281 /* Clear the interrupt, if enabled, for this response queue */ 2282 if (!ha->flags.disable_msix_handshake) { 2283 reg = &ha->iobase->isp24; 2284 spin_lock_irqsave(&ha->hardware_lock, flags); 2285 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2286 RD_REG_DWORD_RELAXED(®->hccr); 2287 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2288 } 2289 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 2290 2291 return IRQ_HANDLED; 2292 } 2293 2294 static irqreturn_t 2295 qla24xx_msix_default(int irq, void *dev_id) 2296 { 2297 scsi_qla_host_t *vha; 2298 struct qla_hw_data *ha; 2299 struct rsp_que *rsp; 2300 struct device_reg_24xx __iomem *reg; 2301 int status; 2302 uint32_t stat; 2303 uint32_t hccr; 2304 uint16_t mb[4]; 2305 unsigned long flags; 2306 2307 rsp = (struct rsp_que *) dev_id; 2308 if (!rsp) { 2309 printk(KERN_INFO 2310 "%s(): NULL response queue pointer.\n", __func__); 2311 return IRQ_NONE; 2312 } 2313 ha = rsp->hw; 2314 reg = &ha->iobase->isp24; 2315 status = 0; 2316 2317 spin_lock_irqsave(&ha->hardware_lock, flags); 2318 vha = pci_get_drvdata(ha->pdev); 2319 do { 2320 stat = RD_REG_DWORD(®->host_status); 2321 if (stat & HSRX_RISC_PAUSED) { 2322 if (unlikely(pci_channel_offline(ha->pdev))) 2323 break; 2324 2325 hccr = RD_REG_DWORD(®->hccr); 2326 2327 ql_log(ql_log_info, vha, 0x5050, 2328 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2329 hccr); 2330 2331 qla2xxx_check_risc_status(vha); 2332 2333 ha->isp_ops->fw_dump(vha, 1); 2334 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2335 break; 2336 } else if ((stat & HSRX_RISC_INT) == 0) 2337 break; 2338 2339 switch (stat & 0xff) { 2340 case 0x1: 2341 case 0x2: 2342 case 0x10: 2343 case 0x11: 2344 qla24xx_mbx_completion(vha, MSW(stat)); 2345 status |= MBX_INTERRUPT; 2346 2347 break; 2348 case 0x12: 2349 mb[0] = MSW(stat); 2350 mb[1] = RD_REG_WORD(®->mailbox1); 2351 mb[2] = RD_REG_WORD(®->mailbox2); 2352 mb[3] = RD_REG_WORD(®->mailbox3); 2353 qla2x00_async_event(vha, rsp, mb); 2354 break; 2355 case 0x13: 2356 case 0x14: 2357 qla24xx_process_response_queue(vha, rsp); 2358 break; 2359 default: 2360 ql_dbg(ql_dbg_async, vha, 0x5051, 2361 "Unrecognized interrupt type (%d).\n", stat & 0xff); 2362 break; 2363 } 2364 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2365 } while (0); 2366 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2367 2368 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2369 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2370 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2371 complete(&ha->mbx_intr_comp); 2372 } 2373 return IRQ_HANDLED; 2374 } 2375 2376 /* Interrupt handling helpers. */ 2377 2378 struct qla_init_msix_entry { 2379 const char *name; 2380 irq_handler_t handler; 2381 }; 2382 2383 static struct qla_init_msix_entry msix_entries[3] = { 2384 { "qla2xxx (default)", qla24xx_msix_default }, 2385 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 2386 { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, 2387 }; 2388 2389 static struct qla_init_msix_entry qla82xx_msix_entries[2] = { 2390 { "qla2xxx (default)", qla82xx_msix_default }, 2391 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 2392 }; 2393 2394 static void 2395 qla24xx_disable_msix(struct qla_hw_data *ha) 2396 { 2397 int i; 2398 struct qla_msix_entry *qentry; 2399 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2400 2401 for (i = 0; i < ha->msix_count; i++) { 2402 qentry = &ha->msix_entries[i]; 2403 if (qentry->have_irq) 2404 free_irq(qentry->vector, qentry->rsp); 2405 } 2406 pci_disable_msix(ha->pdev); 2407 kfree(ha->msix_entries); 2408 ha->msix_entries = NULL; 2409 ha->flags.msix_enabled = 0; 2410 ql_dbg(ql_dbg_init, vha, 0x0042, 2411 "Disabled the MSI.\n"); 2412 } 2413 2414 static int 2415 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 2416 { 2417 #define MIN_MSIX_COUNT 2 2418 int i, ret; 2419 struct msix_entry *entries; 2420 struct qla_msix_entry *qentry; 2421 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2422 2423 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 2424 GFP_KERNEL); 2425 if (!entries) { 2426 ql_log(ql_log_warn, vha, 0x00bc, 2427 "Failed to allocate memory for msix_entry.\n"); 2428 return -ENOMEM; 2429 } 2430 2431 for (i = 0; i < ha->msix_count; i++) 2432 entries[i].entry = i; 2433 2434 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2435 if (ret) { 2436 if (ret < MIN_MSIX_COUNT) 2437 goto msix_failed; 2438 2439 ql_log(ql_log_warn, vha, 0x00c6, 2440 "MSI-X: Failed to enable support " 2441 "-- %d/%d\n Retry with %d vectors.\n", 2442 ha->msix_count, ret, ret); 2443 ha->msix_count = ret; 2444 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2445 if (ret) { 2446 msix_failed: 2447 ql_log(ql_log_fatal, vha, 0x00c7, 2448 "MSI-X: Failed to enable support, " 2449 "giving up -- %d/%d.\n", 2450 ha->msix_count, ret); 2451 goto msix_out; 2452 } 2453 ha->max_rsp_queues = ha->msix_count - 1; 2454 } 2455 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 2456 ha->msix_count, GFP_KERNEL); 2457 if (!ha->msix_entries) { 2458 ql_log(ql_log_fatal, vha, 0x00c8, 2459 "Failed to allocate memory for ha->msix_entries.\n"); 2460 ret = -ENOMEM; 2461 goto msix_out; 2462 } 2463 ha->flags.msix_enabled = 1; 2464 2465 for (i = 0; i < ha->msix_count; i++) { 2466 qentry = &ha->msix_entries[i]; 2467 qentry->vector = entries[i].vector; 2468 qentry->entry = entries[i].entry; 2469 qentry->have_irq = 0; 2470 qentry->rsp = NULL; 2471 } 2472 2473 /* Enable MSI-X vectors for the base queue */ 2474 for (i = 0; i < 2; i++) { 2475 qentry = &ha->msix_entries[i]; 2476 if (IS_QLA82XX(ha)) { 2477 ret = request_irq(qentry->vector, 2478 qla82xx_msix_entries[i].handler, 2479 0, qla82xx_msix_entries[i].name, rsp); 2480 } else { 2481 ret = request_irq(qentry->vector, 2482 msix_entries[i].handler, 2483 0, msix_entries[i].name, rsp); 2484 } 2485 if (ret) { 2486 ql_log(ql_log_fatal, vha, 0x00cb, 2487 "MSI-X: unable to register handler -- %x/%d.\n", 2488 qentry->vector, ret); 2489 qla24xx_disable_msix(ha); 2490 ha->mqenable = 0; 2491 goto msix_out; 2492 } 2493 qentry->have_irq = 1; 2494 qentry->rsp = rsp; 2495 rsp->msix = qentry; 2496 } 2497 2498 /* Enable MSI-X vector for response queue update for queue 0 */ 2499 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 2500 ha->mqenable = 1; 2501 ql_dbg(ql_dbg_multiq, vha, 0xc005, 2502 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 2503 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 2504 ql_dbg(ql_dbg_init, vha, 0x0055, 2505 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 2506 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 2507 2508 msix_out: 2509 kfree(entries); 2510 return ret; 2511 } 2512 2513 int 2514 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 2515 { 2516 int ret; 2517 device_reg_t __iomem *reg = ha->iobase; 2518 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2519 2520 /* If possible, enable MSI-X. */ 2521 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 2522 !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha)) 2523 goto skip_msi; 2524 2525 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 2526 (ha->pdev->subsystem_device == 0x7040 || 2527 ha->pdev->subsystem_device == 0x7041 || 2528 ha->pdev->subsystem_device == 0x1705)) { 2529 ql_log(ql_log_warn, vha, 0x0034, 2530 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 2531 ha->pdev->subsystem_vendor, 2532 ha->pdev->subsystem_device); 2533 goto skip_msi; 2534 } 2535 2536 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 2537 ql_log(ql_log_warn, vha, 0x0035, 2538 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 2539 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 2540 goto skip_msix; 2541 } 2542 2543 ret = qla24xx_enable_msix(ha, rsp); 2544 if (!ret) { 2545 ql_dbg(ql_dbg_init, vha, 0x0036, 2546 "MSI-X: Enabled (0x%X, 0x%X).\n", 2547 ha->chip_revision, ha->fw_attributes); 2548 goto clear_risc_ints; 2549 } 2550 ql_log(ql_log_info, vha, 0x0037, 2551 "MSI-X Falling back-to MSI mode -%d.\n", ret); 2552 skip_msix: 2553 2554 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2555 !IS_QLA8001(ha)) 2556 goto skip_msi; 2557 2558 ret = pci_enable_msi(ha->pdev); 2559 if (!ret) { 2560 ql_dbg(ql_dbg_init, vha, 0x0038, 2561 "MSI: Enabled.\n"); 2562 ha->flags.msi_enabled = 1; 2563 } else 2564 ql_log(ql_log_warn, vha, 0x0039, 2565 "MSI-X; Falling back-to INTa mode -- %d.\n", ret); 2566 skip_msi: 2567 2568 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 2569 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 2570 QLA2XXX_DRIVER_NAME, rsp); 2571 if (ret) { 2572 ql_log(ql_log_warn, vha, 0x003a, 2573 "Failed to reserve interrupt %d already in use.\n", 2574 ha->pdev->irq); 2575 goto fail; 2576 } 2577 2578 clear_risc_ints: 2579 2580 /* 2581 * FIXME: Noted that 8014s were being dropped during NK testing. 2582 * Timing deltas during MSI-X/INTa transitions? 2583 */ 2584 if (IS_QLA81XX(ha) || IS_QLA82XX(ha)) 2585 goto fail; 2586 spin_lock_irq(&ha->hardware_lock); 2587 if (IS_FWI2_CAPABLE(ha)) { 2588 WRT_REG_DWORD(®->isp24.hccr, HCCRX_CLR_HOST_INT); 2589 WRT_REG_DWORD(®->isp24.hccr, HCCRX_CLR_RISC_INT); 2590 } else { 2591 WRT_REG_WORD(®->isp.semaphore, 0); 2592 WRT_REG_WORD(®->isp.hccr, HCCR_CLR_RISC_INT); 2593 WRT_REG_WORD(®->isp.hccr, HCCR_CLR_HOST_INT); 2594 } 2595 spin_unlock_irq(&ha->hardware_lock); 2596 2597 fail: 2598 return ret; 2599 } 2600 2601 void 2602 qla2x00_free_irqs(scsi_qla_host_t *vha) 2603 { 2604 struct qla_hw_data *ha = vha->hw; 2605 struct rsp_que *rsp = ha->rsp_q_map[0]; 2606 2607 if (ha->flags.msix_enabled) 2608 qla24xx_disable_msix(ha); 2609 else if (ha->flags.msi_enabled) { 2610 free_irq(ha->pdev->irq, rsp); 2611 pci_disable_msi(ha->pdev); 2612 } else 2613 free_irq(ha->pdev->irq, rsp); 2614 } 2615 2616 2617 int qla25xx_request_irq(struct rsp_que *rsp) 2618 { 2619 struct qla_hw_data *ha = rsp->hw; 2620 struct qla_init_msix_entry *intr = &msix_entries[2]; 2621 struct qla_msix_entry *msix = rsp->msix; 2622 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2623 int ret; 2624 2625 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); 2626 if (ret) { 2627 ql_log(ql_log_fatal, vha, 0x00e6, 2628 "MSI-X: Unable to register handler -- %x/%d.\n", 2629 msix->vector, ret); 2630 return ret; 2631 } 2632 msix->have_irq = 1; 2633 msix->rsp = rsp; 2634 return ret; 2635 } 2636