1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2013 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 #include "qla_target.h" 9 10 #include <linux/delay.h> 11 #include <linux/slab.h> 12 #include <scsi/scsi_tcq.h> 13 #include <scsi/scsi_bsg_fc.h> 14 #include <scsi/scsi_eh.h> 15 16 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 17 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 18 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 19 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 20 sts_entry_t *); 21 22 /** 23 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 24 * @irq: 25 * @dev_id: SCSI driver HA context 26 * 27 * Called by system whenever the host adapter generates an interrupt. 28 * 29 * Returns handled flag. 30 */ 31 irqreturn_t 32 qla2100_intr_handler(int irq, void *dev_id) 33 { 34 scsi_qla_host_t *vha; 35 struct qla_hw_data *ha; 36 struct device_reg_2xxx __iomem *reg; 37 int status; 38 unsigned long iter; 39 uint16_t hccr; 40 uint16_t mb[4]; 41 struct rsp_que *rsp; 42 unsigned long flags; 43 44 rsp = (struct rsp_que *) dev_id; 45 if (!rsp) { 46 ql_log(ql_log_info, NULL, 0x505d, 47 "%s: NULL response queue pointer.\n", __func__); 48 return (IRQ_NONE); 49 } 50 51 ha = rsp->hw; 52 reg = &ha->iobase->isp; 53 status = 0; 54 55 spin_lock_irqsave(&ha->hardware_lock, flags); 56 vha = pci_get_drvdata(ha->pdev); 57 for (iter = 50; iter--; ) { 58 hccr = RD_REG_WORD(®->hccr); 59 if (hccr & HCCR_RISC_PAUSE) { 60 if (pci_channel_offline(ha->pdev)) 61 break; 62 63 /* 64 * Issue a "HARD" reset in order for the RISC interrupt 65 * bit to be cleared. Schedule a big hammer to get 66 * out of the RISC PAUSED state. 67 */ 68 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 69 RD_REG_WORD(®->hccr); 70 71 ha->isp_ops->fw_dump(vha, 1); 72 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 73 break; 74 } else if ((RD_REG_WORD(®->istatus) & ISR_RISC_INT) == 0) 75 break; 76 77 if (RD_REG_WORD(®->semaphore) & BIT_0) { 78 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 79 RD_REG_WORD(®->hccr); 80 81 /* Get mailbox data. */ 82 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 83 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 84 qla2x00_mbx_completion(vha, mb[0]); 85 status |= MBX_INTERRUPT; 86 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 87 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 88 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 89 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 90 qla2x00_async_event(vha, rsp, mb); 91 } else { 92 /*EMPTY*/ 93 ql_dbg(ql_dbg_async, vha, 0x5025, 94 "Unrecognized interrupt type (%d).\n", 95 mb[0]); 96 } 97 /* Release mailbox registers. */ 98 WRT_REG_WORD(®->semaphore, 0); 99 RD_REG_WORD(®->semaphore); 100 } else { 101 qla2x00_process_response_queue(rsp); 102 103 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 104 RD_REG_WORD(®->hccr); 105 } 106 } 107 spin_unlock_irqrestore(&ha->hardware_lock, flags); 108 109 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 110 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 111 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 112 complete(&ha->mbx_intr_comp); 113 } 114 115 return (IRQ_HANDLED); 116 } 117 118 /** 119 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 120 * @irq: 121 * @dev_id: SCSI driver HA context 122 * 123 * Called by system whenever the host adapter generates an interrupt. 124 * 125 * Returns handled flag. 126 */ 127 irqreturn_t 128 qla2300_intr_handler(int irq, void *dev_id) 129 { 130 scsi_qla_host_t *vha; 131 struct device_reg_2xxx __iomem *reg; 132 int status; 133 unsigned long iter; 134 uint32_t stat; 135 uint16_t hccr; 136 uint16_t mb[4]; 137 struct rsp_que *rsp; 138 struct qla_hw_data *ha; 139 unsigned long flags; 140 141 rsp = (struct rsp_que *) dev_id; 142 if (!rsp) { 143 ql_log(ql_log_info, NULL, 0x5058, 144 "%s: NULL response queue pointer.\n", __func__); 145 return (IRQ_NONE); 146 } 147 148 ha = rsp->hw; 149 reg = &ha->iobase->isp; 150 status = 0; 151 152 spin_lock_irqsave(&ha->hardware_lock, flags); 153 vha = pci_get_drvdata(ha->pdev); 154 for (iter = 50; iter--; ) { 155 stat = RD_REG_DWORD(®->u.isp2300.host_status); 156 if (stat & HSR_RISC_PAUSED) { 157 if (unlikely(pci_channel_offline(ha->pdev))) 158 break; 159 160 hccr = RD_REG_WORD(®->hccr); 161 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 162 ql_log(ql_log_warn, vha, 0x5026, 163 "Parity error -- HCCR=%x, Dumping " 164 "firmware.\n", hccr); 165 else 166 ql_log(ql_log_warn, vha, 0x5027, 167 "RISC paused -- HCCR=%x, Dumping " 168 "firmware.\n", hccr); 169 170 /* 171 * Issue a "HARD" reset in order for the RISC 172 * interrupt bit to be cleared. Schedule a big 173 * hammer to get out of the RISC PAUSED state. 174 */ 175 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 176 RD_REG_WORD(®->hccr); 177 178 ha->isp_ops->fw_dump(vha, 1); 179 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 180 break; 181 } else if ((stat & HSR_RISC_INT) == 0) 182 break; 183 184 switch (stat & 0xff) { 185 case 0x1: 186 case 0x2: 187 case 0x10: 188 case 0x11: 189 qla2x00_mbx_completion(vha, MSW(stat)); 190 status |= MBX_INTERRUPT; 191 192 /* Release mailbox registers. */ 193 WRT_REG_WORD(®->semaphore, 0); 194 break; 195 case 0x12: 196 mb[0] = MSW(stat); 197 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 198 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 199 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 200 qla2x00_async_event(vha, rsp, mb); 201 break; 202 case 0x13: 203 qla2x00_process_response_queue(rsp); 204 break; 205 case 0x15: 206 mb[0] = MBA_CMPLT_1_16BIT; 207 mb[1] = MSW(stat); 208 qla2x00_async_event(vha, rsp, mb); 209 break; 210 case 0x16: 211 mb[0] = MBA_SCSI_COMPLETION; 212 mb[1] = MSW(stat); 213 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 214 qla2x00_async_event(vha, rsp, mb); 215 break; 216 default: 217 ql_dbg(ql_dbg_async, vha, 0x5028, 218 "Unrecognized interrupt type (%d).\n", stat & 0xff); 219 break; 220 } 221 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 222 RD_REG_WORD_RELAXED(®->hccr); 223 } 224 spin_unlock_irqrestore(&ha->hardware_lock, flags); 225 226 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 227 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 228 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 229 complete(&ha->mbx_intr_comp); 230 } 231 232 return (IRQ_HANDLED); 233 } 234 235 /** 236 * qla2x00_mbx_completion() - Process mailbox command completions. 237 * @ha: SCSI driver HA context 238 * @mb0: Mailbox0 register 239 */ 240 static void 241 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 242 { 243 uint16_t cnt; 244 uint32_t mboxes; 245 uint16_t __iomem *wptr; 246 struct qla_hw_data *ha = vha->hw; 247 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 248 249 /* Read all mbox registers? */ 250 mboxes = (1 << ha->mbx_count) - 1; 251 if (!ha->mcp) 252 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); 253 else 254 mboxes = ha->mcp->in_mb; 255 256 /* Load return mailbox registers. */ 257 ha->flags.mbox_int = 1; 258 ha->mailbox_out[0] = mb0; 259 mboxes >>= 1; 260 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); 261 262 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 263 if (IS_QLA2200(ha) && cnt == 8) 264 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); 265 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) 266 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 267 else if (mboxes & BIT_0) 268 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 269 270 wptr++; 271 mboxes >>= 1; 272 } 273 } 274 275 static void 276 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) 277 { 278 static char *event[] = 279 { "Complete", "Request Notification", "Time Extension" }; 280 int rval; 281 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; 282 uint16_t __iomem *wptr; 283 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; 284 285 /* Seed data -- mailbox1 -> mailbox7. */ 286 wptr = (uint16_t __iomem *)®24->mailbox1; 287 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 288 mb[cnt] = RD_REG_WORD(wptr); 289 290 ql_dbg(ql_dbg_async, vha, 0x5021, 291 "Inter-Driver Communication %s -- " 292 "%04x %04x %04x %04x %04x %04x %04x.\n", 293 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 294 mb[4], mb[5], mb[6]); 295 if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) { 296 vha->hw->flags.idc_compl_status = 1; 297 if (vha->hw->notify_dcbx_comp) 298 complete(&vha->hw->dcbx_comp); 299 } 300 301 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 302 timeout = (descr >> 8) & 0xf; 303 if (aen != MBA_IDC_NOTIFY || !timeout) 304 return; 305 306 ql_dbg(ql_dbg_async, vha, 0x5022, 307 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", 308 vha->host_no, event[aen & 0xff], timeout); 309 310 rval = qla2x00_post_idc_ack_work(vha, mb); 311 if (rval != QLA_SUCCESS) 312 ql_log(ql_log_warn, vha, 0x5023, 313 "IDC failed to post ACK.\n"); 314 } 315 316 #define LS_UNKNOWN 2 317 const char * 318 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) 319 { 320 static const char * const link_speeds[] = { 321 "1", "2", "?", "4", "8", "16", "10" 322 }; 323 324 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 325 return link_speeds[0]; 326 else if (speed == 0x13) 327 return link_speeds[6]; 328 else if (speed < 6) 329 return link_speeds[speed]; 330 else 331 return link_speeds[LS_UNKNOWN]; 332 } 333 334 static void 335 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 336 { 337 struct qla_hw_data *ha = vha->hw; 338 339 /* 340 * 8200 AEN Interpretation: 341 * mb[0] = AEN code 342 * mb[1] = AEN Reason code 343 * mb[2] = LSW of Peg-Halt Status-1 Register 344 * mb[6] = MSW of Peg-Halt Status-1 Register 345 * mb[3] = LSW of Peg-Halt Status-2 register 346 * mb[7] = MSW of Peg-Halt Status-2 register 347 * mb[4] = IDC Device-State Register value 348 * mb[5] = IDC Driver-Presence Register value 349 */ 350 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " 351 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", 352 mb[0], mb[1], mb[2], mb[6]); 353 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " 354 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " 355 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); 356 357 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | 358 IDC_HEARTBEAT_FAILURE)) { 359 ha->flags.nic_core_hung = 1; 360 ql_log(ql_log_warn, vha, 0x5060, 361 "83XX: F/W Error Reported: Check if reset required.\n"); 362 363 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { 364 uint32_t protocol_engine_id, fw_err_code, err_level; 365 366 /* 367 * IDC_PEG_HALT_STATUS_CHANGE interpretation: 368 * - PEG-Halt Status-1 Register: 369 * (LSW = mb[2], MSW = mb[6]) 370 * Bits 0-7 = protocol-engine ID 371 * Bits 8-28 = f/w error code 372 * Bits 29-31 = Error-level 373 * Error-level 0x1 = Non-Fatal error 374 * Error-level 0x2 = Recoverable Fatal error 375 * Error-level 0x4 = UnRecoverable Fatal error 376 * - PEG-Halt Status-2 Register: 377 * (LSW = mb[3], MSW = mb[7]) 378 */ 379 protocol_engine_id = (mb[2] & 0xff); 380 fw_err_code = (((mb[2] & 0xff00) >> 8) | 381 ((mb[6] & 0x1fff) << 8)); 382 err_level = ((mb[6] & 0xe000) >> 13); 383 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " 384 "Register: protocol_engine_id=0x%x " 385 "fw_err_code=0x%x err_level=0x%x.\n", 386 protocol_engine_id, fw_err_code, err_level); 387 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " 388 "Register: 0x%x%x.\n", mb[7], mb[3]); 389 if (err_level == ERR_LEVEL_NON_FATAL) { 390 ql_log(ql_log_warn, vha, 0x5063, 391 "Not a fatal error, f/w has recovered " 392 "iteself.\n"); 393 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { 394 ql_log(ql_log_fatal, vha, 0x5064, 395 "Recoverable Fatal error: Chip reset " 396 "required.\n"); 397 qla83xx_schedule_work(vha, 398 QLA83XX_NIC_CORE_RESET); 399 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 400 ql_log(ql_log_fatal, vha, 0x5065, 401 "Unrecoverable Fatal error: Set FAILED " 402 "state, reboot required.\n"); 403 qla83xx_schedule_work(vha, 404 QLA83XX_NIC_CORE_UNRECOVERABLE); 405 } 406 } 407 408 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 409 uint16_t peg_fw_state, nw_interface_link_up; 410 uint16_t nw_interface_signal_detect, sfp_status; 411 uint16_t htbt_counter, htbt_monitor_enable; 412 uint16_t sfp_additonal_info, sfp_multirate; 413 uint16_t sfp_tx_fault, link_speed, dcbx_status; 414 415 /* 416 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 417 * - PEG-to-FC Status Register: 418 * (LSW = mb[2], MSW = mb[6]) 419 * Bits 0-7 = Peg-Firmware state 420 * Bit 8 = N/W Interface Link-up 421 * Bit 9 = N/W Interface signal detected 422 * Bits 10-11 = SFP Status 423 * SFP Status 0x0 = SFP+ transceiver not expected 424 * SFP Status 0x1 = SFP+ transceiver not present 425 * SFP Status 0x2 = SFP+ transceiver invalid 426 * SFP Status 0x3 = SFP+ transceiver present and 427 * valid 428 * Bits 12-14 = Heartbeat Counter 429 * Bit 15 = Heartbeat Monitor Enable 430 * Bits 16-17 = SFP Additional Info 431 * SFP info 0x0 = Unregocnized transceiver for 432 * Ethernet 433 * SFP info 0x1 = SFP+ brand validation failed 434 * SFP info 0x2 = SFP+ speed validation failed 435 * SFP info 0x3 = SFP+ access error 436 * Bit 18 = SFP Multirate 437 * Bit 19 = SFP Tx Fault 438 * Bits 20-22 = Link Speed 439 * Bits 23-27 = Reserved 440 * Bits 28-30 = DCBX Status 441 * DCBX Status 0x0 = DCBX Disabled 442 * DCBX Status 0x1 = DCBX Enabled 443 * DCBX Status 0x2 = DCBX Exchange error 444 * Bit 31 = Reserved 445 */ 446 peg_fw_state = (mb[2] & 0x00ff); 447 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 448 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 449 sfp_status = ((mb[2] & 0x0c00) >> 10); 450 htbt_counter = ((mb[2] & 0x7000) >> 12); 451 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 452 sfp_additonal_info = (mb[6] & 0x0003); 453 sfp_multirate = ((mb[6] & 0x0004) >> 2); 454 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 455 link_speed = ((mb[6] & 0x0070) >> 4); 456 dcbx_status = ((mb[6] & 0x7000) >> 12); 457 458 ql_log(ql_log_warn, vha, 0x5066, 459 "Peg-to-Fc Status Register:\n" 460 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 461 "nw_interface_signal_detect=0x%x" 462 "\nsfp_statis=0x%x.\n ", peg_fw_state, 463 nw_interface_link_up, nw_interface_signal_detect, 464 sfp_status); 465 ql_log(ql_log_warn, vha, 0x5067, 466 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 467 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ", 468 htbt_counter, htbt_monitor_enable, 469 sfp_additonal_info, sfp_multirate); 470 ql_log(ql_log_warn, vha, 0x5068, 471 "sfp_tx_fault=0x%x, link_state=0x%x, " 472 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 473 dcbx_status); 474 475 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 476 } 477 478 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 479 ql_log(ql_log_warn, vha, 0x5069, 480 "Heartbeat Failure encountered, chip reset " 481 "required.\n"); 482 483 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 484 } 485 } 486 487 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 488 ql_log(ql_log_info, vha, 0x506a, 489 "IDC Device-State changed = 0x%x.\n", mb[4]); 490 if (ha->flags.nic_core_reset_owner) 491 return; 492 qla83xx_schedule_work(vha, MBA_IDC_AEN); 493 } 494 } 495 496 int 497 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) 498 { 499 struct qla_hw_data *ha = vha->hw; 500 scsi_qla_host_t *vp; 501 uint32_t vp_did; 502 unsigned long flags; 503 int ret = 0; 504 505 if (!ha->num_vhosts) 506 return ret; 507 508 spin_lock_irqsave(&ha->vport_slock, flags); 509 list_for_each_entry(vp, &ha->vp_list, list) { 510 vp_did = vp->d_id.b24; 511 if (vp_did == rscn_entry) { 512 ret = 1; 513 break; 514 } 515 } 516 spin_unlock_irqrestore(&ha->vport_slock, flags); 517 518 return ret; 519 } 520 521 /** 522 * qla2x00_async_event() - Process aynchronous events. 523 * @ha: SCSI driver HA context 524 * @mb: Mailbox registers (0 - 3) 525 */ 526 void 527 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 528 { 529 uint16_t handle_cnt; 530 uint16_t cnt, mbx; 531 uint32_t handles[5]; 532 struct qla_hw_data *ha = vha->hw; 533 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 534 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 535 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 536 uint32_t rscn_entry, host_pid; 537 unsigned long flags; 538 539 /* Setup to process RIO completion. */ 540 handle_cnt = 0; 541 if (IS_CNA_CAPABLE(ha)) 542 goto skip_rio; 543 switch (mb[0]) { 544 case MBA_SCSI_COMPLETION: 545 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 546 handle_cnt = 1; 547 break; 548 case MBA_CMPLT_1_16BIT: 549 handles[0] = mb[1]; 550 handle_cnt = 1; 551 mb[0] = MBA_SCSI_COMPLETION; 552 break; 553 case MBA_CMPLT_2_16BIT: 554 handles[0] = mb[1]; 555 handles[1] = mb[2]; 556 handle_cnt = 2; 557 mb[0] = MBA_SCSI_COMPLETION; 558 break; 559 case MBA_CMPLT_3_16BIT: 560 handles[0] = mb[1]; 561 handles[1] = mb[2]; 562 handles[2] = mb[3]; 563 handle_cnt = 3; 564 mb[0] = MBA_SCSI_COMPLETION; 565 break; 566 case MBA_CMPLT_4_16BIT: 567 handles[0] = mb[1]; 568 handles[1] = mb[2]; 569 handles[2] = mb[3]; 570 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 571 handle_cnt = 4; 572 mb[0] = MBA_SCSI_COMPLETION; 573 break; 574 case MBA_CMPLT_5_16BIT: 575 handles[0] = mb[1]; 576 handles[1] = mb[2]; 577 handles[2] = mb[3]; 578 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 579 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 580 handle_cnt = 5; 581 mb[0] = MBA_SCSI_COMPLETION; 582 break; 583 case MBA_CMPLT_2_32BIT: 584 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 585 handles[1] = le32_to_cpu( 586 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | 587 RD_MAILBOX_REG(ha, reg, 6)); 588 handle_cnt = 2; 589 mb[0] = MBA_SCSI_COMPLETION; 590 break; 591 default: 592 break; 593 } 594 skip_rio: 595 switch (mb[0]) { 596 case MBA_SCSI_COMPLETION: /* Fast Post */ 597 if (!vha->flags.online) 598 break; 599 600 for (cnt = 0; cnt < handle_cnt; cnt++) 601 qla2x00_process_completed_request(vha, rsp->req, 602 handles[cnt]); 603 break; 604 605 case MBA_RESET: /* Reset */ 606 ql_dbg(ql_dbg_async, vha, 0x5002, 607 "Asynchronous RESET.\n"); 608 609 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 610 break; 611 612 case MBA_SYSTEM_ERR: /* System Error */ 613 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ? 614 RD_REG_WORD(®24->mailbox7) : 0; 615 ql_log(ql_log_warn, vha, 0x5003, 616 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 617 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 618 619 ha->isp_ops->fw_dump(vha, 1); 620 621 if (IS_FWI2_CAPABLE(ha)) { 622 if (mb[1] == 0 && mb[2] == 0) { 623 ql_log(ql_log_fatal, vha, 0x5004, 624 "Unrecoverable Hardware Error: adapter " 625 "marked OFFLINE!\n"); 626 vha->flags.online = 0; 627 vha->device_flags |= DFLG_DEV_FAILED; 628 } else { 629 /* Check to see if MPI timeout occurred */ 630 if ((mbx & MBX_3) && (ha->flags.port0)) 631 set_bit(MPI_RESET_NEEDED, 632 &vha->dpc_flags); 633 634 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 635 } 636 } else if (mb[1] == 0) { 637 ql_log(ql_log_fatal, vha, 0x5005, 638 "Unrecoverable Hardware Error: adapter marked " 639 "OFFLINE!\n"); 640 vha->flags.online = 0; 641 vha->device_flags |= DFLG_DEV_FAILED; 642 } else 643 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 644 break; 645 646 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 647 ql_log(ql_log_warn, vha, 0x5006, 648 "ISP Request Transfer Error (%x).\n", mb[1]); 649 650 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 651 break; 652 653 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 654 ql_log(ql_log_warn, vha, 0x5007, 655 "ISP Response Transfer Error.\n"); 656 657 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 658 break; 659 660 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 661 ql_dbg(ql_dbg_async, vha, 0x5008, 662 "Asynchronous WAKEUP_THRES.\n"); 663 664 break; 665 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 666 ql_dbg(ql_dbg_async, vha, 0x5009, 667 "LIP occurred (%x).\n", mb[1]); 668 669 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 670 atomic_set(&vha->loop_state, LOOP_DOWN); 671 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 672 qla2x00_mark_all_devices_lost(vha, 1); 673 } 674 675 if (vha->vp_idx) { 676 atomic_set(&vha->vp_state, VP_FAILED); 677 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 678 } 679 680 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 681 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 682 683 vha->flags.management_server_logged_in = 0; 684 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 685 break; 686 687 case MBA_LOOP_UP: /* Loop Up Event */ 688 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 689 ha->link_data_rate = PORT_SPEED_1GB; 690 else 691 ha->link_data_rate = mb[1]; 692 693 ql_dbg(ql_dbg_async, vha, 0x500a, 694 "LOOP UP detected (%s Gbps).\n", 695 qla2x00_get_link_speed_str(ha, ha->link_data_rate)); 696 697 vha->flags.management_server_logged_in = 0; 698 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 699 break; 700 701 case MBA_LOOP_DOWN: /* Loop Down Event */ 702 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) 703 ? RD_REG_WORD(®24->mailbox4) : 0; 704 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(®82->mailbox_out[4]) : mbx; 705 ql_dbg(ql_dbg_async, vha, 0x500b, 706 "LOOP DOWN detected (%x %x %x %x).\n", 707 mb[1], mb[2], mb[3], mbx); 708 709 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 710 atomic_set(&vha->loop_state, LOOP_DOWN); 711 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 712 vha->device_flags |= DFLG_NO_CABLE; 713 qla2x00_mark_all_devices_lost(vha, 1); 714 } 715 716 if (vha->vp_idx) { 717 atomic_set(&vha->vp_state, VP_FAILED); 718 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 719 } 720 721 vha->flags.management_server_logged_in = 0; 722 ha->link_data_rate = PORT_SPEED_UNKNOWN; 723 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 724 break; 725 726 case MBA_LIP_RESET: /* LIP reset occurred */ 727 ql_dbg(ql_dbg_async, vha, 0x500c, 728 "LIP reset occurred (%x).\n", mb[1]); 729 730 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 731 atomic_set(&vha->loop_state, LOOP_DOWN); 732 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 733 qla2x00_mark_all_devices_lost(vha, 1); 734 } 735 736 if (vha->vp_idx) { 737 atomic_set(&vha->vp_state, VP_FAILED); 738 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 739 } 740 741 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 742 743 ha->operating_mode = LOOP; 744 vha->flags.management_server_logged_in = 0; 745 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 746 break; 747 748 /* case MBA_DCBX_COMPLETE: */ 749 case MBA_POINT_TO_POINT: /* Point-to-Point */ 750 if (IS_QLA2100(ha)) 751 break; 752 753 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) { 754 ql_dbg(ql_dbg_async, vha, 0x500d, 755 "DCBX Completed -- %04x %04x %04x.\n", 756 mb[1], mb[2], mb[3]); 757 if (ha->notify_dcbx_comp) 758 complete(&ha->dcbx_comp); 759 760 } else 761 ql_dbg(ql_dbg_async, vha, 0x500e, 762 "Asynchronous P2P MODE received.\n"); 763 764 /* 765 * Until there's a transition from loop down to loop up, treat 766 * this as loop down only. 767 */ 768 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 769 atomic_set(&vha->loop_state, LOOP_DOWN); 770 if (!atomic_read(&vha->loop_down_timer)) 771 atomic_set(&vha->loop_down_timer, 772 LOOP_DOWN_TIME); 773 qla2x00_mark_all_devices_lost(vha, 1); 774 } 775 776 if (vha->vp_idx) { 777 atomic_set(&vha->vp_state, VP_FAILED); 778 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 779 } 780 781 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 782 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 783 784 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 785 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 786 787 ha->flags.gpsc_supported = 1; 788 vha->flags.management_server_logged_in = 0; 789 break; 790 791 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 792 if (IS_QLA2100(ha)) 793 break; 794 795 ql_dbg(ql_dbg_async, vha, 0x500f, 796 "Configuration change detected: value=%x.\n", mb[1]); 797 798 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 799 atomic_set(&vha->loop_state, LOOP_DOWN); 800 if (!atomic_read(&vha->loop_down_timer)) 801 atomic_set(&vha->loop_down_timer, 802 LOOP_DOWN_TIME); 803 qla2x00_mark_all_devices_lost(vha, 1); 804 } 805 806 if (vha->vp_idx) { 807 atomic_set(&vha->vp_state, VP_FAILED); 808 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 809 } 810 811 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 812 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 813 break; 814 815 case MBA_PORT_UPDATE: /* Port database update */ 816 /* 817 * Handle only global and vn-port update events 818 * 819 * Relevant inputs: 820 * mb[1] = N_Port handle of changed port 821 * OR 0xffff for global event 822 * mb[2] = New login state 823 * 7 = Port logged out 824 * mb[3] = LSB is vp_idx, 0xff = all vps 825 * 826 * Skip processing if: 827 * Event is global, vp_idx is NOT all vps, 828 * vp_idx does not match 829 * Event is not global, vp_idx does not match 830 */ 831 if (IS_QLA2XXX_MIDTYPE(ha) && 832 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 833 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 834 break; 835 836 /* Global event -- port logout or port unavailable. */ 837 if (mb[1] == 0xffff && mb[2] == 0x7) { 838 ql_dbg(ql_dbg_async, vha, 0x5010, 839 "Port unavailable %04x %04x %04x.\n", 840 mb[1], mb[2], mb[3]); 841 ql_log(ql_log_warn, vha, 0x505e, 842 "Link is offline.\n"); 843 844 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 845 atomic_set(&vha->loop_state, LOOP_DOWN); 846 atomic_set(&vha->loop_down_timer, 847 LOOP_DOWN_TIME); 848 vha->device_flags |= DFLG_NO_CABLE; 849 qla2x00_mark_all_devices_lost(vha, 1); 850 } 851 852 if (vha->vp_idx) { 853 atomic_set(&vha->vp_state, VP_FAILED); 854 fc_vport_set_state(vha->fc_vport, 855 FC_VPORT_FAILED); 856 qla2x00_mark_all_devices_lost(vha, 1); 857 } 858 859 vha->flags.management_server_logged_in = 0; 860 ha->link_data_rate = PORT_SPEED_UNKNOWN; 861 break; 862 } 863 864 /* 865 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 866 * event etc. earlier indicating loop is down) then process 867 * it. Otherwise ignore it and Wait for RSCN to come in. 868 */ 869 atomic_set(&vha->loop_down_timer, 0); 870 if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) { 871 ql_dbg(ql_dbg_async, vha, 0x5011, 872 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 873 mb[1], mb[2], mb[3]); 874 875 qlt_async_event(mb[0], vha, mb); 876 break; 877 } 878 879 ql_dbg(ql_dbg_async, vha, 0x5012, 880 "Port database changed %04x %04x %04x.\n", 881 mb[1], mb[2], mb[3]); 882 ql_log(ql_log_warn, vha, 0x505f, 883 "Link is operational (%s Gbps).\n", 884 qla2x00_get_link_speed_str(ha, ha->link_data_rate)); 885 886 /* 887 * Mark all devices as missing so we will login again. 888 */ 889 atomic_set(&vha->loop_state, LOOP_UP); 890 891 qla2x00_mark_all_devices_lost(vha, 1); 892 893 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha)) 894 set_bit(SCR_PENDING, &vha->dpc_flags); 895 896 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 897 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 898 899 qlt_async_event(mb[0], vha, mb); 900 break; 901 902 case MBA_RSCN_UPDATE: /* State Change Registration */ 903 /* Check if the Vport has issued a SCR */ 904 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 905 break; 906 /* Only handle SCNs for our Vport index. */ 907 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 908 break; 909 910 ql_dbg(ql_dbg_async, vha, 0x5013, 911 "RSCN database changed -- %04x %04x %04x.\n", 912 mb[1], mb[2], mb[3]); 913 914 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 915 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 916 | vha->d_id.b.al_pa; 917 if (rscn_entry == host_pid) { 918 ql_dbg(ql_dbg_async, vha, 0x5014, 919 "Ignoring RSCN update to local host " 920 "port ID (%06x).\n", host_pid); 921 break; 922 } 923 924 /* Ignore reserved bits from RSCN-payload. */ 925 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 926 927 /* Skip RSCNs for virtual ports on the same physical port */ 928 if (qla2x00_is_a_vp_did(vha, rscn_entry)) 929 break; 930 931 atomic_set(&vha->loop_down_timer, 0); 932 vha->flags.management_server_logged_in = 0; 933 934 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 935 set_bit(RSCN_UPDATE, &vha->dpc_flags); 936 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 937 break; 938 939 /* case MBA_RIO_RESPONSE: */ 940 case MBA_ZIO_RESPONSE: 941 ql_dbg(ql_dbg_async, vha, 0x5015, 942 "[R|Z]IO update completion.\n"); 943 944 if (IS_FWI2_CAPABLE(ha)) 945 qla24xx_process_response_queue(vha, rsp); 946 else 947 qla2x00_process_response_queue(rsp); 948 break; 949 950 case MBA_DISCARD_RND_FRAME: 951 ql_dbg(ql_dbg_async, vha, 0x5016, 952 "Discard RND Frame -- %04x %04x %04x.\n", 953 mb[1], mb[2], mb[3]); 954 break; 955 956 case MBA_TRACE_NOTIFICATION: 957 ql_dbg(ql_dbg_async, vha, 0x5017, 958 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 959 break; 960 961 case MBA_ISP84XX_ALERT: 962 ql_dbg(ql_dbg_async, vha, 0x5018, 963 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 964 mb[1], mb[2], mb[3]); 965 966 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 967 switch (mb[1]) { 968 case A84_PANIC_RECOVERY: 969 ql_log(ql_log_info, vha, 0x5019, 970 "Alert 84XX: panic recovery %04x %04x.\n", 971 mb[2], mb[3]); 972 break; 973 case A84_OP_LOGIN_COMPLETE: 974 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 975 ql_log(ql_log_info, vha, 0x501a, 976 "Alert 84XX: firmware version %x.\n", 977 ha->cs84xx->op_fw_version); 978 break; 979 case A84_DIAG_LOGIN_COMPLETE: 980 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 981 ql_log(ql_log_info, vha, 0x501b, 982 "Alert 84XX: diagnostic firmware version %x.\n", 983 ha->cs84xx->diag_fw_version); 984 break; 985 case A84_GOLD_LOGIN_COMPLETE: 986 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 987 ha->cs84xx->fw_update = 1; 988 ql_log(ql_log_info, vha, 0x501c, 989 "Alert 84XX: gold firmware version %x.\n", 990 ha->cs84xx->gold_fw_version); 991 break; 992 default: 993 ql_log(ql_log_warn, vha, 0x501d, 994 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 995 mb[1], mb[2], mb[3]); 996 } 997 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 998 break; 999 case MBA_DCBX_START: 1000 ql_dbg(ql_dbg_async, vha, 0x501e, 1001 "DCBX Started -- %04x %04x %04x.\n", 1002 mb[1], mb[2], mb[3]); 1003 break; 1004 case MBA_DCBX_PARAM_UPDATE: 1005 ql_dbg(ql_dbg_async, vha, 0x501f, 1006 "DCBX Parameters Updated -- %04x %04x %04x.\n", 1007 mb[1], mb[2], mb[3]); 1008 break; 1009 case MBA_FCF_CONF_ERR: 1010 ql_dbg(ql_dbg_async, vha, 0x5020, 1011 "FCF Configuration Error -- %04x %04x %04x.\n", 1012 mb[1], mb[2], mb[3]); 1013 break; 1014 case MBA_IDC_NOTIFY: 1015 if (IS_QLA8031(vha->hw)) { 1016 mb[4] = RD_REG_WORD(®24->mailbox4); 1017 if (((mb[2] & 0x7fff) == MBC_PORT_RESET || 1018 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && 1019 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { 1020 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 1021 /* 1022 * Extend loop down timer since port is active. 1023 */ 1024 if (atomic_read(&vha->loop_state) == LOOP_DOWN) 1025 atomic_set(&vha->loop_down_timer, 1026 LOOP_DOWN_TIME); 1027 qla2xxx_wake_dpc(vha); 1028 } 1029 } 1030 case MBA_IDC_COMPLETE: 1031 if (ha->notify_lb_portup_comp) 1032 complete(&ha->lb_portup_comp); 1033 /* Fallthru */ 1034 case MBA_IDC_TIME_EXT: 1035 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) 1036 qla81xx_idc_event(vha, mb[0], mb[1]); 1037 break; 1038 1039 case MBA_IDC_AEN: 1040 mb[4] = RD_REG_WORD(®24->mailbox4); 1041 mb[5] = RD_REG_WORD(®24->mailbox5); 1042 mb[6] = RD_REG_WORD(®24->mailbox6); 1043 mb[7] = RD_REG_WORD(®24->mailbox7); 1044 qla83xx_handle_8200_aen(vha, mb); 1045 break; 1046 1047 default: 1048 ql_dbg(ql_dbg_async, vha, 0x5057, 1049 "Unknown AEN:%04x %04x %04x %04x\n", 1050 mb[0], mb[1], mb[2], mb[3]); 1051 } 1052 1053 qlt_async_event(mb[0], vha, mb); 1054 1055 if (!vha->vp_idx && ha->num_vhosts) 1056 qla2x00_alert_all_vps(rsp, mb); 1057 } 1058 1059 /** 1060 * qla2x00_process_completed_request() - Process a Fast Post response. 1061 * @ha: SCSI driver HA context 1062 * @index: SRB index 1063 */ 1064 void 1065 qla2x00_process_completed_request(struct scsi_qla_host *vha, 1066 struct req_que *req, uint32_t index) 1067 { 1068 srb_t *sp; 1069 struct qla_hw_data *ha = vha->hw; 1070 1071 /* Validate handle. */ 1072 if (index >= req->num_outstanding_cmds) { 1073 ql_log(ql_log_warn, vha, 0x3014, 1074 "Invalid SCSI command index (%x).\n", index); 1075 1076 if (IS_QLA82XX(ha)) 1077 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1078 else 1079 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1080 return; 1081 } 1082 1083 sp = req->outstanding_cmds[index]; 1084 if (sp) { 1085 /* Free outstanding command slot. */ 1086 req->outstanding_cmds[index] = NULL; 1087 1088 /* Save ISP completion status */ 1089 sp->done(ha, sp, DID_OK << 16); 1090 } else { 1091 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 1092 1093 if (IS_QLA82XX(ha)) 1094 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1095 else 1096 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1097 } 1098 } 1099 1100 srb_t * 1101 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 1102 struct req_que *req, void *iocb) 1103 { 1104 struct qla_hw_data *ha = vha->hw; 1105 sts_entry_t *pkt = iocb; 1106 srb_t *sp = NULL; 1107 uint16_t index; 1108 1109 index = LSW(pkt->handle); 1110 if (index >= req->num_outstanding_cmds) { 1111 ql_log(ql_log_warn, vha, 0x5031, 1112 "Invalid command index (%x).\n", index); 1113 if (IS_QLA82XX(ha)) 1114 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1115 else 1116 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1117 goto done; 1118 } 1119 sp = req->outstanding_cmds[index]; 1120 if (!sp) { 1121 ql_log(ql_log_warn, vha, 0x5032, 1122 "Invalid completion handle (%x) -- timed-out.\n", index); 1123 return sp; 1124 } 1125 if (sp->handle != index) { 1126 ql_log(ql_log_warn, vha, 0x5033, 1127 "SRB handle (%x) mismatch %x.\n", sp->handle, index); 1128 return NULL; 1129 } 1130 1131 req->outstanding_cmds[index] = NULL; 1132 1133 done: 1134 return sp; 1135 } 1136 1137 static void 1138 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1139 struct mbx_entry *mbx) 1140 { 1141 const char func[] = "MBX-IOCB"; 1142 const char *type; 1143 fc_port_t *fcport; 1144 srb_t *sp; 1145 struct srb_iocb *lio; 1146 uint16_t *data; 1147 uint16_t status; 1148 1149 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 1150 if (!sp) 1151 return; 1152 1153 lio = &sp->u.iocb_cmd; 1154 type = sp->name; 1155 fcport = sp->fcport; 1156 data = lio->u.logio.data; 1157 1158 data[0] = MBS_COMMAND_ERROR; 1159 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1160 QLA_LOGIO_LOGIN_RETRIED : 0; 1161 if (mbx->entry_status) { 1162 ql_dbg(ql_dbg_async, vha, 0x5043, 1163 "Async-%s error entry - hdl=%x portid=%02x%02x%02x " 1164 "entry-status=%x status=%x state-flag=%x " 1165 "status-flags=%x.\n", type, sp->handle, 1166 fcport->d_id.b.domain, fcport->d_id.b.area, 1167 fcport->d_id.b.al_pa, mbx->entry_status, 1168 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 1169 le16_to_cpu(mbx->status_flags)); 1170 1171 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, 1172 (uint8_t *)mbx, sizeof(*mbx)); 1173 1174 goto logio_done; 1175 } 1176 1177 status = le16_to_cpu(mbx->status); 1178 if (status == 0x30 && sp->type == SRB_LOGIN_CMD && 1179 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 1180 status = 0; 1181 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 1182 ql_dbg(ql_dbg_async, vha, 0x5045, 1183 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", 1184 type, sp->handle, fcport->d_id.b.domain, 1185 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1186 le16_to_cpu(mbx->mb1)); 1187 1188 data[0] = MBS_COMMAND_COMPLETE; 1189 if (sp->type == SRB_LOGIN_CMD) { 1190 fcport->port_type = FCT_TARGET; 1191 if (le16_to_cpu(mbx->mb1) & BIT_0) 1192 fcport->port_type = FCT_INITIATOR; 1193 else if (le16_to_cpu(mbx->mb1) & BIT_1) 1194 fcport->flags |= FCF_FCP2_DEVICE; 1195 } 1196 goto logio_done; 1197 } 1198 1199 data[0] = le16_to_cpu(mbx->mb0); 1200 switch (data[0]) { 1201 case MBS_PORT_ID_USED: 1202 data[1] = le16_to_cpu(mbx->mb1); 1203 break; 1204 case MBS_LOOP_ID_USED: 1205 break; 1206 default: 1207 data[0] = MBS_COMMAND_ERROR; 1208 break; 1209 } 1210 1211 ql_log(ql_log_warn, vha, 0x5046, 1212 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " 1213 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, 1214 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 1215 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 1216 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 1217 le16_to_cpu(mbx->mb7)); 1218 1219 logio_done: 1220 sp->done(vha, sp, 0); 1221 } 1222 1223 static void 1224 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1225 sts_entry_t *pkt, int iocb_type) 1226 { 1227 const char func[] = "CT_IOCB"; 1228 const char *type; 1229 srb_t *sp; 1230 struct fc_bsg_job *bsg_job; 1231 uint16_t comp_status; 1232 int res; 1233 1234 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1235 if (!sp) 1236 return; 1237 1238 bsg_job = sp->u.bsg_job; 1239 1240 type = "ct pass-through"; 1241 1242 comp_status = le16_to_cpu(pkt->comp_status); 1243 1244 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1245 * fc payload to the caller 1246 */ 1247 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1248 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1249 1250 if (comp_status != CS_COMPLETE) { 1251 if (comp_status == CS_DATA_UNDERRUN) { 1252 res = DID_OK << 16; 1253 bsg_job->reply->reply_payload_rcv_len = 1254 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1255 1256 ql_log(ql_log_warn, vha, 0x5048, 1257 "CT pass-through-%s error " 1258 "comp_status-status=0x%x total_byte = 0x%x.\n", 1259 type, comp_status, 1260 bsg_job->reply->reply_payload_rcv_len); 1261 } else { 1262 ql_log(ql_log_warn, vha, 0x5049, 1263 "CT pass-through-%s error " 1264 "comp_status-status=0x%x.\n", type, comp_status); 1265 res = DID_ERROR << 16; 1266 bsg_job->reply->reply_payload_rcv_len = 0; 1267 } 1268 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1269 (uint8_t *)pkt, sizeof(*pkt)); 1270 } else { 1271 res = DID_OK << 16; 1272 bsg_job->reply->reply_payload_rcv_len = 1273 bsg_job->reply_payload.payload_len; 1274 bsg_job->reply_len = 0; 1275 } 1276 1277 sp->done(vha, sp, res); 1278 } 1279 1280 static void 1281 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1282 struct sts_entry_24xx *pkt, int iocb_type) 1283 { 1284 const char func[] = "ELS_CT_IOCB"; 1285 const char *type; 1286 srb_t *sp; 1287 struct fc_bsg_job *bsg_job; 1288 uint16_t comp_status; 1289 uint32_t fw_status[3]; 1290 uint8_t* fw_sts_ptr; 1291 int res; 1292 1293 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1294 if (!sp) 1295 return; 1296 bsg_job = sp->u.bsg_job; 1297 1298 type = NULL; 1299 switch (sp->type) { 1300 case SRB_ELS_CMD_RPT: 1301 case SRB_ELS_CMD_HST: 1302 type = "els"; 1303 break; 1304 case SRB_CT_CMD: 1305 type = "ct pass-through"; 1306 break; 1307 default: 1308 ql_dbg(ql_dbg_user, vha, 0x503e, 1309 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); 1310 return; 1311 } 1312 1313 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 1314 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1); 1315 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2); 1316 1317 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1318 * fc payload to the caller 1319 */ 1320 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1321 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 1322 1323 if (comp_status != CS_COMPLETE) { 1324 if (comp_status == CS_DATA_UNDERRUN) { 1325 res = DID_OK << 16; 1326 bsg_job->reply->reply_payload_rcv_len = 1327 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count); 1328 1329 ql_dbg(ql_dbg_user, vha, 0x503f, 1330 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1331 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1332 type, sp->handle, comp_status, fw_status[1], fw_status[2], 1333 le16_to_cpu(((struct els_sts_entry_24xx *) 1334 pkt)->total_byte_count)); 1335 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1336 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1337 } 1338 else { 1339 ql_dbg(ql_dbg_user, vha, 0x5040, 1340 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1341 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1342 type, sp->handle, comp_status, 1343 le16_to_cpu(((struct els_sts_entry_24xx *) 1344 pkt)->error_subcode_1), 1345 le16_to_cpu(((struct els_sts_entry_24xx *) 1346 pkt)->error_subcode_2)); 1347 res = DID_ERROR << 16; 1348 bsg_job->reply->reply_payload_rcv_len = 0; 1349 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1350 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1351 } 1352 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 1353 (uint8_t *)pkt, sizeof(*pkt)); 1354 } 1355 else { 1356 res = DID_OK << 16; 1357 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 1358 bsg_job->reply_len = 0; 1359 } 1360 1361 sp->done(vha, sp, res); 1362 } 1363 1364 static void 1365 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 1366 struct logio_entry_24xx *logio) 1367 { 1368 const char func[] = "LOGIO-IOCB"; 1369 const char *type; 1370 fc_port_t *fcport; 1371 srb_t *sp; 1372 struct srb_iocb *lio; 1373 uint16_t *data; 1374 uint32_t iop[2]; 1375 1376 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 1377 if (!sp) 1378 return; 1379 1380 lio = &sp->u.iocb_cmd; 1381 type = sp->name; 1382 fcport = sp->fcport; 1383 data = lio->u.logio.data; 1384 1385 data[0] = MBS_COMMAND_ERROR; 1386 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1387 QLA_LOGIO_LOGIN_RETRIED : 0; 1388 if (logio->entry_status) { 1389 ql_log(ql_log_warn, fcport->vha, 0x5034, 1390 "Async-%s error entry - hdl=%x" 1391 "portid=%02x%02x%02x entry-status=%x.\n", 1392 type, sp->handle, fcport->d_id.b.domain, 1393 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1394 logio->entry_status); 1395 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, 1396 (uint8_t *)logio, sizeof(*logio)); 1397 1398 goto logio_done; 1399 } 1400 1401 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1402 ql_dbg(ql_dbg_async, fcport->vha, 0x5036, 1403 "Async-%s complete - hdl=%x portid=%02x%02x%02x " 1404 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1405 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1406 le32_to_cpu(logio->io_parameter[0])); 1407 1408 data[0] = MBS_COMMAND_COMPLETE; 1409 if (sp->type != SRB_LOGIN_CMD) 1410 goto logio_done; 1411 1412 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1413 if (iop[0] & BIT_4) { 1414 fcport->port_type = FCT_TARGET; 1415 if (iop[0] & BIT_8) 1416 fcport->flags |= FCF_FCP2_DEVICE; 1417 } else if (iop[0] & BIT_5) 1418 fcport->port_type = FCT_INITIATOR; 1419 1420 if (iop[0] & BIT_7) 1421 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 1422 1423 if (logio->io_parameter[7] || logio->io_parameter[8]) 1424 fcport->supported_classes |= FC_COS_CLASS2; 1425 if (logio->io_parameter[9] || logio->io_parameter[10]) 1426 fcport->supported_classes |= FC_COS_CLASS3; 1427 1428 goto logio_done; 1429 } 1430 1431 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1432 iop[1] = le32_to_cpu(logio->io_parameter[1]); 1433 switch (iop[0]) { 1434 case LSC_SCODE_PORTID_USED: 1435 data[0] = MBS_PORT_ID_USED; 1436 data[1] = LSW(iop[1]); 1437 break; 1438 case LSC_SCODE_NPORT_USED: 1439 data[0] = MBS_LOOP_ID_USED; 1440 break; 1441 default: 1442 data[0] = MBS_COMMAND_ERROR; 1443 break; 1444 } 1445 1446 ql_dbg(ql_dbg_async, fcport->vha, 0x5037, 1447 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " 1448 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1449 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1450 le16_to_cpu(logio->comp_status), 1451 le32_to_cpu(logio->io_parameter[0]), 1452 le32_to_cpu(logio->io_parameter[1])); 1453 1454 logio_done: 1455 sp->done(vha, sp, 0); 1456 } 1457 1458 static void 1459 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1460 struct tsk_mgmt_entry *tsk) 1461 { 1462 const char func[] = "TMF-IOCB"; 1463 const char *type; 1464 fc_port_t *fcport; 1465 srb_t *sp; 1466 struct srb_iocb *iocb; 1467 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 1468 int error = 1; 1469 1470 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 1471 if (!sp) 1472 return; 1473 1474 iocb = &sp->u.iocb_cmd; 1475 type = sp->name; 1476 fcport = sp->fcport; 1477 1478 if (sts->entry_status) { 1479 ql_log(ql_log_warn, fcport->vha, 0x5038, 1480 "Async-%s error - hdl=%x entry-status(%x).\n", 1481 type, sp->handle, sts->entry_status); 1482 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1483 ql_log(ql_log_warn, fcport->vha, 0x5039, 1484 "Async-%s error - hdl=%x completion status(%x).\n", 1485 type, sp->handle, sts->comp_status); 1486 } else if (!(le16_to_cpu(sts->scsi_status) & 1487 SS_RESPONSE_INFO_LEN_VALID)) { 1488 ql_log(ql_log_warn, fcport->vha, 0x503a, 1489 "Async-%s error - hdl=%x no response info(%x).\n", 1490 type, sp->handle, sts->scsi_status); 1491 } else if (le32_to_cpu(sts->rsp_data_len) < 4) { 1492 ql_log(ql_log_warn, fcport->vha, 0x503b, 1493 "Async-%s error - hdl=%x not enough response(%d).\n", 1494 type, sp->handle, sts->rsp_data_len); 1495 } else if (sts->data[3]) { 1496 ql_log(ql_log_warn, fcport->vha, 0x503c, 1497 "Async-%s error - hdl=%x response(%x).\n", 1498 type, sp->handle, sts->data[3]); 1499 } else { 1500 error = 0; 1501 } 1502 1503 if (error) { 1504 iocb->u.tmf.data = error; 1505 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, 1506 (uint8_t *)sts, sizeof(*sts)); 1507 } 1508 1509 sp->done(vha, sp, 0); 1510 } 1511 1512 /** 1513 * qla2x00_process_response_queue() - Process response queue entries. 1514 * @ha: SCSI driver HA context 1515 */ 1516 void 1517 qla2x00_process_response_queue(struct rsp_que *rsp) 1518 { 1519 struct scsi_qla_host *vha; 1520 struct qla_hw_data *ha = rsp->hw; 1521 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1522 sts_entry_t *pkt; 1523 uint16_t handle_cnt; 1524 uint16_t cnt; 1525 1526 vha = pci_get_drvdata(ha->pdev); 1527 1528 if (!vha->flags.online) 1529 return; 1530 1531 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 1532 pkt = (sts_entry_t *)rsp->ring_ptr; 1533 1534 rsp->ring_index++; 1535 if (rsp->ring_index == rsp->length) { 1536 rsp->ring_index = 0; 1537 rsp->ring_ptr = rsp->ring; 1538 } else { 1539 rsp->ring_ptr++; 1540 } 1541 1542 if (pkt->entry_status != 0) { 1543 qla2x00_error_entry(vha, rsp, pkt); 1544 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1545 wmb(); 1546 continue; 1547 } 1548 1549 switch (pkt->entry_type) { 1550 case STATUS_TYPE: 1551 qla2x00_status_entry(vha, rsp, pkt); 1552 break; 1553 case STATUS_TYPE_21: 1554 handle_cnt = ((sts21_entry_t *)pkt)->handle_count; 1555 for (cnt = 0; cnt < handle_cnt; cnt++) { 1556 qla2x00_process_completed_request(vha, rsp->req, 1557 ((sts21_entry_t *)pkt)->handle[cnt]); 1558 } 1559 break; 1560 case STATUS_TYPE_22: 1561 handle_cnt = ((sts22_entry_t *)pkt)->handle_count; 1562 for (cnt = 0; cnt < handle_cnt; cnt++) { 1563 qla2x00_process_completed_request(vha, rsp->req, 1564 ((sts22_entry_t *)pkt)->handle[cnt]); 1565 } 1566 break; 1567 case STATUS_CONT_TYPE: 1568 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 1569 break; 1570 case MBX_IOCB_TYPE: 1571 qla2x00_mbx_iocb_entry(vha, rsp->req, 1572 (struct mbx_entry *)pkt); 1573 break; 1574 case CT_IOCB_TYPE: 1575 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 1576 break; 1577 default: 1578 /* Type Not Supported. */ 1579 ql_log(ql_log_warn, vha, 0x504a, 1580 "Received unknown response pkt type %x " 1581 "entry status=%x.\n", 1582 pkt->entry_type, pkt->entry_status); 1583 break; 1584 } 1585 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1586 wmb(); 1587 } 1588 1589 /* Adjust ring index */ 1590 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 1591 } 1592 1593 static inline void 1594 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1595 uint32_t sense_len, struct rsp_que *rsp, int res) 1596 { 1597 struct scsi_qla_host *vha = sp->fcport->vha; 1598 struct scsi_cmnd *cp = GET_CMD_SP(sp); 1599 uint32_t track_sense_len; 1600 1601 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1602 sense_len = SCSI_SENSE_BUFFERSIZE; 1603 1604 SET_CMD_SENSE_LEN(sp, sense_len); 1605 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 1606 track_sense_len = sense_len; 1607 1608 if (sense_len > par_sense_len) 1609 sense_len = par_sense_len; 1610 1611 memcpy(cp->sense_buffer, sense_data, sense_len); 1612 1613 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 1614 track_sense_len -= sense_len; 1615 SET_CMD_SENSE_LEN(sp, track_sense_len); 1616 1617 if (track_sense_len != 0) { 1618 rsp->status_srb = sp; 1619 cp->result = res; 1620 } 1621 1622 if (sense_len) { 1623 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 1624 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n", 1625 sp->fcport->vha->host_no, cp->device->id, cp->device->lun, 1626 cp); 1627 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 1628 cp->sense_buffer, sense_len); 1629 } 1630 } 1631 1632 struct scsi_dif_tuple { 1633 __be16 guard; /* Checksum */ 1634 __be16 app_tag; /* APPL identifier */ 1635 __be32 ref_tag; /* Target LBA or indirect LBA */ 1636 }; 1637 1638 /* 1639 * Checks the guard or meta-data for the type of error 1640 * detected by the HBA. In case of errors, we set the 1641 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 1642 * to indicate to the kernel that the HBA detected error. 1643 */ 1644 static inline int 1645 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1646 { 1647 struct scsi_qla_host *vha = sp->fcport->vha; 1648 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1649 uint8_t *ap = &sts24->data[12]; 1650 uint8_t *ep = &sts24->data[20]; 1651 uint32_t e_ref_tag, a_ref_tag; 1652 uint16_t e_app_tag, a_app_tag; 1653 uint16_t e_guard, a_guard; 1654 1655 /* 1656 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 1657 * would make guard field appear at offset 2 1658 */ 1659 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); 1660 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); 1661 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); 1662 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); 1663 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); 1664 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); 1665 1666 ql_dbg(ql_dbg_io, vha, 0x3023, 1667 "iocb(s) %p Returned STATUS.\n", sts24); 1668 1669 ql_dbg(ql_dbg_io, vha, 0x3024, 1670 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 1671 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 1672 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 1673 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1674 a_app_tag, e_app_tag, a_guard, e_guard); 1675 1676 /* 1677 * Ignore sector if: 1678 * For type 3: ref & app tag is all 'f's 1679 * For type 0,1,2: app tag is all 'f's 1680 */ 1681 if ((a_app_tag == 0xffff) && 1682 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || 1683 (a_ref_tag == 0xffffffff))) { 1684 uint32_t blocks_done, resid; 1685 sector_t lba_s = scsi_get_lba(cmd); 1686 1687 /* 2TB boundary case covered automatically with this */ 1688 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 1689 1690 resid = scsi_bufflen(cmd) - (blocks_done * 1691 cmd->device->sector_size); 1692 1693 scsi_set_resid(cmd, resid); 1694 cmd->result = DID_OK << 16; 1695 1696 /* Update protection tag */ 1697 if (scsi_prot_sg_count(cmd)) { 1698 uint32_t i, j = 0, k = 0, num_ent; 1699 struct scatterlist *sg; 1700 struct sd_dif_tuple *spt; 1701 1702 /* Patch the corresponding protection tags */ 1703 scsi_for_each_prot_sg(cmd, sg, 1704 scsi_prot_sg_count(cmd), i) { 1705 num_ent = sg_dma_len(sg) / 8; 1706 if (k + num_ent < blocks_done) { 1707 k += num_ent; 1708 continue; 1709 } 1710 j = blocks_done - k - 1; 1711 k = blocks_done; 1712 break; 1713 } 1714 1715 if (k != blocks_done) { 1716 ql_log(ql_log_warn, vha, 0x302f, 1717 "unexpected tag values tag:lba=%x:%llx)\n", 1718 e_ref_tag, (unsigned long long)lba_s); 1719 return 1; 1720 } 1721 1722 spt = page_address(sg_page(sg)) + sg->offset; 1723 spt += j; 1724 1725 spt->app_tag = 0xffff; 1726 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 1727 spt->ref_tag = 0xffffffff; 1728 } 1729 1730 return 0; 1731 } 1732 1733 /* check guard */ 1734 if (e_guard != a_guard) { 1735 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1736 0x10, 0x1); 1737 set_driver_byte(cmd, DRIVER_SENSE); 1738 set_host_byte(cmd, DID_ABORT); 1739 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1740 return 1; 1741 } 1742 1743 /* check ref tag */ 1744 if (e_ref_tag != a_ref_tag) { 1745 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1746 0x10, 0x3); 1747 set_driver_byte(cmd, DRIVER_SENSE); 1748 set_host_byte(cmd, DID_ABORT); 1749 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1750 return 1; 1751 } 1752 1753 /* check appl tag */ 1754 if (e_app_tag != a_app_tag) { 1755 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1756 0x10, 0x2); 1757 set_driver_byte(cmd, DRIVER_SENSE); 1758 set_host_byte(cmd, DID_ABORT); 1759 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1760 return 1; 1761 } 1762 1763 return 1; 1764 } 1765 1766 static void 1767 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, 1768 struct req_que *req, uint32_t index) 1769 { 1770 struct qla_hw_data *ha = vha->hw; 1771 srb_t *sp; 1772 uint16_t comp_status; 1773 uint16_t scsi_status; 1774 uint16_t thread_id; 1775 uint32_t rval = EXT_STATUS_OK; 1776 struct fc_bsg_job *bsg_job = NULL; 1777 sts_entry_t *sts; 1778 struct sts_entry_24xx *sts24; 1779 sts = (sts_entry_t *) pkt; 1780 sts24 = (struct sts_entry_24xx *) pkt; 1781 1782 /* Validate handle. */ 1783 if (index >= req->num_outstanding_cmds) { 1784 ql_log(ql_log_warn, vha, 0x70af, 1785 "Invalid SCSI completion handle 0x%x.\n", index); 1786 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1787 return; 1788 } 1789 1790 sp = req->outstanding_cmds[index]; 1791 if (sp) { 1792 /* Free outstanding command slot. */ 1793 req->outstanding_cmds[index] = NULL; 1794 bsg_job = sp->u.bsg_job; 1795 } else { 1796 ql_log(ql_log_warn, vha, 0x70b0, 1797 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 1798 req->id, index); 1799 1800 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1801 return; 1802 } 1803 1804 if (IS_FWI2_CAPABLE(ha)) { 1805 comp_status = le16_to_cpu(sts24->comp_status); 1806 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 1807 } else { 1808 comp_status = le16_to_cpu(sts->comp_status); 1809 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1810 } 1811 1812 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1813 switch (comp_status) { 1814 case CS_COMPLETE: 1815 if (scsi_status == 0) { 1816 bsg_job->reply->reply_payload_rcv_len = 1817 bsg_job->reply_payload.payload_len; 1818 rval = EXT_STATUS_OK; 1819 } 1820 goto done; 1821 1822 case CS_DATA_OVERRUN: 1823 ql_dbg(ql_dbg_user, vha, 0x70b1, 1824 "Command completed with date overrun thread_id=%d\n", 1825 thread_id); 1826 rval = EXT_STATUS_DATA_OVERRUN; 1827 break; 1828 1829 case CS_DATA_UNDERRUN: 1830 ql_dbg(ql_dbg_user, vha, 0x70b2, 1831 "Command completed with date underrun thread_id=%d\n", 1832 thread_id); 1833 rval = EXT_STATUS_DATA_UNDERRUN; 1834 break; 1835 case CS_BIDIR_RD_OVERRUN: 1836 ql_dbg(ql_dbg_user, vha, 0x70b3, 1837 "Command completed with read data overrun thread_id=%d\n", 1838 thread_id); 1839 rval = EXT_STATUS_DATA_OVERRUN; 1840 break; 1841 1842 case CS_BIDIR_RD_WR_OVERRUN: 1843 ql_dbg(ql_dbg_user, vha, 0x70b4, 1844 "Command completed with read and write data overrun " 1845 "thread_id=%d\n", thread_id); 1846 rval = EXT_STATUS_DATA_OVERRUN; 1847 break; 1848 1849 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: 1850 ql_dbg(ql_dbg_user, vha, 0x70b5, 1851 "Command completed with read data over and write data " 1852 "underrun thread_id=%d\n", thread_id); 1853 rval = EXT_STATUS_DATA_OVERRUN; 1854 break; 1855 1856 case CS_BIDIR_RD_UNDERRUN: 1857 ql_dbg(ql_dbg_user, vha, 0x70b6, 1858 "Command completed with read data data underrun " 1859 "thread_id=%d\n", thread_id); 1860 rval = EXT_STATUS_DATA_UNDERRUN; 1861 break; 1862 1863 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: 1864 ql_dbg(ql_dbg_user, vha, 0x70b7, 1865 "Command completed with read data under and write data " 1866 "overrun thread_id=%d\n", thread_id); 1867 rval = EXT_STATUS_DATA_UNDERRUN; 1868 break; 1869 1870 case CS_BIDIR_RD_WR_UNDERRUN: 1871 ql_dbg(ql_dbg_user, vha, 0x70b8, 1872 "Command completed with read and write data underrun " 1873 "thread_id=%d\n", thread_id); 1874 rval = EXT_STATUS_DATA_UNDERRUN; 1875 break; 1876 1877 case CS_BIDIR_DMA: 1878 ql_dbg(ql_dbg_user, vha, 0x70b9, 1879 "Command completed with data DMA error thread_id=%d\n", 1880 thread_id); 1881 rval = EXT_STATUS_DMA_ERR; 1882 break; 1883 1884 case CS_TIMEOUT: 1885 ql_dbg(ql_dbg_user, vha, 0x70ba, 1886 "Command completed with timeout thread_id=%d\n", 1887 thread_id); 1888 rval = EXT_STATUS_TIMEOUT; 1889 break; 1890 default: 1891 ql_dbg(ql_dbg_user, vha, 0x70bb, 1892 "Command completed with completion status=0x%x " 1893 "thread_id=%d\n", comp_status, thread_id); 1894 rval = EXT_STATUS_ERR; 1895 break; 1896 } 1897 bsg_job->reply->reply_payload_rcv_len = 0; 1898 1899 done: 1900 /* Return the vendor specific reply to API */ 1901 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 1902 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1903 /* Always return DID_OK, bsg will send the vendor specific response 1904 * in this case only */ 1905 sp->done(vha, sp, (DID_OK << 6)); 1906 1907 } 1908 1909 /** 1910 * qla2x00_status_entry() - Process a Status IOCB entry. 1911 * @ha: SCSI driver HA context 1912 * @pkt: Entry pointer 1913 */ 1914 static void 1915 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 1916 { 1917 srb_t *sp; 1918 fc_port_t *fcport; 1919 struct scsi_cmnd *cp; 1920 sts_entry_t *sts; 1921 struct sts_entry_24xx *sts24; 1922 uint16_t comp_status; 1923 uint16_t scsi_status; 1924 uint16_t ox_id; 1925 uint8_t lscsi_status; 1926 int32_t resid; 1927 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 1928 fw_resid_len; 1929 uint8_t *rsp_info, *sense_data; 1930 struct qla_hw_data *ha = vha->hw; 1931 uint32_t handle; 1932 uint16_t que; 1933 struct req_que *req; 1934 int logit = 1; 1935 int res = 0; 1936 uint16_t state_flags = 0; 1937 1938 sts = (sts_entry_t *) pkt; 1939 sts24 = (struct sts_entry_24xx *) pkt; 1940 if (IS_FWI2_CAPABLE(ha)) { 1941 comp_status = le16_to_cpu(sts24->comp_status); 1942 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 1943 state_flags = le16_to_cpu(sts24->state_flags); 1944 } else { 1945 comp_status = le16_to_cpu(sts->comp_status); 1946 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1947 } 1948 handle = (uint32_t) LSW(sts->handle); 1949 que = MSW(sts->handle); 1950 req = ha->req_q_map[que]; 1951 1952 /* Validate handle. */ 1953 if (handle < req->num_outstanding_cmds) 1954 sp = req->outstanding_cmds[handle]; 1955 else 1956 sp = NULL; 1957 1958 if (sp == NULL) { 1959 ql_dbg(ql_dbg_io, vha, 0x3017, 1960 "Invalid status handle (0x%x).\n", sts->handle); 1961 1962 if (IS_QLA82XX(ha)) 1963 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1964 else 1965 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1966 qla2xxx_wake_dpc(vha); 1967 return; 1968 } 1969 1970 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 1971 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 1972 return; 1973 } 1974 1975 /* Fast path completion. */ 1976 if (comp_status == CS_COMPLETE && scsi_status == 0) { 1977 qla2x00_do_host_ramp_up(vha); 1978 qla2x00_process_completed_request(vha, req, handle); 1979 1980 return; 1981 } 1982 1983 req->outstanding_cmds[handle] = NULL; 1984 cp = GET_CMD_SP(sp); 1985 if (cp == NULL) { 1986 ql_dbg(ql_dbg_io, vha, 0x3018, 1987 "Command already returned (0x%x/%p).\n", 1988 sts->handle, sp); 1989 1990 return; 1991 } 1992 1993 lscsi_status = scsi_status & STATUS_MASK; 1994 1995 fcport = sp->fcport; 1996 1997 ox_id = 0; 1998 sense_len = par_sense_len = rsp_info_len = resid_len = 1999 fw_resid_len = 0; 2000 if (IS_FWI2_CAPABLE(ha)) { 2001 if (scsi_status & SS_SENSE_LEN_VALID) 2002 sense_len = le32_to_cpu(sts24->sense_len); 2003 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2004 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 2005 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 2006 resid_len = le32_to_cpu(sts24->rsp_residual_count); 2007 if (comp_status == CS_DATA_UNDERRUN) 2008 fw_resid_len = le32_to_cpu(sts24->residual_len); 2009 rsp_info = sts24->data; 2010 sense_data = sts24->data; 2011 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 2012 ox_id = le16_to_cpu(sts24->ox_id); 2013 par_sense_len = sizeof(sts24->data); 2014 } else { 2015 if (scsi_status & SS_SENSE_LEN_VALID) 2016 sense_len = le16_to_cpu(sts->req_sense_length); 2017 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2018 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 2019 resid_len = le32_to_cpu(sts->residual_length); 2020 rsp_info = sts->rsp_info; 2021 sense_data = sts->req_sense_data; 2022 par_sense_len = sizeof(sts->req_sense_data); 2023 } 2024 2025 /* Check for any FCP transport errors. */ 2026 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 2027 /* Sense data lies beyond any FCP RESPONSE data. */ 2028 if (IS_FWI2_CAPABLE(ha)) { 2029 sense_data += rsp_info_len; 2030 par_sense_len -= rsp_info_len; 2031 } 2032 if (rsp_info_len > 3 && rsp_info[3]) { 2033 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 2034 "FCP I/O protocol failure (0x%x/0x%x).\n", 2035 rsp_info_len, rsp_info[3]); 2036 2037 res = DID_BUS_BUSY << 16; 2038 goto out; 2039 } 2040 } 2041 2042 /* Check for overrun. */ 2043 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 2044 scsi_status & SS_RESIDUAL_OVER) 2045 comp_status = CS_DATA_OVERRUN; 2046 2047 /* 2048 * Based on Host and scsi status generate status code for Linux 2049 */ 2050 switch (comp_status) { 2051 case CS_COMPLETE: 2052 case CS_QUEUE_FULL: 2053 if (scsi_status == 0) { 2054 res = DID_OK << 16; 2055 break; 2056 } 2057 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 2058 resid = resid_len; 2059 scsi_set_resid(cp, resid); 2060 2061 if (!lscsi_status && 2062 ((unsigned)(scsi_bufflen(cp) - resid) < 2063 cp->underflow)) { 2064 ql_dbg(ql_dbg_io, fcport->vha, 0x301a, 2065 "Mid-layer underflow " 2066 "detected (0x%x of 0x%x bytes).\n", 2067 resid, scsi_bufflen(cp)); 2068 2069 res = DID_ERROR << 16; 2070 break; 2071 } 2072 } 2073 res = DID_OK << 16 | lscsi_status; 2074 2075 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2076 ql_dbg(ql_dbg_io, fcport->vha, 0x301b, 2077 "QUEUE FULL detected.\n"); 2078 break; 2079 } 2080 logit = 0; 2081 if (lscsi_status != SS_CHECK_CONDITION) 2082 break; 2083 2084 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2085 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2086 break; 2087 2088 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 2089 rsp, res); 2090 break; 2091 2092 case CS_DATA_UNDERRUN: 2093 /* Use F/W calculated residual length. */ 2094 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 2095 scsi_set_resid(cp, resid); 2096 if (scsi_status & SS_RESIDUAL_UNDER) { 2097 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 2098 ql_dbg(ql_dbg_io, fcport->vha, 0x301d, 2099 "Dropped frame(s) detected " 2100 "(0x%x of 0x%x bytes).\n", 2101 resid, scsi_bufflen(cp)); 2102 2103 res = DID_ERROR << 16 | lscsi_status; 2104 goto check_scsi_status; 2105 } 2106 2107 if (!lscsi_status && 2108 ((unsigned)(scsi_bufflen(cp) - resid) < 2109 cp->underflow)) { 2110 ql_dbg(ql_dbg_io, fcport->vha, 0x301e, 2111 "Mid-layer underflow " 2112 "detected (0x%x of 0x%x bytes).\n", 2113 resid, scsi_bufflen(cp)); 2114 2115 res = DID_ERROR << 16; 2116 break; 2117 } 2118 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 2119 lscsi_status != SAM_STAT_BUSY) { 2120 /* 2121 * scsi status of task set and busy are considered to be 2122 * task not completed. 2123 */ 2124 2125 ql_dbg(ql_dbg_io, fcport->vha, 0x301f, 2126 "Dropped frame(s) detected (0x%x " 2127 "of 0x%x bytes).\n", resid, 2128 scsi_bufflen(cp)); 2129 2130 res = DID_ERROR << 16 | lscsi_status; 2131 goto check_scsi_status; 2132 } else { 2133 ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 2134 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 2135 scsi_status, lscsi_status); 2136 } 2137 2138 res = DID_OK << 16 | lscsi_status; 2139 logit = 0; 2140 2141 check_scsi_status: 2142 /* 2143 * Check to see if SCSI Status is non zero. If so report SCSI 2144 * Status. 2145 */ 2146 if (lscsi_status != 0) { 2147 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2148 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 2149 "QUEUE FULL detected.\n"); 2150 logit = 1; 2151 break; 2152 } 2153 if (lscsi_status != SS_CHECK_CONDITION) 2154 break; 2155 2156 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2157 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2158 break; 2159 2160 qla2x00_handle_sense(sp, sense_data, par_sense_len, 2161 sense_len, rsp, res); 2162 } 2163 break; 2164 2165 case CS_PORT_LOGGED_OUT: 2166 case CS_PORT_CONFIG_CHG: 2167 case CS_PORT_BUSY: 2168 case CS_INCOMPLETE: 2169 case CS_PORT_UNAVAILABLE: 2170 case CS_TIMEOUT: 2171 case CS_RESET: 2172 2173 /* 2174 * We are going to have the fc class block the rport 2175 * while we try to recover so instruct the mid layer 2176 * to requeue until the class decides how to handle this. 2177 */ 2178 res = DID_TRANSPORT_DISRUPTED << 16; 2179 2180 if (comp_status == CS_TIMEOUT) { 2181 if (IS_FWI2_CAPABLE(ha)) 2182 break; 2183 else if ((le16_to_cpu(sts->status_flags) & 2184 SF_LOGOUT_SENT) == 0) 2185 break; 2186 } 2187 2188 ql_dbg(ql_dbg_io, fcport->vha, 0x3021, 2189 "Port down status: port-state=0x%x.\n", 2190 atomic_read(&fcport->state)); 2191 2192 if (atomic_read(&fcport->state) == FCS_ONLINE) 2193 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 2194 break; 2195 2196 case CS_ABORTED: 2197 res = DID_RESET << 16; 2198 break; 2199 2200 case CS_DIF_ERROR: 2201 logit = qla2x00_handle_dif_error(sp, sts24); 2202 res = cp->result; 2203 break; 2204 2205 case CS_TRANSPORT: 2206 res = DID_ERROR << 16; 2207 2208 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 2209 break; 2210 2211 if (state_flags & BIT_4) 2212 scmd_printk(KERN_WARNING, cp, 2213 "Unsupported device '%s' found.\n", 2214 cp->device->vendor); 2215 break; 2216 2217 default: 2218 res = DID_ERROR << 16; 2219 break; 2220 } 2221 2222 out: 2223 if (logit) 2224 ql_dbg(ql_dbg_io, fcport->vha, 0x3022, 2225 "FCP command status: 0x%x-0x%x (0x%x) " 2226 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x " 2227 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x " 2228 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", 2229 comp_status, scsi_status, res, vha->host_no, 2230 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 2231 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 2232 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3], 2233 cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7], 2234 cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len, 2235 resid_len, fw_resid_len); 2236 2237 if (!res) 2238 qla2x00_do_host_ramp_up(vha); 2239 2240 if (rsp->status_srb == NULL) 2241 sp->done(ha, sp, res); 2242 } 2243 2244 /** 2245 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 2246 * @ha: SCSI driver HA context 2247 * @pkt: Entry pointer 2248 * 2249 * Extended sense data. 2250 */ 2251 static void 2252 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 2253 { 2254 uint8_t sense_sz = 0; 2255 struct qla_hw_data *ha = rsp->hw; 2256 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 2257 srb_t *sp = rsp->status_srb; 2258 struct scsi_cmnd *cp; 2259 uint32_t sense_len; 2260 uint8_t *sense_ptr; 2261 2262 if (!sp || !GET_CMD_SENSE_LEN(sp)) 2263 return; 2264 2265 sense_len = GET_CMD_SENSE_LEN(sp); 2266 sense_ptr = GET_CMD_SENSE_PTR(sp); 2267 2268 cp = GET_CMD_SP(sp); 2269 if (cp == NULL) { 2270 ql_log(ql_log_warn, vha, 0x3025, 2271 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 2272 2273 rsp->status_srb = NULL; 2274 return; 2275 } 2276 2277 if (sense_len > sizeof(pkt->data)) 2278 sense_sz = sizeof(pkt->data); 2279 else 2280 sense_sz = sense_len; 2281 2282 /* Move sense data. */ 2283 if (IS_FWI2_CAPABLE(ha)) 2284 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 2285 memcpy(sense_ptr, pkt->data, sense_sz); 2286 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 2287 sense_ptr, sense_sz); 2288 2289 sense_len -= sense_sz; 2290 sense_ptr += sense_sz; 2291 2292 SET_CMD_SENSE_PTR(sp, sense_ptr); 2293 SET_CMD_SENSE_LEN(sp, sense_len); 2294 2295 /* Place command on done queue. */ 2296 if (sense_len == 0) { 2297 rsp->status_srb = NULL; 2298 sp->done(ha, sp, cp->result); 2299 } 2300 } 2301 2302 /** 2303 * qla2x00_error_entry() - Process an error entry. 2304 * @ha: SCSI driver HA context 2305 * @pkt: Entry pointer 2306 */ 2307 static void 2308 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 2309 { 2310 srb_t *sp; 2311 struct qla_hw_data *ha = vha->hw; 2312 const char func[] = "ERROR-IOCB"; 2313 uint16_t que = MSW(pkt->handle); 2314 struct req_que *req = NULL; 2315 int res = DID_ERROR << 16; 2316 2317 ql_dbg(ql_dbg_async, vha, 0x502a, 2318 "type of error status in response: 0x%x\n", pkt->entry_status); 2319 2320 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 2321 goto fatal; 2322 2323 req = ha->req_q_map[que]; 2324 2325 if (pkt->entry_status & RF_BUSY) 2326 res = DID_BUS_BUSY << 16; 2327 2328 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2329 if (sp) { 2330 sp->done(ha, sp, res); 2331 return; 2332 } 2333 fatal: 2334 ql_log(ql_log_warn, vha, 0x5030, 2335 "Error entry - invalid handle/queue.\n"); 2336 2337 if (IS_QLA82XX(ha)) 2338 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2339 else 2340 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2341 qla2xxx_wake_dpc(vha); 2342 } 2343 2344 /** 2345 * qla24xx_mbx_completion() - Process mailbox command completions. 2346 * @ha: SCSI driver HA context 2347 * @mb0: Mailbox0 register 2348 */ 2349 static void 2350 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 2351 { 2352 uint16_t cnt; 2353 uint32_t mboxes; 2354 uint16_t __iomem *wptr; 2355 struct qla_hw_data *ha = vha->hw; 2356 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2357 2358 /* Read all mbox registers? */ 2359 mboxes = (1 << ha->mbx_count) - 1; 2360 if (!ha->mcp) 2361 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 2362 else 2363 mboxes = ha->mcp->in_mb; 2364 2365 /* Load return mailbox registers. */ 2366 ha->flags.mbox_int = 1; 2367 ha->mailbox_out[0] = mb0; 2368 mboxes >>= 1; 2369 wptr = (uint16_t __iomem *)®->mailbox1; 2370 2371 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 2372 if (mboxes & BIT_0) 2373 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 2374 2375 mboxes >>= 1; 2376 wptr++; 2377 } 2378 } 2379 2380 /** 2381 * qla24xx_process_response_queue() - Process response queue entries. 2382 * @ha: SCSI driver HA context 2383 */ 2384 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 2385 struct rsp_que *rsp) 2386 { 2387 struct sts_entry_24xx *pkt; 2388 struct qla_hw_data *ha = vha->hw; 2389 2390 if (!vha->flags.online) 2391 return; 2392 2393 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2394 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 2395 2396 rsp->ring_index++; 2397 if (rsp->ring_index == rsp->length) { 2398 rsp->ring_index = 0; 2399 rsp->ring_ptr = rsp->ring; 2400 } else { 2401 rsp->ring_ptr++; 2402 } 2403 2404 if (pkt->entry_status != 0) { 2405 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2406 2407 (void)qlt_24xx_process_response_error(vha, pkt); 2408 2409 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2410 wmb(); 2411 continue; 2412 } 2413 2414 switch (pkt->entry_type) { 2415 case STATUS_TYPE: 2416 qla2x00_status_entry(vha, rsp, pkt); 2417 break; 2418 case STATUS_CONT_TYPE: 2419 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2420 break; 2421 case VP_RPT_ID_IOCB_TYPE: 2422 qla24xx_report_id_acquisition(vha, 2423 (struct vp_rpt_id_entry_24xx *)pkt); 2424 break; 2425 case LOGINOUT_PORT_IOCB_TYPE: 2426 qla24xx_logio_entry(vha, rsp->req, 2427 (struct logio_entry_24xx *)pkt); 2428 break; 2429 case TSK_MGMT_IOCB_TYPE: 2430 qla24xx_tm_iocb_entry(vha, rsp->req, 2431 (struct tsk_mgmt_entry *)pkt); 2432 break; 2433 case CT_IOCB_TYPE: 2434 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2435 break; 2436 case ELS_IOCB_TYPE: 2437 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2438 break; 2439 case ABTS_RECV_24XX: 2440 /* ensure that the ATIO queue is empty */ 2441 qlt_24xx_process_atio_queue(vha); 2442 case ABTS_RESP_24XX: 2443 case CTIO_TYPE7: 2444 case NOTIFY_ACK_TYPE: 2445 qlt_response_pkt_all_vps(vha, (response_t *)pkt); 2446 break; 2447 case MARKER_TYPE: 2448 /* Do nothing in this case, this check is to prevent it 2449 * from falling into default case 2450 */ 2451 break; 2452 default: 2453 /* Type Not Supported. */ 2454 ql_dbg(ql_dbg_async, vha, 0x5042, 2455 "Received unknown response pkt type %x " 2456 "entry status=%x.\n", 2457 pkt->entry_type, pkt->entry_status); 2458 break; 2459 } 2460 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2461 wmb(); 2462 } 2463 2464 /* Adjust ring index */ 2465 if (IS_QLA82XX(ha)) { 2466 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 2467 WRT_REG_DWORD(®->rsp_q_out[0], rsp->ring_index); 2468 } else 2469 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); 2470 } 2471 2472 static void 2473 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 2474 { 2475 int rval; 2476 uint32_t cnt; 2477 struct qla_hw_data *ha = vha->hw; 2478 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2479 2480 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 2481 return; 2482 2483 rval = QLA_SUCCESS; 2484 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 2485 RD_REG_DWORD(®->iobase_addr); 2486 WRT_REG_DWORD(®->iobase_window, 0x0001); 2487 for (cnt = 10000; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2488 rval == QLA_SUCCESS; cnt--) { 2489 if (cnt) { 2490 WRT_REG_DWORD(®->iobase_window, 0x0001); 2491 udelay(10); 2492 } else 2493 rval = QLA_FUNCTION_TIMEOUT; 2494 } 2495 if (rval == QLA_SUCCESS) 2496 goto next_test; 2497 2498 WRT_REG_DWORD(®->iobase_window, 0x0003); 2499 for (cnt = 100; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2500 rval == QLA_SUCCESS; cnt--) { 2501 if (cnt) { 2502 WRT_REG_DWORD(®->iobase_window, 0x0003); 2503 udelay(10); 2504 } else 2505 rval = QLA_FUNCTION_TIMEOUT; 2506 } 2507 if (rval != QLA_SUCCESS) 2508 goto done; 2509 2510 next_test: 2511 if (RD_REG_DWORD(®->iobase_c8) & BIT_3) 2512 ql_log(ql_log_info, vha, 0x504c, 2513 "Additional code -- 0x55AA.\n"); 2514 2515 done: 2516 WRT_REG_DWORD(®->iobase_window, 0x0000); 2517 RD_REG_DWORD(®->iobase_window); 2518 } 2519 2520 /** 2521 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. 2522 * @irq: 2523 * @dev_id: SCSI driver HA context 2524 * 2525 * Called by system whenever the host adapter generates an interrupt. 2526 * 2527 * Returns handled flag. 2528 */ 2529 irqreturn_t 2530 qla24xx_intr_handler(int irq, void *dev_id) 2531 { 2532 scsi_qla_host_t *vha; 2533 struct qla_hw_data *ha; 2534 struct device_reg_24xx __iomem *reg; 2535 int status; 2536 unsigned long iter; 2537 uint32_t stat; 2538 uint32_t hccr; 2539 uint16_t mb[8]; 2540 struct rsp_que *rsp; 2541 unsigned long flags; 2542 2543 rsp = (struct rsp_que *) dev_id; 2544 if (!rsp) { 2545 ql_log(ql_log_info, NULL, 0x5059, 2546 "%s: NULL response queue pointer.\n", __func__); 2547 return IRQ_NONE; 2548 } 2549 2550 ha = rsp->hw; 2551 reg = &ha->iobase->isp24; 2552 status = 0; 2553 2554 if (unlikely(pci_channel_offline(ha->pdev))) 2555 return IRQ_HANDLED; 2556 2557 spin_lock_irqsave(&ha->hardware_lock, flags); 2558 vha = pci_get_drvdata(ha->pdev); 2559 for (iter = 50; iter--; ) { 2560 stat = RD_REG_DWORD(®->host_status); 2561 if (stat & HSRX_RISC_PAUSED) { 2562 if (unlikely(pci_channel_offline(ha->pdev))) 2563 break; 2564 2565 hccr = RD_REG_DWORD(®->hccr); 2566 2567 ql_log(ql_log_warn, vha, 0x504b, 2568 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2569 hccr); 2570 2571 qla2xxx_check_risc_status(vha); 2572 2573 ha->isp_ops->fw_dump(vha, 1); 2574 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2575 break; 2576 } else if ((stat & HSRX_RISC_INT) == 0) 2577 break; 2578 2579 switch (stat & 0xff) { 2580 case INTR_ROM_MB_SUCCESS: 2581 case INTR_ROM_MB_FAILED: 2582 case INTR_MB_SUCCESS: 2583 case INTR_MB_FAILED: 2584 qla24xx_mbx_completion(vha, MSW(stat)); 2585 status |= MBX_INTERRUPT; 2586 2587 break; 2588 case INTR_ASYNC_EVENT: 2589 mb[0] = MSW(stat); 2590 mb[1] = RD_REG_WORD(®->mailbox1); 2591 mb[2] = RD_REG_WORD(®->mailbox2); 2592 mb[3] = RD_REG_WORD(®->mailbox3); 2593 qla2x00_async_event(vha, rsp, mb); 2594 break; 2595 case INTR_RSP_QUE_UPDATE: 2596 case INTR_RSP_QUE_UPDATE_83XX: 2597 qla24xx_process_response_queue(vha, rsp); 2598 break; 2599 case INTR_ATIO_QUE_UPDATE: 2600 qlt_24xx_process_atio_queue(vha); 2601 break; 2602 case INTR_ATIO_RSP_QUE_UPDATE: 2603 qlt_24xx_process_atio_queue(vha); 2604 qla24xx_process_response_queue(vha, rsp); 2605 break; 2606 default: 2607 ql_dbg(ql_dbg_async, vha, 0x504f, 2608 "Unrecognized interrupt type (%d).\n", stat * 0xff); 2609 break; 2610 } 2611 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2612 RD_REG_DWORD_RELAXED(®->hccr); 2613 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 2614 ndelay(3500); 2615 } 2616 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2617 2618 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2619 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2620 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2621 complete(&ha->mbx_intr_comp); 2622 } 2623 2624 return IRQ_HANDLED; 2625 } 2626 2627 static irqreturn_t 2628 qla24xx_msix_rsp_q(int irq, void *dev_id) 2629 { 2630 struct qla_hw_data *ha; 2631 struct rsp_que *rsp; 2632 struct device_reg_24xx __iomem *reg; 2633 struct scsi_qla_host *vha; 2634 unsigned long flags; 2635 2636 rsp = (struct rsp_que *) dev_id; 2637 if (!rsp) { 2638 ql_log(ql_log_info, NULL, 0x505a, 2639 "%s: NULL response queue pointer.\n", __func__); 2640 return IRQ_NONE; 2641 } 2642 ha = rsp->hw; 2643 reg = &ha->iobase->isp24; 2644 2645 spin_lock_irqsave(&ha->hardware_lock, flags); 2646 2647 vha = pci_get_drvdata(ha->pdev); 2648 qla24xx_process_response_queue(vha, rsp); 2649 if (!ha->flags.disable_msix_handshake) { 2650 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2651 RD_REG_DWORD_RELAXED(®->hccr); 2652 } 2653 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2654 2655 return IRQ_HANDLED; 2656 } 2657 2658 static irqreturn_t 2659 qla25xx_msix_rsp_q(int irq, void *dev_id) 2660 { 2661 struct qla_hw_data *ha; 2662 struct rsp_que *rsp; 2663 struct device_reg_24xx __iomem *reg; 2664 unsigned long flags; 2665 2666 rsp = (struct rsp_que *) dev_id; 2667 if (!rsp) { 2668 ql_log(ql_log_info, NULL, 0x505b, 2669 "%s: NULL response queue pointer.\n", __func__); 2670 return IRQ_NONE; 2671 } 2672 ha = rsp->hw; 2673 2674 /* Clear the interrupt, if enabled, for this response queue */ 2675 if (!ha->flags.disable_msix_handshake) { 2676 reg = &ha->iobase->isp24; 2677 spin_lock_irqsave(&ha->hardware_lock, flags); 2678 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2679 RD_REG_DWORD_RELAXED(®->hccr); 2680 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2681 } 2682 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 2683 2684 return IRQ_HANDLED; 2685 } 2686 2687 static irqreturn_t 2688 qla24xx_msix_default(int irq, void *dev_id) 2689 { 2690 scsi_qla_host_t *vha; 2691 struct qla_hw_data *ha; 2692 struct rsp_que *rsp; 2693 struct device_reg_24xx __iomem *reg; 2694 int status; 2695 uint32_t stat; 2696 uint32_t hccr; 2697 uint16_t mb[8]; 2698 unsigned long flags; 2699 2700 rsp = (struct rsp_que *) dev_id; 2701 if (!rsp) { 2702 ql_log(ql_log_info, NULL, 0x505c, 2703 "%s: NULL response queue pointer.\n", __func__); 2704 return IRQ_NONE; 2705 } 2706 ha = rsp->hw; 2707 reg = &ha->iobase->isp24; 2708 status = 0; 2709 2710 spin_lock_irqsave(&ha->hardware_lock, flags); 2711 vha = pci_get_drvdata(ha->pdev); 2712 do { 2713 stat = RD_REG_DWORD(®->host_status); 2714 if (stat & HSRX_RISC_PAUSED) { 2715 if (unlikely(pci_channel_offline(ha->pdev))) 2716 break; 2717 2718 hccr = RD_REG_DWORD(®->hccr); 2719 2720 ql_log(ql_log_info, vha, 0x5050, 2721 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2722 hccr); 2723 2724 qla2xxx_check_risc_status(vha); 2725 2726 ha->isp_ops->fw_dump(vha, 1); 2727 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2728 break; 2729 } else if ((stat & HSRX_RISC_INT) == 0) 2730 break; 2731 2732 switch (stat & 0xff) { 2733 case INTR_ROM_MB_SUCCESS: 2734 case INTR_ROM_MB_FAILED: 2735 case INTR_MB_SUCCESS: 2736 case INTR_MB_FAILED: 2737 qla24xx_mbx_completion(vha, MSW(stat)); 2738 status |= MBX_INTERRUPT; 2739 2740 break; 2741 case INTR_ASYNC_EVENT: 2742 mb[0] = MSW(stat); 2743 mb[1] = RD_REG_WORD(®->mailbox1); 2744 mb[2] = RD_REG_WORD(®->mailbox2); 2745 mb[3] = RD_REG_WORD(®->mailbox3); 2746 qla2x00_async_event(vha, rsp, mb); 2747 break; 2748 case INTR_RSP_QUE_UPDATE: 2749 case INTR_RSP_QUE_UPDATE_83XX: 2750 qla24xx_process_response_queue(vha, rsp); 2751 break; 2752 case INTR_ATIO_QUE_UPDATE: 2753 qlt_24xx_process_atio_queue(vha); 2754 break; 2755 case INTR_ATIO_RSP_QUE_UPDATE: 2756 qlt_24xx_process_atio_queue(vha); 2757 qla24xx_process_response_queue(vha, rsp); 2758 break; 2759 default: 2760 ql_dbg(ql_dbg_async, vha, 0x5051, 2761 "Unrecognized interrupt type (%d).\n", stat & 0xff); 2762 break; 2763 } 2764 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2765 } while (0); 2766 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2767 2768 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2769 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2770 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2771 complete(&ha->mbx_intr_comp); 2772 } 2773 return IRQ_HANDLED; 2774 } 2775 2776 /* Interrupt handling helpers. */ 2777 2778 struct qla_init_msix_entry { 2779 const char *name; 2780 irq_handler_t handler; 2781 }; 2782 2783 static struct qla_init_msix_entry msix_entries[3] = { 2784 { "qla2xxx (default)", qla24xx_msix_default }, 2785 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 2786 { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, 2787 }; 2788 2789 static struct qla_init_msix_entry qla82xx_msix_entries[2] = { 2790 { "qla2xxx (default)", qla82xx_msix_default }, 2791 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 2792 }; 2793 2794 static struct qla_init_msix_entry qla83xx_msix_entries[3] = { 2795 { "qla2xxx (default)", qla24xx_msix_default }, 2796 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 2797 { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, 2798 }; 2799 2800 static void 2801 qla24xx_disable_msix(struct qla_hw_data *ha) 2802 { 2803 int i; 2804 struct qla_msix_entry *qentry; 2805 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2806 2807 for (i = 0; i < ha->msix_count; i++) { 2808 qentry = &ha->msix_entries[i]; 2809 if (qentry->have_irq) 2810 free_irq(qentry->vector, qentry->rsp); 2811 } 2812 pci_disable_msix(ha->pdev); 2813 kfree(ha->msix_entries); 2814 ha->msix_entries = NULL; 2815 ha->flags.msix_enabled = 0; 2816 ql_dbg(ql_dbg_init, vha, 0x0042, 2817 "Disabled the MSI.\n"); 2818 } 2819 2820 static int 2821 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 2822 { 2823 #define MIN_MSIX_COUNT 2 2824 int i, ret; 2825 struct msix_entry *entries; 2826 struct qla_msix_entry *qentry; 2827 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2828 2829 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 2830 GFP_KERNEL); 2831 if (!entries) { 2832 ql_log(ql_log_warn, vha, 0x00bc, 2833 "Failed to allocate memory for msix_entry.\n"); 2834 return -ENOMEM; 2835 } 2836 2837 for (i = 0; i < ha->msix_count; i++) 2838 entries[i].entry = i; 2839 2840 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2841 if (ret) { 2842 if (ret < MIN_MSIX_COUNT) 2843 goto msix_failed; 2844 2845 ql_log(ql_log_warn, vha, 0x00c6, 2846 "MSI-X: Failed to enable support " 2847 "-- %d/%d\n Retry with %d vectors.\n", 2848 ha->msix_count, ret, ret); 2849 ha->msix_count = ret; 2850 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2851 if (ret) { 2852 msix_failed: 2853 ql_log(ql_log_fatal, vha, 0x00c7, 2854 "MSI-X: Failed to enable support, " 2855 "giving up -- %d/%d.\n", 2856 ha->msix_count, ret); 2857 goto msix_out; 2858 } 2859 ha->max_rsp_queues = ha->msix_count - 1; 2860 } 2861 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 2862 ha->msix_count, GFP_KERNEL); 2863 if (!ha->msix_entries) { 2864 ql_log(ql_log_fatal, vha, 0x00c8, 2865 "Failed to allocate memory for ha->msix_entries.\n"); 2866 ret = -ENOMEM; 2867 goto msix_out; 2868 } 2869 ha->flags.msix_enabled = 1; 2870 2871 for (i = 0; i < ha->msix_count; i++) { 2872 qentry = &ha->msix_entries[i]; 2873 qentry->vector = entries[i].vector; 2874 qentry->entry = entries[i].entry; 2875 qentry->have_irq = 0; 2876 qentry->rsp = NULL; 2877 } 2878 2879 /* Enable MSI-X vectors for the base queue */ 2880 for (i = 0; i < ha->msix_count; i++) { 2881 qentry = &ha->msix_entries[i]; 2882 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { 2883 ret = request_irq(qentry->vector, 2884 qla83xx_msix_entries[i].handler, 2885 0, qla83xx_msix_entries[i].name, rsp); 2886 } else if (IS_QLA82XX(ha)) { 2887 ret = request_irq(qentry->vector, 2888 qla82xx_msix_entries[i].handler, 2889 0, qla82xx_msix_entries[i].name, rsp); 2890 } else { 2891 ret = request_irq(qentry->vector, 2892 msix_entries[i].handler, 2893 0, msix_entries[i].name, rsp); 2894 } 2895 if (ret) { 2896 ql_log(ql_log_fatal, vha, 0x00cb, 2897 "MSI-X: unable to register handler -- %x/%d.\n", 2898 qentry->vector, ret); 2899 qla24xx_disable_msix(ha); 2900 ha->mqenable = 0; 2901 goto msix_out; 2902 } 2903 qentry->have_irq = 1; 2904 qentry->rsp = rsp; 2905 rsp->msix = qentry; 2906 } 2907 2908 /* Enable MSI-X vector for response queue update for queue 0 */ 2909 if (IS_QLA83XX(ha)) { 2910 if (ha->msixbase && ha->mqiobase && 2911 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 2912 ha->mqenable = 1; 2913 } else 2914 if (ha->mqiobase 2915 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 2916 ha->mqenable = 1; 2917 ql_dbg(ql_dbg_multiq, vha, 0xc005, 2918 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 2919 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 2920 ql_dbg(ql_dbg_init, vha, 0x0055, 2921 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 2922 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 2923 2924 msix_out: 2925 kfree(entries); 2926 return ret; 2927 } 2928 2929 int 2930 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 2931 { 2932 int ret; 2933 device_reg_t __iomem *reg = ha->iobase; 2934 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2935 2936 /* If possible, enable MSI-X. */ 2937 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2938 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha)) 2939 goto skip_msi; 2940 2941 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 2942 (ha->pdev->subsystem_device == 0x7040 || 2943 ha->pdev->subsystem_device == 0x7041 || 2944 ha->pdev->subsystem_device == 0x1705)) { 2945 ql_log(ql_log_warn, vha, 0x0034, 2946 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 2947 ha->pdev->subsystem_vendor, 2948 ha->pdev->subsystem_device); 2949 goto skip_msi; 2950 } 2951 2952 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 2953 ql_log(ql_log_warn, vha, 0x0035, 2954 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 2955 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 2956 goto skip_msix; 2957 } 2958 2959 ret = qla24xx_enable_msix(ha, rsp); 2960 if (!ret) { 2961 ql_dbg(ql_dbg_init, vha, 0x0036, 2962 "MSI-X: Enabled (0x%X, 0x%X).\n", 2963 ha->chip_revision, ha->fw_attributes); 2964 goto clear_risc_ints; 2965 } 2966 ql_log(ql_log_info, vha, 0x0037, 2967 "MSI-X Falling back-to MSI mode -%d.\n", ret); 2968 skip_msix: 2969 2970 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2971 !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha)) 2972 goto skip_msi; 2973 2974 ret = pci_enable_msi(ha->pdev); 2975 if (!ret) { 2976 ql_dbg(ql_dbg_init, vha, 0x0038, 2977 "MSI: Enabled.\n"); 2978 ha->flags.msi_enabled = 1; 2979 } else 2980 ql_log(ql_log_warn, vha, 0x0039, 2981 "MSI-X; Falling back-to INTa mode -- %d.\n", ret); 2982 2983 /* Skip INTx on ISP82xx. */ 2984 if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) 2985 return QLA_FUNCTION_FAILED; 2986 2987 skip_msi: 2988 2989 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 2990 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 2991 QLA2XXX_DRIVER_NAME, rsp); 2992 if (ret) { 2993 ql_log(ql_log_warn, vha, 0x003a, 2994 "Failed to reserve interrupt %d already in use.\n", 2995 ha->pdev->irq); 2996 goto fail; 2997 } else if (!ha->flags.msi_enabled) { 2998 ql_dbg(ql_dbg_init, vha, 0x0125, 2999 "INTa mode: Enabled.\n"); 3000 ha->flags.mr_intr_valid = 1; 3001 } 3002 3003 clear_risc_ints: 3004 3005 spin_lock_irq(&ha->hardware_lock); 3006 if (!IS_FWI2_CAPABLE(ha)) 3007 WRT_REG_WORD(®->isp.semaphore, 0); 3008 spin_unlock_irq(&ha->hardware_lock); 3009 3010 fail: 3011 return ret; 3012 } 3013 3014 void 3015 qla2x00_free_irqs(scsi_qla_host_t *vha) 3016 { 3017 struct qla_hw_data *ha = vha->hw; 3018 struct rsp_que *rsp; 3019 3020 /* 3021 * We need to check that ha->rsp_q_map is valid in case we are called 3022 * from a probe failure context. 3023 */ 3024 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 3025 return; 3026 rsp = ha->rsp_q_map[0]; 3027 3028 if (ha->flags.msix_enabled) 3029 qla24xx_disable_msix(ha); 3030 else if (ha->flags.msi_enabled) { 3031 free_irq(ha->pdev->irq, rsp); 3032 pci_disable_msi(ha->pdev); 3033 } else 3034 free_irq(ha->pdev->irq, rsp); 3035 } 3036 3037 3038 int qla25xx_request_irq(struct rsp_que *rsp) 3039 { 3040 struct qla_hw_data *ha = rsp->hw; 3041 struct qla_init_msix_entry *intr = &msix_entries[2]; 3042 struct qla_msix_entry *msix = rsp->msix; 3043 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3044 int ret; 3045 3046 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); 3047 if (ret) { 3048 ql_log(ql_log_fatal, vha, 0x00e6, 3049 "MSI-X: Unable to register handler -- %x/%d.\n", 3050 msix->vector, ret); 3051 return ret; 3052 } 3053 msix->have_irq = 1; 3054 msix->rsp = rsp; 3055 return ret; 3056 } 3057