/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
    sts_entry_t *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct device_reg_2xxx __iomem *reg;
    int status;
    unsigned long iter;
    uint16_t hccr;
    uint16_t mb[4];
    struct rsp_que *rsp;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        ql_log(ql_log_info, NULL, 0x505d,
            "%s: NULL response queue pointer.\n", __func__);
        return (IRQ_NONE);
    }

    ha = rsp->hw;
    reg = &ha->iobase->isp;
    status = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    for (iter = 50; iter--; ) {
        hccr = RD_REG_WORD(&reg->hccr);
        /* Check for PCI disconnection */
        if (hccr == 0xffff) {
            /*
             * Schedule this on the default system workqueue so that
             * all the adapter workqueues and the DPC thread can be
             * shutdown cleanly.
             */
            schedule_work(&ha->board_disable);
            break;
        }
        if (hccr & HCCR_RISC_PAUSE) {
            if (pci_channel_offline(ha->pdev))
                break;

            /*
             * Issue a "HARD" reset in order for the RISC interrupt
             * bit to be cleared.  Schedule a big hammer to get
             * out of the RISC PAUSED state.
             */
            WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
            RD_REG_WORD(&reg->hccr);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
            break;

        if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
            WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
            RD_REG_WORD(&reg->hccr);

            /* Get mailbox data. */
            mb[0] = RD_MAILBOX_REG(ha, reg, 0);
            if (mb[0] > 0x3fff && mb[0] < 0x8000) {
                qla2x00_mbx_completion(vha, mb[0]);
                status |= MBX_INTERRUPT;
            } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
                mb[1] = RD_MAILBOX_REG(ha, reg, 1);
                mb[2] = RD_MAILBOX_REG(ha, reg, 2);
                mb[3] = RD_MAILBOX_REG(ha, reg, 3);
                qla2x00_async_event(vha, rsp, mb);
            } else {
                /*EMPTY*/
                ql_dbg(ql_dbg_async, vha, 0x5025,
                    "Unrecognized interrupt type (%d).\n",
                    mb[0]);
            }
            /* Release mailbox registers. */
            WRT_REG_WORD(&reg->semaphore, 0);
            RD_REG_WORD(&reg->semaphore);
        } else {
            qla2x00_process_response_queue(rsp);

            WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
            RD_REG_WORD(&reg->hccr);
        }
    }
    qla2x00_handle_mbx_completion(ha, status);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return (IRQ_HANDLED);
}
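/**
 * qla2x00_check_reg_for_disconnect() - Check a register read for PCI disconnect.
 * @vha: SCSI driver HA context
 * @reg: register value just read from the adapter
 *
 * A register value of all ones indicates that the adapter is no longer
 * responding on the PCI bus; schedule the board-disable work in that case.
 *
 * Returns true if the adapter appears to be disconnected.
 */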
bool
qla2x00_check_reg_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
    /* Check for PCI disconnection */
    if (reg == 0xffffffff) {
        /*
         * Schedule this on the default system workqueue so that all the
         * adapter workqueues and the DPC thread can be shutdown
         * cleanly.
         */
        schedule_work(&vha->hw->board_disable);
        return true;
    } else
        return false;
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct device_reg_2xxx __iomem *reg;
    int status;
    unsigned long iter;
    uint32_t stat;
    uint16_t hccr;
    uint16_t mb[4];
    struct rsp_que *rsp;
    struct qla_hw_data *ha;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        ql_log(ql_log_info, NULL, 0x5058,
            "%s: NULL response queue pointer.\n", __func__);
        return (IRQ_NONE);
    }

    ha = rsp->hw;
    reg = &ha->iobase->isp;
    status = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    for (iter = 50; iter--; ) {
        stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
        if (qla2x00_check_reg_for_disconnect(vha, stat))
            break;
        if (stat & HSR_RISC_PAUSED) {
            if (unlikely(pci_channel_offline(ha->pdev)))
                break;

            hccr = RD_REG_WORD(&reg->hccr);

            if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
                ql_log(ql_log_warn, vha, 0x5026,
                    "Parity error -- HCCR=%x, Dumping "
                    "firmware.\n", hccr);
            else
                ql_log(ql_log_warn, vha, 0x5027,
                    "RISC paused -- HCCR=%x, Dumping "
                    "firmware.\n", hccr);

            /*
             * Issue a "HARD" reset in order for the RISC
             * interrupt bit to be cleared.  Schedule a big
             * hammer to get out of the RISC PAUSED state.
             */
            WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
            RD_REG_WORD(&reg->hccr);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSR_RISC_INT) == 0)
            break;

        switch (stat & 0xff) {
        case 0x1:
        case 0x2:
        case 0x10:
        case 0x11:
            qla2x00_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;

            /* Release mailbox registers. */
            WRT_REG_WORD(&reg->semaphore, 0);
            break;
        case 0x12:
            mb[0] = MSW(stat);
            mb[1] = RD_MAILBOX_REG(ha, reg, 1);
            mb[2] = RD_MAILBOX_REG(ha, reg, 2);
            mb[3] = RD_MAILBOX_REG(ha, reg, 3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x13:
            qla2x00_process_response_queue(rsp);
            break;
        case 0x15:
            mb[0] = MBA_CMPLT_1_16BIT;
            mb[1] = MSW(stat);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x16:
            mb[0] = MBA_SCSI_COMPLETION;
            mb[1] = MSW(stat);
            mb[2] = RD_MAILBOX_REG(ha, reg, 2);
            qla2x00_async_event(vha, rsp, mb);
            break;
        default:
            ql_dbg(ql_dbg_async, vha, 0x5028,
                "Unrecognized interrupt type (%d).\n", stat & 0xff);
            break;
        }
        WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
        RD_REG_WORD_RELAXED(&reg->hccr);
    }
    qla2x00_handle_mbx_completion(ha, status);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
    uint16_t cnt;
    uint32_t mboxes;
    uint16_t __iomem *wptr;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

    /* Read all mbox registers? */
    mboxes = (1 << ha->mbx_count) - 1;
    if (!ha->mcp)
        ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
    else
        mboxes = ha->mcp->in_mb;

    /* Load return mailbox registers. */
    ha->flags.mbox_int = 1;
    ha->mailbox_out[0] = mb0;
    mboxes >>= 1;
    wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

    for (cnt = 1; cnt < ha->mbx_count; cnt++) {
        if (IS_QLA2200(ha) && cnt == 8)
            wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
        if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
            ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
        else if (mboxes & BIT_0)
            ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

        wptr++;
        mboxes >>= 1;
    }
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
    static char *event[] =
        { "Complete", "Request Notification", "Time Extension" };
    int rval;
    struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
    struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
    uint16_t __iomem *wptr;
    uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

    /* Seed data -- mailbox1 -> mailbox7. */
    if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
        wptr = (uint16_t __iomem *)&reg24->mailbox1;
    else if (IS_QLA8044(vha->hw))
        wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
    else
        return;

    for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
        mb[cnt] = RD_REG_WORD(wptr);

    ql_dbg(ql_dbg_async, vha, 0x5021,
        "Inter-Driver Communication %s -- "
        "%04x %04x %04x %04x %04x %04x %04x.\n",
        event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
        mb[4], mb[5], mb[6]);
    switch (aen) {
    /* Handle IDC Error completion case. */
    case MBA_IDC_COMPLETE:
        if (mb[1] >> 15) {
            vha->hw->flags.idc_compl_status = 1;
            if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
                complete(&vha->hw->dcbx_comp);
        }
        break;

    case MBA_IDC_NOTIFY:
        /* Acknowledgement needed? [Notify && non-zero timeout]. */
        timeout = (descr >> 8) & 0xf;
        ql_dbg(ql_dbg_async, vha, 0x5022,
            "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
            vha->host_no, event[aen & 0xff], timeout);

        if (!timeout)
            return;
        rval = qla2x00_post_idc_ack_work(vha, mb);
        if (rval != QLA_SUCCESS)
            ql_log(ql_log_warn, vha, 0x5023,
                "IDC failed to post ACK.\n");
        break;
    case MBA_IDC_TIME_EXT:
        vha->hw->idc_extend_tmo = descr;
        ql_dbg(ql_dbg_async, vha, 0x5087,
            "%lu Inter-Driver Communication %s -- "
            "Extend timeout by=%d.\n",
            vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
        break;
    }
}

#define LS_UNKNOWN 2
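/**
 * qla2x00_get_link_speed_str() - Map a firmware link-speed code to a string.
 * @ha: SCSI driver HA context
 * @speed: firmware link-speed code
 *
 * ISP21xx/ISP22xx adapters always run at 1 Gbps; for other adapters the
 * speed code indexes link_speeds[], with unrecognized codes reported as "?".
 */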
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
    static const char *const link_speeds[] = {
        "1", "2", "?", "4", "8", "16", "32", "10"
    };
#define QLA_LAST_SPEED 7

    if (IS_QLA2100(ha) || IS_QLA2200(ha))
        return link_speeds[0];
    else if (speed == 0x13)
        return link_speeds[QLA_LAST_SPEED];
    else if (speed < QLA_LAST_SPEED)
        return link_speeds[speed];
    else
        return link_speeds[LS_UNKNOWN];
}

static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
    struct qla_hw_data *ha = vha->hw;

    /*
     * 8200 AEN Interpretation:
     * mb[0] = AEN code
     * mb[1] = AEN Reason code
     * mb[2] = LSW of Peg-Halt Status-1 Register
     * mb[6] = MSW of Peg-Halt Status-1 Register
     * mb[3] = LSW of Peg-Halt Status-2 register
     * mb[7] = MSW of Peg-Halt Status-2 register
     * mb[4] = IDC Device-State Register value
     * mb[5] = IDC Driver-Presence Register value
     */
    ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
        "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
        mb[0], mb[1], mb[2], mb[6]);
    ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
        "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
        "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

    if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
        IDC_HEARTBEAT_FAILURE)) {
        ha->flags.nic_core_hung = 1;
        ql_log(ql_log_warn, vha, 0x5060,
            "83XX: F/W Error Reported: Check if reset required.\n");

        if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
            uint32_t protocol_engine_id, fw_err_code, err_level;

            /*
             * IDC_PEG_HALT_STATUS_CHANGE interpretation:
             * - PEG-Halt Status-1 Register:
             *   (LSW = mb[2], MSW = mb[6])
             *   Bits 0-7   = protocol-engine ID
             *   Bits 8-28  = f/w error code
             *   Bits 29-31 = Error-level
             *     Error-level 0x1 = Non-Fatal error
             *     Error-level 0x2 = Recoverable Fatal error
             *     Error-level 0x4 = UnRecoverable Fatal error
             * - PEG-Halt Status-2 Register:
             *   (LSW = mb[3], MSW = mb[7])
             */
            protocol_engine_id = (mb[2] & 0xff);
            fw_err_code = (((mb[2] & 0xff00) >> 8) |
                ((mb[6] & 0x1fff) << 8));
            err_level = ((mb[6] & 0xe000) >> 13);
            ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
                "Register: protocol_engine_id=0x%x "
                "fw_err_code=0x%x err_level=0x%x.\n",
                protocol_engine_id, fw_err_code, err_level);
            ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
                "Register: 0x%x%x.\n", mb[7], mb[3]);
            if (err_level == ERR_LEVEL_NON_FATAL) {
                ql_log(ql_log_warn, vha, 0x5063,
                    "Not a fatal error, f/w has recovered "
                    "itself.\n");
            } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
                ql_log(ql_log_fatal, vha, 0x5064,
                    "Recoverable Fatal error: Chip reset "
reset " 436 "required.\n"); 437 qla83xx_schedule_work(vha, 438 QLA83XX_NIC_CORE_RESET); 439 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 440 ql_log(ql_log_fatal, vha, 0x5065, 441 "Unrecoverable Fatal error: Set FAILED " 442 "state, reboot required.\n"); 443 qla83xx_schedule_work(vha, 444 QLA83XX_NIC_CORE_UNRECOVERABLE); 445 } 446 } 447 448 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 449 uint16_t peg_fw_state, nw_interface_link_up; 450 uint16_t nw_interface_signal_detect, sfp_status; 451 uint16_t htbt_counter, htbt_monitor_enable; 452 uint16_t sfp_additonal_info, sfp_multirate; 453 uint16_t sfp_tx_fault, link_speed, dcbx_status; 454 455 /* 456 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 457 * - PEG-to-FC Status Register: 458 * (LSW = mb[2], MSW = mb[6]) 459 * Bits 0-7 = Peg-Firmware state 460 * Bit 8 = N/W Interface Link-up 461 * Bit 9 = N/W Interface signal detected 462 * Bits 10-11 = SFP Status 463 * SFP Status 0x0 = SFP+ transceiver not expected 464 * SFP Status 0x1 = SFP+ transceiver not present 465 * SFP Status 0x2 = SFP+ transceiver invalid 466 * SFP Status 0x3 = SFP+ transceiver present and 467 * valid 468 * Bits 12-14 = Heartbeat Counter 469 * Bit 15 = Heartbeat Monitor Enable 470 * Bits 16-17 = SFP Additional Info 471 * SFP info 0x0 = Unregocnized transceiver for 472 * Ethernet 473 * SFP info 0x1 = SFP+ brand validation failed 474 * SFP info 0x2 = SFP+ speed validation failed 475 * SFP info 0x3 = SFP+ access error 476 * Bit 18 = SFP Multirate 477 * Bit 19 = SFP Tx Fault 478 * Bits 20-22 = Link Speed 479 * Bits 23-27 = Reserved 480 * Bits 28-30 = DCBX Status 481 * DCBX Status 0x0 = DCBX Disabled 482 * DCBX Status 0x1 = DCBX Enabled 483 * DCBX Status 0x2 = DCBX Exchange error 484 * Bit 31 = Reserved 485 */ 486 peg_fw_state = (mb[2] & 0x00ff); 487 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 488 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 489 sfp_status = ((mb[2] & 0x0c00) >> 10); 490 htbt_counter = ((mb[2] & 0x7000) >> 12); 491 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 492 sfp_additonal_info = (mb[6] & 0x0003); 493 sfp_multirate = ((mb[6] & 0x0004) >> 2); 494 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 495 link_speed = ((mb[6] & 0x0070) >> 4); 496 dcbx_status = ((mb[6] & 0x7000) >> 12); 497 498 ql_log(ql_log_warn, vha, 0x5066, 499 "Peg-to-Fc Status Register:\n" 500 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 501 "nw_interface_signal_detect=0x%x" 502 "\nsfp_statis=0x%x.\n ", peg_fw_state, 503 nw_interface_link_up, nw_interface_signal_detect, 504 sfp_status); 505 ql_log(ql_log_warn, vha, 0x5067, 506 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 507 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ", 508 htbt_counter, htbt_monitor_enable, 509 sfp_additonal_info, sfp_multirate); 510 ql_log(ql_log_warn, vha, 0x5068, 511 "sfp_tx_fault=0x%x, link_state=0x%x, " 512 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 513 dcbx_status); 514 515 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 516 } 517 518 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 519 ql_log(ql_log_warn, vha, 0x5069, 520 "Heartbeat Failure encountered, chip reset " 521 "required.\n"); 522 523 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 524 } 525 } 526 527 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 528 ql_log(ql_log_info, vha, 0x506a, 529 "IDC Device-State changed = 0x%x.\n", mb[4]); 530 if (ha->flags.nic_core_reset_owner) 531 return; 532 qla83xx_schedule_work(vha, MBA_IDC_AEN); 533 } 534 } 535 536 int 537 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) 538 { 539 struct qla_hw_data 
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *vp;
    uint32_t vp_did;
    unsigned long flags;
    int ret = 0;

    if (!ha->num_vhosts)
        return ret;

    spin_lock_irqsave(&ha->vport_slock, flags);
    list_for_each_entry(vp, &ha->vp_list, list) {
        vp_did = vp->d_id.b24;
        if (vp_did == rscn_entry) {
            ret = 1;
            break;
        }
    }
    spin_unlock_irqrestore(&ha->vport_slock, flags);

    return ret;
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
    uint16_t handle_cnt;
    uint16_t cnt, mbx;
    uint32_t handles[5];
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
    struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
    uint32_t rscn_entry, host_pid;
    unsigned long flags;

    /* Setup to process RIO completion. */
    handle_cnt = 0;
    if (IS_CNA_CAPABLE(ha))
        goto skip_rio;
    switch (mb[0]) {
    case MBA_SCSI_COMPLETION:
        handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
        handle_cnt = 1;
        break;
    case MBA_CMPLT_1_16BIT:
        handles[0] = mb[1];
        handle_cnt = 1;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_2_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handle_cnt = 2;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_3_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handle_cnt = 3;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_4_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
        handle_cnt = 4;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_5_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
        handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
        handle_cnt = 5;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_2_32BIT:
        handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
        handles[1] = le32_to_cpu(
            ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
            RD_MAILBOX_REG(ha, reg, 6));
        handle_cnt = 2;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    default:
        break;
    }
skip_rio:
    switch (mb[0]) {
    case MBA_SCSI_COMPLETION:    /* Fast Post */
        if (!vha->flags.online)
            break;

        for (cnt = 0; cnt < handle_cnt; cnt++)
            qla2x00_process_completed_request(vha, rsp->req,
                handles[cnt]);
        break;

    case MBA_RESET:            /* Reset */
        ql_dbg(ql_dbg_async, vha, 0x5002,
            "Asynchronous RESET.\n");

        set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
        break;

    case MBA_SYSTEM_ERR:        /* System Error */
        mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
            RD_REG_WORD(&reg24->mailbox7) : 0;
        ql_log(ql_log_warn, vha, 0x5003,
            "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
            "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

        ha->isp_ops->fw_dump(vha, 1);

        if (IS_FWI2_CAPABLE(ha)) {
            if (mb[1] == 0 && mb[2] == 0) {
                ql_log(ql_log_fatal, vha, 0x5004,
                    "Unrecoverable Hardware Error: adapter "
                    "marked OFFLINE!\n");
                vha->flags.online = 0;
                vha->device_flags |= DFLG_DEV_FAILED;
            } else {
                /* Check to see if MPI timeout occurred */
                if ((mbx & MBX_3) && (ha->port_no == 0))
                    set_bit(MPI_RESET_NEEDED,
                        &vha->dpc_flags);

                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            }
        } else if (mb[1] == 0) {
            ql_log(ql_log_fatal, vha, 0x5005,
                "Unrecoverable Hardware Error: adapter marked "
                "OFFLINE!\n");
            vha->flags.online = 0;
            vha->device_flags |= DFLG_DEV_FAILED;
        } else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_REQ_TRANSFER_ERR:    /* Request Transfer Error */
        ql_log(ql_log_warn, vha, 0x5006,
            "ISP Request Transfer Error (%x).\n", mb[1]);

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_RSP_TRANSFER_ERR:    /* Response Transfer Error */
        ql_log(ql_log_warn, vha, 0x5007,
            "ISP Response Transfer Error.\n");

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_WAKEUP_THRES:        /* Request Queue Wake-up */
        ql_dbg(ql_dbg_async, vha, 0x5008,
            "Asynchronous WAKEUP_THRES.\n");

        break;
    case MBA_LIP_OCCURRED:        /* Loop Initialization Procedure */
        ql_dbg(ql_dbg_async, vha, 0x5009,
            "LIP occurred (%x).\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
        break;

    case MBA_LOOP_UP:        /* Loop Up Event */
        if (IS_QLA2100(ha) || IS_QLA2200(ha))
            ha->link_data_rate = PORT_SPEED_1GB;
        else
            ha->link_data_rate = mb[1];

        ql_dbg(ql_dbg_async, vha, 0x500a,
            "LOOP UP detected (%s Gbps).\n",
            qla2x00_get_link_speed_str(ha, ha->link_data_rate));

        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
        break;

    case MBA_LOOP_DOWN:        /* Loop Down Event */
        mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
            ? RD_REG_WORD(&reg24->mailbox4) : 0;
        mbx = (IS_P3P_TYPE(ha)) ?
            RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
        ql_dbg(ql_dbg_async, vha, 0x500b,
            "LOOP DOWN detected (%x %x %x %x).\n",
            mb[1], mb[2], mb[3], mbx);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            vha->device_flags |= DFLG_NO_CABLE;
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        vha->flags.management_server_logged_in = 0;
        ha->link_data_rate = PORT_SPEED_UNKNOWN;
        qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
        break;

    case MBA_LIP_RESET:        /* LIP reset occurred */
        ql_dbg(ql_dbg_async, vha, 0x500c,
            "LIP reset occurred (%x).\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

        ha->operating_mode = LOOP;
        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
        break;

    /* case MBA_DCBX_COMPLETE: */
    case MBA_POINT_TO_POINT:    /* Point-to-Point */
        if (IS_QLA2100(ha))
            break;

        if (IS_CNA_CAPABLE(ha)) {
            ql_dbg(ql_dbg_async, vha, 0x500d,
                "DCBX Completed -- %04x %04x %04x.\n",
                mb[1], mb[2], mb[3]);
            if (ha->notify_dcbx_comp && !vha->vp_idx)
                complete(&ha->dcbx_comp);

        } else
            ql_dbg(ql_dbg_async, vha, 0x500e,
                "Asynchronous P2P MODE received.\n");

        /*
         * Until there's a transition from loop down to loop up, treat
         * this as loop down only.
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            if (!atomic_read(&vha->loop_down_timer))
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
            set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

        ha->flags.gpsc_supported = 1;
        vha->flags.management_server_logged_in = 0;
        break;

    case MBA_CHG_IN_CONNECTION:    /* Change in connection mode */
        if (IS_QLA2100(ha))
            break;

        ql_dbg(ql_dbg_async, vha, 0x500f,
            "Configuration change detected: value=%x.\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            if (!atomic_read(&vha->loop_down_timer))
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        break;

    case MBA_PORT_UPDATE:        /* Port database update */
        /*
         * Handle only global and vn-port update events
         *
         * Relevant inputs:
         * mb[1] = N_Port handle of changed port
         * OR 0xffff for global event
         * mb[2] = New login state
         * 7 = Port logged out
         * mb[3] = LSB is vp_idx, 0xff = all vps
         *
         * Skip processing if:
         * Event is global, vp_idx is NOT all vps,
         * vp_idx does not match
         * Event is not global, vp_idx does not match
         */
        if (IS_QLA2XXX_MIDTYPE(ha) &&
            ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
            (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
            break;

        /* Global event -- port logout or port unavailable. */
        if (mb[1] == 0xffff && mb[2] == 0x7) {
            ql_dbg(ql_dbg_async, vha, 0x5010,
                "Port unavailable %04x %04x %04x.\n",
                mb[1], mb[2], mb[3]);
            ql_log(ql_log_warn, vha, 0x505e,
                "Link is offline.\n");

            if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
                vha->device_flags |= DFLG_NO_CABLE;
                qla2x00_mark_all_devices_lost(vha, 1);
            }

            if (vha->vp_idx) {
                atomic_set(&vha->vp_state, VP_FAILED);
                fc_vport_set_state(vha->fc_vport,
                    FC_VPORT_FAILED);
                qla2x00_mark_all_devices_lost(vha, 1);
            }

            vha->flags.management_server_logged_in = 0;
            ha->link_data_rate = PORT_SPEED_UNKNOWN;
            break;
        }

        /*
         * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
         * event etc. earlier indicating loop is down) then process
         * it. Otherwise ignore it and wait for RSCN to come in.
         */
        atomic_set(&vha->loop_down_timer, 0);
        if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
            ql_dbg(ql_dbg_async, vha, 0x5011,
                "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
                mb[1], mb[2], mb[3]);

            qlt_async_event(mb[0], vha, mb);
            break;
        }

        ql_dbg(ql_dbg_async, vha, 0x5012,
            "Port database changed %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        ql_log(ql_log_warn, vha, 0x505f,
            "Link is operational (%s Gbps).\n",
            qla2x00_get_link_speed_str(ha, ha->link_data_rate));

        /*
         * Mark all devices as missing so we will login again.
         */
        atomic_set(&vha->loop_state, LOOP_UP);

        qla2x00_mark_all_devices_lost(vha, 1);

        if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
            set_bit(SCR_PENDING, &vha->dpc_flags);

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);

        qlt_async_event(mb[0], vha, mb);
        break;

    case MBA_RSCN_UPDATE:        /* State Change Registration */
        /* Check if the Vport has issued a SCR */
        if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
            break;
        /* Only handle SCNs for our Vport index. */
        if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
            break;

        ql_dbg(ql_dbg_async, vha, 0x5013,
            "RSCN database changed -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);

        rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
        host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
            | vha->d_id.b.al_pa;
        if (rscn_entry == host_pid) {
            ql_dbg(ql_dbg_async, vha, 0x5014,
                "Ignoring RSCN update to local host "
                "port ID (%06x).\n", host_pid);
            break;
        }

        /* Ignore reserved bits from RSCN-payload. */
        rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

        /* Skip RSCNs for virtual ports on the same physical port */
        if (qla2x00_is_a_vp_did(vha, rscn_entry))
            break;

        atomic_set(&vha->loop_down_timer, 0);
        vha->flags.management_server_logged_in = 0;

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(RSCN_UPDATE, &vha->dpc_flags);
        qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
        break;

    /* case MBA_RIO_RESPONSE: */
    case MBA_ZIO_RESPONSE:
        ql_dbg(ql_dbg_async, vha, 0x5015,
            "[R|Z]IO update completion.\n");

        if (IS_FWI2_CAPABLE(ha))
            qla24xx_process_response_queue(vha, rsp);
        else
            qla2x00_process_response_queue(rsp);
        break;

    case MBA_DISCARD_RND_FRAME:
        ql_dbg(ql_dbg_async, vha, 0x5016,
            "Discard RND Frame -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        break;

    case MBA_TRACE_NOTIFICATION:
        ql_dbg(ql_dbg_async, vha, 0x5017,
            "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
        break;

    case MBA_ISP84XX_ALERT:
        ql_dbg(ql_dbg_async, vha, 0x5018,
            "ISP84XX Alert Notification -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);

        spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
        switch (mb[1]) {
        case A84_PANIC_RECOVERY:
            ql_log(ql_log_info, vha, 0x5019,
                "Alert 84XX: panic recovery %04x %04x.\n",
                mb[2], mb[3]);
            break;
        case A84_OP_LOGIN_COMPLETE:
            ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
            ql_log(ql_log_info, vha, 0x501a,
                "Alert 84XX: firmware version %x.\n",
                ha->cs84xx->op_fw_version);
            break;
        case A84_DIAG_LOGIN_COMPLETE:
            ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
            ql_log(ql_log_info, vha, 0x501b,
                "Alert 84XX: diagnostic firmware version %x.\n",
%x.\n", 1024 ha->cs84xx->diag_fw_version); 1025 break; 1026 case A84_GOLD_LOGIN_COMPLETE: 1027 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1028 ha->cs84xx->fw_update = 1; 1029 ql_log(ql_log_info, vha, 0x501c, 1030 "Alert 84XX: gold firmware version %x.\n", 1031 ha->cs84xx->gold_fw_version); 1032 break; 1033 default: 1034 ql_log(ql_log_warn, vha, 0x501d, 1035 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 1036 mb[1], mb[2], mb[3]); 1037 } 1038 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 1039 break; 1040 case MBA_DCBX_START: 1041 ql_dbg(ql_dbg_async, vha, 0x501e, 1042 "DCBX Started -- %04x %04x %04x.\n", 1043 mb[1], mb[2], mb[3]); 1044 break; 1045 case MBA_DCBX_PARAM_UPDATE: 1046 ql_dbg(ql_dbg_async, vha, 0x501f, 1047 "DCBX Parameters Updated -- %04x %04x %04x.\n", 1048 mb[1], mb[2], mb[3]); 1049 break; 1050 case MBA_FCF_CONF_ERR: 1051 ql_dbg(ql_dbg_async, vha, 0x5020, 1052 "FCF Configuration Error -- %04x %04x %04x.\n", 1053 mb[1], mb[2], mb[3]); 1054 break; 1055 case MBA_IDC_NOTIFY: 1056 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1057 mb[4] = RD_REG_WORD(®24->mailbox4); 1058 if (((mb[2] & 0x7fff) == MBC_PORT_RESET || 1059 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && 1060 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { 1061 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 1062 /* 1063 * Extend loop down timer since port is active. 1064 */ 1065 if (atomic_read(&vha->loop_state) == LOOP_DOWN) 1066 atomic_set(&vha->loop_down_timer, 1067 LOOP_DOWN_TIME); 1068 qla2xxx_wake_dpc(vha); 1069 } 1070 } 1071 case MBA_IDC_COMPLETE: 1072 if (ha->notify_lb_portup_comp && !vha->vp_idx) 1073 complete(&ha->lb_portup_comp); 1074 /* Fallthru */ 1075 case MBA_IDC_TIME_EXT: 1076 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || 1077 IS_QLA8044(ha)) 1078 qla81xx_idc_event(vha, mb[0], mb[1]); 1079 break; 1080 1081 case MBA_IDC_AEN: 1082 mb[4] = RD_REG_WORD(®24->mailbox4); 1083 mb[5] = RD_REG_WORD(®24->mailbox5); 1084 mb[6] = RD_REG_WORD(®24->mailbox6); 1085 mb[7] = RD_REG_WORD(®24->mailbox7); 1086 qla83xx_handle_8200_aen(vha, mb); 1087 break; 1088 1089 default: 1090 ql_dbg(ql_dbg_async, vha, 0x5057, 1091 "Unknown AEN:%04x %04x %04x %04x\n", 1092 mb[0], mb[1], mb[2], mb[3]); 1093 } 1094 1095 qlt_async_event(mb[0], vha, mb); 1096 1097 if (!vha->vp_idx && ha->num_vhosts) 1098 qla2x00_alert_all_vps(rsp, mb); 1099 } 1100 1101 /** 1102 * qla2x00_process_completed_request() - Process a Fast Post response. 1103 * @ha: SCSI driver HA context 1104 * @index: SRB index 1105 */ 1106 void 1107 qla2x00_process_completed_request(struct scsi_qla_host *vha, 1108 struct req_que *req, uint32_t index) 1109 { 1110 srb_t *sp; 1111 struct qla_hw_data *ha = vha->hw; 1112 1113 /* Validate handle. */ 1114 if (index >= req->num_outstanding_cmds) { 1115 ql_log(ql_log_warn, vha, 0x3014, 1116 "Invalid SCSI command index (%x).\n", index); 1117 1118 if (IS_P3P_TYPE(ha)) 1119 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1120 else 1121 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1122 return; 1123 } 1124 1125 sp = req->outstanding_cmds[index]; 1126 if (sp) { 1127 /* Free outstanding command slot. 
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
    struct qla_hw_data *ha = vha->hw;
    sts_entry_t *pkt = iocb;
    srb_t *sp = NULL;
    uint16_t index;

    index = LSW(pkt->handle);
    if (index >= req->num_outstanding_cmds) {
        ql_log(ql_log_warn, vha, 0x5031,
            "Invalid command index (%x).\n", index);
        if (IS_P3P_TYPE(ha))
            set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
        else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        goto done;
    }
    sp = req->outstanding_cmds[index];
    if (!sp) {
        ql_log(ql_log_warn, vha, 0x5032,
            "Invalid completion handle (%x) -- timed-out.\n", index);
        return sp;
    }
    if (sp->handle != index) {
        ql_log(ql_log_warn, vha, 0x5033,
            "SRB handle (%x) mismatch %x.\n", sp->handle, index);
        return NULL;
    }

    req->outstanding_cmds[index] = NULL;

done:
    return sp;
}
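/**
 * qla2x00_mbx_iocb_entry() - Process a mailbox IOCB completion (login/logout).
 * @vha: SCSI driver HA context
 * @req: request queue the IOCB was issued on
 * @mbx: completed mailbox IOCB entry
 */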
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
    const char func[] = "MBX-IOCB";
    const char *type;
    fc_port_t *fcport;
    srb_t *sp;
    struct srb_iocb *lio;
    uint16_t *data;
    uint16_t status;

    sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
    if (!sp)
        return;

    lio = &sp->u.iocb_cmd;
    type = sp->name;
    fcport = sp->fcport;
    data = lio->u.logio.data;

    data[0] = MBS_COMMAND_ERROR;
    data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
        QLA_LOGIO_LOGIN_RETRIED : 0;
    if (mbx->entry_status) {
        ql_dbg(ql_dbg_async, vha, 0x5043,
            "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
            "entry-status=%x status=%x state-flag=%x "
            "status-flags=%x.\n", type, sp->handle,
            fcport->d_id.b.domain, fcport->d_id.b.area,
            fcport->d_id.b.al_pa, mbx->entry_status,
            le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
            le16_to_cpu(mbx->status_flags));

        ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
            (uint8_t *)mbx, sizeof(*mbx));

        goto logio_done;
    }

    status = le16_to_cpu(mbx->status);
    if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
        le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
        status = 0;
    if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
        ql_dbg(ql_dbg_async, vha, 0x5045,
            "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
            type, sp->handle, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa,
            le16_to_cpu(mbx->mb1));

        data[0] = MBS_COMMAND_COMPLETE;
        if (sp->type == SRB_LOGIN_CMD) {
            fcport->port_type = FCT_TARGET;
            if (le16_to_cpu(mbx->mb1) & BIT_0)
                fcport->port_type = FCT_INITIATOR;
            else if (le16_to_cpu(mbx->mb1) & BIT_1)
                fcport->flags |= FCF_FCP2_DEVICE;
        }
        goto logio_done;
    }

    data[0] = le16_to_cpu(mbx->mb0);
    switch (data[0]) {
    case MBS_PORT_ID_USED:
        data[1] = le16_to_cpu(mbx->mb1);
        break;
    case MBS_LOOP_ID_USED:
        break;
    default:
        data[0] = MBS_COMMAND_ERROR;
        break;
    }

    ql_log(ql_log_warn, vha, 0x5046,
        "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
        "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
        fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
        status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
        le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
        le16_to_cpu(mbx->mb7));

logio_done:
    sp->done(vha, sp, 0);
}
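/**
 * qla2x00_ct_entry() - Process a CT pass-through IOCB completion.
 * @vha: SCSI driver HA context
 * @req: request queue the IOCB was issued on
 * @pkt: completed status entry
 * @iocb_type: IOCB type (CT_IOCB_TYPE)
 */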
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
    const char func[] = "CT_IOCB";
    const char *type;
    srb_t *sp;
    struct fc_bsg_job *bsg_job;
    uint16_t comp_status;
    int res;

    sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
    if (!sp)
        return;

    bsg_job = sp->u.bsg_job;

    type = "ct pass-through";

    comp_status = le16_to_cpu(pkt->comp_status);

    /*
     * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
     * fc payload to the caller
     */
    bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);

    if (comp_status != CS_COMPLETE) {
        if (comp_status == CS_DATA_UNDERRUN) {
            res = DID_OK << 16;
            bsg_job->reply->reply_payload_rcv_len =
                le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

            ql_log(ql_log_warn, vha, 0x5048,
                "CT pass-through-%s error "
                "comp_status-status=0x%x total_byte = 0x%x.\n",
                type, comp_status,
                bsg_job->reply->reply_payload_rcv_len);
        } else {
            ql_log(ql_log_warn, vha, 0x5049,
                "CT pass-through-%s error "
                "comp_status-status=0x%x.\n", type, comp_status);
            res = DID_ERROR << 16;
            bsg_job->reply->reply_payload_rcv_len = 0;
        }
        ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
            (uint8_t *)pkt, sizeof(*pkt));
    } else {
        res = DID_OK << 16;
        bsg_job->reply->reply_payload_rcv_len =
            bsg_job->reply_payload.payload_len;
        bsg_job->reply_len = 0;
    }

    sp->done(vha, sp, res);
}
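/**
 * qla24xx_els_ct_entry() - Process an ELS/CT pass-through IOCB completion.
 * @vha: SCSI driver HA context
 * @req: request queue the IOCB was issued on
 * @pkt: completed status entry
 * @iocb_type: IOCB type (ELS or CT pass-through)
 *
 * Copies the firmware completion status and error subcodes into the bsg
 * reply so the caller can decode the ELS/CT payload itself.
 */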
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
    const char func[] = "ELS_CT_IOCB";
    const char *type;
    srb_t *sp;
    struct fc_bsg_job *bsg_job;
    uint16_t comp_status;
    uint32_t fw_status[3];
    uint8_t *fw_sts_ptr;
    int res;

    sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
    if (!sp)
        return;
    bsg_job = sp->u.bsg_job;

    type = NULL;
    switch (sp->type) {
    case SRB_ELS_CMD_RPT:
    case SRB_ELS_CMD_HST:
        type = "els";
        break;
    case SRB_CT_CMD:
        type = "ct pass-through";
        break;
    default:
        ql_dbg(ql_dbg_user, vha, 0x503e,
            "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
        return;
    }

    comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
    fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
    fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

    /*
     * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
     * fc payload to the caller
     */
    bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
    bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

    if (comp_status != CS_COMPLETE) {
        if (comp_status == CS_DATA_UNDERRUN) {
            res = DID_OK << 16;
            bsg_job->reply->reply_payload_rcv_len =
                le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

            ql_dbg(ql_dbg_user, vha, 0x503f,
                "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
                "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
                type, sp->handle, comp_status, fw_status[1], fw_status[2],
                le16_to_cpu(((struct els_sts_entry_24xx *)
                    pkt)->total_byte_count));
            fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
                sizeof(struct fc_bsg_reply);
            memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
        } else {
            ql_dbg(ql_dbg_user, vha, 0x5040,
                "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
                "error subcode 1=0x%x error subcode 2=0x%x.\n",
                type, sp->handle, comp_status,
                le16_to_cpu(((struct els_sts_entry_24xx *)
                    pkt)->error_subcode_1),
                le16_to_cpu(((struct els_sts_entry_24xx *)
                    pkt)->error_subcode_2));
            res = DID_ERROR << 16;
            bsg_job->reply->reply_payload_rcv_len = 0;
            fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
                sizeof(struct fc_bsg_reply);
            memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
        }
        ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
            (uint8_t *)pkt, sizeof(*pkt));
    } else {
        res = DID_OK << 16;
        bsg_job->reply->reply_payload_rcv_len =
            bsg_job->reply_payload.payload_len;
        bsg_job->reply_len = 0;
    }

    sp->done(vha, sp, res);
}
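/**
 * qla24xx_logio_entry() - Process a login/logout IOCB completion.
 * @vha: SCSI driver HA context
 * @req: request queue the IOCB was issued on
 * @logio: completed login IOCB entry
 */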
"Async-%s error - hdl=%x not enough response(%d).\n", 1534 type, sp->handle, sts->rsp_data_len); 1535 } else if (sts->data[3]) { 1536 ql_log(ql_log_warn, fcport->vha, 0x503c, 1537 "Async-%s error - hdl=%x response(%x).\n", 1538 type, sp->handle, sts->data[3]); 1539 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 1540 } 1541 } 1542 1543 if (iocb->u.tmf.data != QLA_SUCCESS) 1544 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, 1545 (uint8_t *)sts, sizeof(*sts)); 1546 1547 sp->done(vha, sp, 0); 1548 } 1549 1550 /** 1551 * qla2x00_process_response_queue() - Process response queue entries. 1552 * @ha: SCSI driver HA context 1553 */ 1554 void 1555 qla2x00_process_response_queue(struct rsp_que *rsp) 1556 { 1557 struct scsi_qla_host *vha; 1558 struct qla_hw_data *ha = rsp->hw; 1559 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1560 sts_entry_t *pkt; 1561 uint16_t handle_cnt; 1562 uint16_t cnt; 1563 1564 vha = pci_get_drvdata(ha->pdev); 1565 1566 if (!vha->flags.online) 1567 return; 1568 1569 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 1570 pkt = (sts_entry_t *)rsp->ring_ptr; 1571 1572 rsp->ring_index++; 1573 if (rsp->ring_index == rsp->length) { 1574 rsp->ring_index = 0; 1575 rsp->ring_ptr = rsp->ring; 1576 } else { 1577 rsp->ring_ptr++; 1578 } 1579 1580 if (pkt->entry_status != 0) { 1581 qla2x00_error_entry(vha, rsp, pkt); 1582 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1583 wmb(); 1584 continue; 1585 } 1586 1587 switch (pkt->entry_type) { 1588 case STATUS_TYPE: 1589 qla2x00_status_entry(vha, rsp, pkt); 1590 break; 1591 case STATUS_TYPE_21: 1592 handle_cnt = ((sts21_entry_t *)pkt)->handle_count; 1593 for (cnt = 0; cnt < handle_cnt; cnt++) { 1594 qla2x00_process_completed_request(vha, rsp->req, 1595 ((sts21_entry_t *)pkt)->handle[cnt]); 1596 } 1597 break; 1598 case STATUS_TYPE_22: 1599 handle_cnt = ((sts22_entry_t *)pkt)->handle_count; 1600 for (cnt = 0; cnt < handle_cnt; cnt++) { 1601 qla2x00_process_completed_request(vha, rsp->req, 1602 ((sts22_entry_t *)pkt)->handle[cnt]); 1603 } 1604 break; 1605 case STATUS_CONT_TYPE: 1606 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 1607 break; 1608 case MBX_IOCB_TYPE: 1609 qla2x00_mbx_iocb_entry(vha, rsp->req, 1610 (struct mbx_entry *)pkt); 1611 break; 1612 case CT_IOCB_TYPE: 1613 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 1614 break; 1615 default: 1616 /* Type Not Supported. 
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
    const char func[] = "TMF-IOCB";
    const char *type;
    fc_port_t *fcport;
    srb_t *sp;
    struct srb_iocb *iocb;
    struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

    sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
    if (!sp)
        return;

    iocb = &sp->u.iocb_cmd;
    type = sp->name;
    fcport = sp->fcport;
    iocb->u.tmf.data = QLA_SUCCESS;

    if (sts->entry_status) {
        ql_log(ql_log_warn, fcport->vha, 0x5038,
            "Async-%s error - hdl=%x entry-status(%x).\n",
            type, sp->handle, sts->entry_status);
        iocb->u.tmf.data = QLA_FUNCTION_FAILED;
    } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
        ql_log(ql_log_warn, fcport->vha, 0x5039,
            "Async-%s error - hdl=%x completion status(%x).\n",
            type, sp->handle, sts->comp_status);
        iocb->u.tmf.data = QLA_FUNCTION_FAILED;
    } else if ((le16_to_cpu(sts->scsi_status) &
        SS_RESPONSE_INFO_LEN_VALID)) {
        if (le32_to_cpu(sts->rsp_data_len) < 4) {
            ql_log(ql_log_warn, fcport->vha, 0x503b,
                "Async-%s error - hdl=%x not enough response(%d).\n",
                type, sp->handle, sts->rsp_data_len);
        } else if (sts->data[3]) {
            ql_log(ql_log_warn, fcport->vha, 0x503c,
                "Async-%s error - hdl=%x response(%x).\n",
                type, sp->handle, sts->data[3]);
            iocb->u.tmf.data = QLA_FUNCTION_FAILED;
        }
    }

    if (iocb->u.tmf.data != QLA_SUCCESS)
        ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
            (uint8_t *)sts, sizeof(*sts));

    sp->done(vha, sp, 0);
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
    struct scsi_qla_host *vha;
    struct qla_hw_data *ha = rsp->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    sts_entry_t *pkt;
    uint16_t handle_cnt;
    uint16_t cnt;

    vha = pci_get_drvdata(ha->pdev);

    if (!vha->flags.online)
        return;

    while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
        pkt = (sts_entry_t *)rsp->ring_ptr;

        rsp->ring_index++;
        if (rsp->ring_index == rsp->length) {
            rsp->ring_index = 0;
            rsp->ring_ptr = rsp->ring;
        } else {
            rsp->ring_ptr++;
        }

        if (pkt->entry_status != 0) {
            qla2x00_error_entry(vha, rsp, pkt);
            ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
            wmb();
            continue;
        }

        switch (pkt->entry_type) {
        case STATUS_TYPE:
            qla2x00_status_entry(vha, rsp, pkt);
            break;
        case STATUS_TYPE_21:
            handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
            for (cnt = 0; cnt < handle_cnt; cnt++) {
                qla2x00_process_completed_request(vha, rsp->req,
                    ((sts21_entry_t *)pkt)->handle[cnt]);
            }
            break;
        case STATUS_TYPE_22:
            handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
            for (cnt = 0; cnt < handle_cnt; cnt++) {
                qla2x00_process_completed_request(vha, rsp->req,
                    ((sts22_entry_t *)pkt)->handle[cnt]);
            }
            break;
        case STATUS_CONT_TYPE:
            qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
            break;
        case MBX_IOCB_TYPE:
            qla2x00_mbx_iocb_entry(vha, rsp->req,
                (struct mbx_entry *)pkt);
            break;
        case CT_IOCB_TYPE:
            qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
            break;
        default:
            /* Type Not Supported. */
            ql_log(ql_log_warn, vha, 0x504a,
                "Received unknown response pkt type %x "
                "entry status=%x.\n",
                pkt->entry_type, pkt->entry_status);
            break;
        }
        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
        wmb();
    }

    /* Adjust ring index */
    WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
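/**
 * qla2x00_handle_sense() - Copy sense data for a Check Condition completion.
 * @sp: SRB the sense data belongs to
 * @sense_data: sense bytes returned by the firmware
 * @par_sense_len: sense bytes available in this status entry
 * @sense_len: total sense length reported by the firmware
 * @rsp: response queue, used to track status continuation entries
 * @res: SCSI result to post once all sense data has been gathered
 *
 * If the sense data does not fit in this entry, the SRB is parked on
 * rsp->status_srb so the remainder can be copied from subsequent status
 * continuation entries.
 */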
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct scsi_cmnd *cp = GET_CMD_SP(sp);
    uint32_t track_sense_len;

    if (sense_len >= SCSI_SENSE_BUFFERSIZE)
        sense_len = SCSI_SENSE_BUFFERSIZE;

    SET_CMD_SENSE_LEN(sp, sense_len);
    SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
    track_sense_len = sense_len;

    if (sense_len > par_sense_len)
        sense_len = par_sense_len;

    memcpy(cp->sense_buffer, sense_data, sense_len);

    SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
    track_sense_len -= sense_len;
    SET_CMD_SENSE_LEN(sp, track_sense_len);

    if (track_sense_len != 0) {
        rsp->status_srb = sp;
        cp->result = res;
    }

    if (sense_len) {
        ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
            "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
            sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
            cp);
        ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
            cp->sense_buffer, sense_len);
    }
}

struct scsi_dif_tuple {
    __be16 guard;    /* Checksum */
    __be16 app_tag;  /* APPL identifier */
    __be32 ref_tag;  /* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    uint8_t *ap = &sts24->data[12];
    uint8_t *ep = &sts24->data[20];
    uint32_t e_ref_tag, a_ref_tag;
    uint16_t e_app_tag, a_app_tag;
    uint16_t e_guard, a_guard;

    /*
     * swab32 of the "data" field in the beginning of qla2x00_status_entry()
     * would make guard field appear at offset 2
     */
    a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
    a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
    a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
    e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
    e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
    e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

    ql_dbg(ql_dbg_io, vha, 0x3023,
        "iocb(s) %p Returned STATUS.\n", sts24);

    ql_dbg(ql_dbg_io, vha, 0x3024,
        "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
        " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
        " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
        cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
        a_app_tag, e_app_tag, a_guard, e_guard);

    /*
     * Ignore sector if:
     * For type     3: ref & app tag is all 'f's
     * For type 0,1,2: app tag is all 'f's
     */
    if ((a_app_tag == 0xffff) &&
        ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
        (a_ref_tag == 0xffffffff))) {
        uint32_t blocks_done, resid;
        sector_t lba_s = scsi_get_lba(cmd);

        /* 2TB boundary case covered automatically with this */
        blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

        resid = scsi_bufflen(cmd) - (blocks_done *
            cmd->device->sector_size);

        scsi_set_resid(cmd, resid);
        cmd->result = DID_OK << 16;

        /* Update protection tag */
        if (scsi_prot_sg_count(cmd)) {
            uint32_t i, j = 0, k = 0, num_ent;
            struct scatterlist *sg;
            struct sd_dif_tuple *spt;

            /* Patch the corresponding protection tags */
            scsi_for_each_prot_sg(cmd, sg,
                scsi_prot_sg_count(cmd), i) {
                num_ent = sg_dma_len(sg) / 8;
                if (k + num_ent < blocks_done) {
                    k += num_ent;
                    continue;
                }
                j = blocks_done - k - 1;
                k = blocks_done;
                break;
            }

            if (k != blocks_done) {
                ql_log(ql_log_warn, vha, 0x302f,
                    "unexpected tag values tag:lba=%x:%llx)\n",
                    e_ref_tag, (unsigned long long)lba_s);
                return 1;
            }

            spt = page_address(sg_page(sg)) + sg->offset;
            spt += j;

            spt->app_tag = 0xffff;
            if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
                spt->ref_tag = 0xffffffff;
        }

        return 0;
    }

    /* check guard */
    if (e_guard != a_guard) {
        scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
            0x10, 0x1);
        set_driver_byte(cmd, DRIVER_SENSE);
        set_host_byte(cmd, DID_ABORT);
        cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
        return 1;
    }

    /* check ref tag */
    if (e_ref_tag != a_ref_tag) {
        scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
            0x10, 0x3);
        set_driver_byte(cmd, DRIVER_SENSE);
        set_host_byte(cmd, DID_ABORT);
        cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
        return 1;
    }

    /* check appl tag */
    if (e_app_tag != a_app_tag) {
        scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
            0x10, 0x2);
        set_driver_byte(cmd, DRIVER_SENSE);
        set_host_byte(cmd, DID_ABORT);
        cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
        return 1;
    }

    return 1;
}
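/**
 * qla25xx_process_bidir_status_iocb() - Process a bidirectional command status IOCB.
 * @vha: SCSI driver HA context
 * @pkt: completed status entry
 * @req: request queue the command was issued on
 * @index: outstanding-command handle
 *
 * Maps the firmware completion status to an EXT_STATUS_* code and returns
 * it to the API through the vendor-specific bsg reply.
 */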
static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
    struct qla_hw_data *ha = vha->hw;
    srb_t *sp;
    uint16_t comp_status;
    uint16_t scsi_status;
    uint16_t thread_id;
    uint32_t rval = EXT_STATUS_OK;
    struct fc_bsg_job *bsg_job = NULL;
    sts_entry_t *sts;
    struct sts_entry_24xx *sts24;
    sts = (sts_entry_t *) pkt;
    sts24 = (struct sts_entry_24xx *) pkt;

    /* Validate handle. */
    if (index >= req->num_outstanding_cmds) {
        ql_log(ql_log_warn, vha, 0x70af,
            "Invalid SCSI completion handle 0x%x.\n", index);
        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        return;
    }

    sp = req->outstanding_cmds[index];
    if (sp) {
        /* Free outstanding command slot. */
        req->outstanding_cmds[index] = NULL;
        bsg_job = sp->u.bsg_job;
    } else {
        ql_log(ql_log_warn, vha, 0x70b0,
            "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
            req->id, index);

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        return;
    }

    if (IS_FWI2_CAPABLE(ha)) {
        comp_status = le16_to_cpu(sts24->comp_status);
        scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
    } else {
        comp_status = le16_to_cpu(sts->comp_status);
        scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
    }

    thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
    switch (comp_status) {
    case CS_COMPLETE:
        if (scsi_status == 0) {
            bsg_job->reply->reply_payload_rcv_len =
                bsg_job->reply_payload.payload_len;
            vha->qla_stats.input_bytes +=
                bsg_job->reply->reply_payload_rcv_len;
            vha->qla_stats.input_requests++;
            rval = EXT_STATUS_OK;
        }
        goto done;

    case CS_DATA_OVERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b1,
            "Command completed with data overrun thread_id=%d\n",
            thread_id);
        rval = EXT_STATUS_DATA_OVERRUN;
        break;

    case CS_DATA_UNDERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b2,
            "Command completed with data underrun thread_id=%d\n",
            thread_id);
        rval = EXT_STATUS_DATA_UNDERRUN;
        break;
    case CS_BIDIR_RD_OVERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b3,
            "Command completed with read data overrun thread_id=%d\n",
            thread_id);
        rval = EXT_STATUS_DATA_OVERRUN;
        break;

    case CS_BIDIR_RD_WR_OVERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b4,
            "Command completed with read and write data overrun "
            "thread_id=%d\n", thread_id);
        rval = EXT_STATUS_DATA_OVERRUN;
        break;

    case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b5,
            "Command completed with read data over and write data "
            "underrun thread_id=%d\n", thread_id);
        rval = EXT_STATUS_DATA_OVERRUN;
        break;

    case CS_BIDIR_RD_UNDERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b6,
            "Command completed with read data underrun "
            "thread_id=%d\n", thread_id);
        rval = EXT_STATUS_DATA_UNDERRUN;
        break;

    case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b7,
            "Command completed with read data under and write data "
            "overrun thread_id=%d\n", thread_id);
        rval = EXT_STATUS_DATA_UNDERRUN;
        break;

    case CS_BIDIR_RD_WR_UNDERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b8,
            "Command completed with read and write data underrun "
            "thread_id=%d\n", thread_id);
        rval = EXT_STATUS_DATA_UNDERRUN;
        break;

    case CS_BIDIR_DMA:
        ql_dbg(ql_dbg_user, vha, 0x70b9,
            "Command completed with data DMA error thread_id=%d\n",
            thread_id);
        rval = EXT_STATUS_DMA_ERR;
        break;

    case CS_TIMEOUT:
        ql_dbg(ql_dbg_user, vha, 0x70ba,
            "Command completed with timeout thread_id=%d\n",
            thread_id);
        rval = EXT_STATUS_TIMEOUT;
        break;
    default:
        ql_dbg(ql_dbg_user, vha, 0x70bb,
            "Command completed with completion status=0x%x "
            "thread_id=%d\n", comp_status, thread_id);
        rval = EXT_STATUS_ERR;
        break;
    }
    bsg_job->reply->reply_payload_rcv_len = 0;

done:
    /* Return the vendor specific reply to API */
    bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    /*
     * Always return DID_OK, bsg will send the vendor specific response
     * in this case only
     */
    sp->done(vha, sp, (DID_OK << 16));
}
write data underrun " 1914 "thread_id=%d\n", thread_id); 1915 rval = EXT_STATUS_DATA_UNDERRUN; 1916 break; 1917 1918 case CS_BIDIR_DMA: 1919 ql_dbg(ql_dbg_user, vha, 0x70b9, 1920 "Command completed with data DMA error thread_id=%d\n", 1921 thread_id); 1922 rval = EXT_STATUS_DMA_ERR; 1923 break; 1924 1925 case CS_TIMEOUT: 1926 ql_dbg(ql_dbg_user, vha, 0x70ba, 1927 "Command completed with timeout thread_id=%d\n", 1928 thread_id); 1929 rval = EXT_STATUS_TIMEOUT; 1930 break; 1931 default: 1932 ql_dbg(ql_dbg_user, vha, 0x70bb, 1933 "Command completed with completion status=0x%x " 1934 "thread_id=%d\n", comp_status, thread_id); 1935 rval = EXT_STATUS_ERR; 1936 break; 1937 } 1938 bsg_job->reply->reply_payload_rcv_len = 0; 1939 1940 done: 1941 /* Return the vendor specific reply to API */ 1942 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 1943 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1944 /* Always return DID_OK; bsg will send the vendor-specific response 1945 * in this case only. */ 1946 sp->done(vha, sp, (DID_OK << 16)); 1947 1948 } 1949 1950 /** 1951 * qla2x00_status_entry() - Process a Status IOCB entry. 1952 * @vha: SCSI driver HA context 1953 * @pkt: Entry pointer 1954 */ 1955 static void 1956 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 1957 { 1958 srb_t *sp; 1959 fc_port_t *fcport; 1960 struct scsi_cmnd *cp; 1961 sts_entry_t *sts; 1962 struct sts_entry_24xx *sts24; 1963 uint16_t comp_status; 1964 uint16_t scsi_status; 1965 uint16_t ox_id; 1966 uint8_t lscsi_status; 1967 int32_t resid; 1968 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 1969 fw_resid_len; 1970 uint8_t *rsp_info, *sense_data; 1971 struct qla_hw_data *ha = vha->hw; 1972 uint32_t handle; 1973 uint16_t que; 1974 struct req_que *req; 1975 int logit = 1; 1976 int res = 0; 1977 uint16_t state_flags = 0; 1978 1979 sts = (sts_entry_t *) pkt; 1980 sts24 = (struct sts_entry_24xx *) pkt; 1981 if (IS_FWI2_CAPABLE(ha)) { 1982 comp_status = le16_to_cpu(sts24->comp_status); 1983 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 1984 state_flags = le16_to_cpu(sts24->state_flags); 1985 } else { 1986 comp_status = le16_to_cpu(sts->comp_status); 1987 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1988 } 1989 handle = (uint32_t) LSW(sts->handle); 1990 que = MSW(sts->handle); 1991 req = ha->req_q_map[que]; 1992 1993 /* Check for invalid queue pointer */ 1994 if (req == NULL || 1995 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 1996 ql_dbg(ql_dbg_io, vha, 0x3059, 1997 "Invalid status handle (0x%x): Bad req pointer. req=%p, " 1998 "que=%u.\n", sts->handle, req, que); 1999 return; 2000 } 2001 2002 /* Validate handle. */ 2003 if (handle < req->num_outstanding_cmds) 2004 sp = req->outstanding_cmds[handle]; 2005 else 2006 sp = NULL; 2007 2008 if (sp == NULL) { 2009 ql_dbg(ql_dbg_io, vha, 0x3017, 2010 "Invalid status handle (0x%x).\n", sts->handle); 2011 2012 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 2013 if (IS_P3P_TYPE(ha)) 2014 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2015 else 2016 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2017 qla2xxx_wake_dpc(vha); 2018 } 2019 return; 2020 } 2021 2022 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 2023 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 2024 return; 2025 } 2026 2027 /* Task Management completion. */ 2028 if (sp->type == SRB_TM_CMD) { 2029 qla24xx_tm_iocb_entry(vha, req, pkt); 2030 return; 2031 } 2032 2033 /* Fast path completion.
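 * A CS_COMPLETE status with no SCSI status bits set needs no sense,
 * residual, or error bookkeeping, so the command can be completed
 * straight from its handle.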
*/ 2034 if (comp_status == CS_COMPLETE && scsi_status == 0) { 2035 qla2x00_process_completed_request(vha, req, handle); 2036 2037 return; 2038 } 2039 2040 req->outstanding_cmds[handle] = NULL; 2041 cp = GET_CMD_SP(sp); 2042 if (cp == NULL) { 2043 ql_dbg(ql_dbg_io, vha, 0x3018, 2044 "Command already returned (0x%x/%p).\n", 2045 sts->handle, sp); 2046 2047 return; 2048 } 2049 2050 lscsi_status = scsi_status & STATUS_MASK; 2051 2052 fcport = sp->fcport; 2053 2054 ox_id = 0; 2055 sense_len = par_sense_len = rsp_info_len = resid_len = 2056 fw_resid_len = 0; 2057 if (IS_FWI2_CAPABLE(ha)) { 2058 if (scsi_status & SS_SENSE_LEN_VALID) 2059 sense_len = le32_to_cpu(sts24->sense_len); 2060 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2061 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 2062 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 2063 resid_len = le32_to_cpu(sts24->rsp_residual_count); 2064 if (comp_status == CS_DATA_UNDERRUN) 2065 fw_resid_len = le32_to_cpu(sts24->residual_len); 2066 rsp_info = sts24->data; 2067 sense_data = sts24->data; 2068 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 2069 ox_id = le16_to_cpu(sts24->ox_id); 2070 par_sense_len = sizeof(sts24->data); 2071 } else { 2072 if (scsi_status & SS_SENSE_LEN_VALID) 2073 sense_len = le16_to_cpu(sts->req_sense_length); 2074 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2075 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 2076 resid_len = le32_to_cpu(sts->residual_length); 2077 rsp_info = sts->rsp_info; 2078 sense_data = sts->req_sense_data; 2079 par_sense_len = sizeof(sts->req_sense_data); 2080 } 2081 2082 /* Check for any FCP transport errors. */ 2083 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 2084 /* Sense data lies beyond any FCP RESPONSE data. */ 2085 if (IS_FWI2_CAPABLE(ha)) { 2086 sense_data += rsp_info_len; 2087 par_sense_len -= rsp_info_len; 2088 } 2089 if (rsp_info_len > 3 && rsp_info[3]) { 2090 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 2091 "FCP I/O protocol failure (0x%x/0x%x).\n", 2092 rsp_info_len, rsp_info[3]); 2093 2094 res = DID_BUS_BUSY << 16; 2095 goto out; 2096 } 2097 } 2098 2099 /* Check for overrun. */ 2100 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 2101 scsi_status & SS_RESIDUAL_OVER) 2102 comp_status = CS_DATA_OVERRUN; 2103 2104 /* 2105 * Based on the host and SCSI status, generate the Linux status code. 2106 */ 2107 switch (comp_status) { 2108 case CS_COMPLETE: 2109 case CS_QUEUE_FULL: 2110 if (scsi_status == 0) { 2111 res = DID_OK << 16; 2112 break; 2113 } 2114 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 2115 resid = resid_len; 2116 scsi_set_resid(cp, resid); 2117 2118 if (!lscsi_status && 2119 ((unsigned)(scsi_bufflen(cp) - resid) < 2120 cp->underflow)) { 2121 ql_dbg(ql_dbg_io, fcport->vha, 0x301a, 2122 "Mid-layer underflow " 2123 "detected (0x%x of 0x%x bytes).\n", 2124 resid, scsi_bufflen(cp)); 2125 2126 res = DID_ERROR << 16; 2127 break; 2128 } 2129 } 2130 res = DID_OK << 16 | lscsi_status; 2131 2132 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2133 ql_dbg(ql_dbg_io, fcport->vha, 0x301b, 2134 "QUEUE FULL detected.\n"); 2135 break; 2136 } 2137 logit = 0; 2138 if (lscsi_status != SS_CHECK_CONDITION) 2139 break; 2140 2141 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2142 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2143 break; 2144 2145 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 2146 rsp, res); 2147 break; 2148 2149 case CS_DATA_UNDERRUN: 2150 /* Use F/W calculated residual length. */ 2151 resid = IS_FWI2_CAPABLE(ha) ?
fw_resid_len : resid_len; 2152 scsi_set_resid(cp, resid); 2153 if (scsi_status & SS_RESIDUAL_UNDER) { 2154 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 2155 ql_dbg(ql_dbg_io, fcport->vha, 0x301d, 2156 "Dropped frame(s) detected " 2157 "(0x%x of 0x%x bytes).\n", 2158 resid, scsi_bufflen(cp)); 2159 2160 res = DID_ERROR << 16 | lscsi_status; 2161 goto check_scsi_status; 2162 } 2163 2164 if (!lscsi_status && 2165 ((unsigned)(scsi_bufflen(cp) - resid) < 2166 cp->underflow)) { 2167 ql_dbg(ql_dbg_io, fcport->vha, 0x301e, 2168 "Mid-layer underflow " 2169 "detected (0x%x of 0x%x bytes).\n", 2170 resid, scsi_bufflen(cp)); 2171 2172 res = DID_ERROR << 16; 2173 break; 2174 } 2175 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 2176 lscsi_status != SAM_STAT_BUSY) { 2177 /* 2178 * A SCSI status of TASK SET FULL or BUSY is treated as 2179 * a task that has not completed. 2180 */ 2181 2182 ql_dbg(ql_dbg_io, fcport->vha, 0x301f, 2183 "Dropped frame(s) detected (0x%x " 2184 "of 0x%x bytes).\n", resid, 2185 scsi_bufflen(cp)); 2186 2187 res = DID_ERROR << 16 | lscsi_status; 2188 goto check_scsi_status; 2189 } else { 2190 ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 2191 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 2192 scsi_status, lscsi_status); 2193 } 2194 2195 res = DID_OK << 16 | lscsi_status; 2196 logit = 0; 2197 2198 check_scsi_status: 2199 /* 2200 * Check to see if the SCSI status is nonzero; if so, 2201 * report it. 2202 */ 2203 if (lscsi_status != 0) { 2204 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2205 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 2206 "QUEUE FULL detected.\n"); 2207 logit = 1; 2208 break; 2209 } 2210 if (lscsi_status != SS_CHECK_CONDITION) 2211 break; 2212 2213 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2214 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2215 break; 2216 2217 qla2x00_handle_sense(sp, sense_data, par_sense_len, 2218 sense_len, rsp, res); 2219 } 2220 break; 2221 2222 case CS_PORT_LOGGED_OUT: 2223 case CS_PORT_CONFIG_CHG: 2224 case CS_PORT_BUSY: 2225 case CS_INCOMPLETE: 2226 case CS_PORT_UNAVAILABLE: 2227 case CS_TIMEOUT: 2228 case CS_RESET: 2229 2230 /* 2231 * We are going to have the fc class block the rport 2232 * while we try to recover, so instruct the mid-layer 2233 * to requeue until the class decides how to handle this.
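 * DID_TRANSPORT_DISRUPTED makes the mid-layer requeue the
 * command; the fc transport's dev_loss_tmo / fast_io_fail_tmo
 * timers then settle the port's fate.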
2234 */ 2235 res = DID_TRANSPORT_DISRUPTED << 16; 2236 2237 if (comp_status == CS_TIMEOUT) { 2238 if (IS_FWI2_CAPABLE(ha)) 2239 break; 2240 else if ((le16_to_cpu(sts->status_flags) & 2241 SF_LOGOUT_SENT) == 0) 2242 break; 2243 } 2244 2245 ql_dbg(ql_dbg_io, fcport->vha, 0x3021, 2246 "Port to be marked lost on fcport=%02x%02x%02x, current " 2247 "port state= %s.\n", fcport->d_id.b.domain, 2248 fcport->d_id.b.area, fcport->d_id.b.al_pa, 2249 port_state_str[atomic_read(&fcport->state)]); 2250 2251 if (atomic_read(&fcport->state) == FCS_ONLINE) 2252 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 2253 break; 2254 2255 case CS_ABORTED: 2256 res = DID_RESET << 16; 2257 break; 2258 2259 case CS_DIF_ERROR: 2260 logit = qla2x00_handle_dif_error(sp, sts24); 2261 res = cp->result; 2262 break; 2263 2264 case CS_TRANSPORT: 2265 res = DID_ERROR << 16; 2266 2267 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 2268 break; 2269 2270 if (state_flags & BIT_4) 2271 scmd_printk(KERN_WARNING, cp, 2272 "Unsupported device '%s' found.\n", 2273 cp->device->vendor); 2274 break; 2275 2276 default: 2277 res = DID_ERROR << 16; 2278 break; 2279 } 2280 2281 out: 2282 if (logit) 2283 ql_dbg(ql_dbg_io, fcport->vha, 0x3022, 2284 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " 2285 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " 2286 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", 2287 comp_status, scsi_status, res, vha->host_no, 2288 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 2289 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 2290 cp->cmnd, scsi_bufflen(cp), rsp_info_len, 2291 resid_len, fw_resid_len); 2292 2293 if (rsp->status_srb == NULL) 2294 sp->done(ha, sp, res); 2295 } 2296 2297 /** 2298 * qla2x00_status_cont_entry() - Process a Status Continuation entry. 2299 * @rsp: Response queue pointer 2300 * @pkt: Entry pointer 2301 * 2302 * Extended sense data. 2303 */ 2304 static void 2305 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 2306 { 2307 uint8_t sense_sz = 0; 2308 struct qla_hw_data *ha = rsp->hw; 2309 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 2310 srb_t *sp = rsp->status_srb; 2311 struct scsi_cmnd *cp; 2312 uint32_t sense_len; 2313 uint8_t *sense_ptr; 2314 2315 if (!sp || !GET_CMD_SENSE_LEN(sp)) 2316 return; 2317 2318 sense_len = GET_CMD_SENSE_LEN(sp); 2319 sense_ptr = GET_CMD_SENSE_PTR(sp); 2320 2321 cp = GET_CMD_SP(sp); 2322 if (cp == NULL) { 2323 ql_log(ql_log_warn, vha, 0x3025, 2324 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 2325 2326 rsp->status_srb = NULL; 2327 return; 2328 } 2329 2330 if (sense_len > sizeof(pkt->data)) 2331 sense_sz = sizeof(pkt->data); 2332 else 2333 sense_sz = sense_len; 2334 2335 /* Move sense data. */ 2336 if (IS_FWI2_CAPABLE(ha)) 2337 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 2338 memcpy(sense_ptr, pkt->data, sense_sz); 2339 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 2340 sense_ptr, sense_sz); 2341 2342 sense_len -= sense_sz; 2343 sense_ptr += sense_sz; 2344 2345 SET_CMD_SENSE_PTR(sp, sense_ptr); 2346 SET_CMD_SENSE_LEN(sp, sense_len); 2347 2348 /* Place command on done queue. */ 2349 if (sense_len == 0) { 2350 rsp->status_srb = NULL; 2351 sp->done(ha, sp, cp->result); 2352 } 2353 } 2354 2355 /** 2356 * qla2x00_error_entry() - Process an error entry.
2357 * @vha: SCSI driver HA context 2358 * @pkt: Entry pointer 2359 */ 2360 static void 2361 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 2362 { 2363 srb_t *sp; 2364 struct qla_hw_data *ha = vha->hw; 2365 const char func[] = "ERROR-IOCB"; 2366 uint16_t que = MSW(pkt->handle); 2367 struct req_que *req = NULL; 2368 int res = DID_ERROR << 16; 2369 2370 ql_dbg(ql_dbg_async, vha, 0x502a, 2371 "type of error status in response: 0x%x\n", pkt->entry_status); 2372 2373 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 2374 goto fatal; 2375 2376 req = ha->req_q_map[que]; 2377 2378 if (pkt->entry_status & RF_BUSY) 2379 res = DID_BUS_BUSY << 16; 2380 2381 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2382 if (sp) { 2383 sp->done(ha, sp, res); 2384 return; 2385 } 2386 fatal: 2387 ql_log(ql_log_warn, vha, 0x5030, 2388 "Error entry - invalid handle/queue.\n"); 2389 2390 if (IS_P3P_TYPE(ha)) 2391 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2392 else 2393 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2394 qla2xxx_wake_dpc(vha); 2395 } 2396 2397 /** 2398 * qla24xx_mbx_completion() - Process mailbox command completions. 2399 * @vha: SCSI driver HA context 2400 * @mb0: Mailbox0 register 2401 */ 2402 static void 2403 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 2404 { 2405 uint16_t cnt; 2406 uint32_t mboxes; 2407 uint16_t __iomem *wptr; 2408 struct qla_hw_data *ha = vha->hw; 2409 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2410 2411 /* Read all mbox registers? */ 2412 mboxes = (1 << ha->mbx_count) - 1; 2413 if (!ha->mcp) 2414 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 2415 else 2416 mboxes = ha->mcp->in_mb; 2417 2418 /* Load return mailbox registers. */ 2419 ha->flags.mbox_int = 1; 2420 ha->mailbox_out[0] = mb0; 2421 mboxes >>= 1; 2422 wptr = (uint16_t __iomem *)&reg->mailbox1; 2423 2424 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 2425 if (mboxes & BIT_0) 2426 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 2427 2428 mboxes >>= 1; 2429 wptr++; 2430 } 2431 } 2432 2433 static void 2434 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2435 struct abort_entry_24xx *pkt) 2436 { 2437 const char func[] = "ABT_IOCB"; 2438 srb_t *sp; 2439 struct srb_iocb *abt; 2440 2441 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2442 if (!sp) 2443 return; 2444 2445 abt = &sp->u.iocb_cmd; 2446 abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); 2447 sp->done(vha, sp, 0); 2448 } 2449 2450 /** 2451 * qla24xx_process_response_queue() - Process response queue entries.
2452 * @vha: SCSI driver HA context 2453 */ 2454 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 2455 struct rsp_que *rsp) 2456 { 2457 struct sts_entry_24xx *pkt; 2458 struct qla_hw_data *ha = vha->hw; 2459 2460 if (!vha->flags.online) 2461 return; 2462 2463 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2464 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 2465 2466 rsp->ring_index++; 2467 if (rsp->ring_index == rsp->length) { 2468 rsp->ring_index = 0; 2469 rsp->ring_ptr = rsp->ring; 2470 } else { 2471 rsp->ring_ptr++; 2472 } 2473 2474 if (pkt->entry_status != 0) { 2475 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2476 2477 if (qlt_24xx_process_response_error(vha, pkt)) 2478 goto process_err; 2479 2480 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2481 wmb(); 2482 continue; 2483 } 2484 process_err: 2485 2486 switch (pkt->entry_type) { 2487 case STATUS_TYPE: 2488 qla2x00_status_entry(vha, rsp, pkt); 2489 break; 2490 case STATUS_CONT_TYPE: 2491 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2492 break; 2493 case VP_RPT_ID_IOCB_TYPE: 2494 qla24xx_report_id_acquisition(vha, 2495 (struct vp_rpt_id_entry_24xx *)pkt); 2496 break; 2497 case LOGINOUT_PORT_IOCB_TYPE: 2498 qla24xx_logio_entry(vha, rsp->req, 2499 (struct logio_entry_24xx *)pkt); 2500 break; 2501 case CT_IOCB_TYPE: 2502 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2503 break; 2504 case ELS_IOCB_TYPE: 2505 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2506 break; 2507 case ABTS_RECV_24XX: 2508 /* ensure that the ATIO queue is empty, then fall through */ 2509 qlt_24xx_process_atio_queue(vha); 2510 case ABTS_RESP_24XX: 2511 case CTIO_TYPE7: 2512 case NOTIFY_ACK_TYPE: 2513 case CTIO_CRC2: 2514 qlt_response_pkt_all_vps(vha, (response_t *)pkt); 2515 break; 2516 case MARKER_TYPE: 2517 /* Do nothing in this case; this check only prevents it 2518 * from falling into the default case 2519 */ 2520 break; 2521 case ABORT_IOCB_TYPE: 2522 qla24xx_abort_iocb_entry(vha, rsp->req, 2523 (struct abort_entry_24xx *)pkt); 2524 break; 2525 default: 2526 /* Type Not Supported.
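 * The unknown entry is still stamped RESPONSE_PROCESSED after the
 * switch, so the ring index keeps advancing past it.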
*/ 2527 ql_dbg(ql_dbg_async, vha, 0x5042, 2528 "Received unknown response pkt type %x " 2529 "entry status=%x.\n", 2530 pkt->entry_type, pkt->entry_status); 2531 break; 2532 } 2533 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2534 wmb(); 2535 } 2536 2537 /* Adjust ring index */ 2538 if (IS_P3P_TYPE(ha)) { 2539 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 2540 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index); 2541 } else 2542 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); 2543 } 2544 2545 static void 2546 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 2547 { 2548 int rval; 2549 uint32_t cnt; 2550 struct qla_hw_data *ha = vha->hw; 2551 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2552 2553 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 2554 !IS_QLA27XX(ha)) 2555 return; 2556 2557 rval = QLA_SUCCESS; 2558 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00); 2559 RD_REG_DWORD(&reg->iobase_addr); 2560 WRT_REG_DWORD(&reg->iobase_window, 0x0001); 2561 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 && 2562 rval == QLA_SUCCESS; cnt--) { 2563 if (cnt) { 2564 WRT_REG_DWORD(&reg->iobase_window, 0x0001); 2565 udelay(10); 2566 } else 2567 rval = QLA_FUNCTION_TIMEOUT; 2568 } 2569 if (rval == QLA_SUCCESS) 2570 goto next_test; 2571 2572 rval = QLA_SUCCESS; 2573 WRT_REG_DWORD(&reg->iobase_window, 0x0003); 2574 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 && 2575 rval == QLA_SUCCESS; cnt--) { 2576 if (cnt) { 2577 WRT_REG_DWORD(&reg->iobase_window, 0x0003); 2578 udelay(10); 2579 } else 2580 rval = QLA_FUNCTION_TIMEOUT; 2581 } 2582 if (rval != QLA_SUCCESS) 2583 goto done; 2584 2585 next_test: 2586 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3) 2587 ql_log(ql_log_info, vha, 0x504c, 2588 "Additional code -- 0x55AA.\n"); 2589 2590 done: 2591 WRT_REG_DWORD(&reg->iobase_window, 0x0000); 2592 RD_REG_DWORD(&reg->iobase_window); 2593 } 2594 2595 /** 2596 * qla24xx_intr_handler() - Process interrupts for the ISP24xx. 2597 * @irq: 2598 * @dev_id: SCSI driver HA context 2599 * 2600 * Called by system whenever the host adapter generates an interrupt. 2601 * 2602 * Returns handled flag.
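 *
 * The handler polls host_status for up to 50 iterations, servicing
 * mailbox completions, async events, and response queue updates
 * before clearing the RISC interrupt.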
*/ 2604 irqreturn_t 2605 qla24xx_intr_handler(int irq, void *dev_id) 2606 { 2607 scsi_qla_host_t *vha; 2608 struct qla_hw_data *ha; 2609 struct device_reg_24xx __iomem *reg; 2610 int status; 2611 unsigned long iter; 2612 uint32_t stat; 2613 uint32_t hccr; 2614 uint16_t mb[8]; 2615 struct rsp_que *rsp; 2616 unsigned long flags; 2617 2618 rsp = (struct rsp_que *) dev_id; 2619 if (!rsp) { 2620 ql_log(ql_log_info, NULL, 0x5059, 2621 "%s: NULL response queue pointer.\n", __func__); 2622 return IRQ_NONE; 2623 } 2624 2625 ha = rsp->hw; 2626 reg = &ha->iobase->isp24; 2627 status = 0; 2628 2629 if (unlikely(pci_channel_offline(ha->pdev))) 2630 return IRQ_HANDLED; 2631 2632 spin_lock_irqsave(&ha->hardware_lock, flags); 2633 vha = pci_get_drvdata(ha->pdev); 2634 for (iter = 50; iter--; ) { 2635 stat = RD_REG_DWORD(&reg->host_status); 2636 if (qla2x00_check_reg_for_disconnect(vha, stat)) 2637 break; 2638 if (stat & HSRX_RISC_PAUSED) { 2639 if (unlikely(pci_channel_offline(ha->pdev))) 2640 break; 2641 2642 hccr = RD_REG_DWORD(&reg->hccr); 2643 2644 ql_log(ql_log_warn, vha, 0x504b, 2645 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2646 hccr); 2647 2648 qla2xxx_check_risc_status(vha); 2649 2650 ha->isp_ops->fw_dump(vha, 1); 2651 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2652 break; 2653 } else if ((stat & HSRX_RISC_INT) == 0) 2654 break; 2655 2656 switch (stat & 0xff) { 2657 case INTR_ROM_MB_SUCCESS: 2658 case INTR_ROM_MB_FAILED: 2659 case INTR_MB_SUCCESS: 2660 case INTR_MB_FAILED: 2661 qla24xx_mbx_completion(vha, MSW(stat)); 2662 status |= MBX_INTERRUPT; 2663 2664 break; 2665 case INTR_ASYNC_EVENT: 2666 mb[0] = MSW(stat); 2667 mb[1] = RD_REG_WORD(&reg->mailbox1); 2668 mb[2] = RD_REG_WORD(&reg->mailbox2); 2669 mb[3] = RD_REG_WORD(&reg->mailbox3); 2670 qla2x00_async_event(vha, rsp, mb); 2671 break; 2672 case INTR_RSP_QUE_UPDATE: 2673 case INTR_RSP_QUE_UPDATE_83XX: 2674 qla24xx_process_response_queue(vha, rsp); 2675 break; 2676 case INTR_ATIO_QUE_UPDATE: 2677 qlt_24xx_process_atio_queue(vha); 2678 break; 2679 case INTR_ATIO_RSP_QUE_UPDATE: 2680 qlt_24xx_process_atio_queue(vha); 2681 qla24xx_process_response_queue(vha, rsp); 2682 break; 2683 default: 2684 ql_dbg(ql_dbg_async, vha, 0x504f, 2685 "Unrecognized interrupt type (%d).\n", stat & 0xff); 2686 break; 2687 } 2688 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2689 RD_REG_DWORD_RELAXED(&reg->hccr); 2690 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 2691 ndelay(3500); 2692 } 2693 qla2x00_handle_mbx_completion(ha, status); 2694 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2695 2696 return IRQ_HANDLED; 2697 } 2698 2699 static irqreturn_t 2700 qla24xx_msix_rsp_q(int irq, void *dev_id) 2701 { 2702 struct qla_hw_data *ha; 2703 struct rsp_que *rsp; 2704 struct device_reg_24xx __iomem *reg; 2705 struct scsi_qla_host *vha; 2706 unsigned long flags; 2707 uint32_t stat = 0; 2708 2709 rsp = (struct rsp_que *) dev_id; 2710 if (!rsp) { 2711 ql_log(ql_log_info, NULL, 0x505a, 2712 "%s: NULL response queue pointer.\n", __func__); 2713 return IRQ_NONE; 2714 } 2715 ha = rsp->hw; 2716 reg = &ha->iobase->isp24; 2717 2718 spin_lock_irqsave(&ha->hardware_lock, flags); 2719 2720 vha = pci_get_drvdata(ha->pdev); 2721 /* 2722 * Use the host_status register to check for PCI disconnection before 2723 * we process the response queue.
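 * A host_status of 0xffffffff means the device has dropped off the
 * PCI bus (see qla2x00_check_reg_for_disconnect()).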
2724 */ 2725 stat = RD_REG_DWORD(®->host_status); 2726 if (qla2x00_check_reg_for_disconnect(vha, stat)) 2727 goto out; 2728 qla24xx_process_response_queue(vha, rsp); 2729 if (!ha->flags.disable_msix_handshake) { 2730 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2731 RD_REG_DWORD_RELAXED(®->hccr); 2732 } 2733 out: 2734 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2735 2736 return IRQ_HANDLED; 2737 } 2738 2739 static irqreturn_t 2740 qla25xx_msix_rsp_q(int irq, void *dev_id) 2741 { 2742 struct qla_hw_data *ha; 2743 scsi_qla_host_t *vha; 2744 struct rsp_que *rsp; 2745 struct device_reg_24xx __iomem *reg; 2746 unsigned long flags; 2747 uint32_t hccr = 0; 2748 2749 rsp = (struct rsp_que *) dev_id; 2750 if (!rsp) { 2751 ql_log(ql_log_info, NULL, 0x505b, 2752 "%s: NULL response queue pointer.\n", __func__); 2753 return IRQ_NONE; 2754 } 2755 ha = rsp->hw; 2756 vha = pci_get_drvdata(ha->pdev); 2757 2758 /* Clear the interrupt, if enabled, for this response queue */ 2759 if (!ha->flags.disable_msix_handshake) { 2760 reg = &ha->iobase->isp24; 2761 spin_lock_irqsave(&ha->hardware_lock, flags); 2762 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2763 hccr = RD_REG_DWORD_RELAXED(®->hccr); 2764 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2765 } 2766 if (qla2x00_check_reg_for_disconnect(vha, hccr)) 2767 goto out; 2768 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 2769 2770 out: 2771 return IRQ_HANDLED; 2772 } 2773 2774 static irqreturn_t 2775 qla24xx_msix_default(int irq, void *dev_id) 2776 { 2777 scsi_qla_host_t *vha; 2778 struct qla_hw_data *ha; 2779 struct rsp_que *rsp; 2780 struct device_reg_24xx __iomem *reg; 2781 int status; 2782 uint32_t stat; 2783 uint32_t hccr; 2784 uint16_t mb[8]; 2785 unsigned long flags; 2786 2787 rsp = (struct rsp_que *) dev_id; 2788 if (!rsp) { 2789 ql_log(ql_log_info, NULL, 0x505c, 2790 "%s: NULL response queue pointer.\n", __func__); 2791 return IRQ_NONE; 2792 } 2793 ha = rsp->hw; 2794 reg = &ha->iobase->isp24; 2795 status = 0; 2796 2797 spin_lock_irqsave(&ha->hardware_lock, flags); 2798 vha = pci_get_drvdata(ha->pdev); 2799 do { 2800 stat = RD_REG_DWORD(®->host_status); 2801 if (qla2x00_check_reg_for_disconnect(vha, stat)) 2802 break; 2803 if (stat & HSRX_RISC_PAUSED) { 2804 if (unlikely(pci_channel_offline(ha->pdev))) 2805 break; 2806 2807 hccr = RD_REG_DWORD(®->hccr); 2808 2809 ql_log(ql_log_info, vha, 0x5050, 2810 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2811 hccr); 2812 2813 qla2xxx_check_risc_status(vha); 2814 2815 ha->isp_ops->fw_dump(vha, 1); 2816 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2817 break; 2818 } else if ((stat & HSRX_RISC_INT) == 0) 2819 break; 2820 2821 switch (stat & 0xff) { 2822 case INTR_ROM_MB_SUCCESS: 2823 case INTR_ROM_MB_FAILED: 2824 case INTR_MB_SUCCESS: 2825 case INTR_MB_FAILED: 2826 qla24xx_mbx_completion(vha, MSW(stat)); 2827 status |= MBX_INTERRUPT; 2828 2829 break; 2830 case INTR_ASYNC_EVENT: 2831 mb[0] = MSW(stat); 2832 mb[1] = RD_REG_WORD(®->mailbox1); 2833 mb[2] = RD_REG_WORD(®->mailbox2); 2834 mb[3] = RD_REG_WORD(®->mailbox3); 2835 qla2x00_async_event(vha, rsp, mb); 2836 break; 2837 case INTR_RSP_QUE_UPDATE: 2838 case INTR_RSP_QUE_UPDATE_83XX: 2839 qla24xx_process_response_queue(vha, rsp); 2840 break; 2841 case INTR_ATIO_QUE_UPDATE: 2842 qlt_24xx_process_atio_queue(vha); 2843 break; 2844 case INTR_ATIO_RSP_QUE_UPDATE: 2845 qlt_24xx_process_atio_queue(vha); 2846 qla24xx_process_response_queue(vha, rsp); 2847 break; 2848 default: 2849 ql_dbg(ql_dbg_async, vha, 0x5051, 2850 "Unrecognized interrupt type 
(%d).\n", stat & 0xff); 2851 break; 2852 } 2853 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2854 } while (0); 2855 qla2x00_handle_mbx_completion(ha, status); 2856 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2857 2858 return IRQ_HANDLED; 2859 } 2860 2861 /* Interrupt handling helpers. */ 2862 2863 struct qla_init_msix_entry { 2864 const char *name; 2865 irq_handler_t handler; 2866 }; 2867 2868 static struct qla_init_msix_entry msix_entries[3] = { 2869 { "qla2xxx (default)", qla24xx_msix_default }, 2870 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 2871 { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, 2872 }; 2873 2874 static struct qla_init_msix_entry qla82xx_msix_entries[2] = { 2875 { "qla2xxx (default)", qla82xx_msix_default }, 2876 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 2877 }; 2878 2879 static struct qla_init_msix_entry qla83xx_msix_entries[3] = { 2880 { "qla2xxx (default)", qla24xx_msix_default }, 2881 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 2882 { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, 2883 }; 2884 2885 static void 2886 qla24xx_disable_msix(struct qla_hw_data *ha) 2887 { 2888 int i; 2889 struct qla_msix_entry *qentry; 2890 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2891 2892 for (i = 0; i < ha->msix_count; i++) { 2893 qentry = &ha->msix_entries[i]; 2894 if (qentry->have_irq) 2895 free_irq(qentry->vector, qentry->rsp); 2896 } 2897 pci_disable_msix(ha->pdev); 2898 kfree(ha->msix_entries); 2899 ha->msix_entries = NULL; 2900 ha->flags.msix_enabled = 0; 2901 ql_dbg(ql_dbg_init, vha, 0x0042, 2902 "Disabled MSI-X.\n"); 2903 } 2904 2905 static int 2906 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 2907 { 2908 #define MIN_MSIX_COUNT 2 2909 #define ATIO_VECTOR 2 2910 int i, ret; 2911 struct msix_entry *entries; 2912 struct qla_msix_entry *qentry; 2913 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2914 2915 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 2916 GFP_KERNEL); 2917 if (!entries) { 2918 ql_log(ql_log_warn, vha, 0x00bc, 2919 "Failed to allocate memory for msix_entry.\n"); 2920 return -ENOMEM; 2921 } 2922 2923 for (i = 0; i < ha->msix_count; i++) 2924 entries[i].entry = i; 2925 2926 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2927 if (ret) { 2928 if (ret < MIN_MSIX_COUNT) 2929 goto msix_failed; 2930 2931 ql_log(ql_log_warn, vha, 0x00c6, 2932 "MSI-X: Failed to enable support " 2933 "-- %d/%d. Retrying with %d vectors.\n", 2934 ha->msix_count, ret, ret); 2935 ha->msix_count = ret; 2936 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2937 if (ret) { 2938 msix_failed: 2939 ql_log(ql_log_fatal, vha, 0x00c7, 2940 "MSI-X: Failed to enable support, " 2941 "giving up -- %d/%d.\n", 2942 ha->msix_count, ret); 2943 goto msix_out; 2944 } 2945 ha->max_rsp_queues = ha->msix_count - 1; 2946 } 2947 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 2948 ha->msix_count, GFP_KERNEL); 2949 if (!ha->msix_entries) { 2950 ql_log(ql_log_fatal, vha, 0x00c8, 2951 "Failed to allocate memory for ha->msix_entries.\n"); 2952 ret = -ENOMEM; 2953 goto msix_out; 2954 } 2955 ha->flags.msix_enabled = 1; 2956 2957 for (i = 0; i < ha->msix_count; i++) { 2958 qentry = &ha->msix_entries[i]; 2959 qentry->vector = entries[i].vector; 2960 qentry->entry = entries[i].entry; 2961 qentry->have_irq = 0; 2962 qentry->rsp = NULL; 2963 } 2964 2965 /* Enable MSI-X vectors for the base queue */ 2966 for (i = 0; i < 2; i++) { 2967 qentry = &ha->msix_entries[i]; 2968 if (IS_P3P_TYPE(ha)) 2969 ret = request_irq(qentry->vector, 2970
qla82xx_msix_entries[i].handler, 2971 0, qla82xx_msix_entries[i].name, rsp); 2972 else 2973 ret = request_irq(qentry->vector, 2974 msix_entries[i].handler, 2975 0, msix_entries[i].name, rsp); 2976 if (ret) 2977 goto msix_register_fail; 2978 qentry->have_irq = 1; 2979 qentry->rsp = rsp; 2980 rsp->msix = qentry; 2981 } 2982 2983 /* 2984 * If target mode is enabled, also request the vector for the ATIO 2985 * queue. 2986 */ 2987 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { 2988 qentry = &ha->msix_entries[ATIO_VECTOR]; 2989 ret = request_irq(qentry->vector, 2990 qla83xx_msix_entries[ATIO_VECTOR].handler, 2991 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); 2992 qentry->have_irq = 1; 2993 qentry->rsp = rsp; 2994 rsp->msix = qentry; 2995 } 2996 2997 msix_register_fail: 2998 if (ret) { 2999 ql_log(ql_log_fatal, vha, 0x00cb, 3000 "MSI-X: unable to register handler -- %x/%d.\n", 3001 qentry->vector, ret); 3002 qla24xx_disable_msix(ha); 3003 ha->mqenable = 0; 3004 goto msix_out; 3005 } 3006 3007 /* Enable MSI-X vector for response queue update for queue 0 */ 3008 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 3009 if (ha->msixbase && ha->mqiobase && 3010 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 3011 ha->mqenable = 1; 3012 } else 3013 if (ha->mqiobase 3014 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 3015 ha->mqenable = 1; 3016 ql_dbg(ql_dbg_multiq, vha, 0xc005, 3017 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3018 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3019 ql_dbg(ql_dbg_init, vha, 0x0055, 3020 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3021 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3022 3023 msix_out: 3024 kfree(entries); 3025 return ret; 3026 } 3027 3028 int 3029 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 3030 { 3031 int ret = QLA_FUNCTION_FAILED; 3032 device_reg_t *reg = ha->iobase; 3033 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3034 3035 /* If possible, enable MSI-X.
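 * The fallback order below is MSI-X, then MSI, then legacy INTx;
 * each step is gated on chip type and known-bad subsystem IDs.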
*/ 3036 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3037 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) && 3038 !IS_QLA27XX(ha)) 3039 goto skip_msi; 3040 3041 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 3042 (ha->pdev->subsystem_device == 0x7040 || 3043 ha->pdev->subsystem_device == 0x7041 || 3044 ha->pdev->subsystem_device == 0x1705)) { 3045 ql_log(ql_log_warn, vha, 0x0034, 3046 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 3047 ha->pdev->subsystem_vendor, 3048 ha->pdev->subsystem_device); 3049 goto skip_msi; 3050 } 3051 3052 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 3053 ql_log(ql_log_warn, vha, 0x0035, 3054 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", 3055 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 3056 goto skip_msix; 3057 } 3058 3059 ret = qla24xx_enable_msix(ha, rsp); 3060 if (!ret) { 3061 ql_dbg(ql_dbg_init, vha, 0x0036, 3062 "MSI-X: Enabled (0x%X, 0x%X).\n", 3063 ha->chip_revision, ha->fw_attributes); 3064 goto clear_risc_ints; 3065 } 3066 3067 skip_msix: 3068 3069 ql_log(ql_log_info, vha, 0x0037, 3070 "Falling back to MSI mode -- %d.\n", ret); 3071 3072 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3073 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && 3074 !IS_QLA27XX(ha)) 3075 goto skip_msi; 3076 3077 ret = pci_enable_msi(ha->pdev); 3078 if (!ret) { 3079 ql_dbg(ql_dbg_init, vha, 0x0038, 3080 "MSI: Enabled.\n"); 3081 ha->flags.msi_enabled = 1; 3082 } else 3083 ql_log(ql_log_warn, vha, 0x0039, 3084 "Falling back to INTa mode -- %d.\n", ret); 3085 skip_msi: 3086 3087 /* Skip INTx on ISP82xx. */ 3088 if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) 3089 return QLA_FUNCTION_FAILED; 3090 3091 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 3092 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 3093 QLA2XXX_DRIVER_NAME, rsp); 3094 if (ret) { 3095 ql_log(ql_log_warn, vha, 0x003a, 3096 "Failed to reserve interrupt %d; already in use.\n", 3097 ha->pdev->irq); 3098 goto fail; 3099 } else if (!ha->flags.msi_enabled) { 3100 ql_dbg(ql_dbg_init, vha, 0x0125, 3101 "INTa mode: Enabled.\n"); 3102 ha->flags.mr_intr_valid = 1; 3103 } 3104 3105 clear_risc_ints: 3106 3107 spin_lock_irq(&ha->hardware_lock); 3108 if (!IS_FWI2_CAPABLE(ha)) 3109 WRT_REG_WORD(&reg->isp.semaphore, 0); 3110 spin_unlock_irq(&ha->hardware_lock); 3111 3112 fail: 3113 return ret; 3114 } 3115 3116 void 3117 qla2x00_free_irqs(scsi_qla_host_t *vha) 3118 { 3119 struct qla_hw_data *ha = vha->hw; 3120 struct rsp_que *rsp; 3121 3122 /* 3123 * We need to check that ha->rsp_q_map is valid in case we are called 3124 * from a probe failure context.
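 * If the map was never set up, interrupts were never requested
 * either, so there is nothing to free.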
3125 */ 3126 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 3127 return; 3128 rsp = ha->rsp_q_map[0]; 3129 3130 if (ha->flags.msix_enabled) 3131 qla24xx_disable_msix(ha); 3132 else if (ha->flags.msi_enabled) { 3133 free_irq(ha->pdev->irq, rsp); 3134 pci_disable_msi(ha->pdev); 3135 } else 3136 free_irq(ha->pdev->irq, rsp); 3137 } 3138 3139 3140 int qla25xx_request_irq(struct rsp_que *rsp) 3141 { 3142 struct qla_hw_data *ha = rsp->hw; 3143 struct qla_init_msix_entry *intr = &msix_entries[2]; 3144 struct qla_msix_entry *msix = rsp->msix; 3145 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3146 int ret; 3147 3148 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); 3149 if (ret) { 3150 ql_log(ql_log_fatal, vha, 0x00e6, 3151 "MSI-X: Unable to register handler -- %x/%d.\n", 3152 msix->vector, ret); 3153 return ret; 3154 } 3155 msix->have_irq = 1; 3156 msix->rsp = rsp; 3157 return ret; 3158 } 3159
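/*
 * A minimal, self-contained illustration (hypothetical demo_* names, not
 * part of the driver) of the consume-and-mark ring walk that
 * qla24xx_process_response_queue() performs above: consume entries until
 * one still carries the "processed" signature, wrap the index at the ring
 * length, and stamp each handled entry so it cannot be processed twice.
 * The real driver additionally issues wmb() after stamping and then
 * writes the new index to the rsp_q_out register.
 */
#define DEMO_RING_LEN		32
#define DEMO_PROCESSED_SIG	0xdeaddead

struct demo_entry {
	uint32_t signature;	/* set to DEMO_PROCESSED_SIG once consumed */
	uint32_t payload;
};

struct demo_ring {
	struct demo_entry ring[DEMO_RING_LEN];
	uint16_t ring_index;	/* next entry to examine */
};

static void demo_process_ring(struct demo_ring *r)
{
	struct demo_entry *e;

	/* Stop at the first entry the producer has not refilled yet. */
	while (r->ring[r->ring_index].signature != DEMO_PROCESSED_SIG) {
		e = &r->ring[r->ring_index];

		/* Advance and wrap before handling, as the driver does. */
		if (++r->ring_index == DEMO_RING_LEN)
			r->ring_index = 0;

		/* ...dispatch on e->payload here... */

		/* Stamp the entry so it cannot be handled twice. */
		e->signature = DEMO_PROCESSED_SIG;
	}
}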