// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
	struct purex_item *item);
static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
	uint16_t size);
static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
	void *pkt);
static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp);

static void
qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
{
	void *pkt = &item->iocb;
	uint16_t pkt_size = item->size;

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
	    "%s: Enter\n", __func__);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
	    "-------- ELS REQ -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
	    pkt, pkt_size);

	fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt, 0);
}

const char *const port_state_str[] = {
	[FCS_UNKNOWN]		= "Unknown",
	[FCS_UNCONFIGURED]	= "UNCONFIGURED",
	[FCS_DEVICE_DEAD]	= "DEAD",
	[FCS_DEVICE_LOST]	= "LOST",
	[FCS_ONLINE]		= "ONLINE"
};

#define SFP_DISABLE_LASER_INITIATED	0x15	/* Sub code of 8070 AEN */
#define SFP_ENABLE_LASER_INITIATED	0x16	/* Sub code of 8070 AEN */

static inline void display_Laser_info(scsi_qla_host_t *vha,
				      u16 mb1, u16 mb2, u16 mb3)
{
	if (mb1 == SFP_DISABLE_LASER_INITIATED)
		ql_log(ql_log_warn, vha, 0xf0a2,
		    "SFP temperature (%d C) reached/exceeded the threshold (%d C). Laser is disabled.\n",
		    mb3, mb2);
	if (mb1 == SFP_ENABLE_LASER_INITIATED)
		ql_log(ql_log_warn, vha, 0xf0a3,
		    "SFP temperature (%d C) reached normal operating level. Laser is enabled.\n",
		    mb3);
}
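
/**
 * qla24xx_process_abts() - Respond to an unsolicited ABTS received from
 * the fabric: first terminate the exchange being aborted, then send a
 * BA_ACC reply built from the received ABTS header.
 * @vha: SCSI driver HA context
 * @pkt: purex item carrying the received ABTS IOCB
 */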
static void
qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
{
	struct abts_entry_24xx *abts =
	    (struct abts_entry_24xx *)&pkt->iocb;
	struct qla_hw_data *ha = vha->hw;
	struct els_entry_24xx *rsp_els;
	struct abts_entry_24xx *abts_rsp;
	dma_addr_t dma;
	uint32_t fctl;
	int rval;

	ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);

	ql_log(ql_log_warn, vha, 0x0287,
	    "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
	    abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
	    abts->seq_id, abts->seq_cnt);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    "-------- ABTS RCV -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    (uint8_t *)abts, sizeof(*abts));

	rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
	    GFP_KERNEL);
	if (!rsp_els) {
		ql_log(ql_log_warn, vha, 0x0287,
		    "Failed to allocate dma buffer for ABTS/ELS RSP.\n");
		return;
	}

	/* terminate exchange */
	rsp_els->entry_type = ELS_IOCB_TYPE;
	rsp_els->entry_count = 1;
	rsp_els->nport_handle = cpu_to_le16(~0);
	rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
	rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
	ql_dbg(ql_dbg_init, vha, 0x0283,
	    "Sending ELS Response to terminate exchange %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    (uint8_t *)rsp_els, sizeof(*rsp_els));
	rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0288,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (rsp_els->comp_status) {
		ql_log(ql_log_warn, vha, 0x0289,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, rsp_els->comp_status,
		    rsp_els->error_subcode_1, rsp_els->error_subcode_2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028a,
		    "%s: abort exchange done.\n", __func__);
	}
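
	/*
	 * The ELS IOCB above has completed, so its coherent DMA buffer can
	 * be cleared and rebuilt in place as the ABTS response entry that
	 * follows.
	 */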
	/* send ABTS response */
	abts_rsp = (void *)rsp_els;
	memset(abts_rsp, 0, sizeof(*abts_rsp));
	abts_rsp->entry_type = ABTS_RSP_TYPE;
	abts_rsp->entry_count = 1;
	abts_rsp->nport_handle = abts->nport_handle;
	abts_rsp->vp_idx = abts->vp_idx;
	abts_rsp->sof_type = abts->sof_type & 0xf0;
	abts_rsp->rx_xch_addr = abts->rx_xch_addr;
	abts_rsp->d_id[0] = abts->s_id[0];
	abts_rsp->d_id[1] = abts->s_id[1];
	abts_rsp->d_id[2] = abts->s_id[2];
	abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
	abts_rsp->s_id[0] = abts->d_id[0];
	abts_rsp->s_id[1] = abts->d_id[1];
	abts_rsp->s_id[2] = abts->d_id[2];
	abts_rsp->cs_ctl = abts->cs_ctl;
	/* include flipping bit23 in fctl */
	fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
	    FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
	abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
	abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
	abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
	abts_rsp->type = FC_TYPE_BLD;
	abts_rsp->rx_id = abts->rx_id;
	abts_rsp->ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
	abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
	abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
	ql_dbg(ql_dbg_init, vha, 0x028b,
	    "Sending BA ACC response to ABTS %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    (uint8_t *)abts_rsp, sizeof(*abts_rsp));
	rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x028c,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (abts_rsp->comp_status) {
		ql_log(ql_log_warn, vha, 0x028d,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, abts_rsp->comp_status,
		    abts_rsp->payload.error.subcode1,
		    abts_rsp->payload.error.subcode2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028e,
		    "%s: done.\n", __func__);
	}

	dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
}

/**
 * __qla_consume_iocb - tell the firmware that the driver has processed
 * (consumed) the head IOCB along with its continuation IOCBs from the
 * provided response queue.
 * @vha: host adapter pointer
 * @pkt: pointer to current packet.  On return, this pointer shall move
 * to the next packet.
 * @rsp: response queue pointer.
 *
 * It is assumed pkt is the head IOCB, not a continuation IOCB.
 */
void __qla_consume_iocb(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp)
{
	struct rsp_que *rsp_q = *rsp;
	response_t *new_pkt;
	uint16_t entry_count_remaining;
	struct purex_entry_24xx *purex = *pkt;

	entry_count_remaining = purex->entry_count;
	while (entry_count_remaining > 0) {
		new_pkt = rsp_q->ring_ptr;
		*pkt = new_pkt;

		rsp_q->ring_index++;
		if (rsp_q->ring_index == rsp_q->length) {
			rsp_q->ring_index = 0;
			rsp_q->ring_ptr = rsp_q->ring;
		} else {
			rsp_q->ring_ptr++;
		}

		new_pkt->signature = RESPONSE_PROCESSED;
		/* flush signature */
		wmb();
		--entry_count_remaining;
	}
}

/**
 * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB
 * and save to provided buffer
 * @vha: host adapter pointer
 * @pkt: pointer to Purex IOCB
 * @rsp: response queue
 * @buf: buffer to copy the extracted ELS payload into
 * @buf_len: buffer length
 */
int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
{
	struct purex_entry_24xx *purex = *pkt;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0;
	uint16_t entry_count_remaining;
	u16 tpad;

	entry_count_remaining = purex->entry_count;
	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
	    - PURX_ELS_HEADER_SIZE;

	/*
	 * The payload may not end on a 4-byte boundary.  Round up / pad
	 * for room to swap before saving the data.
	 */
	tpad = roundup(total_bytes, 4);

	if (buf_len < tpad) {
		ql_dbg(ql_dbg_async, vha, 0x5084,
		    "%s buffer is too small %d < %d\n",
		    __func__, buf_len, tpad);
		__qla_consume_iocb(vha, pkt, rsp);
		return -EIO;
	}

	pending_bytes = total_bytes = tpad;
	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
	    sizeof(purex->els_frame_payload) : pending_bytes;

	memcpy(buf, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	((response_t *)purex)->signature = RESPONSE_PROCESSED;
	/* flush signature */
	wmb();

	do {
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				    "Unexpected IOCB type, partial data 0x%x\n",
				    buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
			    sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy((buf + buffer_copy_offset), new_pkt->data,
				    no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				ql_log(ql_log_warn, vha, 0x5044,
				    "Attempt to copy more than we got, optimizing..%x\n",
				    buffer_copy_offset);
				memcpy((buf + buffer_copy_offset), new_pkt->data,
				    total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
			/* flush signature */
			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			    "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
			    total_bytes, entry_count_remaining);
			return -EIO;
		}
	} while (entry_count_remaining > 0);

	be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);

	return 0;
}
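
/*
 * Usage sketch for __qla_copy_purex_to_buffer(), illustrative only (the
 * local buffer and its size are arbitrary choices for the example, not a
 * call site in this file):
 *
 *	u8 buf[128];
 *
 *	if (__qla_copy_purex_to_buffer(vha, &pkt, &rsp, buf, sizeof(buf)))
 *		return;
 *
 * On success, buf holds the CPU-endian ELS payload and the response queue
 * pointers have been advanced past all continuation entries; on failure
 * the IOCBs are consumed where possible and -EIO is returned.
 */
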
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = rd_reg_word(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (rd_reg_word(&reg->semaphore) & BIT_0) {
			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			wrt_reg_word(&reg->semaphore, 0);
			rd_reg_word(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
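
/**
 * qla2x00_check_reg32_for_disconnect() - Check a register value for the
 * all-ones pattern that indicates the PCI device has disconnected, and
 * schedule EEH recovery when first detected.
 * @vha: SCSI driver HA context
 * @reg: 32-bit register value just read
 *
 * Returns true if the adapter appears to be disconnected.
 */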
bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			qla_schedule_eeh_work(vha);
		}
		return true;
	} else
		return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = rd_reg_dword(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_word(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping firmware.\n",
				    hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping firmware.\n",
				    hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			wrt_reg_word(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
		rd_reg_word_relaxed(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	__le16 __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = rd_reg_word(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
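
/**
 * qla81xx_idc_event() - Handle an Inter-Driver Communication AEN
 * (complete, notify or time-extension) on 81xx/83xx/8044 adapters.
 * @vha: SCSI driver HA context
 * @aen: IDC AEN code (mailbox 0)
 * @descr: event descriptor (mailbox 1)
 */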
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	__le16 __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = &reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = &reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = rd_reg_word(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}

#define LS_UNKNOWN	2
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "64", "10"
	};
#define	QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}

/**
 * qla83xx_handle_8200_aen() - Interpret an 83xx 8200 AEN and schedule the
 * appropriate NIC-core recovery work.
 * @vha: SCSI driver HA context
 * @mb: mailbox registers (interpretation below)
 */
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 * - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *	    Error-level 0x1 = Non-Fatal error
			 *	    Error-level 0x2 = Recoverable Fatal error
			 *	    Error-level 0x4 = UnRecoverable Fatal error
			 * - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			/*
			 * Worked example with illustrative values: for
			 * mb[2] = 0x0347 and mb[6] = 0x2001, the decode
			 * above yields protocol_engine_id = 0x47,
			 * fw_err_code = 0x03 | (0x0001 << 8) = 0x103 and
			 * err_level = (0x2000 >> 13) = 0x1, so the
			 * non-fatal branch below is taken.
			 */
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 * - PEG-to-FC Status Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = Peg-Firmware state
			 *	Bit 8      = N/W Interface Link-up
			 *	Bit 9      = N/W Interface signal detected
			 *	Bits 10-11 = SFP Status
			 *	    SFP Status 0x0 = SFP+ transceiver not expected
			 *	    SFP Status 0x1 = SFP+ transceiver not present
			 *	    SFP Status 0x2 = SFP+ transceiver invalid
			 *	    SFP Status 0x3 = SFP+ transceiver present and
			 *	    valid
			 *	Bits 12-14 = Heartbeat Counter
			 *	Bit 15     = Heartbeat Monitor Enable
			 *	Bits 16-17 = SFP Additional Info
			 *	    SFP info 0x0 = Unrecognized transceiver for
			 *	    Ethernet
			 *	    SFP info 0x1 = SFP+ brand validation failed
			 *	    SFP info 0x2 = SFP+ speed validation failed
			 *	    SFP info 0x3 = SFP+ access error
			 *	Bit 18     = SFP Multirate
			 *	Bit 19     = SFP Tx Fault
			 *	Bits 20-22 = Link Speed
			 *	Bits 23-27 = Reserved
			 *	Bits 28-30 = DCBX Status
			 *	    DCBX Status 0x0 = DCBX Disabled
			 *	    DCBX Status 0x1 = DCBX Enabled
			 *	    DCBX Status 0x2 = DCBX Exchange error
			 *	Bit 31     = Reserved
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x, sfp_status=0x%x.\n",
			    peg_fw_state, nw_interface_link_up,
			    nw_interface_signal_detect, sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}

/**
 * qla27xx_copy_multiple_pkt() - Copy over purex/purls packets that can
 * span over multiple IOCBs.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 * @rsp: Response queue
 * @is_purls: true for an Unsolicited Received FC-NVMe LS rsp IOCB,
 * false for an Unsolicited Received ELS IOCB
 * @byte_order: true to change the byte ordering of the iocb payload
 */
struct purex_item *
qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha, void **pkt,
			  struct rsp_que **rsp, bool is_purls,
			  bool byte_order)
{
	struct purex_entry_24xx *purex = NULL;
	struct pt_ls4_rx_unsol *purls = NULL;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0, payload_size = 0;
	uint16_t entry_count, entry_count_remaining;
	struct purex_item *item;
	void *iocb_pkt = NULL;

	if (is_purls) {
		purls = *pkt;
		total_bytes = (le16_to_cpu(purls->frame_size) & 0x0FFF) -
		    PURX_ELS_HEADER_SIZE;
		entry_count = entry_count_remaining = purls->entry_count;
		payload_size = sizeof(purls->payload);
	} else {
		purex = *pkt;
		total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) -
		    PURX_ELS_HEADER_SIZE;
		entry_count = entry_count_remaining = purex->entry_count;
		payload_size = sizeof(purex->els_frame_payload);
	}

	if (total_bytes > sizeof(item->iocb.iocb))
		total_bytes = sizeof(item->iocb.iocb);

	pending_bytes = total_bytes;
	no_bytes = (pending_bytes > payload_size) ? payload_size :
	    pending_bytes;
	ql_dbg(ql_dbg_async, vha, 0x509a,
	    "%s LS, frame_size 0x%x, entry count %d\n",
	    (is_purls ? "PURLS" : "FPIN"), total_bytes, entry_count);

	item = qla24xx_alloc_purex_item(vha, total_bytes);
	if (!item)
		return item;

	iocb_pkt = &item->iocb;

	if (is_purls)
		memcpy(iocb_pkt, &purls->payload[0], no_bytes);
	else
		memcpy(iocb_pkt, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	if (is_purls)
		((response_t *)purls)->signature = RESPONSE_PROCESSED;
	else
		((response_t *)purex)->signature = RESPONSE_PROCESSED;
	wmb();

	do {
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
				ql_dbg(ql_dbg_async, vha, 0x5084,
				    "Ran out of IOCBs, partial data 0x%x\n",
				    buffer_copy_offset);
				cpu_relax();
				continue;
			}

			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				    "Unexpected IOCB type, partial data 0x%x\n",
				    buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
			    sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
				    new_pkt->data, no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				ql_log(ql_log_warn, vha, 0x5044,
				    "Attempt to copy more than we got, optimizing..%x\n",
				    buffer_copy_offset);
				memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
				    new_pkt->data,
				    total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			    "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
			    total_bytes, entry_count_remaining);
			qla24xx_free_purex_item(item);
			return NULL;
		}
	} while (entry_count_remaining > 0);

	if (byte_order)
		host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);

	return item;
}

/**
 * qla2x00_is_a_vp_did() - Check whether an RSCN port ID belongs to one of
 * this adapter's virtual ports.
 * @vha: SCSI driver HA context
 * @rscn_entry: 24-bit port ID from the RSCN entry
 *
 * Returns 1 if the port ID matches a vport, otherwise 0.
 */
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}

fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
		if (f->loop_id == loop_id)
			return f;
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
	u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}
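
/**
 * qla27xx_handle_8200_aen() - Handle the MPI heartbeat-stop AEN.  Shall be
 * called only on supported (ISP27xx/ISP28xx) adapters.
 * @vha: SCSI driver HA context
 * @mb: mailbox registers; BIT_8 of mb[1] indicates that an MPI reset and
 * firmware dump are needed
 */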
static void
qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;
	bool reset_isp_needed = false;

	ql_log(ql_log_warn, vha, 0x02f0,
	    "MPI Heartbeat stop. MPI reset is%s needed. "
	    "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
	    mb[1] & BIT_8 ? "" : " not",
	    mb[0], mb[1], mb[2], mb[3]);

	if ((mb[1] & BIT_8) == 0)
		return;

	ql_log(ql_log_warn, vha, 0x02f1,
	    "MPI Heartbeat stop. FW dump needed\n");

	if (ql2xfulldump_on_mpifail) {
		ha->isp_ops->fw_dump(vha);
		reset_isp_needed = true;
	}

	ha->isp_ops->mpi_fw_dump(vha, 1);

	if (reset_isp_needed) {
		vha->hw->flags.fw_init_done = 0;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}

static struct purex_item *
qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
{
	struct purex_item *item = NULL;
	uint8_t item_hdr_size = sizeof(*item);

	if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
		item = kzalloc(item_hdr_size +
		    (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
	} else {
		if (atomic_inc_return(&vha->default_item.in_use) == 1) {
			item = &vha->default_item;
			goto initialize_purex_header;
		} else {
			item = kzalloc(item_hdr_size, GFP_ATOMIC);
		}
	}
	if (!item) {
		ql_log(ql_log_warn, vha, 0x5092,
		    ">> Failed to allocate purex list item.\n");

		return NULL;
	}

initialize_purex_header:
	item->vha = vha;
	item->size = size;
	return item;
}
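
/**
 * qla24xx_queue_purex_item() - Add a purex item to the adapter's purex
 * list and flag the DPC thread to process it.
 * @vha: SCSI driver HA context
 * @pkt: purex item to queue
 * @process_item: handler invoked when the queued item is processed
 */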
void
qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
	void (*process_item)(struct scsi_qla_host *vha,
	struct purex_item *pkt))
{
	struct purex_list *list = &vha->purex_list;
	ulong flags;

	pkt->process_item = process_item;

	spin_lock_irqsave(&list->lock, flags);
	list_add_tail(&pkt->list, &list->head);
	spin_unlock_irqrestore(&list->lock, flags);

	set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
}

/**
 * qla24xx_copy_std_pkt() - Copy over a purex ELS packet that is
 * contained in a single IOCB.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 */
static struct purex_item
*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
{
	struct purex_item *item;

	item = qla24xx_alloc_purex_item(vha,
	    QLA_DEFAULT_PAYLOAD_SIZE);
	if (!item)
		return item;

	memcpy(&item->iocb, pkt, sizeof(item->iocb));
	return item;
}
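
/*
 * Note: qla27xx_copy_fpin_pkt() below uses the same multi-IOCB reassembly
 * scheme as qla27xx_copy_multiple_pkt() above, but is specific to FPIN
 * ELS frames and always byte-swaps the assembled payload.
 */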
/**
 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can
 * span over multiple IOCBs.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 * @rsp: Response queue
 */
static struct purex_item *
qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
	struct rsp_que **rsp)
{
	struct purex_entry_24xx *purex = *pkt;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0;
	uint16_t entry_count, entry_count_remaining;
	struct purex_item *item;
	void *fpin_pkt = NULL;

	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
	    - PURX_ELS_HEADER_SIZE;

	if (total_bytes > sizeof(item->iocb.iocb))
		total_bytes = sizeof(item->iocb.iocb);

	pending_bytes = total_bytes;
	entry_count = entry_count_remaining = purex->entry_count;
	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
	    sizeof(purex->els_frame_payload) : pending_bytes;
	ql_log(ql_log_info, vha, 0x509a,
	    "FPIN ELS, frame_size 0x%x, entry count %d\n",
	    total_bytes, entry_count);

	item = qla24xx_alloc_purex_item(vha, total_bytes);
	if (!item)
		return item;

	fpin_pkt = &item->iocb;

	memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	((response_t *)purex)->signature = RESPONSE_PROCESSED;
	wmb();

	do {
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
				ql_dbg(ql_dbg_async, vha, 0x5084,
				    "Ran out of IOCBs, partial data 0x%x\n",
				    buffer_copy_offset);
				cpu_relax();
				continue;
			}

			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				    "Unexpected IOCB type, partial data 0x%x\n",
				    buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
			    sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy(((uint8_t *)fpin_pkt +
				    buffer_copy_offset), new_pkt->data,
				    no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				ql_log(ql_log_warn, vha, 0x5044,
				    "Attempt to copy more than we got, optimizing..%x\n",
				    buffer_copy_offset);
				memcpy(((uint8_t *)fpin_pkt +
				    buffer_copy_offset), new_pkt->data,
				    total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			    "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
			    total_bytes, entry_count_remaining);
			qla24xx_free_purex_item(item);
			return NULL;
		}
	} while (entry_count_remaining > 0);
	host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
	return item;
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t handle_cnt;
	uint16_t cnt, mbx;
	uint32_t handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t rscn_entry, host_pid;
	unsigned long flags;
	fc_port_t *fcport = NULL;

	if (!vha->hw->flags.fw_started) {
		ql_log(ql_log_warn, vha, 0x50ff,
		    "Dropping AEN - %04x %04x %04x %04x.\n",
		    mb[0], mb[1], mb[2], mb[3]);
		return;
	}

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = make_handle(mb[2], mb[1]);
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = make_handle(mb[2], mb[1]);
		handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = 0;

		vha->hw_err_cnt++;

		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			u16 m[4];

			m[0] = rd_reg_word(&reg24->mailbox4);
			m[1] = rd_reg_word(&reg24->mailbox5);
			m[2] = rd_reg_word(&reg24->mailbox6);
			mbx = m[3] = rd_reg_word(&reg24->mailbox7);

			ql_log(ql_log_warn, vha, 0x5003,
			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
			    mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
		} else
			ql_log(ql_log_warn, vha, 0x5003,
			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
			    mb[1], mb[2], mb[3]);

		if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
		    rd_reg_word(&reg24->mailbox7) & BIT_8)
			ha->isp_ops->mpi_fw_dump(vha, 1);
		ha->isp_ops->fw_dump(vha);
		ha->flags.fw_init_done = 0;
		QLA_FW_STOPPED(ha);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		vha->hw_err_cnt++;

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		vha->hw_err_cnt++;

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;

	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ha->flags.lip_ae = 1;

		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mb[2] & BIT_0)
				ql_log(ql_log_info, vha, 0x11a0,
				    "FEC=enabled (link up).\n");
		}

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);

		if (vha->link_down_time < vha->hw->port_down_retry_count) {
			vha->short_link_down_cnt++;
			vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
		}

		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		SAVE_TOPO(ha);
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;
		vha->link_down_time = 0;

		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
		    ? rd_reg_word(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
		    : mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP
			 * Restore for Physical Port only
			 */
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled &&
				    (ha->current_topology == ISP_CFG_F)) {
					memcpy(vha->port_name, ha->port_name, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x00d8, "LOOP DOWN detected, "
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		ha->flags.lip_ae = 0;

		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			if (!N2N_TOPO(ha))
				qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *	OR 0xffff for global event
		 * mb[2] = New login state
		 *	7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *	Event is global, vp_idx is NOT all vps,
		 *	vp_idx does not match
		 *	Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
		    (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ? "unavailable" : "logout",
			    mb[1], mb[2], mb[3]);

			if (mb[1] == 0xffff)
				goto global_port_update;

			if (mb[1] == NPH_SNS_LID(ha)) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* use handle_cnt for loop id/nport handle */
			if (IS_FWI2_CAPABLE(ha))
				handle_cnt = NPH_SNS;
			else
				handle_cnt = SIMPLE_NAME_SERVER;
			if (mb[1] == handle_cnt) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* Port logout */
			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport) {
				ql_dbg(ql_dbg_async, vha, 0x5011,
				    "Could not find fcport: %04x %04x %04x\n",
				    mb[1], mb[2], mb[3]);
				break;
			}

			if (atomic_read(&fcport->state) != FCS_ONLINE) {
				ql_dbg(ql_dbg_async, vha, 0x5012,
				    "Port state is not online. State: 0x%x\n",
				    atomic_read(&fcport->state));
				ql_dbg(ql_dbg_async, vha, 0x5012,
				    "Scheduling session for deletion\n");
				fcport->logout_on_delete = 0;
				qlt_schedule_sess_for_deletion(fcport);
				break;
			}

			ql_dbg(ql_dbg_async, vha, 0x508a,
			    "Marking port lost loopid=%04x portid=%06x.\n",
			    fcport->loop_id, fcport->d_id.b24);

			if (qla_ini_mode_enabled(vha)) {
				fcport->logout_on_delete = 0;
				qlt_schedule_sess_for_deletion(fcport);
			}
			break;

global_port_update:
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    !ha->flags.n2n_ae &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);
		vha->scan.scan_retry = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(VP_CONFIG_OK, &vha->vp_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_log(ql_log_warn, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
		    | vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
		{
			struct event_arg ea;

			memset(&ea, 0, sizeof(ea));
			ea.id.b24 = rscn_entry;
			ea.id.b.rsvd_1 = rscn_entry >> 24;
			qla2x00_handle_rscn(vha, &ea);
			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		}
		break;
	case MBA_CONGN_NOTI_RECV:
		if (!ha->flags.scm_enabled ||
		    mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
			break;

		if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
			ql_dbg(ql_dbg_async, vha, 0x509b,
			    "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
		} else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
			ql_log(ql_log_warn, vha, 0x509b,
			    "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
		}
		break;
	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		vha->interface_err_cnt++;
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
1875 "DCBX Started -- %04x %04x %04x.\n",
1876 mb[1], mb[2], mb[3]);
1877 break;
1878 case MBA_DCBX_PARAM_UPDATE:
1879 ql_dbg(ql_dbg_async, vha, 0x501f,
1880 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1881 mb[1], mb[2], mb[3]);
1882 break;
1883 case MBA_FCF_CONF_ERR:
1884 ql_dbg(ql_dbg_async, vha, 0x5020,
1885 "FCF Configuration Error -- %04x %04x %04x.\n",
1886 mb[1], mb[2], mb[3]);
1887 break;
1888 case MBA_IDC_NOTIFY:
1889 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1890 mb[4] = rd_reg_word(&reg24->mailbox4);
1891 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1892 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1893 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1894 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1895 /*
1896 * Extend loop down timer since port is active.
1897 */
1898 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1899 atomic_set(&vha->loop_down_timer,
1900 LOOP_DOWN_TIME);
1901 qla2xxx_wake_dpc(vha);
1902 }
1903 }
1904 fallthrough;
1905 case MBA_IDC_COMPLETE:
1906 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1907 complete(&ha->lb_portup_comp);
1908 fallthrough;
1909 case MBA_IDC_TIME_EXT:
1910 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1911 IS_QLA8044(ha))
1912 qla81xx_idc_event(vha, mb[0], mb[1]);
1913 break;
1914
1915 case MBA_IDC_AEN:
1916 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1917 vha->hw_err_cnt++;
1918 qla27xx_handle_8200_aen(vha, mb);
1919 } else if (IS_QLA83XX(ha)) {
1920 mb[4] = rd_reg_word(&reg24->mailbox4);
1921 mb[5] = rd_reg_word(&reg24->mailbox5);
1922 mb[6] = rd_reg_word(&reg24->mailbox6);
1923 mb[7] = rd_reg_word(&reg24->mailbox7);
1924 qla83xx_handle_8200_aen(vha, mb);
1925 } else {
1926 ql_dbg(ql_dbg_async, vha, 0x5052,
1927 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
1928 mb[0], mb[1], mb[2], mb[3]);
1929 }
1930 break;
1931
1932 case MBA_DPORT_DIAGNOSTICS:
1933 if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR ||
1934 (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR)
1935 vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
1936 ql_dbg(ql_dbg_async, vha, 0x5052,
1937 "D-Port Diagnostics: %04x %04x %04x %04x\n",
1938 mb[0], mb[1], mb[2], mb[3]);
1939 memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
1940 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1941 static char *results[] = {
1942 "start", "done(pass)", "done(error)", "undefined" };
1943 static char *types[] = {
1944 "none", "dynamic", "static", "other" };
1945 uint result = mb[1] >> 0 & 0x3;
1946 uint type = mb[1] >> 6 & 0x3;
1947 uint sw = mb[1] >> 15 & 0x1;
1948 ql_dbg(ql_dbg_async, vha, 0x5052,
1949 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
1950 results[result], types[type], sw);
1951 if (result == 2) {
1952 static char *reasons[] = {
1953 "reserved", "unexpected reject",
1954 "unexpected phase", "retry exceeded",
1955 "timed out", "not supported",
1956 "user stopped" };
1957 uint reason = mb[2] >> 0 & 0xf;
1958 uint phase = mb[2] >> 12 & 0xf;
1959 ql_dbg(ql_dbg_async, vha, 0x5052,
1960 "D-Port Diagnostics: reason=%s phase=%u\n",
1961 reason < 7 ? 
reasons[reason] : "other", 1962 phase >> 1); 1963 } 1964 } 1965 break; 1966 1967 case MBA_TEMPERATURE_ALERT: 1968 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) 1969 display_Laser_info(vha, mb[1], mb[2], mb[3]); 1970 ql_dbg(ql_dbg_async, vha, 0x505e, 1971 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); 1972 break; 1973 1974 case MBA_TRANS_INSERT: 1975 ql_dbg(ql_dbg_async, vha, 0x5091, 1976 "Transceiver Insertion: %04x\n", mb[1]); 1977 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags); 1978 break; 1979 1980 case MBA_TRANS_REMOVE: 1981 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n"); 1982 break; 1983 1984 default: 1985 ql_dbg(ql_dbg_async, vha, 0x5057, 1986 "Unknown AEN:%04x %04x %04x %04x\n", 1987 mb[0], mb[1], mb[2], mb[3]); 1988 } 1989 1990 qlt_async_event(mb[0], vha, mb); 1991 1992 if (!vha->vp_idx && ha->num_vhosts) 1993 qla2x00_alert_all_vps(rsp, mb); 1994 } 1995 1996 /** 1997 * qla2x00_process_completed_request() - Process a Fast Post response. 1998 * @vha: SCSI driver HA context 1999 * @req: request queue 2000 * @index: SRB index 2001 */ 2002 void 2003 qla2x00_process_completed_request(struct scsi_qla_host *vha, 2004 struct req_que *req, uint32_t index) 2005 { 2006 srb_t *sp; 2007 struct qla_hw_data *ha = vha->hw; 2008 2009 /* Validate handle. */ 2010 if (index >= req->num_outstanding_cmds) { 2011 ql_log(ql_log_warn, vha, 0x3014, 2012 "Invalid SCSI command index (%x).\n", index); 2013 2014 if (IS_P3P_TYPE(ha)) 2015 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2016 else 2017 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2018 return; 2019 } 2020 2021 sp = req->outstanding_cmds[index]; 2022 if (sp) { 2023 /* Free outstanding command slot. */ 2024 req->outstanding_cmds[index] = NULL; 2025 2026 /* Save ISP completion status */ 2027 sp->done(sp, DID_OK << 16); 2028 } else { 2029 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 2030 2031 if (IS_P3P_TYPE(ha)) 2032 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2033 else 2034 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2035 } 2036 } 2037 2038 static srb_t * 2039 qla_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 2040 struct req_que *req, void *iocb, u16 *ret_index) 2041 { 2042 struct qla_hw_data *ha = vha->hw; 2043 sts_entry_t *pkt = iocb; 2044 srb_t *sp; 2045 uint16_t index; 2046 2047 if (pkt->handle == QLA_SKIP_HANDLE) 2048 return NULL; 2049 2050 index = LSW(pkt->handle); 2051 if (index >= req->num_outstanding_cmds) { 2052 ql_log(ql_log_warn, vha, 0x5031, 2053 "%s: Invalid command index (%x) type %8ph.\n", 2054 func, index, iocb); 2055 if (IS_P3P_TYPE(ha)) 2056 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2057 else 2058 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2059 return NULL; 2060 } 2061 sp = req->outstanding_cmds[index]; 2062 if (!sp) { 2063 ql_log(ql_log_warn, vha, 0x5032, 2064 "%s: Invalid completion handle (%x) -- timed-out.\n", 2065 func, index); 2066 return NULL; 2067 } 2068 if (sp->handle != index) { 2069 ql_log(ql_log_warn, vha, 0x5033, 2070 "%s: SRB handle (%x) mismatch %x.\n", func, 2071 sp->handle, index); 2072 return NULL; 2073 } 2074 2075 *ret_index = index; 2076 qla_put_fw_resources(sp->qpair, &sp->iores); 2077 return sp; 2078 } 2079 2080 srb_t * 2081 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 2082 struct req_que *req, void *iocb) 2083 { 2084 uint16_t index; 2085 srb_t *sp; 2086 2087 sp = qla_get_sp_from_handle(vha, func, req, iocb, &index); 2088 if (sp) 2089 req->outstanding_cmds[index] = NULL; 2090 2091 return sp; 2092 } 2093 2094 static void 2095 
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2096 struct mbx_entry *mbx) 2097 { 2098 const char func[] = "MBX-IOCB"; 2099 const char *type; 2100 fc_port_t *fcport; 2101 srb_t *sp; 2102 struct srb_iocb *lio; 2103 uint16_t *data; 2104 uint16_t status; 2105 2106 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 2107 if (!sp) 2108 return; 2109 2110 lio = &sp->u.iocb_cmd; 2111 type = sp->name; 2112 fcport = sp->fcport; 2113 data = lio->u.logio.data; 2114 2115 data[0] = MBS_COMMAND_ERROR; 2116 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 2117 QLA_LOGIO_LOGIN_RETRIED : 0; 2118 if (mbx->entry_status) { 2119 ql_dbg(ql_dbg_async, vha, 0x5043, 2120 "Async-%s error entry - hdl=%x portid=%02x%02x%02x " 2121 "entry-status=%x status=%x state-flag=%x " 2122 "status-flags=%x.\n", type, sp->handle, 2123 fcport->d_id.b.domain, fcport->d_id.b.area, 2124 fcport->d_id.b.al_pa, mbx->entry_status, 2125 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 2126 le16_to_cpu(mbx->status_flags)); 2127 2128 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, 2129 mbx, sizeof(*mbx)); 2130 2131 goto logio_done; 2132 } 2133 2134 status = le16_to_cpu(mbx->status); 2135 if (status == 0x30 && sp->type == SRB_LOGIN_CMD && 2136 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 2137 status = 0; 2138 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 2139 ql_dbg(ql_dbg_async, vha, 0x5045, 2140 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", 2141 type, sp->handle, fcport->d_id.b.domain, 2142 fcport->d_id.b.area, fcport->d_id.b.al_pa, 2143 le16_to_cpu(mbx->mb1)); 2144 2145 data[0] = MBS_COMMAND_COMPLETE; 2146 if (sp->type == SRB_LOGIN_CMD) { 2147 fcport->port_type = FCT_TARGET; 2148 if (le16_to_cpu(mbx->mb1) & BIT_0) 2149 fcport->port_type = FCT_INITIATOR; 2150 else if (le16_to_cpu(mbx->mb1) & BIT_1) 2151 fcport->flags |= FCF_FCP2_DEVICE; 2152 } 2153 goto logio_done; 2154 } 2155 2156 data[0] = le16_to_cpu(mbx->mb0); 2157 switch (data[0]) { 2158 case MBS_PORT_ID_USED: 2159 data[1] = le16_to_cpu(mbx->mb1); 2160 break; 2161 case MBS_LOOP_ID_USED: 2162 break; 2163 default: 2164 data[0] = MBS_COMMAND_ERROR; 2165 break; 2166 } 2167 2168 ql_log(ql_log_warn, vha, 0x5046, 2169 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " 2170 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, 2171 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 2172 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 2173 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 2174 le16_to_cpu(mbx->mb7)); 2175 2176 logio_done: 2177 sp->done(sp, 0); 2178 } 2179 2180 static void 2181 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2182 struct mbx_24xx_entry *pkt) 2183 { 2184 const char func[] = "MBX-IOCB2"; 2185 struct qla_hw_data *ha = vha->hw; 2186 srb_t *sp; 2187 struct srb_iocb *si; 2188 u16 sz, i; 2189 int res; 2190 2191 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2192 if (!sp) 2193 return; 2194 2195 if (sp->type == SRB_SCSI_CMD || 2196 sp->type == SRB_NVME_CMD || 2197 sp->type == SRB_TM_CMD) { 2198 ql_log(ql_log_warn, vha, 0x509d, 2199 "Inconsistent event entry type %d\n", sp->type); 2200 if (IS_P3P_TYPE(ha)) 2201 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2202 else 2203 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2204 return; 2205 } 2206 2207 si = &sp->u.iocb_cmd; 2208 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); 2209 2210 for (i = 0; i < sz; i++) 2211 si->u.mbx.in_mb[i] = pkt->mb[i]; 2212 2213 res = 
(si->u.mbx.in_mb[0] & MBS_MASK); 2214 2215 sp->done(sp, res); 2216 } 2217 2218 static void 2219 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2220 struct nack_to_isp *pkt) 2221 { 2222 const char func[] = "nack"; 2223 srb_t *sp; 2224 int res = 0; 2225 2226 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2227 if (!sp) 2228 return; 2229 2230 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) 2231 res = QLA_FUNCTION_FAILED; 2232 2233 sp->done(sp, res); 2234 } 2235 2236 static void 2237 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 2238 sts_entry_t *pkt, int iocb_type) 2239 { 2240 const char func[] = "CT_IOCB"; 2241 const char *type; 2242 srb_t *sp; 2243 struct bsg_job *bsg_job; 2244 struct fc_bsg_reply *bsg_reply; 2245 uint16_t comp_status; 2246 int res = 0; 2247 2248 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2249 if (!sp) 2250 return; 2251 2252 switch (sp->type) { 2253 case SRB_CT_CMD: 2254 bsg_job = sp->u.bsg_job; 2255 bsg_reply = bsg_job->reply; 2256 2257 type = "ct pass-through"; 2258 2259 comp_status = le16_to_cpu(pkt->comp_status); 2260 2261 /* 2262 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 2263 * fc payload to the caller 2264 */ 2265 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 2266 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2267 2268 if (comp_status != CS_COMPLETE) { 2269 if (comp_status == CS_DATA_UNDERRUN) { 2270 res = DID_OK << 16; 2271 bsg_reply->reply_payload_rcv_len = 2272 le16_to_cpu(pkt->rsp_info_len); 2273 2274 ql_log(ql_log_warn, vha, 0x5048, 2275 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", 2276 type, comp_status, 2277 bsg_reply->reply_payload_rcv_len); 2278 } else { 2279 ql_log(ql_log_warn, vha, 0x5049, 2280 "CT pass-through-%s error comp_status=0x%x.\n", 2281 type, comp_status); 2282 res = DID_ERROR << 16; 2283 bsg_reply->reply_payload_rcv_len = 0; 2284 } 2285 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 2286 pkt, sizeof(*pkt)); 2287 } else { 2288 res = DID_OK << 16; 2289 bsg_reply->reply_payload_rcv_len = 2290 bsg_job->reply_payload.payload_len; 2291 bsg_job->reply_len = 0; 2292 } 2293 break; 2294 case SRB_CT_PTHRU_CMD: 2295 /* 2296 * borrowing sts_entry_24xx.comp_status. 
2297 * same location as ct_entry_24xx.comp_status 2298 */ 2299 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, 2300 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 2301 sp->name); 2302 break; 2303 } 2304 2305 sp->done(sp, res); 2306 } 2307 2308 static void 2309 qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req, 2310 struct sts_entry_24xx *pkt, int iocb_type) 2311 { 2312 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt; 2313 const char func[] = "ELS_CT_IOCB"; 2314 const char *type; 2315 srb_t *sp; 2316 struct bsg_job *bsg_job; 2317 struct fc_bsg_reply *bsg_reply; 2318 uint16_t comp_status; 2319 uint32_t fw_status[3]; 2320 int res, logit = 1; 2321 struct srb_iocb *els; 2322 uint n; 2323 scsi_qla_host_t *vha; 2324 struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt; 2325 2326 sp = qla2x00_get_sp_from_handle(v, func, req, pkt); 2327 if (!sp) 2328 return; 2329 bsg_job = sp->u.bsg_job; 2330 vha = sp->vha; 2331 2332 type = NULL; 2333 2334 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 2335 fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1); 2336 fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2); 2337 2338 switch (sp->type) { 2339 case SRB_ELS_CMD_RPT: 2340 case SRB_ELS_CMD_HST: 2341 type = "rpt hst"; 2342 break; 2343 case SRB_ELS_CMD_HST_NOLOGIN: 2344 type = "els"; 2345 { 2346 struct els_entry_24xx *els = (void *)pkt; 2347 struct qla_bsg_auth_els_request *p = 2348 (struct qla_bsg_auth_els_request *)bsg_job->request; 2349 2350 ql_dbg(ql_dbg_user, vha, 0x700f, 2351 "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n", 2352 __func__, sc_to_str(p->e.sub_cmd), 2353 e->d_id[2], e->d_id[1], e->d_id[0], 2354 comp_status, p->e.extra_rx_xchg_address, bsg_job); 2355 2356 if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) { 2357 if (sp->remap.remapped) { 2358 n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2359 bsg_job->reply_payload.sg_cnt, 2360 sp->remap.rsp.buf, 2361 sp->remap.rsp.len); 2362 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e, 2363 "%s: SG copied %x of %x\n", 2364 __func__, n, sp->remap.rsp.len); 2365 } else { 2366 ql_dbg(ql_dbg_user, vha, 0x700f, 2367 "%s: NOT REMAPPED (error)...!!!\n", 2368 __func__); 2369 } 2370 } 2371 } 2372 break; 2373 case SRB_CT_CMD: 2374 type = "ct pass-through"; 2375 break; 2376 case SRB_ELS_DCMD: 2377 type = "Driver ELS logo"; 2378 if (iocb_type != ELS_IOCB_TYPE) { 2379 ql_dbg(ql_dbg_user, vha, 0x5047, 2380 "Completing %s: (%p) type=%d.\n", 2381 type, sp, sp->type); 2382 sp->done(sp, 0); 2383 return; 2384 } 2385 break; 2386 case SRB_CT_PTHRU_CMD: 2387 /* borrowing sts_entry_24xx.comp_status. 
2388 same location as ct_entry_24xx.comp_status 2389 */ 2390 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt, 2391 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 2392 sp->name); 2393 sp->done(sp, res); 2394 return; 2395 default: 2396 ql_dbg(ql_dbg_user, vha, 0x503e, 2397 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); 2398 return; 2399 } 2400 2401 if (iocb_type == ELS_IOCB_TYPE) { 2402 els = &sp->u.iocb_cmd; 2403 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]); 2404 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]); 2405 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]); 2406 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]); 2407 if (comp_status == CS_COMPLETE) { 2408 res = DID_OK << 16; 2409 } else { 2410 if (comp_status == CS_DATA_UNDERRUN) { 2411 res = DID_OK << 16; 2412 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu( 2413 ese->total_byte_count)); 2414 2415 if (sp->remap.remapped && 2416 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) { 2417 ql_dbg(ql_dbg_user, vha, 0x503f, 2418 "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x", 2419 __func__, e->s_id[0], e->s_id[2], e->s_id[1], 2420 e->d_id[2], e->d_id[1], e->d_id[0]); 2421 logit = 0; 2422 } 2423 2424 } else if (comp_status == CS_PORT_LOGGED_OUT) { 2425 ql_dbg(ql_dbg_disc, vha, 0x911e, 2426 "%s %d schedule session deletion\n", 2427 __func__, __LINE__); 2428 2429 els->u.els_plogi.len = 0; 2430 res = DID_IMM_RETRY << 16; 2431 qlt_schedule_sess_for_deletion(sp->fcport); 2432 } else { 2433 els->u.els_plogi.len = 0; 2434 res = DID_ERROR << 16; 2435 } 2436 2437 if (sp->remap.remapped && 2438 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) { 2439 if (logit) { 2440 ql_dbg(ql_dbg_user, vha, 0x503f, 2441 "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n", 2442 type, sp->handle, comp_status); 2443 2444 ql_dbg(ql_dbg_user, vha, 0x503f, 2445 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n", 2446 fw_status[1], fw_status[2], 2447 le32_to_cpu(((struct els_sts_entry_24xx *) 2448 pkt)->total_byte_count), 2449 e->s_id[0], e->s_id[2], e->s_id[1], 2450 e->d_id[2], e->d_id[1], e->d_id[0]); 2451 } 2452 if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE && 2453 sp->type == SRB_ELS_CMD_HST_NOLOGIN) { 2454 ql_dbg(ql_dbg_edif, vha, 0x911e, 2455 "%s rcv reject. 
Sched delete\n", __func__); 2456 qlt_schedule_sess_for_deletion(sp->fcport); 2457 } 2458 } else if (logit) { 2459 ql_log(ql_log_info, vha, 0x503f, 2460 "%s IOCB Done hdl=%x comp_status=0x%x\n", 2461 type, sp->handle, comp_status); 2462 ql_log(ql_log_info, vha, 0x503f, 2463 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n", 2464 fw_status[1], fw_status[2], 2465 le32_to_cpu(((struct els_sts_entry_24xx *) 2466 pkt)->total_byte_count), 2467 e->s_id[0], e->s_id[2], e->s_id[1], 2468 e->d_id[2], e->d_id[1], e->d_id[0]); 2469 } 2470 } 2471 goto els_ct_done; 2472 } 2473 2474 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 2475 * fc payload to the caller 2476 */ 2477 bsg_job = sp->u.bsg_job; 2478 bsg_reply = bsg_job->reply; 2479 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 2480 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 2481 2482 if (comp_status != CS_COMPLETE) { 2483 if (comp_status == CS_DATA_UNDERRUN) { 2484 res = DID_OK << 16; 2485 bsg_reply->reply_payload_rcv_len = 2486 le32_to_cpu(ese->total_byte_count); 2487 2488 ql_dbg(ql_dbg_user, vha, 0x503f, 2489 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 2490 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 2491 type, sp->handle, comp_status, fw_status[1], fw_status[2], 2492 le32_to_cpu(ese->total_byte_count)); 2493 } else { 2494 ql_dbg(ql_dbg_user, vha, 0x5040, 2495 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 2496 "error subcode 1=0x%x error subcode 2=0x%x.\n", 2497 type, sp->handle, comp_status, 2498 le32_to_cpu(ese->error_subcode_1), 2499 le32_to_cpu(ese->error_subcode_2)); 2500 res = DID_ERROR << 16; 2501 bsg_reply->reply_payload_rcv_len = 0; 2502 } 2503 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), 2504 fw_status, sizeof(fw_status)); 2505 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 2506 pkt, sizeof(*pkt)); 2507 } 2508 else { 2509 res = DID_OK << 16; 2510 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 2511 bsg_job->reply_len = 0; 2512 } 2513 els_ct_done: 2514 2515 sp->done(sp, res); 2516 } 2517 2518 static void 2519 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 2520 struct logio_entry_24xx *logio) 2521 { 2522 const char func[] = "LOGIO-IOCB"; 2523 const char *type; 2524 fc_port_t *fcport; 2525 srb_t *sp; 2526 struct srb_iocb *lio; 2527 uint16_t *data; 2528 uint32_t iop[2]; 2529 int logit = 1; 2530 2531 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 2532 if (!sp) 2533 return; 2534 2535 lio = &sp->u.iocb_cmd; 2536 type = sp->name; 2537 fcport = sp->fcport; 2538 data = lio->u.logio.data; 2539 2540 data[0] = MBS_COMMAND_ERROR; 2541 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
2542 QLA_LOGIO_LOGIN_RETRIED : 0;
2543 if (logio->entry_status) {
2544 ql_log(ql_log_warn, fcport->vha, 0x5034,
2545 "Async-%s error entry - %8phC hdl=%x "
2546 "portid=%02x%02x%02x entry-status=%x.\n",
2547 type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
2548 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2549 logio->entry_status);
2550 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
2551 logio, sizeof(*logio));
2552
2553 goto logio_done;
2554 }
2555
2556 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
2557 ql_dbg(ql_dbg_async, sp->vha, 0x5036,
2558 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
2559 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2560 le32_to_cpu(logio->io_parameter[0]));
2561
2562 vha->hw->exch_starvation = 0;
2563 data[0] = MBS_COMMAND_COMPLETE;
2564
2565 if (sp->type == SRB_PRLI_CMD) {
2566 lio->u.logio.iop[0] =
2567 le32_to_cpu(logio->io_parameter[0]);
2568 lio->u.logio.iop[1] =
2569 le32_to_cpu(logio->io_parameter[1]);
2570 goto logio_done;
2571 }
2572
2573 if (sp->type != SRB_LOGIN_CMD)
2574 goto logio_done;
2575
2576 lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
2577 if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
2578 fcport->flags |= FCF_FCSP_DEVICE;
2579
2580 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2581 if (iop[0] & BIT_4) {
2582 fcport->port_type = FCT_TARGET;
2583 if (iop[0] & BIT_8)
2584 fcport->flags |= FCF_FCP2_DEVICE;
2585 } else if (iop[0] & BIT_5)
2586 fcport->port_type = FCT_INITIATOR;
2587
2588 if (iop[0] & BIT_7)
2589 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2590
2591 if (logio->io_parameter[7] || logio->io_parameter[8])
2592 fcport->supported_classes |= FC_COS_CLASS2;
2593 if (logio->io_parameter[9] || logio->io_parameter[10])
2594 fcport->supported_classes |= FC_COS_CLASS3;
2595
2596 goto logio_done;
2597 }
2598
2599 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2600 iop[1] = le32_to_cpu(logio->io_parameter[1]);
2601 lio->u.logio.iop[0] = iop[0];
2602 lio->u.logio.iop[1] = iop[1];
2603 switch (iop[0]) {
2604 case LSC_SCODE_PORTID_USED:
2605 data[0] = MBS_PORT_ID_USED;
2606 data[1] = LSW(iop[1]);
2607 logit = 0;
2608 break;
2609 case LSC_SCODE_NPORT_USED:
2610 data[0] = MBS_LOOP_ID_USED;
2611 logit = 0;
2612 break;
2613 case LSC_SCODE_CMD_FAILED:
2614 if (iop[1] == 0x0606) {
2615 /*
2616 * PLOGI/PRLI completed: we must have received a
2617 * PLOGI/PRLI that the target side acked.
2618 */
2619 data[0] = MBS_COMMAND_COMPLETE;
2620 goto logio_done;
2621 }
2622 data[0] = MBS_COMMAND_ERROR;
2623 break;
2624 case LSC_SCODE_NOXCB:
2625 vha->hw->exch_starvation++;
2626 if (vha->hw->exch_starvation > 5) {
2627 ql_log(ql_log_warn, vha, 0xd046,
2628 "Exchange starvation. Resetting RISC\n");
2629
2630 vha->hw->exch_starvation = 0;
2631
2632 if (IS_P3P_TYPE(vha->hw))
2633 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2634 else
2635 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2636 qla2xxx_wake_dpc(vha);
2637 }
2638 fallthrough;
2639 default:
2640 data[0] = MBS_COMMAND_ERROR;
2641 break;
2642 }
2643
2644 if (logit)
2645 ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: "
2646 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2647 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2648 le16_to_cpu(logio->comp_status),
2649 le32_to_cpu(logio->io_parameter[0]),
2650 le32_to_cpu(logio->io_parameter[1]));
2651 else
2652 ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: "
2653 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2654 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2655 le16_to_cpu(logio->comp_status),
2656 le32_to_cpu(logio->io_parameter[0]),
2657 le32_to_cpu(logio->io_parameter[1]));
2658
2659 logio_done:
2660 sp->done(sp, 0);
2661 }
2662
2663 static void
2664 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
2665 {
2666 const char func[] = "TMF-IOCB";
2667 const char *type;
2668 fc_port_t *fcport;
2669 srb_t *sp;
2670 struct srb_iocb *iocb;
2671 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2672 u16 comp_status;
2673
2674 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
2675 if (!sp)
2676 return;
2677
2678 comp_status = le16_to_cpu(sts->comp_status);
2679 iocb = &sp->u.iocb_cmd;
2680 type = sp->name;
2681 fcport = sp->fcport;
2682 iocb->u.tmf.data = QLA_SUCCESS;
2683
2684 if (sts->entry_status) {
2685 ql_log(ql_log_warn, fcport->vha, 0x5038,
2686 "Async-%s error - hdl=%x entry-status(%x).\n",
2687 type, sp->handle, sts->entry_status);
2688 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2689 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
2690 ql_log(ql_log_warn, fcport->vha, 0x5039,
2691 "Async-%s error - hdl=%x completion status(%x).\n",
2692 type, sp->handle, comp_status);
2693 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2694 } else if ((le16_to_cpu(sts->scsi_status) &
2695 SS_RESPONSE_INFO_LEN_VALID)) {
2696 host_to_fcp_swap(sts->data, sizeof(sts->data));
2697 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2698 ql_log(ql_log_warn, fcport->vha, 0x503b,
2699 "Async-%s error - hdl=%x not enough response(%d).\n",
2700 type, sp->handle, le32_to_cpu(sts->rsp_data_len));
2701 } else if (sts->data[3]) {
2702 ql_log(ql_log_warn, fcport->vha, 0x503c,
2703 "Async-%s error - hdl=%x response(%x).\n",
2704 type, sp->handle, sts->data[3]);
2705 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2706 }
2707 }
2708
2709 switch (comp_status) {
2710 case CS_PORT_LOGGED_OUT:
2711 case CS_PORT_CONFIG_CHG:
2712 case CS_PORT_BUSY:
2713 case CS_INCOMPLETE:
2714 case CS_PORT_UNAVAILABLE:
2715 case CS_RESET:
2716 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2717 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2718 "Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n",
2719 fcport->d_id.b.domain, fcport->d_id.b.area,
2720 fcport->d_id.b.al_pa,
2721 port_state_str[FCS_ONLINE],
2722 comp_status);
2723
2724 qlt_schedule_sess_for_deletion(fcport);
2725 }
2726 break;
2727
2728 default:
2729 break;
2730 }
2731
2732 if (iocb->u.tmf.data != QLA_SUCCESS)
2733 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
2734 sts, sizeof(*sts));
2735
2736 sp->done(sp, 0);
2737 }
2738
2739 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2740 void *tsk, srb_t *sp)
2741 {
2742 fc_port_t *fcport;
2743 struct srb_iocb *iocb;
2744 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2745 uint16_t state_flags;
2746 struct nvmefc_fcp_req *fd;
2747 uint16_t ret = QLA_SUCCESS;
2748 __le16 comp_status = sts->comp_status;
2749 int logit = 0;
2750
2751 iocb = &sp->u.iocb_cmd;
2752 fcport = sp->fcport;
2753 iocb->u.nvme.comp_status = comp_status;
2754 state_flags = le16_to_cpu(sts->state_flags);
2755 fd = iocb->u.nvme.desc;
2756
2757 if (unlikely(iocb->u.nvme.aen_op))
2758 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
2759 else
2760 sp->qpair->cmd_completion_cnt++;
2761
2762 if (unlikely(comp_status != CS_COMPLETE))
2763 logit = 1;
2764
2765 fd->transferred_length = fd->payload_length -
2766 le32_to_cpu(sts->residual_len);
2767
2768 /*
2769 * State flags: bit 6 (SF_NVME_ERSP) and bit 0 (SF_FCP_RSP_DMA).
2770 * If bit 0 is set, bit 6 does not matter: in both cases the
2771 * response was already DMA'd to the host buffer. If both bits
2772 * are clear, this is the good-path case. If bit 6 is set and
2773 * bit 0 is clear, the response data must be copied from the
2774 * status IOCB into the response buffer.
2775 */
2776 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
2777 iocb->u.nvme.rsp_pyld_len = 0;
2778 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
2779 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
2780 /* Response already DMA'd to fd->rspaddr. */
2781 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2782 } else if ((state_flags & SF_FCP_RSP_DMA)) {
2783 /*
2784 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
2785 * as an error.
2786 */
2787 iocb->u.nvme.rsp_pyld_len = 0;
2788 fd->transferred_length = 0;
2789 ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
2790 "Unexpected values in NVMe_RSP IU.\n");
2791 logit = 1;
2792 } else if (state_flags & SF_NVME_ERSP) {
2793 uint32_t *inbuf, *outbuf;
2794 uint16_t iter;
2795
2796 inbuf = (uint32_t *)&sts->nvme_ersp_data;
2797 outbuf = (uint32_t *)fd->rspaddr;
2798 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2799 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
2800 sizeof(struct nvme_fc_ersp_iu))) {
2801 if (ql_mask_match(ql_dbg_io)) {
2802 WARN_ONCE(1, "Unexpected response payload length %u.\n",
2803 le16_to_cpu(iocb->u.nvme.rsp_pyld_len));
2804 ql_log(ql_log_warn, fcport->vha, 0x5100,
2805 "Unexpected response payload length %u.\n",
2806 le16_to_cpu(iocb->u.nvme.rsp_pyld_len));
2807 }
2808 iocb->u.nvme.rsp_pyld_len =
2809 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
2810 }
2811 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
2812 for (; iter; iter--)
2813 *outbuf++ = swab32(*inbuf++);
2814 }
2815
2816 if (state_flags & SF_NVME_ERSP) {
2817 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
2818 u32 tgt_xfer_len;
2819
2820 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
2821 if (fd->transferred_length != tgt_xfer_len) {
2822 ql_log(ql_log_warn, fcport->vha, 0x3079,
2823 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
2824 tgt_xfer_len, fd->transferred_length);
2825 logit = 1;
2826 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
2827 /*
2828 * Do not log if this is just an underflow and there
2829 * is no data loss.
2830 */
2831 logit = 0;
2832 }
2833 }
2834
2835 if (unlikely(logit))
2836 ql_dbg(ql_dbg_io, fcport->vha, 0x5060,
2837 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
2838 sp->name, sp->handle, le16_to_cpu(comp_status),
2839 fd->transferred_length, le32_to_cpu(sts->residual_len),
2840 le16_to_cpu(sts->ox_id));
2841
2842 /*
2843 * On a transport error, report failure (the HBA rejects the
2844 * request); otherwise the transport layer will handle it.
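*
* Concretely, in the switch below: CS_RESET, CS_PORT_UNAVAILABLE and
* CS_PORT_LOGGED_OUT mark the port as resetting (scheduling session
* deletion if it was online) and fall through to CS_ABORTED and
* CS_PORT_BUSY, which zero the transferred length and return
* QLA_ABORTED; CS_DATA_UNDERRUN completes normally; anything else
* returns QLA_FUNCTION_FAILED.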
2845 */ 2846 switch (le16_to_cpu(comp_status)) { 2847 case CS_COMPLETE: 2848 break; 2849 2850 case CS_RESET: 2851 case CS_PORT_UNAVAILABLE: 2852 case CS_PORT_LOGGED_OUT: 2853 fcport->nvme_flag |= NVME_FLAG_RESETTING; 2854 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2855 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 2856 "Port to be marked lost on fcport=%06x, current " 2857 "port state= %s comp_status %x.\n", 2858 fcport->d_id.b24, port_state_str[FCS_ONLINE], 2859 comp_status); 2860 2861 qlt_schedule_sess_for_deletion(fcport); 2862 } 2863 fallthrough; 2864 case CS_ABORTED: 2865 case CS_PORT_BUSY: 2866 fd->transferred_length = 0; 2867 iocb->u.nvme.rsp_pyld_len = 0; 2868 ret = QLA_ABORTED; 2869 break; 2870 case CS_DATA_UNDERRUN: 2871 break; 2872 default: 2873 ret = QLA_FUNCTION_FAILED; 2874 break; 2875 } 2876 sp->done(sp, ret); 2877 } 2878 2879 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req, 2880 struct vp_ctrl_entry_24xx *vce) 2881 { 2882 const char func[] = "CTRLVP-IOCB"; 2883 srb_t *sp; 2884 int rval = QLA_SUCCESS; 2885 2886 sp = qla2x00_get_sp_from_handle(vha, func, req, vce); 2887 if (!sp) 2888 return; 2889 2890 if (vce->entry_status != 0) { 2891 ql_dbg(ql_dbg_vport, vha, 0x10c4, 2892 "%s: Failed to complete IOCB -- error status (%x)\n", 2893 sp->name, vce->entry_status); 2894 rval = QLA_FUNCTION_FAILED; 2895 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { 2896 ql_dbg(ql_dbg_vport, vha, 0x10c5, 2897 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n", 2898 sp->name, le16_to_cpu(vce->comp_status), 2899 le16_to_cpu(vce->vp_idx_failed)); 2900 rval = QLA_FUNCTION_FAILED; 2901 } else { 2902 ql_dbg(ql_dbg_vport, vha, 0x10c6, 2903 "Done %s.\n", __func__); 2904 } 2905 2906 sp->rc = rval; 2907 sp->done(sp, rval); 2908 } 2909 2910 /* Process a single response queue entry. */ 2911 static void qla2x00_process_response_entry(struct scsi_qla_host *vha, 2912 struct rsp_que *rsp, 2913 sts_entry_t *pkt) 2914 { 2915 sts21_entry_t *sts21_entry; 2916 sts22_entry_t *sts22_entry; 2917 uint16_t handle_cnt; 2918 uint16_t cnt; 2919 2920 switch (pkt->entry_type) { 2921 case STATUS_TYPE: 2922 qla2x00_status_entry(vha, rsp, pkt); 2923 break; 2924 case STATUS_TYPE_21: 2925 sts21_entry = (sts21_entry_t *)pkt; 2926 handle_cnt = sts21_entry->handle_count; 2927 for (cnt = 0; cnt < handle_cnt; cnt++) 2928 qla2x00_process_completed_request(vha, rsp->req, 2929 sts21_entry->handle[cnt]); 2930 break; 2931 case STATUS_TYPE_22: 2932 sts22_entry = (sts22_entry_t *)pkt; 2933 handle_cnt = sts22_entry->handle_count; 2934 for (cnt = 0; cnt < handle_cnt; cnt++) 2935 qla2x00_process_completed_request(vha, rsp->req, 2936 sts22_entry->handle[cnt]); 2937 break; 2938 case STATUS_CONT_TYPE: 2939 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2940 break; 2941 case MBX_IOCB_TYPE: 2942 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt); 2943 break; 2944 case CT_IOCB_TYPE: 2945 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2946 break; 2947 default: 2948 /* Type Not Supported. */ 2949 ql_log(ql_log_warn, vha, 0x504a, 2950 "Received unknown response pkt type %x entry status=%x.\n", 2951 pkt->entry_type, pkt->entry_status); 2952 break; 2953 } 2954 } 2955 2956 /** 2957 * qla2x00_process_response_queue() - Process response queue entries. 
* @rsp: response queue
2959 */
2960 void
2961 qla2x00_process_response_queue(struct rsp_que *rsp)
2962 {
2963 struct scsi_qla_host *vha;
2964 struct qla_hw_data *ha = rsp->hw;
2965 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2966 sts_entry_t *pkt;
2967
2968 vha = pci_get_drvdata(ha->pdev);
2969
2970 if (!vha->flags.online)
2971 return;
2972
2973 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2974 pkt = (sts_entry_t *)rsp->ring_ptr;
2975
2976 rsp->ring_index++;
2977 if (rsp->ring_index == rsp->length) {
2978 rsp->ring_index = 0;
2979 rsp->ring_ptr = rsp->ring;
2980 } else {
2981 rsp->ring_ptr++;
2982 }
2983
2984 if (pkt->entry_status != 0) {
2985 qla2x00_error_entry(vha, rsp, pkt);
2986 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2987 wmb();
2988 continue;
2989 }
2990
2991 qla2x00_process_response_entry(vha, rsp, pkt);
2992 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2993 wmb();
2994 }
2995
2996 /* Adjust ring index */
2997 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
2998 }
2999
3000 static inline void
3001 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
3002 uint32_t sense_len, struct rsp_que *rsp, int res)
3003 {
3004 struct scsi_qla_host *vha = sp->vha;
3005 struct scsi_cmnd *cp = GET_CMD_SP(sp);
3006 uint32_t track_sense_len;
3007
3008 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
3009 sense_len = SCSI_SENSE_BUFFERSIZE;
3010
3011 SET_CMD_SENSE_LEN(sp, sense_len);
3012 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
3013 track_sense_len = sense_len;
3014
3015 if (sense_len > par_sense_len)
3016 sense_len = par_sense_len;
3017
3018 memcpy(cp->sense_buffer, sense_data, sense_len);
3019
3020 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
3021 track_sense_len -= sense_len;
3022 SET_CMD_SENSE_LEN(sp, track_sense_len);
3023
3024 if (track_sense_len != 0) {
3025 rsp->status_srb = sp;
3026 cp->result = res;
3027 }
3028
3029 if (sense_len) {
3030 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
3031 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
3032 sp->vha->host_no, cp->device->id, cp->device->lun,
3033 cp);
3034 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
3035 cp->sense_buffer, sense_len);
3036 }
3037 }
3038
3039 /*
3040 * Checks the guard or meta-data for the type of error
3041 * detected by the HBA. In case of errors, we set the
3042 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
3043 * to indicate to the kernel that the HBA detected an error.
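*
* Layout, as unpacked below: bytes 12..19 of the status IOCB data
* hold the actual app tag, guard and ref tag reported by the HBA,
* and bytes 20..27 the expected values. Sectors whose reported app
* tag (and, for DIF Type 3, ref tag) carry the all-ones escape value
* are deliberately skipped: the command is completed with DID_OK and
* the residual is recomputed from the expected ref tag.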
3044 */ 3045 static inline int 3046 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 3047 { 3048 struct scsi_qla_host *vha = sp->vha; 3049 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 3050 uint8_t *ap = &sts24->data[12]; 3051 uint8_t *ep = &sts24->data[20]; 3052 uint32_t e_ref_tag, a_ref_tag; 3053 uint16_t e_app_tag, a_app_tag; 3054 uint16_t e_guard, a_guard; 3055 3056 /* 3057 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 3058 * would make guard field appear at offset 2 3059 */ 3060 a_guard = get_unaligned_le16(ap + 2); 3061 a_app_tag = get_unaligned_le16(ap + 0); 3062 a_ref_tag = get_unaligned_le32(ap + 4); 3063 e_guard = get_unaligned_le16(ep + 2); 3064 e_app_tag = get_unaligned_le16(ep + 0); 3065 e_ref_tag = get_unaligned_le32(ep + 4); 3066 3067 ql_dbg(ql_dbg_io, vha, 0x3023, 3068 "iocb(s) %p Returned STATUS.\n", sts24); 3069 3070 ql_dbg(ql_dbg_io, vha, 0x3024, 3071 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 3072 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 3073 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 3074 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 3075 a_app_tag, e_app_tag, a_guard, e_guard); 3076 3077 /* 3078 * Ignore sector if: 3079 * For type 3: ref & app tag is all 'f's 3080 * For type 0,1,2: app tag is all 'f's 3081 */ 3082 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) && 3083 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 || 3084 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) { 3085 uint32_t blocks_done, resid; 3086 sector_t lba_s = scsi_get_lba(cmd); 3087 3088 /* 2TB boundary case covered automatically with this */ 3089 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 3090 3091 resid = scsi_bufflen(cmd) - (blocks_done * 3092 cmd->device->sector_size); 3093 3094 scsi_set_resid(cmd, resid); 3095 cmd->result = DID_OK << 16; 3096 3097 /* Update protection tag */ 3098 if (scsi_prot_sg_count(cmd)) { 3099 uint32_t i, j = 0, k = 0, num_ent; 3100 struct scatterlist *sg; 3101 struct t10_pi_tuple *spt; 3102 3103 /* Patch the corresponding protection tags */ 3104 scsi_for_each_prot_sg(cmd, sg, 3105 scsi_prot_sg_count(cmd), i) { 3106 num_ent = sg_dma_len(sg) / 8; 3107 if (k + num_ent < blocks_done) { 3108 k += num_ent; 3109 continue; 3110 } 3111 j = blocks_done - k - 1; 3112 k = blocks_done; 3113 break; 3114 } 3115 3116 if (k != blocks_done) { 3117 ql_log(ql_log_warn, vha, 0x302f, 3118 "unexpected tag values tag:lba=%x:%llx)\n", 3119 e_ref_tag, (unsigned long long)lba_s); 3120 return 1; 3121 } 3122 3123 spt = page_address(sg_page(sg)) + sg->offset; 3124 spt += j; 3125 3126 spt->app_tag = T10_PI_APP_ESCAPE; 3127 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 3128 spt->ref_tag = T10_PI_REF_ESCAPE; 3129 } 3130 3131 return 0; 3132 } 3133 3134 /* check guard */ 3135 if (e_guard != a_guard) { 3136 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 3137 set_host_byte(cmd, DID_ABORT); 3138 return 1; 3139 } 3140 3141 /* check ref tag */ 3142 if (e_ref_tag != a_ref_tag) { 3143 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 3144 set_host_byte(cmd, DID_ABORT); 3145 return 1; 3146 } 3147 3148 /* check appl tag */ 3149 if (e_app_tag != a_app_tag) { 3150 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 3151 set_host_byte(cmd, DID_ABORT); 3152 return 1; 3153 } 3154 3155 return 1; 3156 } 3157 3158 static void 3159 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, 3160 struct req_que *req, uint32_t index) 3161 { 3162 struct qla_hw_data *ha = vha->hw; 3163 srb_t *sp; 3164 uint16_t comp_status; 3165 
uint16_t scsi_status; 3166 uint16_t thread_id; 3167 uint32_t rval = EXT_STATUS_OK; 3168 struct bsg_job *bsg_job = NULL; 3169 struct fc_bsg_request *bsg_request; 3170 struct fc_bsg_reply *bsg_reply; 3171 sts_entry_t *sts = pkt; 3172 struct sts_entry_24xx *sts24 = pkt; 3173 3174 /* Validate handle. */ 3175 if (index >= req->num_outstanding_cmds) { 3176 ql_log(ql_log_warn, vha, 0x70af, 3177 "Invalid SCSI completion handle 0x%x.\n", index); 3178 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3179 return; 3180 } 3181 3182 sp = req->outstanding_cmds[index]; 3183 if (!sp) { 3184 ql_log(ql_log_warn, vha, 0x70b0, 3185 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 3186 req->id, index); 3187 3188 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3189 return; 3190 } 3191 3192 /* Free outstanding command slot. */ 3193 req->outstanding_cmds[index] = NULL; 3194 bsg_job = sp->u.bsg_job; 3195 bsg_request = bsg_job->request; 3196 bsg_reply = bsg_job->reply; 3197 3198 if (IS_FWI2_CAPABLE(ha)) { 3199 comp_status = le16_to_cpu(sts24->comp_status); 3200 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 3201 } else { 3202 comp_status = le16_to_cpu(sts->comp_status); 3203 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 3204 } 3205 3206 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 3207 switch (comp_status) { 3208 case CS_COMPLETE: 3209 if (scsi_status == 0) { 3210 bsg_reply->reply_payload_rcv_len = 3211 bsg_job->reply_payload.payload_len; 3212 vha->qla_stats.input_bytes += 3213 bsg_reply->reply_payload_rcv_len; 3214 vha->qla_stats.input_requests++; 3215 rval = EXT_STATUS_OK; 3216 } 3217 goto done; 3218 3219 case CS_DATA_OVERRUN: 3220 ql_dbg(ql_dbg_user, vha, 0x70b1, 3221 "Command completed with data overrun thread_id=%d\n", 3222 thread_id); 3223 rval = EXT_STATUS_DATA_OVERRUN; 3224 break; 3225 3226 case CS_DATA_UNDERRUN: 3227 ql_dbg(ql_dbg_user, vha, 0x70b2, 3228 "Command completed with data underrun thread_id=%d\n", 3229 thread_id); 3230 rval = EXT_STATUS_DATA_UNDERRUN; 3231 break; 3232 case CS_BIDIR_RD_OVERRUN: 3233 ql_dbg(ql_dbg_user, vha, 0x70b3, 3234 "Command completed with read data overrun thread_id=%d\n", 3235 thread_id); 3236 rval = EXT_STATUS_DATA_OVERRUN; 3237 break; 3238 3239 case CS_BIDIR_RD_WR_OVERRUN: 3240 ql_dbg(ql_dbg_user, vha, 0x70b4, 3241 "Command completed with read and write data overrun " 3242 "thread_id=%d\n", thread_id); 3243 rval = EXT_STATUS_DATA_OVERRUN; 3244 break; 3245 3246 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: 3247 ql_dbg(ql_dbg_user, vha, 0x70b5, 3248 "Command completed with read data over and write data " 3249 "underrun thread_id=%d\n", thread_id); 3250 rval = EXT_STATUS_DATA_OVERRUN; 3251 break; 3252 3253 case CS_BIDIR_RD_UNDERRUN: 3254 ql_dbg(ql_dbg_user, vha, 0x70b6, 3255 "Command completed with read data underrun " 3256 "thread_id=%d\n", thread_id); 3257 rval = EXT_STATUS_DATA_UNDERRUN; 3258 break; 3259 3260 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: 3261 ql_dbg(ql_dbg_user, vha, 0x70b7, 3262 "Command completed with read data under and write data " 3263 "overrun thread_id=%d\n", thread_id); 3264 rval = EXT_STATUS_DATA_UNDERRUN; 3265 break; 3266 3267 case CS_BIDIR_RD_WR_UNDERRUN: 3268 ql_dbg(ql_dbg_user, vha, 0x70b8, 3269 "Command completed with read and write data underrun " 3270 "thread_id=%d\n", thread_id); 3271 rval = EXT_STATUS_DATA_UNDERRUN; 3272 break; 3273 3274 case CS_BIDIR_DMA: 3275 ql_dbg(ql_dbg_user, vha, 0x70b9, 3276 "Command completed with data DMA error thread_id=%d\n", 3277 thread_id); 3278 rval = EXT_STATUS_DMA_ERR; 3279 break; 3280 3281 
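/*
* Note: all of the bidirectional over/underrun variants above
* collapse to EXT_STATUS_DATA_OVERRUN or EXT_STATUS_DATA_UNDERRUN in
* the vendor reply; the individual cases differ only in what they
* log.
*/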
case CS_TIMEOUT: 3282 ql_dbg(ql_dbg_user, vha, 0x70ba, 3283 "Command completed with timeout thread_id=%d\n", 3284 thread_id); 3285 rval = EXT_STATUS_TIMEOUT; 3286 break; 3287 default: 3288 ql_dbg(ql_dbg_user, vha, 0x70bb, 3289 "Command completed with completion status=0x%x " 3290 "thread_id=%d\n", comp_status, thread_id); 3291 rval = EXT_STATUS_ERR; 3292 break; 3293 } 3294 bsg_reply->reply_payload_rcv_len = 0; 3295 3296 done: 3297 /* Return the vendor specific reply to API */ 3298 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 3299 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 3300 /* Always return DID_OK, bsg will send the vendor specific response 3301 * in this case only */ 3302 sp->done(sp, DID_OK << 16); 3303 3304 } 3305 3306 /** 3307 * qla2x00_status_entry() - Process a Status IOCB entry. 3308 * @vha: SCSI driver HA context 3309 * @rsp: response queue 3310 * @pkt: Entry pointer 3311 */ 3312 static void 3313 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 3314 { 3315 srb_t *sp; 3316 fc_port_t *fcport; 3317 struct scsi_cmnd *cp; 3318 sts_entry_t *sts = pkt; 3319 struct sts_entry_24xx *sts24 = pkt; 3320 uint16_t comp_status; 3321 uint16_t scsi_status; 3322 uint16_t ox_id; 3323 uint8_t lscsi_status; 3324 int32_t resid; 3325 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 3326 fw_resid_len; 3327 uint8_t *rsp_info, *sense_data; 3328 struct qla_hw_data *ha = vha->hw; 3329 uint32_t handle; 3330 uint16_t que; 3331 struct req_que *req; 3332 int logit = 1; 3333 int res = 0; 3334 uint16_t state_flags = 0; 3335 uint16_t sts_qual = 0; 3336 3337 if (IS_FWI2_CAPABLE(ha)) { 3338 comp_status = le16_to_cpu(sts24->comp_status); 3339 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 3340 state_flags = le16_to_cpu(sts24->state_flags); 3341 } else { 3342 comp_status = le16_to_cpu(sts->comp_status); 3343 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 3344 } 3345 handle = (uint32_t) LSW(sts->handle); 3346 que = MSW(sts->handle); 3347 req = ha->req_q_map[que]; 3348 3349 /* Check for invalid queue pointer */ 3350 if (req == NULL || 3351 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 3352 ql_dbg(ql_dbg_io, vha, 0x3059, 3353 "Invalid status handle (0x%x): Bad req pointer. req=%p, " 3354 "que=%u.\n", sts->handle, req, que); 3355 return; 3356 } 3357 3358 /* Validate handle. */ 3359 if (handle < req->num_outstanding_cmds) { 3360 sp = req->outstanding_cmds[handle]; 3361 if (!sp) { 3362 ql_dbg(ql_dbg_io, vha, 0x3075, 3363 "%s(%ld): Already returned command for status handle (0x%x).\n", 3364 __func__, vha->host_no, sts->handle); 3365 return; 3366 } 3367 } else { 3368 ql_dbg(ql_dbg_io, vha, 0x3017, 3369 "Invalid status handle, out of range (0x%x).\n", 3370 sts->handle); 3371 3372 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 3373 if (IS_P3P_TYPE(ha)) 3374 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 3375 else 3376 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3377 qla2xxx_wake_dpc(vha); 3378 } 3379 return; 3380 } 3381 qla_put_fw_resources(sp->qpair, &sp->iores); 3382 3383 if (sp->cmd_type != TYPE_SRB) { 3384 req->outstanding_cmds[handle] = NULL; 3385 ql_dbg(ql_dbg_io, vha, 0x3015, 3386 "Unknown sp->cmd_type %x %p).\n", 3387 sp->cmd_type, sp); 3388 return; 3389 } 3390 3391 /* NVME completion. 
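* NVMe commands are routed to qla24xx_nvme_iocb_entry(); the
* outstanding-command slot is released first because that path
* completes the SRB itself rather than through the generic
* fast-path handling below.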
*/ 3392 if (sp->type == SRB_NVME_CMD) { 3393 req->outstanding_cmds[handle] = NULL; 3394 qla24xx_nvme_iocb_entry(vha, req, pkt, sp); 3395 return; 3396 } 3397 3398 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 3399 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 3400 return; 3401 } 3402 3403 /* Task Management completion. */ 3404 if (sp->type == SRB_TM_CMD) { 3405 qla24xx_tm_iocb_entry(vha, req, pkt); 3406 return; 3407 } 3408 3409 /* Fast path completion. */ 3410 qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24); 3411 sp->qpair->cmd_completion_cnt++; 3412 3413 if (comp_status == CS_COMPLETE && scsi_status == 0) { 3414 qla2x00_process_completed_request(vha, req, handle); 3415 3416 return; 3417 } 3418 3419 cp = GET_CMD_SP(sp); 3420 if (cp == NULL) { 3421 ql_dbg(ql_dbg_io, vha, 0x3018, 3422 "Command already returned (0x%x/%p).\n", 3423 sts->handle, sp); 3424 3425 req->outstanding_cmds[handle] = NULL; 3426 return; 3427 } 3428 3429 lscsi_status = scsi_status & STATUS_MASK; 3430 3431 fcport = sp->fcport; 3432 3433 ox_id = 0; 3434 sense_len = par_sense_len = rsp_info_len = resid_len = 3435 fw_resid_len = 0; 3436 if (IS_FWI2_CAPABLE(ha)) { 3437 if (scsi_status & SS_SENSE_LEN_VALID) 3438 sense_len = le32_to_cpu(sts24->sense_len); 3439 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 3440 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 3441 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 3442 resid_len = le32_to_cpu(sts24->rsp_residual_count); 3443 if (comp_status == CS_DATA_UNDERRUN) 3444 fw_resid_len = le32_to_cpu(sts24->residual_len); 3445 rsp_info = sts24->data; 3446 sense_data = sts24->data; 3447 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 3448 ox_id = le16_to_cpu(sts24->ox_id); 3449 par_sense_len = sizeof(sts24->data); 3450 sts_qual = le16_to_cpu(sts24->status_qualifier); 3451 } else { 3452 if (scsi_status & SS_SENSE_LEN_VALID) 3453 sense_len = le16_to_cpu(sts->req_sense_length); 3454 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 3455 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 3456 resid_len = le32_to_cpu(sts->residual_length); 3457 rsp_info = sts->rsp_info; 3458 sense_data = sts->req_sense_data; 3459 par_sense_len = sizeof(sts->req_sense_data); 3460 } 3461 3462 /* Check for any FCP transport errors. */ 3463 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 3464 /* Sense data lies beyond any FCP RESPONSE data. */ 3465 if (IS_FWI2_CAPABLE(ha)) { 3466 sense_data += rsp_info_len; 3467 par_sense_len -= rsp_info_len; 3468 } 3469 if (rsp_info_len > 3 && rsp_info[3]) { 3470 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 3471 "FCP I/O protocol failure (0x%x/0x%x).\n", 3472 rsp_info_len, rsp_info[3]); 3473 3474 res = DID_BUS_BUSY << 16; 3475 goto out; 3476 } 3477 } 3478 3479 /* Check for overrun. */ 3480 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 3481 scsi_status & SS_RESIDUAL_OVER) 3482 comp_status = CS_DATA_OVERRUN; 3483 3484 /* 3485 * Check retry_delay_timer value if we receive a busy or 3486 * queue full. 
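* The status qualifier is understood here to carry the FCP-4 retry
* delay timer value reported by the target;
* qla2x00_set_retry_delay_timestamp() records it on the fcport so
* new commands can be deferred until the delay expires.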
3487 */
3488 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
3489 lscsi_status == SAM_STAT_BUSY))
3490 qla2x00_set_retry_delay_timestamp(fcport, sts_qual);
3491
3492 /*
3493 * Generate the Linux status code based on the host and SCSI status.
3494 */
3495 switch (comp_status) {
3496 case CS_COMPLETE:
3497 case CS_QUEUE_FULL:
3498 if (scsi_status == 0) {
3499 res = DID_OK << 16;
3500 break;
3501 }
3502 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
3503 resid = resid_len;
3504 scsi_set_resid(cp, resid);
3505
3506 if (!lscsi_status &&
3507 ((unsigned)(scsi_bufflen(cp) - resid) <
3508 cp->underflow)) {
3509 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
3510 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3511 resid, scsi_bufflen(cp));
3512
3513 res = DID_ERROR << 16;
3514 break;
3515 }
3516 }
3517 res = DID_OK << 16 | lscsi_status;
3518
3519 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3520 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
3521 "QUEUE FULL detected.\n");
3522 break;
3523 }
3524 logit = 0;
3525 if (lscsi_status != SS_CHECK_CONDITION)
3526 break;
3527
3528 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3529 if (!(scsi_status & SS_SENSE_LEN_VALID))
3530 break;
3531
3532 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
3533 rsp, res);
3534 break;
3535
3536 case CS_DATA_UNDERRUN:
3537 /* Use F/W calculated residual length. */
3538 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
3539 scsi_set_resid(cp, resid);
3540 if (scsi_status & SS_RESIDUAL_UNDER) {
3541 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
3542 ql_log(ql_log_warn, fcport->vha, 0x301d,
3543 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3544 resid, scsi_bufflen(cp));
3545
3546 res = DID_ERROR << 16 | lscsi_status;
3547 goto check_scsi_status;
3548 }
3549
3550 if (!lscsi_status &&
3551 ((unsigned)(scsi_bufflen(cp) - resid) <
3552 cp->underflow)) {
3553 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
3554 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3555 resid, scsi_bufflen(cp));
3556
3557 res = DID_ERROR << 16;
3558 break;
3559 }
3560 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
3561 lscsi_status != SAM_STAT_BUSY) {
3562 /*
3563 * SCSI statuses of TASK SET FULL and BUSY are treated
3564 * as "task not completed"; anything else here means
3565 * dropped frames.
3566 */
3567 ql_log(ql_log_warn, fcport->vha, 0x301f,
3568 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3569 resid, scsi_bufflen(cp));
3570
3571 vha->interface_err_cnt++;
3572
3573 res = DID_ERROR << 16 | lscsi_status;
3574 goto check_scsi_status;
3575 } else {
3576 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
3577 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
3578 scsi_status, lscsi_status);
3579 }
3580
3581 res = DID_OK << 16 | lscsi_status;
3582 logit = 0;
3583
3584 check_scsi_status:
3585 /*
3586 * Check to see if the SCSI status is non-zero; if so, report the
3587 * SCSI status.
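* CHECK CONDITION is the only status needing extra work: the sense
* buffer is cleared and, when SS_SENSE_LEN_VALID is set,
* qla2x00_handle_sense() copies in the sense bytes, which may
* continue into status continuation entries.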
3588 */ 3589 if (lscsi_status != 0) { 3590 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 3591 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 3592 "QUEUE FULL detected.\n"); 3593 logit = 1; 3594 break; 3595 } 3596 if (lscsi_status != SS_CHECK_CONDITION) 3597 break; 3598 3599 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 3600 if (!(scsi_status & SS_SENSE_LEN_VALID)) 3601 break; 3602 3603 qla2x00_handle_sense(sp, sense_data, par_sense_len, 3604 sense_len, rsp, res); 3605 } 3606 break; 3607 3608 case CS_PORT_LOGGED_OUT: 3609 case CS_PORT_CONFIG_CHG: 3610 case CS_PORT_BUSY: 3611 case CS_INCOMPLETE: 3612 case CS_PORT_UNAVAILABLE: 3613 case CS_TIMEOUT: 3614 case CS_RESET: 3615 case CS_EDIF_INV_REQ: 3616 3617 /* 3618 * We are going to have the fc class block the rport 3619 * while we try to recover so instruct the mid layer 3620 * to requeue until the class decides how to handle this. 3621 */ 3622 res = DID_TRANSPORT_DISRUPTED << 16; 3623 3624 if (comp_status == CS_TIMEOUT) { 3625 if (IS_FWI2_CAPABLE(ha)) 3626 break; 3627 else if ((le16_to_cpu(sts->status_flags) & 3628 SF_LOGOUT_SENT) == 0) 3629 break; 3630 } 3631 3632 if (atomic_read(&fcport->state) == FCS_ONLINE) { 3633 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 3634 "Port to be marked lost on fcport=%02x%02x%02x, current " 3635 "port state= %s comp_status %x.\n", fcport->d_id.b.domain, 3636 fcport->d_id.b.area, fcport->d_id.b.al_pa, 3637 port_state_str[FCS_ONLINE], 3638 comp_status); 3639 3640 qlt_schedule_sess_for_deletion(fcport); 3641 } 3642 3643 break; 3644 3645 case CS_ABORTED: 3646 res = DID_RESET << 16; 3647 break; 3648 3649 case CS_DIF_ERROR: 3650 logit = qla2x00_handle_dif_error(sp, sts24); 3651 res = cp->result; 3652 break; 3653 3654 case CS_TRANSPORT: 3655 res = DID_ERROR << 16; 3656 vha->hw_err_cnt++; 3657 3658 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 3659 break; 3660 3661 if (state_flags & BIT_4) 3662 scmd_printk(KERN_WARNING, cp, 3663 "Unsupported device '%s' found.\n", 3664 cp->device->vendor); 3665 break; 3666 3667 case CS_DMA: 3668 ql_log(ql_log_info, fcport->vha, 0x3022, 3669 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3670 comp_status, scsi_status, res, vha->host_no, 3671 cp->device->id, cp->device->lun, fcport->d_id.b24, 3672 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3673 resid_len, fw_resid_len, sp, cp); 3674 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee, 3675 pkt, sizeof(*sts24)); 3676 res = DID_ERROR << 16; 3677 vha->hw_err_cnt++; 3678 break; 3679 default: 3680 res = DID_ERROR << 16; 3681 break; 3682 } 3683 3684 out: 3685 if (logit) 3686 ql_dbg(ql_dbg_io, fcport->vha, 0x3022, 3687 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3688 comp_status, scsi_status, res, vha->host_no, 3689 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 3690 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 3691 cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3692 resid_len, fw_resid_len, sp, cp); 3693 3694 if (rsp->status_srb == NULL) 3695 sp->done(sp, res); 3696 3697 /* for io's, clearing of outstanding_cmds[handle] means scsi_done was called */ 3698 req->outstanding_cmds[handle] = NULL; 3699 } 3700 3701 /** 3702 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 3703 * @rsp: response queue 3704 * @pkt: Entry pointer 3705 * 3706 * Extended sense data. 
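*
* Sense bytes that did not fit in the original status IOCB arrive
* here in sizeof(pkt->data) sized chunks; rsp->status_srb tracks the
* command being filled in, and the SRB is completed only once the
* remaining sense length reaches zero.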
3707 */ 3708 static void 3709 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 3710 { 3711 uint8_t sense_sz = 0; 3712 struct qla_hw_data *ha = rsp->hw; 3713 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 3714 srb_t *sp = rsp->status_srb; 3715 struct scsi_cmnd *cp; 3716 uint32_t sense_len; 3717 uint8_t *sense_ptr; 3718 3719 if (!sp || !GET_CMD_SENSE_LEN(sp)) 3720 return; 3721 3722 sense_len = GET_CMD_SENSE_LEN(sp); 3723 sense_ptr = GET_CMD_SENSE_PTR(sp); 3724 3725 cp = GET_CMD_SP(sp); 3726 if (cp == NULL) { 3727 ql_log(ql_log_warn, vha, 0x3025, 3728 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 3729 3730 rsp->status_srb = NULL; 3731 return; 3732 } 3733 3734 if (sense_len > sizeof(pkt->data)) 3735 sense_sz = sizeof(pkt->data); 3736 else 3737 sense_sz = sense_len; 3738 3739 /* Move sense data. */ 3740 if (IS_FWI2_CAPABLE(ha)) 3741 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 3742 memcpy(sense_ptr, pkt->data, sense_sz); 3743 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 3744 sense_ptr, sense_sz); 3745 3746 sense_len -= sense_sz; 3747 sense_ptr += sense_sz; 3748 3749 SET_CMD_SENSE_PTR(sp, sense_ptr); 3750 SET_CMD_SENSE_LEN(sp, sense_len); 3751 3752 /* Place command on done queue. */ 3753 if (sense_len == 0) { 3754 rsp->status_srb = NULL; 3755 sp->done(sp, cp->result); 3756 } 3757 } 3758 3759 /** 3760 * qla2x00_error_entry() - Process an error entry. 3761 * @vha: SCSI driver HA context 3762 * @rsp: response queue 3763 * @pkt: Entry pointer 3764 * return : 1=allow further error analysis. 0=no additional error analysis. 3765 */ 3766 static int 3767 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 3768 { 3769 srb_t *sp; 3770 struct qla_hw_data *ha = vha->hw; 3771 const char func[] = "ERROR-IOCB"; 3772 uint16_t que = MSW(pkt->handle); 3773 struct req_que *req = NULL; 3774 int res = DID_ERROR << 16; 3775 u16 index; 3776 3777 ql_dbg(ql_dbg_async, vha, 0x502a, 3778 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n", 3779 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id); 3780 3781 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 3782 goto fatal; 3783 3784 req = ha->req_q_map[que]; 3785 3786 if (pkt->entry_status & RF_BUSY) 3787 res = DID_BUS_BUSY << 16; 3788 3789 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE) 3790 return 0; 3791 3792 switch (pkt->entry_type) { 3793 case NOTIFY_ACK_TYPE: 3794 case STATUS_CONT_TYPE: 3795 case LOGINOUT_PORT_IOCB_TYPE: 3796 case CT_IOCB_TYPE: 3797 case ELS_IOCB_TYPE: 3798 case ABORT_IOCB_TYPE: 3799 case MBX_IOCB_TYPE: 3800 default: 3801 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3802 if (sp) { 3803 sp->done(sp, res); 3804 return 0; 3805 } 3806 break; 3807 3808 case SA_UPDATE_IOCB_TYPE: 3809 case ABTS_RESP_24XX: 3810 case CTIO_TYPE7: 3811 case CTIO_CRC2: 3812 return 1; 3813 case STATUS_TYPE: 3814 sp = qla_get_sp_from_handle(vha, func, req, pkt, &index); 3815 if (sp) { 3816 sp->done(sp, res); 3817 req->outstanding_cmds[index] = NULL; 3818 return 0; 3819 } 3820 break; 3821 } 3822 fatal: 3823 ql_log(ql_log_warn, vha, 0x5030, 3824 "Error entry - invalid handle/queue (%04x).\n", que); 3825 return 0; 3826 } 3827 3828 /** 3829 * qla24xx_mbx_completion() - Process mailbox command completions. 
/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	__le16 __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = &reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = rd_reg_word(wptr);

		mboxes >>= 1;
		wptr++;
	}
}

static void
qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct abort_entry_24xx *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	srb_t *orig_sp = NULL;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
	abt->u.abt.comp_status = pkt->comp_status;
	orig_sp = sp->cmd_sp;
	/* Need to pass original sp */
	if (orig_sp)
		qla_nvme_abort_process_comp_status(pkt, orig_sp);

	sp->done(sp, 0);
}

void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
	struct pt_ls4_request *pkt, struct req_que *req)
{
	srb_t *sp;
	const char func[] = "LS4_IOCB";
	uint16_t comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	comp_status = le16_to_cpu(pkt->status);
	sp->done(sp, comp_status);
}

/**
 * qla_chk_cont_iocb_avail - check that all continuation IOCBs have arrived
 * before IOCB processing can start.
 * @vha: host adapter pointer
 * @rsp: response queue
 * @pkt: head IOCB describing how many continuation IOCBs follow
 * @rsp_q_in: response queue in pointer
 *
 * Return: 0 if all IOCBs have arrived, non-zero if they have not.
 */
static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
{
	int start_pkt_ring_index;
	u32 iocb_cnt = 0;
	int rc = 0;

	if (pkt->entry_count == 1)
		return rc;
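	/*
	 * A multi-IOCB packet is usable only once every continuation
	 * entry is on the ring.  Compute how many entries lie between
	 * the head packet and the firmware's in pointer, accounting
	 * for ring wrap, and compare against the advertised entry_count.
	 */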
	/* ring_index was pre-incremented; set it back to the current pkt. */
	if (rsp->ring_index == 0)
		start_pkt_ring_index = rsp->length - 1;
	else
		start_pkt_ring_index = rsp->ring_index - 1;

	if (rsp_q_in < start_pkt_ring_index)
		/* q in ptr is wrapped */
		iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
	else
		iocb_cnt = rsp_q_in - start_pkt_ring_index;

	if (iocb_cnt < pkt->entry_count)
		rc = -EIO;

	ql_dbg(ql_dbg_init, vha, 0x5091,
	    "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
	    __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);

	return rc;
}

static void qla_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct mrk_entry_24xx *pkt)
{
	const char func[] = "MRK-IOCB";
	srb_t *sp;
	int res = QLA_SUCCESS;

	if (!IS_FWI2_CAPABLE(vha->hw))
		return;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->entry_status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8025, "marker failure.\n");
		res = QLA_COMMAND_ERROR;
	}
	sp->u.iocb_cmd.u.tmf.data = res;
	sp->done(sp, res);
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct purex_entry_24xx *purex_entry;
	struct purex_item *pure_item;
	struct pt_ls4_rx_unsol *p;
	u16 rsp_in = 0, cur_ring_index;
	int is_shadow_hba;

	if (!ha->flags.fw_started)
		return;

	if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) {
		rsp->qpair->rcv_intr = 1;

		if (!rsp->qpair->cpu_mapped)
			qla_cpu_update(rsp->qpair, raw_smp_processor_id());
	}
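/*
 * On adapters with shadow registers the firmware DMAs the response
 * queue in pointer into host memory, so it can be read without an
 * MMIO access; __update_rsp_in() picks whichever source this HBA
 * supports.
 */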
#define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in)			\
	do {								\
		_rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr :		\
			rd_reg_dword_relaxed((_rsp)->rsp_q_in);		\
	} while (0)

	is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);

	__update_rsp_in(is_shadow_hba, rsp, rsp_in);

	while (rsp->ring_index != rsp_in &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
		cur_ring_index = rsp->ring_index;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
				goto process_err;

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
process_err:

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			if (qla_ini_mode_enabled(vha)) {
				pure_item = qla24xx_copy_std_pkt(vha, pkt);
				if (!pure_item)
					break;
				qla24xx_queue_purex_item(vha, pure_item,
				    qla24xx_process_abts);
				break;
			}
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
			    IS_QLA28XX(ha)) {
				/* ensure that the ATIO queue is empty */
				qlt_handle_abts_recv(vha, rsp,
				    (response_t *)pkt);
				break;
			} else {
				qlt_24xx_process_atio_queue(vha, 1);
			}
			fallthrough;
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case CTIO_CRC2:
			qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
			break;
		case PT_LS4_REQUEST:
			qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
			    rsp->req);
			break;
		case NOTIFY_ACK_TYPE:
			if (pkt->handle == QLA_TGT_SKIP_HANDLE)
				qlt_response_pkt_all_vps(vha, rsp,
				    (response_t *)pkt);
			else
				qla24xxx_nack_iocb_entry(vha, rsp->req,
				    (struct nack_to_isp *)pkt);
			break;
		case MARKER_TYPE:
			qla_marker_iocb_entry(vha, rsp->req,
			    (struct mrk_entry_24xx *)pkt);
			break;
		case ABORT_IOCB_TYPE:
			qla24xx_abort_iocb_entry(vha, rsp->req,
			    (struct abort_entry_24xx *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla24xx_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_24xx_entry *)pkt);
			break;
		case VP_CTRL_IOCB_TYPE:
			qla_ctrlvp_completed(vha, rsp->req,
			    (struct vp_ctrl_entry_24xx *)pkt);
			break;
		case PUREX_IOCB_TYPE:
			purex_entry = (void *)pkt;
			switch (purex_entry->els_frame_payload[3]) {
			case ELS_RDP:
				pure_item = qla24xx_copy_std_pkt(vha, pkt);
				if (!pure_item)
					break;
				qla24xx_queue_purex_item(vha, pure_item,
				    qla24xx_process_purex_rdp);
				break;
			case ELS_FPIN:
				if (!vha->hw->flags.scm_enabled) {
					ql_log(ql_log_warn, vha, 0x5094,
					    "SCM not active for this port\n");
					break;
				}
				pure_item = qla27xx_copy_fpin_pkt(vha,
				    (void **)&pkt, &rsp);
				__update_rsp_in(is_shadow_hba, rsp, rsp_in);
				if (!pure_item)
					break;
				qla24xx_queue_purex_item(vha, pure_item,
				    qla27xx_process_purex_fpin);
				break;
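			/*
			 * Authentication ELS frames can span several
			 * IOCBs.  If the continuation entries have not
			 * all arrived yet, rewind the ring to the head
			 * IOCB and retry on the next interrupt.
			 */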
			case ELS_AUTH_ELS:
				if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
					/*
					 * ring_ptr and ring_index were
					 * pre-incremented above. Reset them
					 * back to current. Wait for next
					 * interrupt with all IOCBs to arrive
					 * and re-process.
					 */
					rsp->ring_ptr = (response_t *)pkt;
					rsp->ring_index = cur_ring_index;

					ql_dbg(ql_dbg_init, vha, 0x5091,
					    "Defer processing ELS opcode %#x...\n",
					    purex_entry->els_frame_payload[3]);
					return;
				}
				qla24xx_auth_els(vha, (void **)&pkt, &rsp);
				break;
			default:
				ql_log(ql_log_warn, vha, 0x509c,
				    "Discarding ELS Request opcode 0x%x\n",
				    purex_entry->els_frame_payload[3]);
			}
			break;
		case SA_UPDATE_IOCB_TYPE:
			qla28xx_sa_update_iocb_entry(vha, rsp->req,
			    (struct sa_update_28xx *)pkt);
			break;
		case PT_LS4_UNSOL:
			p = (void *)pkt;
			if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
				rsp->ring_ptr = (response_t *)pkt;
				rsp->ring_index = cur_ring_index;

				ql_dbg(ql_dbg_init, vha, 0x2124,
				    "Defer processing UNSOL LS req opcode %#x...\n",
				    p->payload[0]);
				return;
			}
			qla2xxx_process_purls_iocb((void **)&pkt, &rsp);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type 0x%x entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;

		wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
	} else {
		wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
	}
}

static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return;

	rval = QLA_SUCCESS;
	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			wrt_reg_dword(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	wrt_reg_dword(&reg->iobase_window, 0x0003);
	for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			wrt_reg_dword(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	wrt_reg_dword(&reg->iobase_window, 0x0000);
	rd_reg_dword(&reg->iobase_window);
}
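/*
 * The hard-IRQ handlers below share a common shape: read the host
 * status register, dump firmware state if the RISC is paused, then
 * dispatch on the low byte of the status word (mailbox completion,
 * async event, response/ATIO queue update) until the interrupt is
 * drained.
 */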
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[8];
	struct rsp_que *rsp;
	unsigned long	flags;
	bool process_atio = false;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = rd_reg_dword(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_dword(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = rd_reg_word(&reg->mailbox1);
			mb[2] = rd_reg_word(&reg->mailbox2);
			mb[3] = rd_reg_word(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE_27XX:
		case INTR_ATIO_QUE_UPDATE:
			process_atio = true;
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			process_atio = true;
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
		rd_reg_dword_relaxed(&reg->hccr);
		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
			ndelay(3500);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (process_atio) {
		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
		qlt_24xx_process_atio_queue(vha, 0);
		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
	}

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
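	/*
	 * With the MSI-X handshake enabled the RISC interrupt must be
	 * explicitly acknowledged through HCCR; the relaxed read-back
	 * flushes the posted write before the lock is dropped.
	 */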
	if (!ha->flags.disable_msix_handshake) {
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
		rd_reg_dword_relaxed(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que	*rsp;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[8];
	unsigned long flags;
	bool process_atio = false;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = rd_reg_dword(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_dword(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);
			vha->hw_err_cnt++;

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = rd_reg_word(&reg->mailbox1);
			mb[2] = rd_reg_word(&reg->mailbox2);
			mb[3] = rd_reg_word(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE_27XX:
		case INTR_ATIO_QUE_UPDATE:
			process_atio = true;
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			process_atio = true;
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (process_atio) {
		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
		qlt_24xx_process_atio_queue(vha, 0);
		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
	}

	return IRQ_HANDLED;
}

irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	queue_work(ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}
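/*
 * Per-qpair response processing is deferred to the qpair's work item
 * (see qla2xxx_msix_rsp_q() above), keeping the hard-IRQ path short;
 * only the base vectors are serviced directly in interrupt context.
 */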
/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static const struct qla_init_msix_entry msix_entries[] = {
	{ "default", qla24xx_msix_default },
	{ "rsp_q", qla24xx_msix_rsp_q },
	{ "atio_q", qla83xx_msix_atio_q },
	{ "qpair_multiq", qla2xxx_msix_rsp_q },
};

static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int i, ret;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int min_vecs = QLA_BASE_VECTORS;
	struct irq_affinity desc = {
		.pre_vectors = QLA_BASE_VECTORS,
	};

	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		desc.pre_vectors++;
		min_vecs++;
	}

	if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
		/* user wants to control IRQ setting for target mode */
		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
		    blk_mq_num_online_queues(ha->msix_count) + min_vecs,
		    PCI_IRQ_MSIX);
	} else
		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
		    blk_mq_num_online_queues(ha->msix_count) + min_vecs,
		    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
		    &desc);

	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, giving up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		ql_log(ql_log_info, vha, 0x00c6,
		    "MSI-X: Using %d vectors\n", ret);
		ha->msix_count = ret;
		/* Recalculate queue values */
		if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
			ha->max_req_queues = ha->msix_count - 1;
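			/*
			 * The base (default) vector does not drive a
			 * request/response pair, so the pair count is
			 * the granted vector count less one.
			 */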
			/* ATIO queue needs 1 vector. That's 1 less QPair. */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
			    "Adjusted max number of queue pairs: %d.\n",
			    ha->max_qpairs);
		}
	}
	vha->irq_offset = desc.pre_vectors;
	ha->msix_entries = kcalloc(ha->msix_count,
	    sizeof(struct qla_msix_entry), GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto free_irqs;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = pci_irq_vector(ha->pdev, i);
		qentry->vector_base0 = i;
		qentry->entry = i;
		qentry->have_irq = 0;
		qentry->in_use = 0;
		qentry->handle = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < QLA_BASE_VECTORS; i++) {
		qentry = &ha->msix_entries[i];
		qentry->handle = rsp;
		rsp->msix = qentry;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, qentry->name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
		qentry->in_use = 1;
	}

	/*
	 * If target mode is enabled, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
		rsp->msix = qentry;
		qentry->handle = rsp;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no,
		    msix_entries[QLA_ATIO_VECTOR].name);
		qentry->in_use = 1;
		ret = request_irq(qentry->vector,
		    msix_entries[QLA_ATIO_VECTOR].handler,
		    0, qentry->name, rsp);
		qentry->have_irq = 1;
	}

msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla2x00_free_irqs(vha);
		ha->mqenable = 0;
		goto msix_out;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_MQUE_CAPABLE(ha) &&
	    (ha->msixbase && ha->mqiobase && ha->max_qpairs))
		ha->mqenable = 1;
	else
		ha->mqenable = 0;

	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	return ret;

free_irqs:
	pci_free_irq_vectors(ha->pdev);
	goto msix_out;
}

int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
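	/*
	 * Interrupt setup falls back in stages: MSI-X with per-queue
	 * vectors, then single-vector MSI, then legacy INTx as a last
	 * resort.
	 */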
	/* If possible, enable MSI-X. */
	if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
	    !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
	    !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
		goto skip_msi;

	if (ql2xenablemsix == 2)
		goto skip_msix;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back to MSI mode -- ret=%d.\n", ret);

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto skip_msi;

	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (ret > 0) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back to INTa mode -- ret=%d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d -- already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
		/* Set max_qpairs to 0, as MSI-X and MSI are not enabled. */
		ha->max_qpairs = 0;
	}

clear_risc_ints:
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	wrt_reg_word(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;
	struct qla_msix_entry *qentry;
	int i;
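	/*
	 * Teardown mirrors setup in reverse: release each registered
	 * per-vector handler first, then hand the vectors themselves
	 * back to the PCI core.
	 */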
	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		goto free_irqs;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled) {
		for (i = 0; i < ha->msix_count; i++) {
			qentry = &ha->msix_entries[i];
			if (qentry->have_irq) {
				irq_set_affinity_notifier(qentry->vector, NULL);
				free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
			}
		}
		kfree(ha->msix_entries);
		ha->msix_entries = NULL;
		ha->flags.msix_enabled = 0;
		ql_dbg(ql_dbg_init, vha, 0x0042,
		    "Disabled MSI-X.\n");
	} else {
		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
	}

free_irqs:
	pci_free_irq_vectors(ha->pdev);
}

int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
	struct qla_msix_entry *msix)
{
	const struct qla_init_msix_entry *intr =
	    &msix_entries[QLA_MSIX_QPAIR_MULTIQ_RSP_Q];
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	scnprintf(msix->name, sizeof(msix->name),
	    "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
	ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->handle = qpair;
	qla_mapq_init_qp_cpu_map(ha, msix, qpair);
	return ret;
}