
 * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *

#include <linux/crc-t10dif.h>

/* NVME initiator-based functions */
/**
 * lpfc_nvme_create_queue -
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
	if (!pnvme_lport->private)
		return -ENOMEM;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	if (!vport || test_bit(FC_UNLOADING, &vport->load_flag) ||
	    test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag))
		return -ENODEV;

	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = raw_smp_processor_id();
	qhandle->qidx = qidx;
	/* NVME qidx == 0 is the admin queue, so both the admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0.
	 */
	if (qidx)
		qhandle->index = ((qidx - 1) %
			lpfc_nvme_template.max_hw_queues);
	else
		qhandle->index = qidx;

			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
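/*
 * Hedged aside (not driver code): the qidx-to-hardware-queue mapping
 * above reduces to simple modulo arithmetic. A minimal self-contained
 * sketch, with "max_hw_queues" standing in for the driver's
 * lpfc_nvme_template.max_hw_queues value:
 */
static inline unsigned int sketch_map_qidx(unsigned int qidx,
					   unsigned int max_hw_queues)
{
	/* qidx 0 is the admin queue; it shares hardware queue 0 with
	 * the first IO queue (qidx 1).
	 */
	if (qidx == 0)
		return 0;
	/* IO queues 1..N wrap round-robin over the hardware queues. */
	return (qidx - 1) % max_hw_queues;
}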
/**
 * lpfc_nvme_delete_queue -
 *
 * Return value :
 *   0 - Success
 **/

	if (!pnvme_lport->private)
		return;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	struct lpfc_nvme_lport *lport = localport->private;

	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,

	if (lport->vport->localport)
		complete(lport->lport_unreg_cmp);
	struct lpfc_nvme_rport *rport = remoteport->private;

	ndlp = rport->ndlp;

	vport = ndlp->vport;

		       __func__, ndlp, ndlp->nlp_state, rport);

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport.
	 */
			 remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags);
	spin_lock_irq(&ndlp->lock);

	if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT)
		ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD);

	spin_unlock_irq(&ndlp->lock);

	if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
/**
 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
 *
 * Passes the unsolicited NVME LS request payload
 * to the nvme-fc transport via nvme_fc_rcv_ls_req().
 *
 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
 **/
	uint32_t *payload = axchg->payload;

	vport = axchg->ndlp->vport;
	lpfc_rport = axchg->ndlp->nrport;
	if (!lpfc_rport)
		return -EINVAL;

	remoteport = lpfc_rport->remoteport;
	if (!vport->localport ||
	    test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag))
		return -EINVAL;

	lport = vport->localport->private;
	if (!lport)
		return -EINVAL;

	rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
				axchg->size);

			 axchg->size, rc,
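/*
 * Hedged sketch of the unsolicited-LS handoff contract above: the LLDD
 * passes the raw LS payload to the nvme-fc transport, which answers
 * later through the struct nvmefc_ls_rsp it was given. Field names
 * follow the fragment above; error handling is elided.
 */
static int sketch_handoff_ls(struct nvme_fc_remote_port *remoteport,
			     struct lpfc_async_xchg_ctx *axchg)
{
	/* A non-zero return means the transport rejected the LS and
	 * the driver must clean up the exchange itself.
	 */
	return nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp,
				  axchg->payload, axchg->size);
}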
/**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME LS request
 **/
	pnvme_lsreq = cmdwqe->context_un.nvme_lsreq;
	ndlp = cmdwqe->ndlp;
	buf_ptr = cmdwqe->bpl_dmabuf;

			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf,

			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (buf_ptr) {
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->bpl_dmabuf = NULL;
	}
	if (pnvme_lsreq->done) {
		if (status != CQE_STATUS_SUCCESS)
			status = -ENXIO;
		pnvme_lsreq->done(pnvme_lsreq, status);
	} else {
				pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				cmdwqe->sli4_xritag, status);
	}

	cmdwqe->ndlp = NULL;
	struct lpfc_vport *vport = cmdwqe->vport;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			atomic_inc(&lport->fc4NvmeLsCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_ls_xb);
				atomic_inc(&lport->cmpl_ls_err);
			}
		}
	}
	struct lpfc_hba *phba = vport->phba;

	wqe = &genwqe->wqe;

	genwqe->bpl_dmabuf = bmp;
	genwqe->cmd_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->ndlp = lpfc_nlp_get(ndlp);
	if (!genwqe->ndlp) {
		dev_warn(&phba->pcidev->dev,
			 "Warning: Failed node ref, not sending LS_REQ\n");
		return 1;
	}

	genwqe->context_un.nvme_lsreq = pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	tmo = (3 * phba->fc_ratov);

	bpl = (struct ulp_bde64 *)bmp->virt;

	genwqe->num_bdes = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	wqe->gen_req.request_payload_len = first_len;

	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo);
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);

	genwqe->cmd_cmpl = cmpl;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

		      genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
	if (rc) {
			 ndlp->nlp_DID, genwqe->iotag,
			 vport->port_state, rc);
		return 1;
	}

		      ndlp->nlp_DID, genwqe->sli4_xritag,
		      vport->port_state,
/**
 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
 * @gen_req_cmp: Completion call-back
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
	if (!ndlp)
		return -ENODEV;

	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
				 ndlp->nlp_DID, ntype, nstate);
		return -ENODEV;
	}
	if (test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag))
		return -ENODEV;

	if (!vport->phba->sli4_hba.nvmels_wq)
		return -ENOMEM;

	/* ... in the nvme-fc layer. */

	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp) {
				 ndlp->nlp_DID);
		return -ENOMEM;
	}

	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
				 ndlp->nlp_DID);
		kfree(bmp);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

			 ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, gen_req_cmp, ndlp, 2,
				pnvme_lsreq->timeout, 0);
	if (ret != WQE_SUCCESS) {
			 pnvme_lsreq, ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return -EIO;
	}
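/*
 * Hedged aside: the buffer pointer list built above is just two 64-bit
 * BDEs, one for the LS request, one for the response. A standalone
 * model with simplified types ("sketch_bde64" is an assumption, not
 * the driver's struct ulp_bde64):
 */
#include <stdint.h>

struct sketch_bde64 {
	uint32_t addr_high;
	uint32_t addr_low;
	uint32_t flags_and_size;	/* bdeFlags | bdeSize, LE on the wire */
};

static void sketch_fill_ls_bpl(struct sketch_bde64 bpl[2],
			       uint64_t rqstdma, uint32_t rqstlen,
			       uint64_t rspdma, uint32_t rsplen)
{
	/* Entry 0: the LS request payload (host -> wire). */
	bpl[0].addr_high = (uint32_t)(rqstdma >> 32);
	bpl[0].addr_low = (uint32_t)rqstdma;
	bpl[0].flags_and_size = rqstlen;	/* bdeFlags 0 = plain BDE_64 */

	/* Entry 1: the LS response landing buffer (wire -> host). */
	bpl[1].addr_high = (uint32_t)(rspdma >> 32);
	bpl[1].addr_low = (uint32_t)rspdma;
	bpl[1].flags_and_size = rsplen;		/* driver marks this BUFF_TYPE_BDE_64I */
}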
/**
 * lpfc_nvme_ls_req - Issue an NVME Link Service request
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	if (unlikely(!lport) || unlikely(!rport))
		return -EINVAL;

	vport = lport->vport;
	if (test_bit(FC_UNLOADING, &vport->load_flag) ||
	    test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag))
		return -ENODEV;

	atomic_inc(&lport->fc4NvmeLsRequests);

	ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
				 lpfc_nvme_ls_req_cmp);
	if (ret)
		atomic_inc(&lport->xmt_ls_err);
/**
 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior NVME
 *         LS request
 *
 * Return value :
 *   non-zero: various error conditions in form -Exxx
 **/
	struct lpfc_hba *phba = vport->phba;

	if (!ndlp) {
				ndlp, ndlp ? ndlp->nlp_DID : 0);
		return -EINVAL;
	}

			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/* Lock the ELS ring txcmplq and look for the wqe that matches
	 * this LS. If found, mark it aborted and issue the abort.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		if (wqe->context_un.nvme_lsreq == pnvme_lsreq) {
			wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
			foundit = true;
			break;
		}
	}
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	if (foundit)
		return 0;

	return -EINVAL;
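/*
 * Hedged aside: the abort scan above is the standard "walk the
 * completion queue under the ring lock, tag the match, abort after
 * dropping the lock" pattern. A minimal standalone model in plain C
 * (list and flag types are stand-ins, not the driver's types):
 */
#include <stddef.h>

struct sketch_wqe {
	void *lsreq;			/* request this WQE carries */
	unsigned int flags;		/* DRIVER_ABORTED lives here */
	struct sketch_wqe *next;
};

#define SKETCH_DRIVER_ABORTED	0x1

static struct sketch_wqe *sketch_find_and_mark(struct sketch_wqe *head,
					       void *lsreq)
{
	for (struct sketch_wqe *w = head; w; w = w->next) {
		if (w->lsreq == lsreq) {
			w->flags |= SKETCH_DRIVER_ABORTED;
			return w;	/* caller then issues the ABTS */
		}
	}
	return NULL;			/* not in flight: nothing to abort */
}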
	if (test_bit(FC_UNLOADING, &axchg->phba->pport->load_flag))
		return -ENODEV;

	lport = (struct lpfc_nvme_lport *)localport->private;

	if (rc != -EALREADY)
		atomic_inc(&lport->xmt_ls_abort);
/**
 * lpfc_nvme_ls_abort - Abort a prior NVME LS request
 **/

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;
	vport = lport->vport;

	if (test_bit(FC_UNLOADING, &vport->load_flag))
		return;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);

	ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
	if (ret)
		atomic_inc(&lport->xmt_ls_abort);
	struct lpfc_hba *phba = vport->phba;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix the dma address.
	 */
	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	sgl = lpfc_ncmd->dma_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Word 0-2 - NVME CMND IU (embedded payload) */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 64;  /* Word 16 */

		/* Word 10 - dbde is 0, wqes is 1 in template */

		/*
		 * WQE words 16-30 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];  /* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */

		dptr += 8;  /* Skip Words 8-15 in payload */
	} else {
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

		/* Word 0-2 - NVME CMND IU Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
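/*
 * Hedged aside: the embedded-command copy above maps NVME CMD IU
 * payload words into WQE words 16-29, skipping payload Words 0, 5 and
 * 8-15 (per the comment in the fragment). A standalone model of that
 * layout (array sizes are assumptions for the sketch, not taken from
 * the hardware definition):
 */
static void sketch_embed_cmd_iu(uint32_t wqe_words[32],
				const uint32_t payload[24])
{
	uint32_t *wptr = &wqe_words[16];
	const uint32_t *dptr = payload + 1;	/* skip Word 0 */
	int i;

	for (i = 0; i < 4; i++)			/* Words 1-4  -> WQE 16-19 */
		*wptr++ = *dptr++;
	dptr++;					/* skip Word 5 */
	for (i = 0; i < 2; i++)			/* Words 6-7  -> WQE 20-21 */
		*wptr++ = *dptr++;
	dptr += 8;				/* skip Words 8-15 */
	for (i = 0; i < 8; i++)			/* Words 16-23 -> WQE 22-29 */
		*wptr++ = *dptr++;
}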
/**
 * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO
 *
 * Return value :
 *   0 - Success
 **/
	struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf;
	struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
	struct lpfc_vport *vport = pwqeIn->vport;

	spin_lock(&lpfc_ncmd->buf_lock);

	if (!lpfc_ncmd->nvmeCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}

	nCmd = lpfc_ncmd->nvmeCmd;

	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

	if (unlikely(status && vport->localport)) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&lport->cmpl_fcp_xb);
			atomic_inc(&lport->cmpl_fcp_err);
				 lpfc_ncmd->cur_iocbq.sli4_xritag,
				 status, wcqe->parameter);
		}
	}

	ndlp = lpfc_ncmd->ndlp;

	/* Good completion that returned no ERSP: fabricate a minimal one. */
	ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

	cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
	cid = cp->sqe.common.command_id;

	ep->iu_len = cpu_to_be16(8);
	ep->rsn = wcqe->parameter;
	ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
	ep->rsvd12 = 0;
	ptr = (uint32_t *)&ep->cqe.result.u64;
	*ptr++ = wcqe->total_data_placed;

	ep->cqe.sq_head = sqhd;
	ep->cqe.sq_id = nCmd->sqid;
	ep->cqe.command_id = cid;
	ep->cqe.status = 0;

	lpfc_ncmd->status = IOSTAT_SUCCESS;
	lpfc_ncmd->result = 0;
	nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
	nCmd->transferred_length = nCmd->payload_length;

	/* Error completion path */
	lpfc_ncmd->status = status;
	lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

	switch (lpfc_ncmd->status) {
	case IOSTAT_SUCCESS:
		nCmd->transferred_length = wcqe->total_data_placed;
		nCmd->rcv_rsplen = 0;
		nCmd->status = 0;
		break;
	case IOSTAT_FCP_RSP_ERROR:
		nCmd->transferred_length = wcqe->total_data_placed;
		nCmd->rcv_rsplen = wcqe->parameter;
		nCmd->status = 0;

		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/* A full ERSP came back; accept it as success. */
		if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
			lpfc_ncmd->status = IOSTAT_SUCCESS;
			lpfc_ncmd->result = 0;
				 lpfc_ncmd->cur_iocbq.sli4_xritag,
				 wcqe->total_data_placed,
				 cp->sqe.common.opcode,
				 cp->sqe.common.command_id,
				 ep->cqe.status);
			break;
		}
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_ncmd->status, lpfc_ncmd->result,
			 wcqe->total_data_placed,
			 cp->sqe.common.opcode,
			 cp->sqe.common.command_id,
			 ep->cqe.status);
		break;
	case IOSTAT_LOCAL_REJECT:
		if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
				 lpfc_ncmd->cur_iocbq.sli4_xritag,
		fallthrough;
	default:
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_ncmd->status, lpfc_ncmd->result,
			 wcqe->parameter,
			 wcqe->total_data_placed);
		nCmd->transferred_length = 0;
		nCmd->rcv_rsplen = 0;
		nCmd->status = NVME_SC_INTERNAL;
		if (pci_channel_offline(vport->phba->pcidev) ||
		    lpfc_ncmd->result == IOERR_SLI_DOWN)
	}

	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	if (lpfc_ncmd->ts_cmd_start) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_io = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
	}

	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (lpfc_ncmd->cpu != cpu)
				 cpu, lpfc_ncmd->cpu);
	}

	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		lpfc_ncmd->nvmeCmd = NULL;
	}

	spin_unlock(&lpfc_ncmd->buf_lock);

	/* Check if the IO qualified for CMF accounting */
	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
	    nCmd->io_dir == NVMEFC_FCP_READ &&
	    nCmd->payload_length) {
		/* Used when calculating average latency */
		lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start;
		lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL);
	}

	nCmd->done(nCmd);
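/*
 * Hedged aside: the "good completion, no ERSP on the wire" path above
 * fabricates a minimal ERSP so the transport always sees one. A
 * standalone model (this struct layout is a simplified assumption,
 * not the transport's struct nvme_fc_ersp_iu):
 */
struct sketch_ersp {
	uint16_t iu_len;	/* big-endian, in 32-bit words */
	uint32_t rsn;
	uint32_t xfrd_len;	/* big-endian */
	uint16_t sq_head;	/* big-endian, echoed from the adapter */
	uint16_t sq_id;
	uint16_t command_id;
	uint16_t status;	/* 0 = success */
};

static void sketch_fake_ersp(struct sketch_ersp *ep, uint32_t rsn,
			     uint32_t payload_len, uint16_t sqhd,
			     uint16_t sqid, uint16_t cid)
{
	ep->iu_len = __builtin_bswap16(8);	/* 8 words = 32 bytes */
	ep->rsn = rsn;
	ep->xfrd_len = __builtin_bswap32(payload_len);
	ep->sq_head = sqhd;			/* already byte-swapped above */
	ep->sq_id = sqid;
	ep->command_id = cid;
	ep->status = 0;				/* report clean NVMe status */
}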
/**
 * lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO
 * @pnode: pointer to a node-list data structure
 *
 * Return value :
 *   0 - Success
 **/
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
	union lpfc_wqe128 *wqe = &pwqeq->wqe;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
			cstat->output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
				bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
				       LPFC_WQE_IOD_NONE);
			cstat->input_requests++;
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		cstat->control_requests++;
	}

	if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
		sqe = &((struct nvme_fc_cmd_iu *)
			nCmd->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_async_event)
			bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
	}

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 7 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Word 10 */
	bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);

	if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) {
		if (phba->pport->vmid_priority_tagging) {
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
			       lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid);
		} else {
			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
			wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id;
		}
	}

	pwqeq->vport = vport;
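/*
 * Hedged aside: the first-burst branch above is a clamp of the write
 * length against the negotiated first-burst size. A minimal model:
 */
static inline uint32_t sketch_first_burst_len(uint32_t req_len,
					      uint32_t nvme_fb_size,
					      int fb_enabled)
{
	if (!fb_enabled)
		return 0;	/* no first burst: target solicits all data */
	return (req_len < nvme_fb_size) ? req_len : nvme_fb_size;
}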
/**
 * lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO
 *
 * Return value :
 *   0 - Success
 **/
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;

		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}

			sgl->word2 = 0;

			/* Chain into an extra SGL page on an LSP boundary */
			if (!((j + 1) % phba->border_sge_num) &&
			    ((nseg - 1) != i)) {
				sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
								 lpfc_ncmd);
				if (unlikely(!sgl_xtra)) {
					lpfc_ncmd->seg_cnt = 0;
					return 1;
				}
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
						sgl_xtra->dma_phys_sgl));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						sgl_xtra->dma_phys_sgl));
			}

			/* Data SGE */
			if ((nseg - 1) == i)
				bf_set(lpfc_sli4_sge_last, sgl, 1);

			sgl->addr_lo = cpu_to_le32(
					putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(
					putPaddrHigh(physaddr));

			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			/* LSP SGE: descend into the chained SGL page */
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(
					phba->cfg_sg_dma_buf_size);

			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			i = i - 1;
		}

		if (nseg == 1 && phba->cfg_enable_pbde) {
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);

			/* Word 11 - set PBDE bit */
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			/* Word 11 - PBDE bit disabled by default template */
		}
	} else {
		lpfc_ncmd->seg_cnt = 0;

		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
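/*
 * Hedged aside: the single-segment fast path above inlines the one
 * data descriptor into WQE words 13-15 (the "PBDE") so the adapter
 * can skip the external SGL fetch. A minimal model of the decision:
 */
static inline int sketch_use_pbde(unsigned int nseg, int cfg_enable_pbde)
{
	/* Only worthwhile (and only legal) for exactly one data SGE. */
	return nseg == 1 && cfg_enable_pbde;
}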
/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 *
 * Return value :
 *   0 - Success
 **/
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport)) {
		ret = -EINVAL;
		goto out_fail;
	}

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	phba = vport->phba;

	if ((unlikely(test_bit(FC_UNLOADING, &vport->load_flag))) ||
	    test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENODEV;
		goto out_fail;
	}

	freqpriv = pnvme_fcreq->private;
	if (unlikely(!freqpriv)) {
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EINVAL;
		goto out_fail;
	}

	if (phba->ktime_on)
		start = ktime_get_ns();

	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp) {
			 rport, ndlp, pnvme_rport->port_id);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
			 pnvme_rport->port_id,
			 ndlp->nlp_state, ndlp->nlp_type,
			 ndlp->fc4_xpt_flags);
		atomic_inc(&lport->xmt_fcp_bad_ndlp);
		ret = -EBUSY;
		goto out_fail;
	}

	/* Only NVME Keep alive commands should be expedited; these are
	 * issued on the admin queue, qidx 0.
	 */
	if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
		sqe = &((struct nvme_fc_cmd_iu *)
			pnvme_fcreq->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_keep_alive)
			expedite = 1;
	}

	/* Check if the IO qualifies for CMF accounting */
	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
	    pnvme_fcreq->io_dir == NVMEFC_FCP_READ &&
	    pnvme_fcreq->payload_length) {
		ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length);
		if (ret) {
			ret = -EBUSY;
			goto out_fail;
		}
	}

	if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
	    !expedite) {
			 lpfc_queue_info->index, ndlp->nlp_DID,
			 atomic_read(&ndlp->cmd_pending),
			 ndlp->cmd_qdepth);
		atomic_inc(&lport->xmt_fcp_qdepth);
		ret = -EBUSY;
		goto out_fail1;
	}

	/* Lookup the lpfc_ncmd on the right hardware queue */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		idx = lpfc_queue_info->index;
	} else {
		cpu = raw_smp_processor_id();
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
	if (lpfc_ncmd == NULL) {
		atomic_inc(&lport->xmt_fcp_noxri);
			 lpfc_queue_info->index, ndlp->nlp_DID);
		ret = -EBUSY;
		goto out_fail1;
	}

	if (start) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	} else {
		lpfc_ncmd->ts_cmd_start = 0;
	}
	lpfc_ncmd->rx_cmd_start = start;

	/* Store the pointers needed by the completion path. */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->qidx = lpfc_queue_info->qidx;

	/* check the necessary and sufficient condition to support VMID */
	if (lpfc_is_vmid_enabled(phba) &&
	    (ndlp->vmid_support ||
	     phba->pport->vmid_priority_tagging ==
	     LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
			if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE)
			else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ)
				   &lpfc_ncmd->cur_iocbq.vmid_tag);
				lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID;
	}

	/* Assign the IO to a hardware queue. A hardware queue maps to a
	 * driver MSI-X vector/EQ/CQ/WQ.
	 */
	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
	cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;

	if (ret) {
			 lpfc_queue_info->index, ndlp->nlp_DID);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_inc(&lport->xmt_fcp_wqerr);
			 ret, vport->fc_myDID, ndlp->nlp_DID,
			 lpfc_ncmd->cur_iocbq.sli4_xritag);
		goto out_free_nvme_buf;
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);

	if (lpfc_ncmd->ts_cmd_start)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		lpfc_ncmd->cpu = cpu;
		if (idx != cpu)
				      lpfc_ncmd->cpu,
				      lpfc_queue_info->index);
	}

	return 0;

 out_free_nvme_buf:
	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
			cstat->output_requests--;
		else
			cstat->input_requests--;
	} else {
		cstat->control_requests--;
	}
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail1:
	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT,
			     pnvme_fcreq->payload_length, NULL);
 out_fail:
	return ret;
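/*
 * Hedged aside: hardware-queue selection above either trusts the
 * transport's queue index or maps the submitting CPU to its hardware
 * queue. A standalone model ("cpu_to_hdwq" stands in for the driver's
 * phba->sli4_hba.cpu_map lookup, an assumption for the sketch):
 */
static inline unsigned int sketch_pick_hdwq(int sched_by_hdwq,
					    unsigned int transport_idx,
					    unsigned int this_cpu,
					    const unsigned int *cpu_to_hdwq)
{
	if (sched_by_hdwq)
		return transport_idx;		/* LPFC_FCP_SCHED_BY_HDWQ */
	return cpu_to_hdwq[this_cpu];		/* affine to submitting CPU */
}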
/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 **/

	struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl;

			bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com),
			get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag,
/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 *
 * The abort
 * is executed asynchronously - once the target is validated as "MAPPED" and
 * the IO is verified as still outstanding, an abort WQE is issued.
 **/
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;

	vport = lport->vport;

	phba = vport->phba;
	freqpriv = pnvme_fcreq->private;

	if (unlikely(!freqpriv))
		return;
	if (test_bit(FC_UNLOADING, &vport->load_flag))
		return;

			 pnvme_rport->port_id,

	lpfc_nbuf = freqpriv->nvme_buf;
	if (!lpfc_nbuf) {
		return;
	} else if (!lpfc_nbuf->nvmeCmd) {
		return;
	}

	/* Guard against IO completion being called at the same time */
	if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6139 Driver in reset cleanup - flushing "
				 "NVME Req now.  hba_flag x%lx\n",
				 phba->hba_flag);
		return;
	}

	spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags);
	spin_lock(&phba->hbalock);

	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
			 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
			 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	/* Don't abort IOs no longer on the pending queue. */
	if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
			 "6142 NVME IO req x%px not queued - skipping "
			 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	atomic_inc(&lport->xmt_fcp_abort);
			 nvmereq_wqe->sli4_xritag,
			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);

	/* Outstanding abort is in progress */
	if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) {
			 nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	spin_unlock(&phba->hbalock);
	spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);

			 nvmereq_wqe->sli4_xritag);
	return;

out_unlock:
	spin_unlock(&phba->hbalock);
	spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
/* initiator-based functions */
/**
 * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_nvme_buf - Success
 **/
	pwqeq = &(lpfc_ncmd->cur_iocbq);
	wqe = &pwqeq->wqe;

	pwqeq->cmd_flag = LPFC_IO_NVME;
	pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl;
	lpfc_ncmd->start_time = jiffies;
	lpfc_ncmd->flags = 0;

	sgl = lpfc_ncmd->dma_sgl;

	sgl->word2 = cpu_to_le32(sgl->word2);

	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}

	qp = &phba->sli4_hba.hdwq[idx];
	qp->empty_io_bufs++;
	if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
		atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);

	lpfc_ncmd->ndlp = NULL;
	lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;

	qp = lpfc_ncmd->hdwq;
	if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
				 lpfc_ncmd->cur_iocbq.sli4_xritag,
				 lpfc_ncmd->cur_iocbq.iotag);

		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_abts_io_buf_list);
		qp->abts_nvme_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else {
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
	}
/**
 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
 *
 * Return codes:
 *   0 - successful
 *   -ENOMEM - no heap memory available
 *   other values - from nvme registration upcall
 **/
	struct lpfc_hba *phba = vport->phba;

	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;

	lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;

	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
					 &vport->phba->pcidev->dev, &localport);
	if (!ret) {
			 localport->port_num, localport,
			 localport->private,

		lport = (struct lpfc_nvme_lport *)localport->private;
		vport->localport = localport;
		lport->vport = vport;
		vport->nvmei_support = 1;

		atomic_set(&lport->xmt_fcp_noxri, 0);
		atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
		atomic_set(&lport->xmt_fcp_qdepth, 0);
		atomic_set(&lport->xmt_fcp_err, 0);
		atomic_set(&lport->xmt_fcp_wqerr, 0);
		atomic_set(&lport->xmt_fcp_abort, 0);
		atomic_set(&lport->xmt_ls_abort, 0);
		atomic_set(&lport->xmt_ls_err, 0);
		atomic_set(&lport->cmpl_fcp_xb, 0);
		atomic_set(&lport->cmpl_fcp_err, 0);
		atomic_set(&lport->cmpl_ls_xb, 0);
		atomic_set(&lport->cmpl_ls_err, 0);

		atomic_set(&lport->fc4NvmeLsRequests, 0);
		atomic_set(&lport->fc4NvmeLsCmpls, 0);
	}
/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
 *
 * An uninterruptible wait is used because of the risk of transport-to-
 * driver state mismatch if the wait were abandoned early.
 */
	struct lpfc_hba *phba = vport->phba;

	pending = 0;
	abts_scsi = 0;
	abts_nvme = 0;
	nvmels_cnt = 0;
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		qp = &phba->sli4_hba.hdwq[i];
		if (!vport->localport || !qp || !qp->io_wq)
			return;

		pring = qp->io_wq->pring;
		if (!pring)
			continue;
		pending += pring->txcmplq_cnt;
		abts_scsi += qp->abts_scsi_io_bufs;
		abts_nvme += qp->abts_nvme_io_bufs;
	}
	if (phba->sli4_hba.nvmels_wq) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		if (pring)
			nvmels_cnt = pring->txcmplq_cnt;
	}
	if (!vport->localport ||
	    test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
	    phba->link_state == LPFC_HBA_ERROR ||
	    test_bit(FC_UNLOADING, &vport->load_flag))
		return;

		 lport, vport->localport, pending,

		 lport, vport->localport);
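/*
 * Hedged aside: the unreg wait above repeatedly sums outstanding IOs
 * across all hardware queues and bails early on unload or PCI error.
 * A standalone model of one scan pass (the struct is a stand-in for
 * the driver's per-hdwq bookkeeping, an assumption for the sketch):
 */
struct sketch_hdwq {
	unsigned int txcmplq_cnt;
	unsigned int abts_scsi_io_bufs;
	unsigned int abts_nvme_io_bufs;
};

static unsigned int sketch_count_pending(const struct sketch_hdwq *qs,
					 unsigned int nq)
{
	unsigned int i, pending = 0;

	for (i = 0; i < nq; i++)
		pending += qs[i].txcmplq_cnt +
			   qs[i].abts_scsi_io_bufs +
			   qs[i].abts_nvme_io_bufs;
	return pending;	/* zero means the lport unreg can complete */
}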
/**
 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
 **/

	if (vport->nvmei_support == 0)
		return;

	localport = vport->localport;
	if (!localport)
		return;
	lport = (struct lpfc_nvme_lport *)localport->private;

	lport->lport_unreg_cmp = &lport_unreg_cmp;

	vport->localport = NULL;

	vport->nvmei_support = 0;
	localport = vport->localport;
	if (!localport)
		return;

	lport = (struct lpfc_nvme_lport *)localport->private;

			 localport, vport->fc_myDID);

	localport->port_id = vport->fc_myDID;
	if (localport->port_id == 0)
		localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
	else
		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;

			 lport, localport->port_id);
	struct fc_rport *srport = ndlp->rport;

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
			 ndlp->nlp_DID, ndlp->nlp_type);

	localport = vport->localport;

	lport = (struct lpfc_nvme_lport *)localport->private;

	/* NVME rports are not preserved across devloss.
	 * Just register this instance. Note, rpinfo->dev_loss_tmo
	 * is left 0 to indicate accept transport defaults.
	 */
	rpinfo.port_id = ndlp->nlp_DID;
	if (ndlp->nlp_type & NLP_NVME_TARGET)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;

	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	if (srport)
		rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
	else
		rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;

	spin_lock_irq(&ndlp->lock);

	oldrport = lpfc_ndlp_get_nrport(ndlp);
	if (oldrport) {
		prev_ndlp = oldrport->ndlp;
		spin_unlock_irq(&ndlp->lock);
	} else {
		spin_unlock_irq(&ndlp->lock);
		if (!lpfc_nlp_get(ndlp)) {
			dev_warn(&vport->phba->pcidev->dev,
				 "Warning - No node ref - exit register\n");
			return 0;
		}
	}

	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
	if (!ret) {
		spin_lock_irq(&ndlp->lock);
		ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
		ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
		spin_unlock_irq(&ndlp->lock);
		rport = remote_port->private;
		if (oldrport) {
			/* Sever the ndlp<->rport association
			 * before dropping the ndlp ref from register.
			 */
			spin_lock_irq(&ndlp->lock);
			ndlp->nrport = NULL;
			ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
			spin_unlock_irq(&ndlp->lock);
			rport->ndlp = NULL;
			rport->remoteport = NULL;

			if (prev_ndlp && prev_ndlp != ndlp) {
				if (!prev_ndlp->nrport)
					lpfc_nlp_put(prev_ndlp);
			}
		}

		/* Clean bind the rport to the ndlp. */
		rport->remoteport = remote_port;
		rport->lport = lport;
		rport->ndlp = ndlp;
		spin_lock_irq(&ndlp->lock);
		ndlp->nrport = rport;
		spin_unlock_irq(&ndlp->lock);
	} else {
			 ret, ndlp->nlp_DID, kref_read(&ndlp->kref));
		lpfc_nlp_put(ndlp);
	}
/**
 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
 **/
	spin_lock_irq(&ndlp->lock);
	nrport = lpfc_ndlp_get_nrport(ndlp);
	if (nrport)
		remoteport = nrport->remoteport;
	spin_unlock_irq(&ndlp->lock);

		 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,

	if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
	    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		nvme_fc_rescan_remoteport(remoteport);

			 ndlp->nlp_DID, remoteport->port_state);
	}
/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
 */
	localport = vport->localport;

	lport = (struct lpfc_nvme_lport *)localport->private;

	spin_lock_irq(&ndlp->lock);
	rport = lpfc_ndlp_get_nrport(ndlp);
	if (rport)
		remoteport = rport->remoteport;
	spin_unlock_irq(&ndlp->lock);

			 remoteport, remoteport->port_name,
			 remoteport->port_id, remoteport->port_state,
			 ndlp->nlp_type, kref_read(&ndlp->kref));

	/* Sanity check ndlp type. Only call for NVME ports. Don't
	 * clear any rport state until the transport calls back.
	 */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) ||
	    (remoteport->port_role & FC_PORT_ROLE_NVME_TARGET)) {
		spin_lock_irq(&ndlp->lock);
		ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
		spin_unlock_irq(&ndlp->lock);

		/* Don't let the host nvme transport keep sending keep-alives
		 * on this remoteport.
		 */
		if (test_bit(FC_UNLOADING, &vport->load_flag) ||
		    unlikely(vport->phba->link_state == LPFC_HBA_ERROR))
			(void)nvme_fc_set_remoteport_devloss(remoteport, 0);

		ret = nvme_fc_unregister_remoteport(remoteport);

		ndlp->nrport = NULL;

		if (ret != 0) {
				 ret, remoteport->port_state);

			if (test_bit(FC_UNLOADING, &vport->load_flag)) {
				/* Only one thread can drop the initial node
				 * reference.
				 */
				if (!test_and_set_bit(NLP_DROPPED,
						      &ndlp->nlp_flag)) {
					lpfc_nlp_put(ndlp);
				}
			}
		}
	}
	return;

 rport_err:
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
/**
 * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri.
 **/
			lpfc_ncmd->nvmeCmd,
			lpfc_ncmd->cur_iocbq.iotag);

	if (lpfc_ncmd->nvmeCmd) {
		nvme_cmd = lpfc_ncmd->nvmeCmd;
		nvme_cmd->transferred_length = 0;
		nvme_cmd->rcv_rsplen = 0;
		nvme_cmd->status = NVME_SC_INTERNAL;
		nvme_cmd->done(nvme_cmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
/**
 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri.
 **/
	struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;

			  lpfc_ncmd->nvmeCmd, xri,
			  lpfc_ncmd->cur_iocbq.iotag);

	if (lpfc_ncmd->nvmeCmd) {
		nvme_cmd = lpfc_ncmd->nvmeCmd;
		nvme_cmd->done(nvme_cmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
/**
 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
 *
 * Used in error recovery paths where the driver must wait for outstanding
 * IO to drain, for example when the PCI
 * slot has been permanently disabled.
 **/
	if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
		return;

	/* Cycle through all IO rings and make sure all outstanding
	 * WQEs have completed.
	 */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		if (!phba->sli4_hba.hdwq[i].io_wq)
			continue;
		pring = phba->sli4_hba.hdwq[i].io_wq->pring;
		if (!pring)
			continue;

		/* Wait out everything on the txcmplq */
		while (!list_empty(&pring->txcmplq)) {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
		}
	}
	lpfc_ncmd = pwqeIn->io_buf;

	/* For an abort iocb just return, the IO iocb will do a done call */
	if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
	    CMD_ABORT_XRI_CX)
		return;

	spin_lock(&lpfc_ncmd->buf_lock);
	nCmd = lpfc_ncmd->nvmeCmd;
	if (!nCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		return;
	}
	spin_unlock(&lpfc_ncmd->buf_lock);

			lpfc_ncmd->cur_iocbq.sli4_xritag);

	wcqep->word0 = 0;
	bf_set(lpfc_wcqe_c_status, wcqep, stat);
	wcqep->parameter = param;
	wcqep->total_data_placed = 0;
	wcqep->word3 = 0;  /* xb is 0 */

	/* Call release with XB=1 to queue the IO into the abort list. */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		bf_set(lpfc_wcqe_c_xb, wcqep, 1);

	memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep));
	(pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);
/**
 * lpfc_nvmels_flush_cmd - Clean up outstanding nvmels commands for a port
 **/
	if (phba->sli4_hba.nvmels_wq)
		pring = phba->sli4_hba.nvmels_wq->pring;

	if (unlikely(!pring))
		return;

	spin_lock_irqsave(&phba->hbalock, iflags);
	spin_lock(&pring->ring_lock);
	list_splice_init(&pring->txq, &cancel_list);
	pring->txq_cnt = 0;
	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
		if (piocb->cmd_flag & LPFC_IO_NVME_LS) {
			list_move_tail(&piocb->list, &cancel_list);
			pring->txcmplq_cnt--;
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		}
	}
	spin_unlock(&pring->ring_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
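/*
 * Hedged aside (standalone model, not driver code): the flush above is
 * a two-phase drain - splice the unissued txq wholesale, pick the
 * NVME-LS entries off the in-flight txcmplq, and complete everything
 * on a private cancel list after the locks drop. Modeled here with a
 * singly linked list:
 */
struct sketch_iocb {
	int is_nvme_ls;
	struct sketch_iocb *next;
};

/* Move every NVME-LS entry from *srcp onto a private cancel list. */
static struct sketch_iocb *sketch_collect(struct sketch_iocb **srcp)
{
	struct sketch_iocb *cancel = NULL, **pp = srcp;

	while (*pp) {
		if ((*pp)->is_nvme_ls) {
			struct sketch_iocb *n = *pp;

			*pp = n->next;		/* unlink from source list */
			n->next = cancel;	/* push onto cancel list */
			cancel = n;
		} else {
			pp = &(*pp)->next;
		}
	}
	return cancel;	/* caller completes these with a reject status */
}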