1*01649561SJames Smart /******************************************************************* 2*01649561SJames Smart * This file is part of the Emulex Linux Device Driver for * 3*01649561SJames Smart * Fibre Channel Host Bus Adapters. * 4*01649561SJames Smart * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5*01649561SJames Smart * EMULEX and SLI are trademarks of Emulex. * 6*01649561SJames Smart * www.emulex.com * 7*01649561SJames Smart * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8*01649561SJames Smart * * 9*01649561SJames Smart * This program is free software; you can redistribute it and/or * 10*01649561SJames Smart * modify it under the terms of version 2 of the GNU General * 11*01649561SJames Smart * Public License as published by the Free Software Foundation. * 12*01649561SJames Smart * This program is distributed in the hope that it will be useful. * 13*01649561SJames Smart * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14*01649561SJames Smart * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15*01649561SJames Smart * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16*01649561SJames Smart * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17*01649561SJames Smart * TO BE LEGALLY INVALID. See the GNU General Public License for * 18*01649561SJames Smart * more details, a copy of which can be found in the file COPYING * 19*01649561SJames Smart * included with this package. 
* 20*01649561SJames Smart ********************************************************************/ 21*01649561SJames Smart #include <linux/pci.h> 22*01649561SJames Smart #include <linux/slab.h> 23*01649561SJames Smart #include <linux/interrupt.h> 24*01649561SJames Smart #include <linux/delay.h> 25*01649561SJames Smart #include <asm/unaligned.h> 26*01649561SJames Smart #include <linux/crc-t10dif.h> 27*01649561SJames Smart #include <net/checksum.h> 28*01649561SJames Smart 29*01649561SJames Smart #include <scsi/scsi.h> 30*01649561SJames Smart #include <scsi/scsi_device.h> 31*01649561SJames Smart #include <scsi/scsi_eh.h> 32*01649561SJames Smart #include <scsi/scsi_host.h> 33*01649561SJames Smart #include <scsi/scsi_tcq.h> 34*01649561SJames Smart #include <scsi/scsi_transport_fc.h> 35*01649561SJames Smart #include <scsi/fc/fc_fs.h> 36*01649561SJames Smart 37*01649561SJames Smart #include <linux/nvme.h> 38*01649561SJames Smart #include <linux/nvme-fc-driver.h> 39*01649561SJames Smart #include <linux/nvme-fc.h> 40*01649561SJames Smart #include "lpfc_version.h" 41*01649561SJames Smart #include "lpfc_hw4.h" 42*01649561SJames Smart #include "lpfc_hw.h" 43*01649561SJames Smart #include "lpfc_sli.h" 44*01649561SJames Smart #include "lpfc_sli4.h" 45*01649561SJames Smart #include "lpfc_nl.h" 46*01649561SJames Smart #include "lpfc_disc.h" 47*01649561SJames Smart #include "lpfc.h" 48*01649561SJames Smart #include "lpfc_nvme.h" 49*01649561SJames Smart #include "lpfc_scsi.h" 50*01649561SJames Smart #include "lpfc_logmsg.h" 51*01649561SJames Smart #include "lpfc_crtn.h" 52*01649561SJames Smart #include "lpfc_vport.h" 53*01649561SJames Smart 54*01649561SJames Smart /* NVME initiator-based functions */ 55*01649561SJames Smart 56*01649561SJames Smart static struct lpfc_nvme_buf * 57*01649561SJames Smart lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp); 58*01649561SJames Smart 59*01649561SJames Smart static void 60*01649561SJames Smart lpfc_release_nvme_buf(struct 
lpfc_hba *, struct lpfc_nvme_buf *); 61*01649561SJames Smart 62*01649561SJames Smart 63*01649561SJames Smart /** 64*01649561SJames Smart * lpfc_nvme_create_queue - 65*01649561SJames Smart * @lpfc_pnvme: Pointer to the driver's nvme instance data 66*01649561SJames Smart * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. 67*01649561SJames Smart * @handle: An opaque driver handle used in follow-up calls. 68*01649561SJames Smart * 69*01649561SJames Smart * Driver registers this routine to preallocate and initialize any 70*01649561SJames Smart * internal data structures to bind the @qidx to its internal IO queues. 71*01649561SJames Smart * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ. 72*01649561SJames Smart * 73*01649561SJames Smart * Return value : 74*01649561SJames Smart * 0 - Success 75*01649561SJames Smart * -EINVAL - Unsupported input value. 76*01649561SJames Smart * -ENOMEM - Could not alloc necessary memory 77*01649561SJames Smart **/ 78*01649561SJames Smart static int 79*01649561SJames Smart lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, 80*01649561SJames Smart unsigned int qidx, u16 qsize, 81*01649561SJames Smart void **handle) 82*01649561SJames Smart { 83*01649561SJames Smart struct lpfc_nvme_lport *lport; 84*01649561SJames Smart struct lpfc_vport *vport; 85*01649561SJames Smart struct lpfc_nvme_qhandle *qhandle; 86*01649561SJames Smart char *str; 87*01649561SJames Smart 88*01649561SJames Smart lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 89*01649561SJames Smart vport = lport->vport; 90*01649561SJames Smart qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL); 91*01649561SJames Smart if (qhandle == NULL) 92*01649561SJames Smart return -ENOMEM; 93*01649561SJames Smart 94*01649561SJames Smart qhandle->cpu_id = smp_processor_id(); 95*01649561SJames Smart qhandle->qidx = qidx; 96*01649561SJames Smart /* 97*01649561SJames Smart * NVME qidx == 0 is the admin queue, so both admin 
queue 98*01649561SJames Smart * and first IO queue will use MSI-X vector and associated 99*01649561SJames Smart * EQ/CQ/WQ at index 0. After that they are sequentially assigned. 100*01649561SJames Smart */ 101*01649561SJames Smart if (qidx) { 102*01649561SJames Smart str = "IO "; /* IO queue */ 103*01649561SJames Smart qhandle->index = ((qidx - 1) % 104*01649561SJames Smart vport->phba->cfg_nvme_io_channel); 105*01649561SJames Smart } else { 106*01649561SJames Smart str = "ADM"; /* Admin queue */ 107*01649561SJames Smart qhandle->index = qidx; 108*01649561SJames Smart } 109*01649561SJames Smart 110*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 111*01649561SJames Smart "6073 Binding %s HdwQueue %d (cpu %d) to " 112*01649561SJames Smart "io_channel %d qhandle %p\n", str, 113*01649561SJames Smart qidx, qhandle->cpu_id, qhandle->index, qhandle); 114*01649561SJames Smart *handle = (void *)qhandle; 115*01649561SJames Smart return 0; 116*01649561SJames Smart } 117*01649561SJames Smart 118*01649561SJames Smart /** 119*01649561SJames Smart * lpfc_nvme_delete_queue - 120*01649561SJames Smart * @lpfc_pnvme: Pointer to the driver's nvme instance data 121*01649561SJames Smart * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. 122*01649561SJames Smart * @handle: An opaque driver handle from lpfc_nvme_create_queue 123*01649561SJames Smart * 124*01649561SJames Smart * Driver registers this routine to free 125*01649561SJames Smart * any internal data structures to bind the @qidx to its internal 126*01649561SJames Smart * IO queues. 127*01649561SJames Smart * 128*01649561SJames Smart * Return value : 129*01649561SJames Smart * 0 - Success 130*01649561SJames Smart * TODO: What are the failure codes. 
131*01649561SJames Smart **/ 132*01649561SJames Smart static void 133*01649561SJames Smart lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport, 134*01649561SJames Smart unsigned int qidx, 135*01649561SJames Smart void *handle) 136*01649561SJames Smart { 137*01649561SJames Smart struct lpfc_nvme_lport *lport; 138*01649561SJames Smart struct lpfc_vport *vport; 139*01649561SJames Smart 140*01649561SJames Smart lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 141*01649561SJames Smart vport = lport->vport; 142*01649561SJames Smart 143*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 144*01649561SJames Smart "6001 ENTER. lpfc_pnvme %p, qidx x%xi qhandle %p\n", 145*01649561SJames Smart lport, qidx, handle); 146*01649561SJames Smart kfree(handle); 147*01649561SJames Smart } 148*01649561SJames Smart 149*01649561SJames Smart static void 150*01649561SJames Smart lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) 151*01649561SJames Smart { 152*01649561SJames Smart struct lpfc_nvme_lport *lport = localport->private; 153*01649561SJames Smart 154*01649561SJames Smart /* release any threads waiting for the unreg to complete */ 155*01649561SJames Smart complete(&lport->lport_unreg_done); 156*01649561SJames Smart } 157*01649561SJames Smart 158*01649561SJames Smart /* lpfc_nvme_remoteport_delete 159*01649561SJames Smart * 160*01649561SJames Smart * @remoteport: Pointer to an nvme transport remoteport instance. 161*01649561SJames Smart * 162*01649561SJames Smart * This is a template downcall. NVME transport calls this function 163*01649561SJames Smart * when it has completed the unregistration of a previously 164*01649561SJames Smart * registered remoteport. 
165*01649561SJames Smart * 166*01649561SJames Smart * Return value : 167*01649561SJames Smart * None 168*01649561SJames Smart */ 169*01649561SJames Smart void 170*01649561SJames Smart lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) 171*01649561SJames Smart { 172*01649561SJames Smart struct lpfc_nvme_rport *rport = remoteport->private; 173*01649561SJames Smart struct lpfc_vport *vport; 174*01649561SJames Smart struct lpfc_nodelist *ndlp; 175*01649561SJames Smart 176*01649561SJames Smart ndlp = rport->ndlp; 177*01649561SJames Smart if (!ndlp) 178*01649561SJames Smart goto rport_err; 179*01649561SJames Smart 180*01649561SJames Smart vport = ndlp->vport; 181*01649561SJames Smart if (!vport) 182*01649561SJames Smart goto rport_err; 183*01649561SJames Smart 184*01649561SJames Smart /* Remove this rport from the lport's list - memory is owned by the 185*01649561SJames Smart * transport. Remove the ndlp reference for the NVME transport before 186*01649561SJames Smart * calling state machine to remove the node, this is devloss = 0 187*01649561SJames Smart * semantics. 188*01649561SJames Smart */ 189*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 190*01649561SJames Smart "6146 remoteport delete complete %p\n", 191*01649561SJames Smart remoteport); 192*01649561SJames Smart list_del(&rport->list); 193*01649561SJames Smart lpfc_nlp_put(ndlp); 194*01649561SJames Smart 195*01649561SJames Smart rport_err: 196*01649561SJames Smart /* This call has to execute as long as the rport is valid. 197*01649561SJames Smart * Release any threads waiting for the unreg to complete. 
198*01649561SJames Smart */ 199*01649561SJames Smart complete(&rport->rport_unreg_done); 200*01649561SJames Smart } 201*01649561SJames Smart 202*01649561SJames Smart static void 203*01649561SJames Smart lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, 204*01649561SJames Smart struct lpfc_wcqe_complete *wcqe) 205*01649561SJames Smart { 206*01649561SJames Smart struct lpfc_vport *vport = cmdwqe->vport; 207*01649561SJames Smart uint32_t status; 208*01649561SJames Smart struct nvmefc_ls_req *pnvme_lsreq; 209*01649561SJames Smart struct lpfc_dmabuf *buf_ptr; 210*01649561SJames Smart struct lpfc_nodelist *ndlp; 211*01649561SJames Smart 212*01649561SJames Smart vport->phba->fc4NvmeLsCmpls++; 213*01649561SJames Smart 214*01649561SJames Smart pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2; 215*01649561SJames Smart status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; 216*01649561SJames Smart ndlp = (struct lpfc_nodelist *)cmdwqe->context1; 217*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 218*01649561SJames Smart "6047 nvme cmpl Enter " 219*01649561SJames Smart "Data %p DID %x Xri: %x status %x cmd:%p lsreg:%p " 220*01649561SJames Smart "bmp:%p ndlp:%p\n", 221*01649561SJames Smart pnvme_lsreq, ndlp ? 
ndlp->nlp_DID : 0, 222*01649561SJames Smart cmdwqe->sli4_xritag, status, 223*01649561SJames Smart cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp); 224*01649561SJames Smart 225*01649561SJames Smart if (cmdwqe->context3) { 226*01649561SJames Smart buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3; 227*01649561SJames Smart lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 228*01649561SJames Smart kfree(buf_ptr); 229*01649561SJames Smart cmdwqe->context3 = NULL; 230*01649561SJames Smart } 231*01649561SJames Smart if (pnvme_lsreq->done) 232*01649561SJames Smart pnvme_lsreq->done(pnvme_lsreq, status); 233*01649561SJames Smart else 234*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 235*01649561SJames Smart "6046 nvme cmpl without done call back? " 236*01649561SJames Smart "Data %p DID %x Xri: %x status %x\n", 237*01649561SJames Smart pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, 238*01649561SJames Smart cmdwqe->sli4_xritag, status); 239*01649561SJames Smart if (ndlp) { 240*01649561SJames Smart lpfc_nlp_put(ndlp); 241*01649561SJames Smart cmdwqe->context1 = NULL; 242*01649561SJames Smart } 243*01649561SJames Smart lpfc_sli_release_iocbq(phba, cmdwqe); 244*01649561SJames Smart } 245*01649561SJames Smart 246*01649561SJames Smart static int 247*01649561SJames Smart lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, 248*01649561SJames Smart struct lpfc_dmabuf *inp, 249*01649561SJames Smart struct nvmefc_ls_req *pnvme_lsreq, 250*01649561SJames Smart void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, 251*01649561SJames Smart struct lpfc_wcqe_complete *), 252*01649561SJames Smart struct lpfc_nodelist *ndlp, uint32_t num_entry, 253*01649561SJames Smart uint32_t tmo, uint8_t retry) 254*01649561SJames Smart { 255*01649561SJames Smart struct lpfc_hba *phba = vport->phba; 256*01649561SJames Smart union lpfc_wqe *wqe; 257*01649561SJames Smart struct lpfc_iocbq *genwqe; 258*01649561SJames Smart struct ulp_bde64 *bpl; 259*01649561SJames Smart struct 
ulp_bde64 bde; 260*01649561SJames Smart int i, rc, xmit_len, first_len; 261*01649561SJames Smart 262*01649561SJames Smart /* Allocate buffer for command WQE */ 263*01649561SJames Smart genwqe = lpfc_sli_get_iocbq(phba); 264*01649561SJames Smart if (genwqe == NULL) 265*01649561SJames Smart return 1; 266*01649561SJames Smart 267*01649561SJames Smart wqe = &genwqe->wqe; 268*01649561SJames Smart memset(wqe, 0, sizeof(union lpfc_wqe)); 269*01649561SJames Smart 270*01649561SJames Smart genwqe->context3 = (uint8_t *)bmp; 271*01649561SJames Smart genwqe->iocb_flag |= LPFC_IO_NVME_LS; 272*01649561SJames Smart 273*01649561SJames Smart /* Save for completion so we can release these resources */ 274*01649561SJames Smart genwqe->context1 = lpfc_nlp_get(ndlp); 275*01649561SJames Smart genwqe->context2 = (uint8_t *)pnvme_lsreq; 276*01649561SJames Smart /* Fill in payload, bp points to frame payload */ 277*01649561SJames Smart 278*01649561SJames Smart if (!tmo) 279*01649561SJames Smart /* FC spec states we need 3 * ratov for CT requests */ 280*01649561SJames Smart tmo = (3 * phba->fc_ratov); 281*01649561SJames Smart 282*01649561SJames Smart /* For this command calculate the xmit length of the request bde. 
*/ 283*01649561SJames Smart xmit_len = 0; 284*01649561SJames Smart first_len = 0; 285*01649561SJames Smart bpl = (struct ulp_bde64 *)bmp->virt; 286*01649561SJames Smart for (i = 0; i < num_entry; i++) { 287*01649561SJames Smart bde.tus.w = bpl[i].tus.w; 288*01649561SJames Smart if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 289*01649561SJames Smart break; 290*01649561SJames Smart xmit_len += bde.tus.f.bdeSize; 291*01649561SJames Smart if (i == 0) 292*01649561SJames Smart first_len = xmit_len; 293*01649561SJames Smart } 294*01649561SJames Smart 295*01649561SJames Smart genwqe->rsvd2 = num_entry; 296*01649561SJames Smart genwqe->hba_wqidx = 0; 297*01649561SJames Smart 298*01649561SJames Smart /* Words 0 - 2 */ 299*01649561SJames Smart wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 300*01649561SJames Smart wqe->generic.bde.tus.f.bdeSize = first_len; 301*01649561SJames Smart wqe->generic.bde.addrLow = bpl[0].addrLow; 302*01649561SJames Smart wqe->generic.bde.addrHigh = bpl[0].addrHigh; 303*01649561SJames Smart 304*01649561SJames Smart /* Word 3 */ 305*01649561SJames Smart wqe->gen_req.request_payload_len = first_len; 306*01649561SJames Smart 307*01649561SJames Smart /* Word 4 */ 308*01649561SJames Smart 309*01649561SJames Smart /* Word 5 */ 310*01649561SJames Smart bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0); 311*01649561SJames Smart bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1); 312*01649561SJames Smart bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1); 313*01649561SJames Smart bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL); 314*01649561SJames Smart bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME); 315*01649561SJames Smart 316*01649561SJames Smart /* Word 6 */ 317*01649561SJames Smart bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com, 318*01649561SJames Smart phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 319*01649561SJames Smart bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag); 320*01649561SJames Smart 321*01649561SJames Smart /* Word 7 */ 322*01649561SJames 
Smart bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1)); 323*01649561SJames Smart bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3); 324*01649561SJames Smart bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE); 325*01649561SJames Smart bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI); 326*01649561SJames Smart 327*01649561SJames Smart /* Word 8 */ 328*01649561SJames Smart wqe->gen_req.wqe_com.abort_tag = genwqe->iotag; 329*01649561SJames Smart 330*01649561SJames Smart /* Word 9 */ 331*01649561SJames Smart bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag); 332*01649561SJames Smart 333*01649561SJames Smart /* Word 10 */ 334*01649561SJames Smart bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 335*01649561SJames Smart bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 336*01649561SJames Smart bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 337*01649561SJames Smart bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 338*01649561SJames Smart bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 339*01649561SJames Smart 340*01649561SJames Smart /* Word 11 */ 341*01649561SJames Smart bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 342*01649561SJames Smart bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND); 343*01649561SJames Smart 344*01649561SJames Smart 345*01649561SJames Smart /* Issue GEN REQ WQE for NPORT <did> */ 346*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 347*01649561SJames Smart "6050 Issue GEN REQ WQE to NPORT x%x " 348*01649561SJames Smart "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n", 349*01649561SJames Smart ndlp->nlp_DID, genwqe->iotag, 350*01649561SJames Smart vport->port_state, 351*01649561SJames Smart genwqe, pnvme_lsreq, bmp, xmit_len, first_len); 352*01649561SJames Smart genwqe->wqe_cmpl = cmpl; 353*01649561SJames Smart genwqe->iocb_cmpl = NULL; 354*01649561SJames Smart genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; 355*01649561SJames Smart genwqe->vport = 
vport; 356*01649561SJames Smart genwqe->retry = retry; 357*01649561SJames Smart 358*01649561SJames Smart rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe); 359*01649561SJames Smart if (rc == WQE_ERROR) { 360*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 361*01649561SJames Smart "6045 Issue GEN REQ WQE to NPORT x%x " 362*01649561SJames Smart "Data: x%x x%x\n", 363*01649561SJames Smart ndlp->nlp_DID, genwqe->iotag, 364*01649561SJames Smart vport->port_state); 365*01649561SJames Smart lpfc_sli_release_iocbq(phba, genwqe); 366*01649561SJames Smart return 1; 367*01649561SJames Smart } 368*01649561SJames Smart return 0; 369*01649561SJames Smart } 370*01649561SJames Smart 371*01649561SJames Smart /** 372*01649561SJames Smart * lpfc_nvme_ls_req - Issue an Link Service request 373*01649561SJames Smart * @lpfc_pnvme: Pointer to the driver's nvme instance data 374*01649561SJames Smart * @lpfc_nvme_lport: Pointer to the driver's local port data 375*01649561SJames Smart * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq 376*01649561SJames Smart * 377*01649561SJames Smart * Driver registers this routine to handle any link service request 378*01649561SJames Smart * from the nvme_fc transport to a remote nvme-aware port. 379*01649561SJames Smart * 380*01649561SJames Smart * Return value : 381*01649561SJames Smart * 0 - Success 382*01649561SJames Smart * TODO: What are the failure codes. 
383*01649561SJames Smart **/ 384*01649561SJames Smart static int 385*01649561SJames Smart lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, 386*01649561SJames Smart struct nvme_fc_remote_port *pnvme_rport, 387*01649561SJames Smart struct nvmefc_ls_req *pnvme_lsreq) 388*01649561SJames Smart { 389*01649561SJames Smart int ret = 0; 390*01649561SJames Smart struct lpfc_nvme_lport *lport; 391*01649561SJames Smart struct lpfc_vport *vport; 392*01649561SJames Smart struct lpfc_nodelist *ndlp; 393*01649561SJames Smart struct ulp_bde64 *bpl; 394*01649561SJames Smart struct lpfc_dmabuf *bmp; 395*01649561SJames Smart 396*01649561SJames Smart /* there are two dma buf in the request, actually there is one and 397*01649561SJames Smart * the second one is just the start address + cmd size. 398*01649561SJames Smart * Before calling lpfc_nvme_gen_req these buffers need to be wrapped 399*01649561SJames Smart * in a lpfc_dmabuf struct. When freeing we just free the wrapper 400*01649561SJames Smart * because the nvem layer owns the data bufs. 401*01649561SJames Smart * We do not have to break these packets open, we don't care what is in 402*01649561SJames Smart * them. And we do not have to look at the resonse data, we only care 403*01649561SJames Smart * that we got a response. All of the caring is going to happen in the 404*01649561SJames Smart * nvme-fc layer. 
405*01649561SJames Smart */ 406*01649561SJames Smart 407*01649561SJames Smart lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 408*01649561SJames Smart vport = lport->vport; 409*01649561SJames Smart 410*01649561SJames Smart ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); 411*01649561SJames Smart if (!ndlp) { 412*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 413*01649561SJames Smart "6043 Could not find node for DID %x\n", 414*01649561SJames Smart pnvme_rport->port_id); 415*01649561SJames Smart return 1; 416*01649561SJames Smart } 417*01649561SJames Smart bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 418*01649561SJames Smart if (!bmp) { 419*01649561SJames Smart 420*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 421*01649561SJames Smart "6044 Could not find node for DID %x\n", 422*01649561SJames Smart pnvme_rport->port_id); 423*01649561SJames Smart return 2; 424*01649561SJames Smart } 425*01649561SJames Smart INIT_LIST_HEAD(&bmp->list); 426*01649561SJames Smart bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys)); 427*01649561SJames Smart if (!bmp->virt) { 428*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 429*01649561SJames Smart "6042 Could not find node for DID %x\n", 430*01649561SJames Smart pnvme_rport->port_id); 431*01649561SJames Smart kfree(bmp); 432*01649561SJames Smart return 3; 433*01649561SJames Smart } 434*01649561SJames Smart bpl = (struct ulp_bde64 *)bmp->virt; 435*01649561SJames Smart bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma)); 436*01649561SJames Smart bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma)); 437*01649561SJames Smart bpl->tus.f.bdeFlags = 0; 438*01649561SJames Smart bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen; 439*01649561SJames Smart bpl->tus.w = le32_to_cpu(bpl->tus.w); 440*01649561SJames Smart bpl++; 441*01649561SJames Smart 442*01649561SJames Smart bpl->addrHigh = 
le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma)); 443*01649561SJames Smart bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma)); 444*01649561SJames Smart bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 445*01649561SJames Smart bpl->tus.f.bdeSize = pnvme_lsreq->rsplen; 446*01649561SJames Smart bpl->tus.w = le32_to_cpu(bpl->tus.w); 447*01649561SJames Smart 448*01649561SJames Smart /* Expand print to include key fields. */ 449*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 450*01649561SJames Smart "6051 ENTER. lport %p, rport %p lsreq%p rqstlen:%d " 451*01649561SJames Smart "rsplen:%d %llux %llux\n", 452*01649561SJames Smart pnvme_lport, pnvme_rport, 453*01649561SJames Smart pnvme_lsreq, pnvme_lsreq->rqstlen, 454*01649561SJames Smart pnvme_lsreq->rsplen, pnvme_lsreq->rqstdma, 455*01649561SJames Smart pnvme_lsreq->rspdma); 456*01649561SJames Smart 457*01649561SJames Smart vport->phba->fc4NvmeLsRequests++; 458*01649561SJames Smart 459*01649561SJames Smart /* Hardcode the wait to 30 seconds. Connections are failing otherwise. 460*01649561SJames Smart * This code allows it all to work. 461*01649561SJames Smart */ 462*01649561SJames Smart ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr, 463*01649561SJames Smart pnvme_lsreq, lpfc_nvme_cmpl_gen_req, 464*01649561SJames Smart ndlp, 2, 30, 0); 465*01649561SJames Smart if (ret != WQE_SUCCESS) { 466*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 467*01649561SJames Smart "6052 EXIT. issue ls wqe failed lport %p, " 468*01649561SJames Smart "rport %p lsreq%p Status %x DID %x\n", 469*01649561SJames Smart pnvme_lport, pnvme_rport, pnvme_lsreq, 470*01649561SJames Smart ret, ndlp->nlp_DID); 471*01649561SJames Smart lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys); 472*01649561SJames Smart kfree(bmp); 473*01649561SJames Smart return ret; 474*01649561SJames Smart } 475*01649561SJames Smart 476*01649561SJames Smart /* Stub in routine and return 0 for now. 
	 */
	return ret;
}

/**
 * lpfc_nvme_ls_abort - Abort an outstanding Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport the LS request was sent to
 * @pnvme_lsreq: Pointer to the LS request being aborted
 *
 * Driver registers this routine so the nvme_fc transport can abort a
 * previously issued link service request to a remote nvme-aware port.
 * All WQEs on the NVME LS ring's txcmplq whose node matches the rport's
 * DID are collected and aborted via ABTS.
 *
 * Return value :
 * None
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	phba = vport->phba;

	/* The rport's DID must resolve to a known node; otherwise there is
	 * nothing to match outstanding WQEs against.
	 */
	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6049 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return;
	}

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6040 ENTER.  lport %p, rport %p lsreq %p rqstlen:%d "
			 "rsplen:%d %llux %llux\n",
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, pnvme_lsreq->rqstdma,
			 pnvme_lsreq->rspdma);

	/*
	 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
	 * that need an ABTS.  The IOs need to stay on the txcmplq so that
	 * the abort operation completes them successfully.
	 * Lock order: hbalock (irq) outer, ring_lock inner.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
			list_add_tail(&wqe->dlist, &abort_list);
		}
	}
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list.
	 */
	list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&wqe->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
		spin_unlock_irq(&phba->hbalock);
	}
}

/* Fix up the existing sgls for NVME IO. */
static void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_nvme_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  NVME sends 96 bytes. Also, use the
	 * nvme commands command and response dma addresses
	 * rather than the virtual memory to ease the restore
	 * operation.
	 */
	sgl = lpfc_ncmd->nvme_sgl;
	/* SGE 0: NVME command IU length */
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	/* The RSP SGE is last only when there is no data SGE list after it */
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */

	/* 128 byte wqe support here */
	wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;

	/* Word 0-2 - NVME CMND IU (embedded payload) */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
	wqe->generic.bde.tus.f.bdeSize = 60;
	wqe->generic.bde.addrHigh = 0;
	wqe->generic.bde.addrLow = 64;  /* Word 16 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 10 */
	bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/*
	 * Embed the payload in the last half of the WQE
	 * WQE words 16-30 get the NVME CMD IU payload
	 *
	 * WQE Word 16 is already setup with flags
	 * WQE words 17-19 get payload Words 2-4
	 * WQE words 20-21 get payload Words 6-7
	 * WQE words 22-29 get payload Words 16-23
	 */
	wptr = &wqe->words[17];		/* WQE ptr */
	dptr = (uint32_t *)nCmd->cmdaddr;	/* payload ptr */
	dptr += 2;		/* Skip Words 0-1 in payload */

	/* The Word N comments below refer to payload words */
	*wptr++ = *dptr++;	/* Word 2 */
	*wptr++ = *dptr++;	/* Word 3 */
	*wptr++ = *dptr++;	/* Word 4 */
	dptr++;			/* Skip Word 5 in payload */
	*wptr++ = *dptr++;	/* Word 6 */
	*wptr++ = *dptr++;	/* Word 7 */
	dptr += 8;		/* Skip Words 8-15 in payload */
	*wptr++ = *dptr++;	/* Word 16 */
	*wptr++ = *dptr++;	/* Word 17 */
	*wptr++ = *dptr++;	/* Word 18 */
	*wptr++ = *dptr++;	/* Word 19 */
	*wptr++ = *dptr++;	/* Word 20 */
	*wptr++ = *dptr++;	/* Word 21 */
	*wptr++ = *dptr++;	/* Word 22 */
	*wptr = *dptr;		/* Word 23 */
}

/**
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 * @phba: Pointer to the HBA context
 * @pwqeIn: Pointer to the completed command WQE
 * @wcqe: Pointer to the work completion entry
 *
 * NOTE(review): the original header described this as the "io request
 * handler"; from the code it is the completion routine invoked when an
 * NVME FCP WQE finishes -- confirm against the registration site.
 *
 * Return value :
 * None
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			  struct lpfc_wcqe_complete *wcqe)
{
	/* context1 of the completing WQE carries the driver IO buffer */
	struct lpfc_nvme_buf *lpfc_ncmd =
		(struct lpfc_nvme_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	uint32_t code;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6071 Completion pointers bad on wqe %p.\n",
				 wcqe);
		return;
	}
	phba->fc4NvmeIoCmpls++;

	nCmd = lpfc_ncmd->nvmeCmd;
	rport = lpfc_ncmd->nrport;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6061 rport %p, ndlp %p, DID x%06x ndlp "
				 "not ready.\n",
				 rport, ndlp, rport->remoteport->port_id);

		ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6062 Ignoring NVME cmpl. No ndlp\n");
			/*
			 * NOTE(review): this jumps into the switch default
			 * below before lpfc_ncmd->status/result have been
			 * assigned for this completion, so the values logged
			 * there may be stale from a previous IO — confirm.
			 */
			goto out_err;
		}
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response. This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now lets build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		/*
		 * NOTE(review): neighboring IU fields are converted with
		 * cpu_to_be16/32 but rsn is stored raw from the CQE —
		 * verify intended byte order.
		 */
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		/* CQE result: Word 1 full, then Word 0 bits 15:0 */
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id =  nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
			    LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = wcqe->parameter;

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it. All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;
			/* Sanity check: a full ERSP was actually received */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
				break;
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6081 NVME Completion Protocol Error: "
					 "status x%x result x%x placed x%x\n",
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			break;
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: "
					 "status x%x result x%x placed x%x\n",
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_FC_TRANSPORT_ERROR;
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	/* Only balance the submit-side increment if the node is still live */
	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
		atomic_dec(&ndlp->cmd_pending);

	/* Update stats and complete the IO. There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
	nCmd->done(nCmd);

	/* Clear the rport binding under hbalock before releasing the buf */
	spin_lock_irqsave(&phba->hbalock, flags);
	lpfc_ncmd->nrport = NULL;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}


/**
 * lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 *
 * Driver registers this routine as its io request handler.
This
 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.
 *
 * Return value :
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_nvme_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
	union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&pwqeq->wqe;
	uint32_t req_len;

	/* Refuse to build a WQE against an unusable node */
	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return -EINVAL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	wqe->fcp_iwrite.initial_xfer_len = 0;
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* Word 5 */
			/*
			 * First burst: cap the initial transfer length at
			 * the node's advertised first-burst size.
			 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			}

			/* Word 7 */
			bf_set(wqe_cmnd, &wqe->generic.wqe_com,
			       CMD_FCP_IWRITE64_WQE);
			bf_set(wqe_pu, &wqe->generic.wqe_com,
			       PARM_READ_CHECK);

			/* Word 10 */
			bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
			bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com,
			       LPFC_WQE_IOD_WRITE);
			bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
			       LPFC_WQE_LENLOC_WORD4);
			if (phba->cfg_nvme_oas)
				bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);

			/* Word 11 */
			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
			       NVME_WRITE_CMD);

			/* Word 16 */
			wqe->words[16] = LPFC_NVME_EMBED_WRITE;

			phba->fc4NvmeOutputRequests++;
		} else {
			/* Word 7 */
			bf_set(wqe_cmnd, &wqe->generic.wqe_com,
			       CMD_FCP_IREAD64_WQE);
			bf_set(wqe_pu, &wqe->generic.wqe_com,
			       PARM_READ_CHECK);

			/* Word 10 */
			bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
			bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
			       LPFC_WQE_IOD_READ);
			bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
			       LPFC_WQE_LENLOC_WORD4);
			if (phba->cfg_nvme_oas)
				bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);

			/* Word 11 */
			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
			       NVME_READ_CMD);

			/* Word 16 */
			wqe->words[16] = LPFC_NVME_EMBED_READ;

			phba->fc4NvmeInputRequests++;
		}
	} else {
		/* No data: an NVME command-only (ICMND) WQE */
		/* Word 4 */
		wqe->fcp_icmd.rsrvd4 = 0;

		/* Word 7 */
		bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_FCP_ICMND64_WQE);
		bf_set(wqe_pu, &wqe->generic.wqe_com, 0);

		/* Word 10 */
		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);

		/* Word 16 */
		wqe->words[16] = LPFC_NVME_EMBED_CMD;

		phba->fc4NvmeControlRequests++;
	}
	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 7 */
	/* Preserve Class data in the ndlp. */
	bf_set(wqe_class, &wqe->generic.wqe_com,
	       (pnode->nlp_fcp_info & 0x0f));

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	pwqeq->vport = vport;
	return 0;
}


/**
 * lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 *
 * Driver registers this routine as its io request handler. This
 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.
 *
 * Return value :
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_nvme_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg, i;

	/* NOTE: returns 0 on success, 1 on failure (not a negative errno);
	 * the caller maps a nonzero return onto its own error code.
	 */

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs. The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		/* NOTE(review): first_data_sgl is recorded but never read
		 * in this routine — presumably kept for parity with the
		 * SCSI/FCP path; confirm before removing.
		 */
		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6058 Too many sg segments from "
					"NVME Transport. Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_sg_seg_cnt,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command. Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}
			physaddr = data_sg->dma_address;
			dma_len = data_sg->length;
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			/* word2 is edited via bf_set in CPU order, then
			 * swapped back to little-endian for the hardware.
			 */
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			dma_offset += dma_len;
			data_sg = sg_next(data_sg);
			sgl++;
		}
	} else {
		/* For this clause to be valid, the payload_length
		 * and sg_cnt must zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of WQE here
	 */
	wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
	return 0;
}

/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 *
 * Driver registers this routine as its io request handler. This
 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.
 *
 * Return value :
 *   0 - Success
 *   TODO: What are the failure codes.
1091*01649561SJames Smart **/ 1092*01649561SJames Smart static int 1093*01649561SJames Smart lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, 1094*01649561SJames Smart struct nvme_fc_remote_port *pnvme_rport, 1095*01649561SJames Smart void *hw_queue_handle, 1096*01649561SJames Smart struct nvmefc_fcp_req *pnvme_fcreq) 1097*01649561SJames Smart { 1098*01649561SJames Smart int ret = 0; 1099*01649561SJames Smart struct lpfc_nvme_lport *lport; 1100*01649561SJames Smart struct lpfc_vport *vport; 1101*01649561SJames Smart struct lpfc_hba *phba; 1102*01649561SJames Smart struct lpfc_nodelist *ndlp; 1103*01649561SJames Smart struct lpfc_nvme_buf *lpfc_ncmd; 1104*01649561SJames Smart struct lpfc_nvme_rport *rport; 1105*01649561SJames Smart struct lpfc_nvme_qhandle *lpfc_queue_info; 1106*01649561SJames Smart 1107*01649561SJames Smart lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 1108*01649561SJames Smart vport = lport->vport; 1109*01649561SJames Smart phba = vport->phba; 1110*01649561SJames Smart 1111*01649561SJames Smart rport = (struct lpfc_nvme_rport *)pnvme_rport->private; 1112*01649561SJames Smart lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle; 1113*01649561SJames Smart 1114*01649561SJames Smart /* 1115*01649561SJames Smart * Catch race where our node has transitioned, but the 1116*01649561SJames Smart * transport is still transitioning. 
1117*01649561SJames Smart */ 1118*01649561SJames Smart ndlp = rport->ndlp; 1119*01649561SJames Smart if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1120*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, 1121*01649561SJames Smart "6053 rport %p, ndlp %p, DID x%06x " 1122*01649561SJames Smart "ndlp not ready.\n", 1123*01649561SJames Smart rport, ndlp, pnvme_rport->port_id); 1124*01649561SJames Smart 1125*01649561SJames Smart ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); 1126*01649561SJames Smart if (!ndlp) { 1127*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, 1128*01649561SJames Smart "6066 Missing node for DID %x\n", 1129*01649561SJames Smart pnvme_rport->port_id); 1130*01649561SJames Smart ret = -ENODEV; 1131*01649561SJames Smart goto out_fail; 1132*01649561SJames Smart } 1133*01649561SJames Smart } 1134*01649561SJames Smart 1135*01649561SJames Smart /* The remote node has to be a mapped target or it's an error. */ 1136*01649561SJames Smart if ((ndlp->nlp_type & NLP_NVME_TARGET) && 1137*01649561SJames Smart (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 1138*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, 1139*01649561SJames Smart "6036 rport %p, DID x%06x not ready for " 1140*01649561SJames Smart "IO. State x%x, Type x%x\n", 1141*01649561SJames Smart rport, pnvme_rport->port_id, 1142*01649561SJames Smart ndlp->nlp_state, ndlp->nlp_type); 1143*01649561SJames Smart ret = -ENODEV; 1144*01649561SJames Smart goto out_fail; 1145*01649561SJames Smart 1146*01649561SJames Smart } 1147*01649561SJames Smart 1148*01649561SJames Smart /* The node is shared with FCP IO, make sure the IO pending count does 1149*01649561SJames Smart * not exceed the programmed depth. 
1150*01649561SJames Smart */ 1151*01649561SJames Smart if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { 1152*01649561SJames Smart ret = -EAGAIN; 1153*01649561SJames Smart goto out_fail; 1154*01649561SJames Smart } 1155*01649561SJames Smart 1156*01649561SJames Smart lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp); 1157*01649561SJames Smart if (lpfc_ncmd == NULL) { 1158*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1159*01649561SJames Smart "6065 driver's buffer pool is empty, " 1160*01649561SJames Smart "IO failed\n"); 1161*01649561SJames Smart ret = -ENOMEM; 1162*01649561SJames Smart goto out_fail; 1163*01649561SJames Smart } 1164*01649561SJames Smart 1165*01649561SJames Smart /* 1166*01649561SJames Smart * Store the data needed by the driver to issue, abort, and complete 1167*01649561SJames Smart * an IO. 1168*01649561SJames Smart * Do not let the IO hang out forever. There is no midlayer issuing 1169*01649561SJames Smart * an abort so inform the FW of the maximum IO pending time. 1170*01649561SJames Smart */ 1171*01649561SJames Smart pnvme_fcreq->private = (void *)lpfc_ncmd; 1172*01649561SJames Smart lpfc_ncmd->nvmeCmd = pnvme_fcreq; 1173*01649561SJames Smart lpfc_ncmd->nrport = rport; 1174*01649561SJames Smart lpfc_ncmd->start_time = jiffies; 1175*01649561SJames Smart 1176*01649561SJames Smart lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp); 1177*01649561SJames Smart ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd); 1178*01649561SJames Smart if (ret) { 1179*01649561SJames Smart ret = -ENOMEM; 1180*01649561SJames Smart goto out_free_nvme_buf; 1181*01649561SJames Smart } 1182*01649561SJames Smart 1183*01649561SJames Smart atomic_inc(&ndlp->cmd_pending); 1184*01649561SJames Smart 1185*01649561SJames Smart /* 1186*01649561SJames Smart * Issue the IO on the WQ indicated by index in the hw_queue_handle. 1187*01649561SJames Smart * This identfier was create in our hardware queue create callback 1188*01649561SJames Smart * routine. 
The driver now is dependent on the IO queue steering from 1189*01649561SJames Smart * the transport. We are trusting the upper NVME layers know which 1190*01649561SJames Smart * index to use and that they have affinitized a CPU to this hardware 1191*01649561SJames Smart * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ. 1192*01649561SJames Smart */ 1193*01649561SJames Smart lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index; 1194*01649561SJames Smart 1195*01649561SJames Smart ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq); 1196*01649561SJames Smart if (ret) { 1197*01649561SJames Smart atomic_dec(&ndlp->cmd_pending); 1198*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, 1199*01649561SJames Smart "6113 FCP could not issue WQE err %x " 1200*01649561SJames Smart "sid: x%x did: x%x oxid: x%x\n", 1201*01649561SJames Smart ret, vport->fc_myDID, ndlp->nlp_DID, 1202*01649561SJames Smart lpfc_ncmd->cur_iocbq.sli4_xritag); 1203*01649561SJames Smart ret = -EINVAL; 1204*01649561SJames Smart goto out_free_nvme_buf; 1205*01649561SJames Smart } 1206*01649561SJames Smart 1207*01649561SJames Smart return 0; 1208*01649561SJames Smart 1209*01649561SJames Smart out_free_nvme_buf: 1210*01649561SJames Smart lpfc_release_nvme_buf(phba, lpfc_ncmd); 1211*01649561SJames Smart out_fail: 1212*01649561SJames Smart return ret; 1213*01649561SJames Smart } 1214*01649561SJames Smart 1215*01649561SJames Smart /** 1216*01649561SJames Smart * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request. 1217*01649561SJames Smart * @phba: Pointer to HBA context object 1218*01649561SJames Smart * @cmdiocb: Pointer to command iocb object. 1219*01649561SJames Smart * @rspiocb: Pointer to response iocb object. 1220*01649561SJames Smart * 1221*01649561SJames Smart * This is the callback function for any NVME FCP IO that was aborted. 
 *
 * Return value:
 *   None
 **/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_wcqe_complete *abts_cmpl)
{
	/* Log the abort's context, tags, and hardware status from the CQE */
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	/* Return the abort WQE's iocbq to the free pool */
	lpfc_sli_release_iocbq(phba, cmdiocb);
}

/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 *
 * Driver registers this routine as its nvme request io abort handler. This
 * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.
This routine 1254*01649561SJames Smart * is executed asynchronously - one the target is validated as "MAPPED" and 1255*01649561SJames Smart * ready for IO, the driver issues the abort request and returns. 1256*01649561SJames Smart * 1257*01649561SJames Smart * Return value: 1258*01649561SJames Smart * None 1259*01649561SJames Smart **/ 1260*01649561SJames Smart static void 1261*01649561SJames Smart lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, 1262*01649561SJames Smart struct nvme_fc_remote_port *pnvme_rport, 1263*01649561SJames Smart void *hw_queue_handle, 1264*01649561SJames Smart struct nvmefc_fcp_req *pnvme_fcreq) 1265*01649561SJames Smart { 1266*01649561SJames Smart struct lpfc_nvme_lport *lport; 1267*01649561SJames Smart struct lpfc_vport *vport; 1268*01649561SJames Smart struct lpfc_hba *phba; 1269*01649561SJames Smart struct lpfc_nodelist *ndlp; 1270*01649561SJames Smart struct lpfc_nvme_rport *rport; 1271*01649561SJames Smart struct lpfc_nvme_buf *lpfc_nbuf; 1272*01649561SJames Smart struct lpfc_iocbq *abts_buf; 1273*01649561SJames Smart struct lpfc_iocbq *nvmereq_wqe; 1274*01649561SJames Smart union lpfc_wqe *abts_wqe; 1275*01649561SJames Smart unsigned long flags; 1276*01649561SJames Smart int ret_val; 1277*01649561SJames Smart 1278*01649561SJames Smart lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 1279*01649561SJames Smart rport = (struct lpfc_nvme_rport *)pnvme_rport->private; 1280*01649561SJames Smart vport = lport->vport; 1281*01649561SJames Smart phba = vport->phba; 1282*01649561SJames Smart 1283*01649561SJames Smart /* Announce entry to new IO submit field. 
*/ 1284*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 1285*01649561SJames Smart "6002 Abort Request to rport DID x%06x " 1286*01649561SJames Smart "for nvme_fc_req %p\n", 1287*01649561SJames Smart pnvme_rport->port_id, 1288*01649561SJames Smart pnvme_fcreq); 1289*01649561SJames Smart 1290*01649561SJames Smart /* 1291*01649561SJames Smart * Catch race where our node has transitioned, but the 1292*01649561SJames Smart * transport is still transitioning. 1293*01649561SJames Smart */ 1294*01649561SJames Smart ndlp = rport->ndlp; 1295*01649561SJames Smart if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1296*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS, 1297*01649561SJames Smart "6054 rport %p, ndlp %p, DID x%06x ndlp " 1298*01649561SJames Smart " not ready.\n", 1299*01649561SJames Smart rport, ndlp, pnvme_rport->port_id); 1300*01649561SJames Smart 1301*01649561SJames Smart ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); 1302*01649561SJames Smart if (!ndlp) { 1303*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 1304*01649561SJames Smart "6055 Could not find node for " 1305*01649561SJames Smart "DID %x\n", 1306*01649561SJames Smart pnvme_rport->port_id); 1307*01649561SJames Smart return; 1308*01649561SJames Smart } 1309*01649561SJames Smart } 1310*01649561SJames Smart 1311*01649561SJames Smart /* The remote node has to be ready to send an abort. */ 1312*01649561SJames Smart if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && 1313*01649561SJames Smart !(ndlp->nlp_type & NLP_NVME_TARGET)) { 1314*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS, 1315*01649561SJames Smart "6048 rport %p, DID x%06x not ready for " 1316*01649561SJames Smart "IO. 
State x%x, Type x%x\n", 1317*01649561SJames Smart rport, pnvme_rport->port_id, 1318*01649561SJames Smart ndlp->nlp_state, ndlp->nlp_type); 1319*01649561SJames Smart return; 1320*01649561SJames Smart } 1321*01649561SJames Smart 1322*01649561SJames Smart /* If the hba is getting reset, this flag is set. It is 1323*01649561SJames Smart * cleared when the reset is complete and rings reestablished. 1324*01649561SJames Smart */ 1325*01649561SJames Smart spin_lock_irqsave(&phba->hbalock, flags); 1326*01649561SJames Smart /* driver queued commands are in process of being flushed */ 1327*01649561SJames Smart if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { 1328*01649561SJames Smart spin_unlock_irqrestore(&phba->hbalock, flags); 1329*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 1330*01649561SJames Smart "6139 Driver in reset cleanup - flushing " 1331*01649561SJames Smart "NVME Req now. hba_flag x%x\n", 1332*01649561SJames Smart phba->hba_flag); 1333*01649561SJames Smart return; 1334*01649561SJames Smart } 1335*01649561SJames Smart 1336*01649561SJames Smart lpfc_nbuf = (struct lpfc_nvme_buf *)pnvme_fcreq->private; 1337*01649561SJames Smart if (!lpfc_nbuf) { 1338*01649561SJames Smart spin_unlock_irqrestore(&phba->hbalock, flags); 1339*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 1340*01649561SJames Smart "6140 NVME IO req has no matching lpfc nvme " 1341*01649561SJames Smart "io buffer. Skipping abort req.\n"); 1342*01649561SJames Smart return; 1343*01649561SJames Smart } else if (!lpfc_nbuf->nvmeCmd) { 1344*01649561SJames Smart spin_unlock_irqrestore(&phba->hbalock, flags); 1345*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 1346*01649561SJames Smart "6141 lpfc NVME IO req has no nvme_fcreq " 1347*01649561SJames Smart "io buffer. 
Skipping abort req.\n"); 1348*01649561SJames Smart return; 1349*01649561SJames Smart } 1350*01649561SJames Smart 1351*01649561SJames Smart /* 1352*01649561SJames Smart * The lpfc_nbuf and the mapped nvme_fcreq in the driver's 1353*01649561SJames Smart * state must match the nvme_fcreq passed by the nvme 1354*01649561SJames Smart * transport. If they don't match, it is likely the driver 1355*01649561SJames Smart * has already completed the NVME IO and the nvme transport 1356*01649561SJames Smart * has not seen it yet. 1357*01649561SJames Smart */ 1358*01649561SJames Smart if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { 1359*01649561SJames Smart spin_unlock_irqrestore(&phba->hbalock, flags); 1360*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 1361*01649561SJames Smart "6143 NVME req mismatch: " 1362*01649561SJames Smart "lpfc_nbuf %p nvmeCmd %p, " 1363*01649561SJames Smart "pnvme_fcreq %p. Skipping Abort\n", 1364*01649561SJames Smart lpfc_nbuf, lpfc_nbuf->nvmeCmd, 1365*01649561SJames Smart pnvme_fcreq); 1366*01649561SJames Smart return; 1367*01649561SJames Smart } 1368*01649561SJames Smart 1369*01649561SJames Smart /* Don't abort IOs no longer on the pending queue. 
*/ 1370*01649561SJames Smart nvmereq_wqe = &lpfc_nbuf->cur_iocbq; 1371*01649561SJames Smart if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 1372*01649561SJames Smart spin_unlock_irqrestore(&phba->hbalock, flags); 1373*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 1374*01649561SJames Smart "6142 NVME IO req %p not queued - skipping " 1375*01649561SJames Smart "abort req\n", 1376*01649561SJames Smart pnvme_fcreq); 1377*01649561SJames Smart return; 1378*01649561SJames Smart } 1379*01649561SJames Smart 1380*01649561SJames Smart /* Outstanding abort is in progress */ 1381*01649561SJames Smart if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) { 1382*01649561SJames Smart spin_unlock_irqrestore(&phba->hbalock, flags); 1383*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 1384*01649561SJames Smart "6144 Outstanding NVME I/O Abort Request " 1385*01649561SJames Smart "still pending on nvme_fcreq %p, " 1386*01649561SJames Smart "lpfc_ncmd %p\n", 1387*01649561SJames Smart pnvme_fcreq, lpfc_nbuf); 1388*01649561SJames Smart return; 1389*01649561SJames Smart } 1390*01649561SJames Smart 1391*01649561SJames Smart abts_buf = __lpfc_sli_get_iocbq(phba); 1392*01649561SJames Smart if (!abts_buf) { 1393*01649561SJames Smart spin_unlock_irqrestore(&phba->hbalock, flags); 1394*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 1395*01649561SJames Smart "6136 No available abort wqes. Skipping " 1396*01649561SJames Smart "Abts req for nvme_fcreq %p.\n", 1397*01649561SJames Smart pnvme_fcreq); 1398*01649561SJames Smart return; 1399*01649561SJames Smart } 1400*01649561SJames Smart 1401*01649561SJames Smart /* Ready - mark outstanding as aborted by driver. */ 1402*01649561SJames Smart nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED; 1403*01649561SJames Smart 1404*01649561SJames Smart /* Complete prepping the abort wqe and issue to the FW. 
*/ 1405*01649561SJames Smart abts_wqe = &abts_buf->wqe; 1406*01649561SJames Smart 1407*01649561SJames Smart /* WQEs are reused. Clear stale data and set key fields to 1408*01649561SJames Smart * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. 1409*01649561SJames Smart */ 1410*01649561SJames Smart memset(abts_wqe, 0, sizeof(union lpfc_wqe)); 1411*01649561SJames Smart bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); 1412*01649561SJames Smart 1413*01649561SJames Smart /* word 7 */ 1414*01649561SJames Smart bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); 1415*01649561SJames Smart bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 1416*01649561SJames Smart bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, 1417*01649561SJames Smart nvmereq_wqe->iocb.ulpClass); 1418*01649561SJames Smart 1419*01649561SJames Smart /* word 8 - tell the FW to abort the IO associated with this 1420*01649561SJames Smart * outstanding exchange ID. 1421*01649561SJames Smart */ 1422*01649561SJames Smart abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag; 1423*01649561SJames Smart 1424*01649561SJames Smart /* word 9 - this is the iotag for the abts_wqe completion. 
*/ 1425*01649561SJames Smart bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, 1426*01649561SJames Smart abts_buf->iotag); 1427*01649561SJames Smart 1428*01649561SJames Smart /* word 10 */ 1429*01649561SJames Smart bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, nvmereq_wqe->hba_wqidx); 1430*01649561SJames Smart bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); 1431*01649561SJames Smart bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); 1432*01649561SJames Smart 1433*01649561SJames Smart /* word 11 */ 1434*01649561SJames Smart bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); 1435*01649561SJames Smart bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); 1436*01649561SJames Smart bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 1437*01649561SJames Smart 1438*01649561SJames Smart /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 1439*01649561SJames Smart abts_buf->iocb_flag |= LPFC_IO_NVME; 1440*01649561SJames Smart abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx; 1441*01649561SJames Smart abts_buf->vport = vport; 1442*01649561SJames Smart abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; 1443*01649561SJames Smart ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf); 1444*01649561SJames Smart spin_unlock_irqrestore(&phba->hbalock, flags); 1445*01649561SJames Smart if (ret_val == IOCB_ERROR) { 1446*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 1447*01649561SJames Smart "6137 Failed abts issue_wqe with status x%x " 1448*01649561SJames Smart "for nvme_fcreq %p.\n", 1449*01649561SJames Smart ret_val, pnvme_fcreq); 1450*01649561SJames Smart lpfc_sli_release_iocbq(phba, abts_buf); 1451*01649561SJames Smart return; 1452*01649561SJames Smart } 1453*01649561SJames Smart 1454*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 1455*01649561SJames Smart "6138 Transport Abort NVME Request Issued for\n" 1456*01649561SJames Smart "ox_id x%x on reqtag x%x\n", 
1457*01649561SJames Smart nvmereq_wqe->sli4_xritag, 1458*01649561SJames Smart abts_buf->iotag); 1459*01649561SJames Smart } 1460*01649561SJames Smart 1461*01649561SJames Smart /* Declare and initialization an instance of the FC NVME template. */ 1462*01649561SJames Smart static struct nvme_fc_port_template lpfc_nvme_template = { 1463*01649561SJames Smart /* initiator-based functions */ 1464*01649561SJames Smart .localport_delete = lpfc_nvme_localport_delete, 1465*01649561SJames Smart .remoteport_delete = lpfc_nvme_remoteport_delete, 1466*01649561SJames Smart .create_queue = lpfc_nvme_create_queue, 1467*01649561SJames Smart .delete_queue = lpfc_nvme_delete_queue, 1468*01649561SJames Smart .ls_req = lpfc_nvme_ls_req, 1469*01649561SJames Smart .fcp_io = lpfc_nvme_fcp_io_submit, 1470*01649561SJames Smart .ls_abort = lpfc_nvme_ls_abort, 1471*01649561SJames Smart .fcp_abort = lpfc_nvme_fcp_abort, 1472*01649561SJames Smart 1473*01649561SJames Smart .max_hw_queues = 1, 1474*01649561SJames Smart .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS, 1475*01649561SJames Smart .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS, 1476*01649561SJames Smart .dma_boundary = 0xFFFFFFFF, 1477*01649561SJames Smart 1478*01649561SJames Smart /* Sizes of additional private data for data structures. 1479*01649561SJames Smart * No use for the last two sizes at this time. 1480*01649561SJames Smart */ 1481*01649561SJames Smart .local_priv_sz = sizeof(struct lpfc_nvme_lport), 1482*01649561SJames Smart .remote_priv_sz = sizeof(struct lpfc_nvme_rport), 1483*01649561SJames Smart .lsrqst_priv_sz = 0, 1484*01649561SJames Smart .fcprqst_priv_sz = 0, 1485*01649561SJames Smart }; 1486*01649561SJames Smart 1487*01649561SJames Smart /** 1488*01649561SJames Smart * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware 1489*01649561SJames Smart * @phba: pointer to lpfc hba data structure. 1490*01649561SJames Smart * @nblist: pointer to nvme buffer list. 
1491*01649561SJames Smart * @count: number of scsi buffers on the list. 1492*01649561SJames Smart * 1493*01649561SJames Smart * This routine is invoked to post a block of @count scsi sgl pages from a 1494*01649561SJames Smart * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. 1495*01649561SJames Smart * No Lock is held. 1496*01649561SJames Smart * 1497*01649561SJames Smart **/ 1498*01649561SJames Smart static int 1499*01649561SJames Smart lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba, 1500*01649561SJames Smart struct list_head *nblist, 1501*01649561SJames Smart int count) 1502*01649561SJames Smart { 1503*01649561SJames Smart struct lpfc_nvme_buf *lpfc_ncmd; 1504*01649561SJames Smart struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 1505*01649561SJames Smart struct sgl_page_pairs *sgl_pg_pairs; 1506*01649561SJames Smart void *viraddr; 1507*01649561SJames Smart LPFC_MBOXQ_t *mbox; 1508*01649561SJames Smart uint32_t reqlen, alloclen, pg_pairs; 1509*01649561SJames Smart uint32_t mbox_tmo; 1510*01649561SJames Smart uint16_t xritag_start = 0; 1511*01649561SJames Smart int rc = 0; 1512*01649561SJames Smart uint32_t shdr_status, shdr_add_status; 1513*01649561SJames Smart dma_addr_t pdma_phys_bpl1; 1514*01649561SJames Smart union lpfc_sli4_cfg_shdr *shdr; 1515*01649561SJames Smart 1516*01649561SJames Smart /* Calculate the requested length of the dma memory */ 1517*01649561SJames Smart reqlen = count * sizeof(struct sgl_page_pairs) + 1518*01649561SJames Smart sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 1519*01649561SJames Smart if (reqlen > SLI4_PAGE_SIZE) { 1520*01649561SJames Smart lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1521*01649561SJames Smart "6118 Block sgl registration required DMA " 1522*01649561SJames Smart "size (%d) great than a page\n", reqlen); 1523*01649561SJames Smart return -ENOMEM; 1524*01649561SJames Smart } 1525*01649561SJames Smart mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1526*01649561SJames Smart if 
(!mbox) { 1527*01649561SJames Smart lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1528*01649561SJames Smart "6119 Failed to allocate mbox cmd memory\n"); 1529*01649561SJames Smart return -ENOMEM; 1530*01649561SJames Smart } 1531*01649561SJames Smart 1532*01649561SJames Smart /* Allocate DMA memory and set up the non-embedded mailbox command */ 1533*01649561SJames Smart alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 1534*01649561SJames Smart LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 1535*01649561SJames Smart LPFC_SLI4_MBX_NEMBED); 1536*01649561SJames Smart 1537*01649561SJames Smart if (alloclen < reqlen) { 1538*01649561SJames Smart lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1539*01649561SJames Smart "6120 Allocated DMA memory size (%d) is " 1540*01649561SJames Smart "less than the requested DMA memory " 1541*01649561SJames Smart "size (%d)\n", alloclen, reqlen); 1542*01649561SJames Smart lpfc_sli4_mbox_cmd_free(phba, mbox); 1543*01649561SJames Smart return -ENOMEM; 1544*01649561SJames Smart } 1545*01649561SJames Smart 1546*01649561SJames Smart /* Get the first SGE entry from the non-embedded DMA memory */ 1547*01649561SJames Smart viraddr = mbox->sge_array->addr[0]; 1548*01649561SJames Smart 1549*01649561SJames Smart /* Set up the SGL pages in the non-embedded DMA pages */ 1550*01649561SJames Smart sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 1551*01649561SJames Smart sgl_pg_pairs = &sgl->sgl_pg_pairs; 1552*01649561SJames Smart 1553*01649561SJames Smart pg_pairs = 0; 1554*01649561SJames Smart list_for_each_entry(lpfc_ncmd, nblist, list) { 1555*01649561SJames Smart /* Set up the sge entry */ 1556*01649561SJames Smart sgl_pg_pairs->sgl_pg0_addr_lo = 1557*01649561SJames Smart cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); 1558*01649561SJames Smart sgl_pg_pairs->sgl_pg0_addr_hi = 1559*01649561SJames Smart cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); 1560*01649561SJames Smart if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 
1561*01649561SJames Smart pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + 1562*01649561SJames Smart SGL_PAGE_SIZE; 1563*01649561SJames Smart else 1564*01649561SJames Smart pdma_phys_bpl1 = 0; 1565*01649561SJames Smart sgl_pg_pairs->sgl_pg1_addr_lo = 1566*01649561SJames Smart cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 1567*01649561SJames Smart sgl_pg_pairs->sgl_pg1_addr_hi = 1568*01649561SJames Smart cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 1569*01649561SJames Smart /* Keep the first xritag on the list */ 1570*01649561SJames Smart if (pg_pairs == 0) 1571*01649561SJames Smart xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; 1572*01649561SJames Smart sgl_pg_pairs++; 1573*01649561SJames Smart pg_pairs++; 1574*01649561SJames Smart } 1575*01649561SJames Smart bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 1576*01649561SJames Smart bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 1577*01649561SJames Smart /* Perform endian conversion if necessary */ 1578*01649561SJames Smart sgl->word0 = cpu_to_le32(sgl->word0); 1579*01649561SJames Smart 1580*01649561SJames Smart if (!phba->sli4_hba.intr_enable) 1581*01649561SJames Smart rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 1582*01649561SJames Smart else { 1583*01649561SJames Smart mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 1584*01649561SJames Smart rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 1585*01649561SJames Smart } 1586*01649561SJames Smart shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; 1587*01649561SJames Smart shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 1588*01649561SJames Smart shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 1589*01649561SJames Smart if (rc != MBX_TIMEOUT) 1590*01649561SJames Smart lpfc_sli4_mbox_cmd_free(phba, mbox); 1591*01649561SJames Smart if (shdr_status || shdr_add_status || rc) { 1592*01649561SJames Smart lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1593*01649561SJames Smart "6125 POST_SGL_BLOCK mailbox command failed " 1594*01649561SJames Smart 
"status x%x add_status x%x mbx status x%x\n", 1595*01649561SJames Smart shdr_status, shdr_add_status, rc); 1596*01649561SJames Smart rc = -ENXIO; 1597*01649561SJames Smart } 1598*01649561SJames Smart return rc; 1599*01649561SJames Smart } 1600*01649561SJames Smart 1601*01649561SJames Smart /** 1602*01649561SJames Smart * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list 1603*01649561SJames Smart * @phba: pointer to lpfc hba data structure. 1604*01649561SJames Smart * @post_nblist: pointer to the nvme buffer list. 1605*01649561SJames Smart * 1606*01649561SJames Smart * This routine walks a list of nvme buffers that was passed in. It attempts 1607*01649561SJames Smart * to construct blocks of nvme buffer sgls which contains contiguous xris and 1608*01649561SJames Smart * uses the non-embedded SGL block post mailbox commands to post to the port. 1609*01649561SJames Smart * For single NVME buffer sgl with non-contiguous xri, if any, it shall use 1610*01649561SJames Smart * embedded SGL post mailbox command for posting. The @post_nblist passed in 1611*01649561SJames Smart * must be local list, thus no lock is needed when manipulate the list. 1612*01649561SJames Smart * 1613*01649561SJames Smart * Returns: 0 = failure, non-zero number of successfully posted buffers. 
 **/
static int
lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
			struct list_head *post_nblist, int sb_count)
{
	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_sgl1;
	int last_xritag = NO_XRI;
	int cur_xritag;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(blck_nblist);
	LIST_HEAD(nvme_nblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size;

	/* Accumulate buffers with contiguous xris into prep_nblist; each time
	 * a gap in the xri sequence is seen (or a full non-embedded mailbox
	 * worth is collected) the accumulated run is moved to blck_nblist and
	 * block-posted below.
	 */
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
		list_del_init(&lpfc_ncmd->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_nblist, &blck_nblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for NVME buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_sgl1 =
						lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
				else
					pdma_phys_sgl1 = 0;
				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
				/* Single buffer: use the embedded post. */
				status = lpfc_sli4_post_sgl(phba,
						lpfc_ncmd->dma_phys_sgl,
						pdma_phys_sgl1, cur_xritag);
				if (status) {
					/* failure, put on abort nvme list */
					lpfc_ncmd->exch_busy = 1;
				} else {
					/* success, put on NVME buffer list */
					lpfc_ncmd->exch_busy = 0;
					lpfc_ncmd->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on NVME buffer sgl list */
				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of NVME buffer list sgls */
		status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
						       post_cnt);

		/* don't reset xirtag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset NVME buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted NVME buffer-sgl posted on NVME buffer sgl list */
		while (!list_empty(&blck_nblist)) {
			list_remove_head(&blck_nblist, lpfc_ncmd,
					 struct lpfc_nvme_buf, list);
			if (status) {
				/* failure, put on abort nvme list */
				lpfc_ncmd->exch_busy = 1;
			} else {
				/* success, put on NVME buffer list */
				lpfc_ncmd->exch_busy = 0;
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
		}
	}
	/* Push NVME buffers with sgl posted to the available list */
	while (!list_empty(&nvme_nblist)) {
		list_remove_head(&nvme_nblist, lpfc_ncmd,
				 struct lpfc_nvme_buf, list);
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
	}
	return num_posted;
}

/**
 * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of nvme buffers that have been allocated and
 * repost them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
 * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_nblist);
	int num_posted, rc = 0;

	/* get all NVME buffers need to repost to a local list */
	spin_lock_irq(&phba->nvme_buf_list_get_lock);
	spin_lock(&phba->nvme_buf_list_put_lock);
	list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
	/* NOTE(review): the put list is spliced with list_splice(), which
	 * does not reinitialize the source head - presumably the caller
	 * context guarantees it is rebuilt before reuse; confirm this is
	 * not meant to be list_splice_init() like the get list above.
	 */
	list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
	spin_unlock(&phba->nvme_buf_list_put_lock);
	spin_unlock_irq(&phba->nvme_buf_list_get_lock);

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist)) {
		num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
						phba->sli4_hba.nvme_xri_cnt);
		/* failed to post any nvme buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_new_nvme_buf - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_allocate: The requested number of buffers to allocate.
 *
 * This routine allocates nvme buffers for device with SLI-4 interface spec,
 * the nvme buffer contains all the necessary information needed to initiate
 * a NVME I/O. After allocating up to @num_to_allocate NVME buffers and put
 * them on a list, it post them to the port by using SGL block post.
 *
 * Return codes:
 *   int - number of nvme buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvme_buf *lpfc_ncmd;
	struct lpfc_iocbq *pwqeq;
	union lpfc_wqe128 *wqe;
	struct sli4_sge *sgl;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted, sgl_size;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(post_nblist);
	LIST_HEAD(nvme_nblist);

	sgl_size = phba->cfg_sg_dma_buf_size;

	/* Allocate and initialize up to num_to_alloc buffers; any allocation
	 * failure (tracking struct, DMA buffer, xri, or iotag) ends the loop
	 * early after undoing the partial allocation for that buffer.
	 */
	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
		if (!lpfc_ncmd)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes the
		 * number of SGE's necessary to support the sg_tablesize.
		 */
		lpfc_ncmd->data = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool,
						 GFP_KERNEL,
						 &lpfc_ncmd->dma_handle);
		if (!lpfc_ncmd->data) {
			kfree(lpfc_ncmd);
			break;
		}
		memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			break;
		}
		pwqeq = &(lpfc_ncmd->cur_iocbq);
		wqe = (union lpfc_wqe128 *)&pwqeq->wqe;

		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, pwqeq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6121 Failed to allocated IOTAG for"
					" XRI:0x%x\n", lxri);
			/* Return the xri claimed above so it is not leaked. */
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		pwqeq->sli4_lxritag = lxri;
		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		pwqeq->iocb_flag |= LPFC_IO_NVME;
		pwqeq->context1 = lpfc_ncmd;
		pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;

		/* Initialize local short-hand pointers. */
		lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
		sgl = lpfc_ncmd->nvme_sgl;
		pdma_phys_sgl = lpfc_ncmd->dma_handle;
		lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;

		/* Rsp SGE will be filled in when we rcv an IO
		 * from the NVME Layer to be sent.
		 * The cmd is going to be embedded so we need a SKIP SGE.
		 */
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		/* Fill in word 3 / sgl_len during cmd submission */

		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;

		/* Word 7 */
		bf_set(wqe_erp, &wqe->generic.wqe_com, 0);
		/* NVME upper layers will time things out, if needed */
		bf_set(wqe_tmo, &wqe->generic.wqe_com, 0);

		/* Word 10 */
		bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);

		/* add the nvme buffer to a post list */
		list_add_tail(&lpfc_ncmd->list, &post_nblist);
		spin_lock_irq(&phba->nvme_buf_list_get_lock);
		phba->sli4_hba.nvme_xri_cnt++;
		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6114 Allocate %d out of %d requested new NVME "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist))
		num_posted = lpfc_post_nvme_sgl_list(phba,
						     &post_nblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}
1891*01649561SJames Smart /** 1892*01649561SJames Smart * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA 1893*01649561SJames Smart * @phba: The HBA for which this call is being executed. 1894*01649561SJames Smart * 1895*01649561SJames Smart * This routine removes a nvme buffer from head of @phba lpfc_nvme_buf_list list 1896*01649561SJames Smart * and returns to caller. 1897*01649561SJames Smart * 1898*01649561SJames Smart * Return codes: 1899*01649561SJames Smart * NULL - Error 1900*01649561SJames Smart * Pointer to lpfc_nvme_buf - Success 1901*01649561SJames Smart **/ 1902*01649561SJames Smart static struct lpfc_nvme_buf * 1903*01649561SJames Smart lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1904*01649561SJames Smart { 1905*01649561SJames Smart struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; 1906*01649561SJames Smart unsigned long iflag = 0; 1907*01649561SJames Smart int found = 0; 1908*01649561SJames Smart 1909*01649561SJames Smart spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag); 1910*01649561SJames Smart list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 1911*01649561SJames Smart &phba->lpfc_nvme_buf_list_get, list) { 1912*01649561SJames Smart if (lpfc_test_rrq_active(phba, ndlp, 1913*01649561SJames Smart lpfc_ncmd->cur_iocbq.sli4_lxritag)) 1914*01649561SJames Smart continue; 1915*01649561SJames Smart list_del(&lpfc_ncmd->list); 1916*01649561SJames Smart found = 1; 1917*01649561SJames Smart break; 1918*01649561SJames Smart } 1919*01649561SJames Smart if (!found) { 1920*01649561SJames Smart spin_lock(&phba->nvme_buf_list_put_lock); 1921*01649561SJames Smart list_splice(&phba->lpfc_nvme_buf_list_put, 1922*01649561SJames Smart &phba->lpfc_nvme_buf_list_get); 1923*01649561SJames Smart INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); 1924*01649561SJames Smart spin_unlock(&phba->nvme_buf_list_put_lock); 1925*01649561SJames Smart list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 1926*01649561SJames Smart 
&phba->lpfc_nvme_buf_list_get, list) { 1927*01649561SJames Smart if (lpfc_test_rrq_active( 1928*01649561SJames Smart phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag)) 1929*01649561SJames Smart continue; 1930*01649561SJames Smart list_del(&lpfc_ncmd->list); 1931*01649561SJames Smart found = 1; 1932*01649561SJames Smart break; 1933*01649561SJames Smart } 1934*01649561SJames Smart } 1935*01649561SJames Smart spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag); 1936*01649561SJames Smart if (!found) 1937*01649561SJames Smart return NULL; 1938*01649561SJames Smart return lpfc_ncmd; 1939*01649561SJames Smart } 1940*01649561SJames Smart 1941*01649561SJames Smart /** 1942*01649561SJames Smart * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list. 1943*01649561SJames Smart * @phba: The Hba for which this call is being executed. 1944*01649561SJames Smart * @lpfc_ncmd: The nvme buffer which is being released. 1945*01649561SJames Smart * 1946*01649561SJames Smart * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba 1947*01649561SJames Smart * lpfc_nvme_buf_list list. For SLI4 XRI's are tied to the nvme buffer 1948*01649561SJames Smart * and cannot be reused for at least RA_TOV amount of time if it was 1949*01649561SJames Smart * aborted. 
1950*01649561SJames Smart **/ 1951*01649561SJames Smart static void 1952*01649561SJames Smart lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd) 1953*01649561SJames Smart { 1954*01649561SJames Smart unsigned long iflag = 0; 1955*01649561SJames Smart 1956*01649561SJames Smart lpfc_ncmd->nonsg_phys = 0; 1957*01649561SJames Smart if (lpfc_ncmd->exch_busy) { 1958*01649561SJames Smart spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, 1959*01649561SJames Smart iflag); 1960*01649561SJames Smart lpfc_ncmd->nvmeCmd = NULL; 1961*01649561SJames Smart list_add_tail(&lpfc_ncmd->list, 1962*01649561SJames Smart &phba->sli4_hba.lpfc_abts_nvme_buf_list); 1963*01649561SJames Smart spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, 1964*01649561SJames Smart iflag); 1965*01649561SJames Smart } else { 1966*01649561SJames Smart lpfc_ncmd->nvmeCmd = NULL; 1967*01649561SJames Smart lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME; 1968*01649561SJames Smart spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); 1969*01649561SJames Smart list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put); 1970*01649561SJames Smart spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); 1971*01649561SJames Smart } 1972*01649561SJames Smart } 1973*01649561SJames Smart 1974*01649561SJames Smart /** 1975*01649561SJames Smart * lpfc_nvme_create_localport - Create/Bind an nvme localport instance. 1976*01649561SJames Smart * @pvport - the lpfc_vport instance requesting a localport. 1977*01649561SJames Smart * 1978*01649561SJames Smart * This routine is invoked to create an nvme localport instance to bind 1979*01649561SJames Smart * to the nvme_fc_transport. It is called once during driver load 1980*01649561SJames Smart * like lpfc_create_shost after all other services are initialized. 1981*01649561SJames Smart * It requires a vport, vpi, and wwns at call time. 
Other localport 1982*01649561SJames Smart * parameters are modified as the driver's FCID and the Fabric WWN 1983*01649561SJames Smart * are established. 1984*01649561SJames Smart * 1985*01649561SJames Smart * Return codes 1986*01649561SJames Smart * 0 - successful 1987*01649561SJames Smart * -ENOMEM - no heap memory available 1988*01649561SJames Smart * other values - from nvme registration upcall 1989*01649561SJames Smart **/ 1990*01649561SJames Smart int 1991*01649561SJames Smart lpfc_nvme_create_localport(struct lpfc_vport *vport) 1992*01649561SJames Smart { 1993*01649561SJames Smart struct lpfc_hba *phba = vport->phba; 1994*01649561SJames Smart struct nvme_fc_port_info nfcp_info; 1995*01649561SJames Smart struct nvme_fc_local_port *localport; 1996*01649561SJames Smart struct lpfc_nvme_lport *lport; 1997*01649561SJames Smart int len, ret = 0; 1998*01649561SJames Smart 1999*01649561SJames Smart /* Initialize this localport instance. The vport wwn usage ensures 2000*01649561SJames Smart * that NPIV is accounted for. 2001*01649561SJames Smart */ 2002*01649561SJames Smart memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info)); 2003*01649561SJames Smart nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR; 2004*01649561SJames Smart nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); 2005*01649561SJames Smart nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn); 2006*01649561SJames Smart 2007*01649561SJames Smart /* For now need + 1 to get around NVME transport logic */ 2008*01649561SJames Smart lpfc_nvme_template.max_sgl_segments = phba->cfg_sg_seg_cnt + 1; 2009*01649561SJames Smart lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel; 2010*01649561SJames Smart 2011*01649561SJames Smart /* localport is allocated from the stack, but the registration 2012*01649561SJames Smart * call allocates heap memory as well as the private area. 
2013*01649561SJames Smart */ 2014*01649561SJames Smart ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, 2015*01649561SJames Smart &vport->phba->pcidev->dev, &localport); 2016*01649561SJames Smart if (!ret) { 2017*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, 2018*01649561SJames Smart "6005 Successfully registered local " 2019*01649561SJames Smart "NVME port num %d, localP %p, private %p, " 2020*01649561SJames Smart "sg_seg %d\n", 2021*01649561SJames Smart localport->port_num, localport, 2022*01649561SJames Smart localport->private, 2023*01649561SJames Smart lpfc_nvme_template.max_sgl_segments); 2024*01649561SJames Smart 2025*01649561SJames Smart /* Private is our lport size declared in the template. */ 2026*01649561SJames Smart lport = (struct lpfc_nvme_lport *)localport->private; 2027*01649561SJames Smart vport->localport = localport; 2028*01649561SJames Smart lport->vport = vport; 2029*01649561SJames Smart INIT_LIST_HEAD(&lport->rport_list); 2030*01649561SJames Smart vport->nvmei_support = 1; 2031*01649561SJames Smart } 2032*01649561SJames Smart 2033*01649561SJames Smart len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max); 2034*01649561SJames Smart vport->phba->total_nvme_bufs += len; 2035*01649561SJames Smart return ret; 2036*01649561SJames Smart } 2037*01649561SJames Smart 2038*01649561SJames Smart /** 2039*01649561SJames Smart * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport. 2040*01649561SJames Smart * @pnvme: pointer to lpfc nvme data structure. 2041*01649561SJames Smart * 2042*01649561SJames Smart * This routine is invoked to destroy all lports bound to the phba. 2043*01649561SJames Smart * The lport memory was allocated by the nvme fc transport and is 2044*01649561SJames Smart * released there. This routine ensures all rports bound to the 2045*01649561SJames Smart * lport have been disconnected. 
2046*01649561SJames Smart * 2047*01649561SJames Smart **/ 2048*01649561SJames Smart void 2049*01649561SJames Smart lpfc_nvme_destroy_localport(struct lpfc_vport *vport) 2050*01649561SJames Smart { 2051*01649561SJames Smart struct nvme_fc_local_port *localport; 2052*01649561SJames Smart struct lpfc_nvme_lport *lport; 2053*01649561SJames Smart struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL; 2054*01649561SJames Smart int ret; 2055*01649561SJames Smart 2056*01649561SJames Smart if (vport->nvmei_support == 0) 2057*01649561SJames Smart return; 2058*01649561SJames Smart 2059*01649561SJames Smart localport = vport->localport; 2060*01649561SJames Smart vport->localport = NULL; 2061*01649561SJames Smart lport = (struct lpfc_nvme_lport *)localport->private; 2062*01649561SJames Smart 2063*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 2064*01649561SJames Smart "6011 Destroying NVME localport %p\n", 2065*01649561SJames Smart localport); 2066*01649561SJames Smart 2067*01649561SJames Smart list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) { 2068*01649561SJames Smart /* The last node ref has to get released now before the rport 2069*01649561SJames Smart * private memory area is released by the transport. 2070*01649561SJames Smart */ 2071*01649561SJames Smart list_del(&rport->list); 2072*01649561SJames Smart 2073*01649561SJames Smart init_completion(&rport->rport_unreg_done); 2074*01649561SJames Smart ret = nvme_fc_unregister_remoteport(rport->remoteport); 2075*01649561SJames Smart if (ret) 2076*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 2077*01649561SJames Smart "6008 rport fail destroy %x\n", ret); 2078*01649561SJames Smart wait_for_completion_timeout(&rport->rport_unreg_done, 5); 2079*01649561SJames Smart } 2080*01649561SJames Smart /* lport's rport list is clear. Unregister 2081*01649561SJames Smart * lport and release resources. 
2082*01649561SJames Smart */ 2083*01649561SJames Smart init_completion(&lport->lport_unreg_done); 2084*01649561SJames Smart ret = nvme_fc_unregister_localport(localport); 2085*01649561SJames Smart wait_for_completion_timeout(&lport->lport_unreg_done, 5); 2086*01649561SJames Smart 2087*01649561SJames Smart /* Regardless of the unregister upcall response, clear 2088*01649561SJames Smart * nvmei_support. All rports are unregistered and the 2089*01649561SJames Smart * driver will clean up. 2090*01649561SJames Smart */ 2091*01649561SJames Smart vport->nvmei_support = 0; 2092*01649561SJames Smart if (ret == 0) { 2093*01649561SJames Smart lpfc_printf_vlog(vport, 2094*01649561SJames Smart KERN_INFO, LOG_NVME_DISC, 2095*01649561SJames Smart "6009 Unregistered lport Success\n"); 2096*01649561SJames Smart } else { 2097*01649561SJames Smart lpfc_printf_vlog(vport, 2098*01649561SJames Smart KERN_INFO, LOG_NVME_DISC, 2099*01649561SJames Smart "6010 Unregistered lport " 2100*01649561SJames Smart "Failed, status x%x\n", 2101*01649561SJames Smart ret); 2102*01649561SJames Smart } 2103*01649561SJames Smart } 2104*01649561SJames Smart 2105*01649561SJames Smart void 2106*01649561SJames Smart lpfc_nvme_update_localport(struct lpfc_vport *vport) 2107*01649561SJames Smart { 2108*01649561SJames Smart struct nvme_fc_local_port *localport; 2109*01649561SJames Smart struct lpfc_nvme_lport *lport; 2110*01649561SJames Smart 2111*01649561SJames Smart localport = vport->localport; 2112*01649561SJames Smart lport = (struct lpfc_nvme_lport *)localport->private; 2113*01649561SJames Smart 2114*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 2115*01649561SJames Smart "6012 Update NVME lport %p did x%x\n", 2116*01649561SJames Smart localport, vport->fc_myDID); 2117*01649561SJames Smart 2118*01649561SJames Smart localport->port_id = vport->fc_myDID; 2119*01649561SJames Smart if (localport->port_id == 0) 2120*01649561SJames Smart localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY; 
2121*01649561SJames Smart else 2122*01649561SJames Smart localport->port_role = FC_PORT_ROLE_NVME_INITIATOR; 2123*01649561SJames Smart 2124*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2125*01649561SJames Smart "6030 bound lport %p to DID x%06x\n", 2126*01649561SJames Smart lport, localport->port_id); 2127*01649561SJames Smart 2128*01649561SJames Smart } 2129*01649561SJames Smart 2130*01649561SJames Smart int 2131*01649561SJames Smart lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2132*01649561SJames Smart { 2133*01649561SJames Smart int ret = 0; 2134*01649561SJames Smart struct nvme_fc_local_port *localport; 2135*01649561SJames Smart struct lpfc_nvme_lport *lport; 2136*01649561SJames Smart struct lpfc_nvme_rport *rport; 2137*01649561SJames Smart struct nvme_fc_remote_port *remote_port; 2138*01649561SJames Smart struct nvme_fc_port_info rpinfo; 2139*01649561SJames Smart 2140*01649561SJames Smart lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, 2141*01649561SJames Smart "6006 Register NVME PORT. DID x%06x nlptype x%x\n", 2142*01649561SJames Smart ndlp->nlp_DID, ndlp->nlp_type); 2143*01649561SJames Smart 2144*01649561SJames Smart localport = vport->localport; 2145*01649561SJames Smart lport = (struct lpfc_nvme_lport *)localport->private; 2146*01649561SJames Smart 2147*01649561SJames Smart if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) { 2148*01649561SJames Smart 2149*01649561SJames Smart /* The driver isn't expecting the rport wwn to change 2150*01649561SJames Smart * but it might get a different DID on a different 2151*01649561SJames Smart * fabric. 
2152*01649561SJames Smart */ 2153*01649561SJames Smart list_for_each_entry(rport, &lport->rport_list, list) { 2154*01649561SJames Smart if (rport->remoteport->port_name != 2155*01649561SJames Smart wwn_to_u64(ndlp->nlp_portname.u.wwn)) 2156*01649561SJames Smart continue; 2157*01649561SJames Smart lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, 2158*01649561SJames Smart "6035 lport %p, found matching rport " 2159*01649561SJames Smart "at wwpn 0x%llx, Data: x%x x%x x%x " 2160*01649561SJames Smart "x%06x\n", 2161*01649561SJames Smart lport, 2162*01649561SJames Smart rport->remoteport->port_name, 2163*01649561SJames Smart rport->remoteport->port_id, 2164*01649561SJames Smart rport->remoteport->port_role, 2165*01649561SJames Smart ndlp->nlp_type, 2166*01649561SJames Smart ndlp->nlp_DID); 2167*01649561SJames Smart remote_port = rport->remoteport; 2168*01649561SJames Smart if ((remote_port->port_id == 0) && 2169*01649561SJames Smart (remote_port->port_role == 2170*01649561SJames Smart FC_PORT_ROLE_NVME_DISCOVERY)) { 2171*01649561SJames Smart remote_port->port_id = ndlp->nlp_DID; 2172*01649561SJames Smart remote_port->port_role &= 2173*01649561SJames Smart ~FC_PORT_ROLE_NVME_DISCOVERY; 2174*01649561SJames Smart if (ndlp->nlp_type & NLP_NVME_TARGET) 2175*01649561SJames Smart remote_port->port_role |= 2176*01649561SJames Smart FC_PORT_ROLE_NVME_TARGET; 2177*01649561SJames Smart if (ndlp->nlp_type & NLP_NVME_INITIATOR) 2178*01649561SJames Smart remote_port->port_role |= 2179*01649561SJames Smart FC_PORT_ROLE_NVME_INITIATOR; 2180*01649561SJames Smart 2181*01649561SJames Smart lpfc_printf_vlog(ndlp->vport, KERN_INFO, 2182*01649561SJames Smart LOG_NVME_DISC, 2183*01649561SJames Smart "6014 Rebinding lport to " 2184*01649561SJames Smart "rport wwpn 0x%llx, " 2185*01649561SJames Smart "Data: x%x x%x x%x x%06x\n", 2186*01649561SJames Smart remote_port->port_name, 2187*01649561SJames Smart remote_port->port_id, 2188*01649561SJames Smart remote_port->port_role, 
2189*01649561SJames Smart ndlp->nlp_type, 2190*01649561SJames Smart ndlp->nlp_DID); 2191*01649561SJames Smart } 2192*01649561SJames Smart return 0; 2193*01649561SJames Smart } 2194*01649561SJames Smart 2195*01649561SJames Smart /* NVME rports are not preserved across devloss. 2196*01649561SJames Smart * Just register this instance. 2197*01649561SJames Smart */ 2198*01649561SJames Smart rpinfo.port_id = ndlp->nlp_DID; 2199*01649561SJames Smart rpinfo.port_role = 0; 2200*01649561SJames Smart if (ndlp->nlp_type & NLP_NVME_TARGET) 2201*01649561SJames Smart rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET; 2202*01649561SJames Smart if (ndlp->nlp_type & NLP_NVME_INITIATOR) 2203*01649561SJames Smart rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR; 2204*01649561SJames Smart rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); 2205*01649561SJames Smart rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 2206*01649561SJames Smart 2207*01649561SJames Smart ret = nvme_fc_register_remoteport(localport, &rpinfo, 2208*01649561SJames Smart &remote_port); 2209*01649561SJames Smart if (!ret) { 2210*01649561SJames Smart rport = remote_port->private; 2211*01649561SJames Smart rport->remoteport = remote_port; 2212*01649561SJames Smart rport->lport = lport; 2213*01649561SJames Smart rport->ndlp = lpfc_nlp_get(ndlp); 2214*01649561SJames Smart if (!rport->ndlp) 2215*01649561SJames Smart return -1; 2216*01649561SJames Smart ndlp->nrport = rport; 2217*01649561SJames Smart INIT_LIST_HEAD(&rport->list); 2218*01649561SJames Smart list_add_tail(&rport->list, &lport->rport_list); 2219*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, 2220*01649561SJames Smart LOG_NVME_DISC | LOG_NODE, 2221*01649561SJames Smart "6022 Binding new rport to lport %p " 2222*01649561SJames Smart "Rport WWNN 0x%llx, Rport WWPN 0x%llx " 2223*01649561SJames Smart "DID x%06x Role x%x\n", 2224*01649561SJames Smart lport, 2225*01649561SJames Smart rpinfo.node_name, rpinfo.port_name, 2226*01649561SJames Smart 
rpinfo.port_id, rpinfo.port_role); 2227*01649561SJames Smart } else { 2228*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, 2229*01649561SJames Smart LOG_NVME_DISC | LOG_NODE, 2230*01649561SJames Smart "6031 RemotePort Registration failed " 2231*01649561SJames Smart "err: %d, DID x%06x\n", 2232*01649561SJames Smart ret, ndlp->nlp_DID); 2233*01649561SJames Smart } 2234*01649561SJames Smart } else { 2235*01649561SJames Smart ret = -EINVAL; 2236*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2237*01649561SJames Smart "6027 Unknown nlp_type x%x on DID x%06x " 2238*01649561SJames Smart "ndlp %p. Not Registering nvme rport\n", 2239*01649561SJames Smart ndlp->nlp_type, ndlp->nlp_DID, ndlp); 2240*01649561SJames Smart } 2241*01649561SJames Smart return ret; 2242*01649561SJames Smart } 2243*01649561SJames Smart 2244*01649561SJames Smart /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport. 2245*01649561SJames Smart * 2246*01649561SJames Smart * There is no notion of Devloss or rport recovery from the current 2247*01649561SJames Smart * nvme_transport perspective. Loss of an rport just means IO cannot 2248*01649561SJames Smart * be sent and recovery is completely up to the initator. 2249*01649561SJames Smart * For now, the driver just unbinds the DID and port_role so that 2250*01649561SJames Smart * no further IO can be issued. Changes are planned for later. 2251*01649561SJames Smart * 2252*01649561SJames Smart * Notes - the ndlp reference count is not decremented here since 2253*01649561SJames Smart * since there is no nvme_transport api for devloss. Node ref count 2254*01649561SJames Smart * is only adjusted in driver unload. 
2255*01649561SJames Smart */ 2256*01649561SJames Smart void 2257*01649561SJames Smart lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2258*01649561SJames Smart { 2259*01649561SJames Smart int ret; 2260*01649561SJames Smart struct nvme_fc_local_port *localport; 2261*01649561SJames Smart struct lpfc_nvme_lport *lport; 2262*01649561SJames Smart struct lpfc_nvme_rport *rport; 2263*01649561SJames Smart struct nvme_fc_remote_port *remoteport; 2264*01649561SJames Smart 2265*01649561SJames Smart localport = vport->localport; 2266*01649561SJames Smart 2267*01649561SJames Smart /* This is fundamental error. The localport is always 2268*01649561SJames Smart * available until driver unload. Just exit. 2269*01649561SJames Smart */ 2270*01649561SJames Smart if (!localport) 2271*01649561SJames Smart return; 2272*01649561SJames Smart 2273*01649561SJames Smart lport = (struct lpfc_nvme_lport *)localport->private; 2274*01649561SJames Smart if (!lport) 2275*01649561SJames Smart goto input_err; 2276*01649561SJames Smart 2277*01649561SJames Smart rport = ndlp->nrport; 2278*01649561SJames Smart if (!rport) 2279*01649561SJames Smart goto input_err; 2280*01649561SJames Smart 2281*01649561SJames Smart remoteport = rport->remoteport; 2282*01649561SJames Smart lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2283*01649561SJames Smart "6033 Unreg nvme remoteport %p, portname x%llx, " 2284*01649561SJames Smart "port_id x%06x, portstate x%x port type x%x\n", 2285*01649561SJames Smart remoteport, remoteport->port_name, 2286*01649561SJames Smart remoteport->port_id, remoteport->port_state, 2287*01649561SJames Smart ndlp->nlp_type); 2288*01649561SJames Smart 2289*01649561SJames Smart /* Sanity check ndlp type. Only call for NVME ports. Don't 2290*01649561SJames Smart * clear any rport state until the transport calls back. 
2291*01649561SJames Smart */ 2292*01649561SJames Smart if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) { 2293*01649561SJames Smart init_completion(&rport->rport_unreg_done); 2294*01649561SJames Smart ret = nvme_fc_unregister_remoteport(remoteport); 2295*01649561SJames Smart if (ret != 0) { 2296*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 2297*01649561SJames Smart "6167 NVME unregister failed %d " 2298*01649561SJames Smart "port_state x%x\n", 2299*01649561SJames Smart ret, remoteport->port_state); 2300*01649561SJames Smart } 2301*01649561SJames Smart 2302*01649561SJames Smart /* Wait for the driver's delete completion routine to finish 2303*01649561SJames Smart * before proceeding. This guarantees the transport and driver 2304*01649561SJames Smart * have completed the unreg process. 2305*01649561SJames Smart */ 2306*01649561SJames Smart ret = wait_for_completion_timeout(&rport->rport_unreg_done, 5); 2307*01649561SJames Smart if (ret == 0) { 2308*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 2309*01649561SJames Smart "6169 Unreg nvme wait failed %d\n", 2310*01649561SJames Smart ret); 2311*01649561SJames Smart } 2312*01649561SJames Smart } 2313*01649561SJames Smart return; 2314*01649561SJames Smart 2315*01649561SJames Smart input_err: 2316*01649561SJames Smart lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 2317*01649561SJames Smart "6168: State error: lport %p, rport%p FCID x%06x\n", 2318*01649561SJames Smart vport->localport, ndlp->rport, ndlp->nlp_DID); 2319*01649561SJames Smart } 2320