xref: /linux/drivers/scsi/qla2xxx/qla_nvme.c (revision f96a974170b749e3a56844e25b31d46a7233b6f6)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/blk-mq.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;
static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha,
				   struct qla_qpair *qp,
				   struct qla_nvme_lsrjt_pt_arg *a,
				   bool is_xchg_terminate);

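/*
 * Per-exchange context for an unsolicited FC-NVMe LS request. Carries the
 * transport's nvmefc_ls_rsp together with the exchange identifiers
 * (exchange address, N_Port handle, OX_ID) needed to transmit the LS
 * response on the same exchange.
 */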
struct qla_nvme_unsol_ctx {
	struct list_head elem;
	struct scsi_qla_host *vha;
	struct fc_port *fcport;
	struct srb *sp;
	struct nvmefc_ls_rsp lsrsp;
	struct nvmefc_ls_rsp *fd_rsp;
	struct work_struct lsrsp_work;
	struct work_struct abort_work;
	__le32 exchange_address;
	__le16 nport_handle;
	__le16 ox_id;
	int comp_status;
	spinlock_t cmd_lock;
};

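/* Register a discovered fabric port with the FC-NVMe transport. */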
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (qla_nvme_register_hba(vha))
		return 0;

	if (!vha->nvme_local_port)
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
		(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = fcport->dev_loss_tmo;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
				       fcport->dev_loss_tmo);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		       "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		       "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	/* Map admin queue and 1st IO queue to index 0 */
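	/*
	 * e.g. transport qidx 0 (admin queue) and qidx 1 (first I/O queue)
	 * both select driver queue 0; qidx N maps to queue N - 1 for N >= 1.
	 */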
	if (qidx)
		qidx--;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx=%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	/* Use base qpair if max_qpairs is 0 */
	if (!ha->max_qpairs) {
		qpair = ha->base_qpair;
	} else {
		if (ha->queue_pair_map[qidx]) {
			*handle = ha->queue_pair_map[qidx];
			ql_log(ql_log_info, vha, 0x2121,
			       "Returning existing qpair of %p for idx=%x\n",
			       *handle, qidx);
			return 0;
		}

		qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
		if (!qpair) {
			ql_log(ql_log_warn, vha, 0x2122,
			       "Failed to allocate qpair\n");
			return -EINVAL;
		}
		qla_adjust_iocb_limit(vha);
	}
	*handle = qpair;

	return 0;
}

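/*
 * Final kref release for an FCP command SRB: report the completion status
 * to the transport via fd->done() and return the SRB to its qpair pool.
 */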
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

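/*
 * Final kref release for an LS request SRB: complete the LS request back
 * to the transport and free the SRB.
 */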
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;

	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

static void qla_nvme_release_lsrsp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct qla_nvme_unsol_ctx *uctx = sp->priv;
	struct nvmefc_ls_rsp *fd_rsp;
	unsigned long flags;

	if (!uctx) {
		qla2x00_rel_sp(sp);
		return;
	}

	spin_lock_irqsave(&uctx->cmd_lock, flags);
	uctx->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&uctx->cmd_lock, flags);

	fd_rsp = uctx->fd_rsp;

	list_del(&uctx->elem);

	fd_rsp->done(fd_rsp);
	kfree(uctx);
	qla2x00_rel_sp(sp);
}

static void qla_nvme_lsrsp_complete(struct work_struct *work)
{
	struct qla_nvme_unsol_ctx *uctx =
		container_of(work, struct qla_nvme_unsol_ctx, lsrsp_work);

	kref_put(&uctx->sp->cmd_kref, qla_nvme_release_lsrsp_cmd_kref);
}

static void qla_nvme_sp_lsrsp_done(srb_t *sp, int res)
{
	struct qla_nvme_unsol_ctx *uctx = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	uctx->comp_status = res;
	INIT_WORK(&uctx->lsrsp_work, qla_nvme_lsrsp_complete);
	schedule_work(&uctx->lsrsp_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

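/*
 * Worker that issues an ABTS for an outstanding NVMe command. It is
 * scheduled from the transport ls_abort/fcp_abort callbacks, which take
 * an extra kref on the SRB before queueing this work.
 */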
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval, abts_done_called = 1;
	bool io_wait_for_abort_done;
	uint32_t handle;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	       "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
	       __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);

	if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	/*
	 * sp may not be valid after abort_command() returns QLA_SUCCESS or
	 * QLA_ERR_FROM_FW, so cache these values here.
	 */
	io_wait_for_abort_done = ql2xabts_wait_nvme &&
					QLA_ABTS_WAIT_ENABLED(sp);
	handle = sp->handle;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, handle, fcport, rval);

	/*
	 * If async tmf is enabled, the abort callback is called only on
	 * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
	 */
	if (ql2xasynctmfenable &&
	    rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
		abts_done_called = 0;

	/*
	 * Return before decreasing the kref so that I/O requests are held
	 * off until the ABTS completes. That kref is dropped in
	 * qla24xx_abort_sp_done().
	 */
	if (abts_done_called && io_wait_for_abort_done)
		return;
out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}

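/*
 * Transmit the response to an unsolicited FC-NVMe LS request on its
 * original exchange, retrying a few times if the firmware queue is busy.
 * If the response cannot be posted, the exchange is terminated instead.
 */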
static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
			       struct nvme_fc_remote_port *rport,
			       struct nvmefc_ls_rsp *fd_resp)
{
	struct qla_nvme_unsol_ctx *uctx = container_of(fd_resp,
				struct qla_nvme_unsol_ctx, lsrsp);
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct scsi_qla_host *vha = uctx->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_nvme_lsrjt_pt_arg a;
	struct srb_iocb *nvme;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;
	uint8_t cnt = 0;

	if (!fcport || fcport->deleted)
		goto out;

	if (!ha->flags.fw_started)
		goto out;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto out;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_lsrsp_done;
	sp->put_fn = qla_nvme_release_lsrsp_cmd_kref;
	sp->priv = (void *)uctx;
	sp->unsol_rsp = 1;
	uctx->sp = sp;
	spin_lock_init(&uctx->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	uctx->fd_rsp = fd_resp;
	nvme->u.nvme.desc = fd_resp;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.timeout_sec = 0;
	nvme->u.nvme.cmd_dma = fd_resp->rspdma;
	nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen);
	nvme->u.nvme.rsp_len = 0;
	nvme->u.nvme.rsp_dma = 0;
	nvme->u.nvme.exchange_address = uctx->exchange_address;
	nvme->u.nvme.nport_handle = uctx->nport_handle;
	nvme->u.nvme.ox_id = uctx->ox_id;
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
				   fd_resp->rsplen, DMA_TO_DEVICE);

	ql_dbg(ql_dbg_unsol, vha, 0x2122,
	       "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
	       fcport->d_id.b24, fcport->port_name, uctx->exchange_address,
	       uctx->ox_id, uctx->nport_handle);
retry:
	rval = qla2x00_start_sp(sp);
	switch (rval) {
	case QLA_SUCCESS:
		break;
	case EAGAIN:
		msleep(PURLS_MSLEEP_INTERVAL);
		cnt++;
		if (cnt < PURLS_RETRY_COUNT)
			goto retry;

		fallthrough;
	default:
		ql_log(ql_log_warn, vha, 0x2123,
		       "Failed to xmit Unsol ls response = %d\n", rval);
		rval = -EIO;
		qla2x00_rel_sp(sp);
		goto out;
	}

	return 0;
out:
	memset((void *)&a, 0, sizeof(a));
	a.vp_idx = vha->vp_idx;
	a.nport_handle = uctx->nport_handle;
	a.xchg_address = uctx->exchange_address;
	qla_nvme_ls_reject_iocb(vha, ha->base_qpair, &a, true);
	kfree(uctx);
	return rval;
}

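/* Abort an outstanding LS request; the ABTS itself is issued from a worker. */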
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

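/* Issue an FC-NVMe LS request (e.g. Create Association) for the transport. */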
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb   *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int     rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t           *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen);
	nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen);
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = fd->rqstdma;
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

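/* Abort an outstanding FCP command; the ABTS itself is issued from a worker. */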
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

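/*
 * Build an NVMe command IOCB (plus continuation IOCBs for additional data
 * segments) on the qpair request ring and ring the doorbell.
 */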
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long   flags;
	uint32_t        *clr_ptr;
	uint32_t        handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t        cnt, i;
	uint16_t        req_cnt;
	uint16_t        tot_dsds;
	uint16_t	avail_dsds;
	struct dsd64	*cur_dsd;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	uint32_t        rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	rsp = qpair->rsp;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
	sp->iores.exch_cnt = 1;
	sp->iores.iocb_cnt = req_cnt;
	if (qla_get_fw_resources(sp->qpair, &sp->iores)) {
		rval = -EBUSY;
		goto queuing_error;
	}

	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt)) {
				rval = -EBUSY;
				goto queuing_error;
			}
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer - how do we check buffer len == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
			NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
				(sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	if (sp->fcport->edif.enable && fd->io_dir != 0)
		cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);

	/* Set BIT_13 of control flags for Async event */
	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
					   &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Ignore the nvme async cmd due to its long timeout */
	if (!nvme->u.nvme.aen_op)
		sp->qpair->cmd_cnt++;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

queuing_error:
	if (rval)
		qla_put_fw_resources(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}

/* Post an FCP command to the firmware (the transport's .fcp_io entry point). */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	int rval;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return -ENODEV;
	}

	fcport = qla_rport->fcport;

	if (unlikely(!qpair || !fcport || fcport->deleted))
		return -EBUSY;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -ENODEV;

	vha = fcport->vha;
	ha = vha->hw;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return -EBUSY;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending I/Os, return busy back to stall the I/O queue. This
	 * happens when the link goes away and fw hasn't notified us yet,
	 * but I/Os are being returned. If the dev comes back quickly we
	 * won't exhaust the I/O retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	qpair = qla_mapq_nvme_select_qpair(ha, qpair);

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	sp->cmd_sp = sp;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

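/* Map the transport's hardware queues onto the PCI device IRQ affinity. */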
static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
		struct blk_mq_queue_map *map)
{
	struct scsi_qla_host *vha = lport->private;

	blk_mq_map_hw_queues(map, &vha->hw->pdev->dev, vha->irq_offset);
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue   = qla_nvme_alloc_queue,
	.delete_queue	= NULL,
	.ls_req		= qla_nvme_ls_req,
	.ls_abort	= qla_nvme_ls_abort,
	.fcp_io		= qla_nvme_post_cmd,
	.fcp_abort	= qla_nvme_fcp_abort,
	.xmt_ls_rsp	= qla_nvme_xmt_ls_rsp,
	.map_queues	= qla_nvme_map_queues,
	.max_hw_queues  = DEF_NVME_HW_QUEUES,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz  = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

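/* Unregister a remote port from the FC-NVMe transport and wait for teardown. */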
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, fcport->vha, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

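/* Unregister this host's FC-NVMe local port, if one was registered. */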
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

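/*
 * Register this host with the FC-NVMe transport as a local port, sizing
 * max_hw_queues from ql2xnvme_queues and the available qpairs.
 */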
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
		ql_log(ql_log_warn, vha, 0xfffd,
		    "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to:%d\n",
		    ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
		ql2xnvme_queues = DEF_NVME_HW_QUEUES;
	} else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
		ql_log(ql_log_warn, vha, 0xfffd,
		       "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
		       ql2xnvme_queues, (ha->max_qpairs - 1),
		       (ha->max_qpairs - 1));
		ql2xnvme_queues = ha->max_qpairs - 1;
	}

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(ql2xnvme_queues),
		(uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));

	ql_log(ql_log_info, vha, 0xfffb,
	       "Number of NVME queues used for this port: %d\n",
	       qla_nvme_fc_transport.max_hw_queues);

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	mutex_lock(&ha->vport_lock);
	/*
	 * Check again for nvme_local_port to see if any other thread raced
	 * with this one and finished registration.
	 */
	if (!vha->nvme_local_port) {
		ql_log(ql_log_info, vha, 0xffff,
		    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
		    pinfo.node_name, pinfo.port_name, pinfo.port_id);
		qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

		ret = nvme_fc_register_localport(&pinfo, tmpl,
						 get_device(&ha->pdev->dev),
						 &vha->nvme_local_port);
		mutex_unlock(&ha->vport_lock);
	} else {
		mutex_unlock(&ha->vport_lock);
		return 0;
	}
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}

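/*
 * Tune an Abort IOCB for NVMe: request a driver-specified ABTS retry
 * count and a response timeout of 2 * R_A_TOV.
 */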
void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	struct qla_hw_data *ha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	ha = orig_sp->fcport->vha->hw;

	WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
	/* Use Driver Specified Retry Count */
	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
	abt->drv.abts_rty_cnt = cpu_to_le16(2);
	/* Use specified response timeout */
	abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
	/* Set it to 2 * r_a_tov in secs */
	abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}

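/* Decode and log the completion status of an NVMe Abort IOCB. */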
void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	u16	comp_status;
	struct scsi_qla_host *vha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	vha = orig_sp->fcport->vha;

	comp_status = le16_to_cpu(abt->comp_status);
	switch (comp_status) {
	case CS_RESET:		/* reset event aborted */
	case CS_ABORTED:	/* IOCB was cleaned */
	case CS_TIMEOUT:
	/* N_Port handle is not currently logged in */
	case CS_PORT_UNAVAILABLE:
	/* N_Port handle was logged out while waiting for ABTS to complete */
	case CS_PORT_LOGGED_OUT:
	/* Firmware found that the port name changed */
	case CS_PORT_CONFIG_CHG:
		ql_dbg(ql_dbg_async, vha, 0xf09d,
		       "Abort I/O IOCB completed with error, comp_status=%x\n",
		       comp_status);
		break;

	/* BA_RJT was received for the ABTS */
	case CS_REJECT_RECEIVED:
		ql_dbg(ql_dbg_async, vha, 0xf09e,
		       "BA_RJT was received for the ABTS rjt_vendorUnique = %u\n",
		       abt->fw.ba_rjt_vendorUnique);
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
		       "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
		       abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
		break;

	case CS_COMPLETE:
		ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
		       "IOCB request is completed successfully comp_status=%x\n",
		       comp_status);
		break;

	case CS_IOCB_ERROR:
		ql_dbg(ql_dbg_async, vha, 0xf0a0,
		       "IOCB request is failed, comp_status=%x\n", comp_status);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0xf0a1,
		       "Invalid Abort IO IOCB Completion Status %x\n",
		       comp_status);
		break;
	}
}

inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;
	kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}

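/* Build an FC-NVMe LS_RJT payload for the given LS command. */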
static void qla_nvme_fc_format_rjt(void *buf, u8 ls_cmd, u8 reason,
				   u8 explanation, u8 vendor)
{
	struct fcnvme_ls_rjt *rjt = buf;

	rjt->w0.ls_cmd = FCNVME_LS_RJT;
	rjt->desc_list_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt));
	rjt->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	rjt->rqst.desc_len =
		fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	rjt->rqst.w0.ls_cmd = ls_cmd;
	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
	rjt->rjt.vendor = vendor;
}

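/* Fill the PT_LS4_REQUEST IOCB used to transmit an LS reject or terminate. */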
static void qla_nvme_lsrjt_pt_iocb(struct scsi_qla_host *vha,
				   struct pt_ls4_request *lsrjt_iocb,
				   struct qla_nvme_lsrjt_pt_arg *a)
{
	lsrjt_iocb->entry_type = PT_LS4_REQUEST;
	lsrjt_iocb->entry_count = 1;
	lsrjt_iocb->sys_define = 0;
	lsrjt_iocb->entry_status = 0;
	lsrjt_iocb->handle = QLA_SKIP_HANDLE;
	lsrjt_iocb->nport_handle = a->nport_handle;
	lsrjt_iocb->exchange_address = a->xchg_address;
	lsrjt_iocb->vp_index = a->vp_idx;

	lsrjt_iocb->control_flags = cpu_to_le16(a->control_flags);

	put_unaligned_le64(a->tx_addr, &lsrjt_iocb->dsd[0].address);
	lsrjt_iocb->dsd[0].length = cpu_to_le32(a->tx_byte_count);
	lsrjt_iocb->tx_dseg_count = cpu_to_le16(1);
	lsrjt_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);

	put_unaligned_le64(a->rx_addr, &lsrjt_iocb->dsd[1].address);
	lsrjt_iocb->dsd[1].length = 0;
	lsrjt_iocb->rx_dseg_count = 0;
	lsrjt_iocb->rx_byte_count = 0;
}

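/*
 * Send an LS_RJT for an unsolicited LS request, or terminate the exchange
 * when is_xchg_terminate is true, e.g. when the request could not be
 * passed up to (or was refused by) the FC-NVMe transport.
 */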
static int
qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp,
			struct qla_nvme_lsrjt_pt_arg *a, bool is_xchg_terminate)
{
	struct pt_ls4_request *lsrjt_iocb;

	lsrjt_iocb = __qla2x00_alloc_iocbs(qp, NULL);
	if (!lsrjt_iocb) {
		ql_log(ql_log_warn, vha, 0x210e,
		       "qla2x00_alloc_iocbs failed.\n");
		return QLA_FUNCTION_FAILED;
	}

	if (!is_xchg_terminate) {
		qla_nvme_fc_format_rjt((void *)vha->hw->lsrjt.c, a->opcode,
				       a->reason, a->explanation, 0);

		a->tx_byte_count = sizeof(struct fcnvme_ls_rjt);
		a->tx_addr = vha->hw->lsrjt.cdma;
		a->control_flags = CF_LS4_RESPONDER << CF_LS4_SHIFT;

		ql_dbg(ql_dbg_unsol, vha, 0x211f,
		       "Sending nvme fc ls reject ox_id %04x op %04x\n",
		       a->ox_id, a->opcode);
		ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x210f,
			       vha->hw->lsrjt.c, sizeof(*vha->hw->lsrjt.c));
	} else {
		a->tx_byte_count = 0;
		a->control_flags = CF_LS4_RESPONDER_TERM << CF_LS4_SHIFT;
		ql_dbg(ql_dbg_unsol, vha, 0x2110,
		       "Terminate nvme ls xchg 0x%x\n", a->xchg_address);
	}

	qla_nvme_lsrjt_pt_iocb(vha, lsrjt_iocb, a);
	/* Flush iocb to memory before notifying the hw doorbell */
	wmb();
	qla2x00_start_iocbs(vha, qp->req);
	return 0;
}

/*
 * qla2xxx_process_purls_pkt() - Pass up an unsolicited received FC-NVMe
 * Link Service pkt to nvme_fc_rcv_ls_req().
 * The LLDD needs to provide memory for the response buffer, which will be
 * used to reference the exchange corresponding to the LS when issuing an
 * LS response. The LLDD will have to free the response buffer in
 * lport->ops->xmt_ls_rsp().
 *
 * @vha: SCSI qla host
 * @item: ptr to purex_item
 */
static void
qla2xxx_process_purls_pkt(struct scsi_qla_host *vha, struct purex_item *item)
{
	struct qla_nvme_unsol_ctx *uctx = item->purls_context;
	struct qla_nvme_lsrjt_pt_arg a;
	int ret = 1;

#if (IS_ENABLED(CONFIG_NVME_FC))
	ret = nvme_fc_rcv_ls_req(uctx->fcport->nvme_remote_port, &uctx->lsrsp,
				 &item->iocb, item->size);
#endif
	if (ret) {
		ql_dbg(ql_dbg_unsol, vha, 0x2125, "NVMe transport ls_req failed\n");
		memset((void *)&a, 0, sizeof(a));
		a.vp_idx = vha->vp_idx;
		a.nport_handle = uctx->nport_handle;
		a.xchg_address = uctx->exchange_address;
		qla_nvme_ls_reject_iocb(vha, vha->hw->base_qpair, &a, true);
		list_del(&uctx->elem);
		kfree(uctx);
	}
}

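/* Resolve the vp_index carried in a PURLS IOCB to its scsi_qla_host. */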
static scsi_qla_host_t *
qla2xxx_get_vha_from_vp_idx(struct qla_hw_data *ha, uint16_t vp_index)
{
	scsi_qla_host_t *base_vha, *vha, *tvp;
	unsigned long flags;

	base_vha = pci_get_drvdata(ha->pdev);

	if (!vp_index && !ha->num_vhosts)
		return base_vha;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
		if (vha->vp_idx == vp_index) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return NULL;
}

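/*
 * Handle a PT_LS4 unsolicited receive (PURLS) IOCB: find the sending
 * fcport, copy the LS payload into a purex_item, allocate a per-exchange
 * context, and queue the packet for qla2xxx_process_purls_pkt(). On
 * failure, an LS_RJT is transmitted back on the exchange.
 */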
void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
{
	struct nvme_fc_remote_port *rport;
	struct qla_nvme_rport *qla_rport;
	struct qla_nvme_lsrjt_pt_arg a;
	struct pt_ls4_rx_unsol *p = *pkt;
	struct qla_nvme_unsol_ctx *uctx;
	struct rsp_que *rsp_q = *rsp;
	struct qla_hw_data *ha;
	scsi_qla_host_t	*vha;
	fc_port_t *fcport = NULL;
	struct purex_item *item;
	port_id_t d_id = {0};
	port_id_t id = {0};
	u8 *opcode;
	bool xmt_reject = false;

	ha = rsp_q->hw;

	vha = qla2xxx_get_vha_from_vp_idx(ha, p->vp_index);
	if (!vha) {
		ql_log(ql_log_warn, NULL, 0x2110, "Invalid vp index %d\n", p->vp_index);
		WARN_ON_ONCE(1);
		return;
	}

	memset((void *)&a, 0, sizeof(a));
	opcode = (u8 *)&p->payload[0];
	a.opcode = opcode[3];
	a.vp_idx = p->vp_index;
	a.nport_handle = p->nport_handle;
	a.ox_id = p->ox_id;
	a.xchg_address = p->exchange_address;

	id.b.domain = p->s_id.domain;
	id.b.area   = p->s_id.area;
	id.b.al_pa  = p->s_id.al_pa;
	d_id.b.domain = p->d_id[2];
	d_id.b.area   = p->d_id[1];
	d_id.b.al_pa  = p->d_id[0];

	fcport = qla2x00_find_fcport_by_nportid(vha, &id, 0);
	if (!fcport) {
		ql_dbg(ql_dbg_unsol, vha, 0x211e,
		       "Failed to find sid=%06x did=%06x\n",
		       id.b24, d_id.b24);
		a.reason = FCNVME_RJT_RC_INV_ASSOC;
		a.explanation = FCNVME_RJT_EXP_NONE;
		xmt_reject = true;
		goto out;
	}
	rport = fcport->nvme_remote_port;
	qla_rport = rport->private;

	item = qla27xx_copy_multiple_pkt(vha, pkt, rsp, true, false);
	if (!item) {
		a.reason = FCNVME_RJT_RC_LOGIC;
		a.explanation = FCNVME_RJT_EXP_NONE;
		xmt_reject = true;
		goto out;
	}

	uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC);
	if (!uctx) {
		ql_log(ql_log_info, vha, 0x2126, "Failed to allocate memory\n");
		a.reason = FCNVME_RJT_RC_LOGIC;
		a.explanation = FCNVME_RJT_EXP_NONE;
		xmt_reject = true;
		kfree(item);
		goto out;
	}

	uctx->vha = vha;
	uctx->fcport = fcport;
	uctx->exchange_address = p->exchange_address;
	uctx->nport_handle = p->nport_handle;
	uctx->ox_id = p->ox_id;
	qla_rport->uctx = uctx;
	INIT_LIST_HEAD(&uctx->elem);
	list_add_tail(&uctx->elem, &fcport->unsol_ctx_head);
	item->purls_context = (void *)uctx;

	ql_dbg(ql_dbg_unsol, vha, 0x2121,
	       "PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n",
	       item->iocb.iocb[3], item->size, uctx->exchange_address,
	       fcport->d_id.b24);
	/* Example payload dump (offset +48):
	 *        0  1  2  3  4  5  6  7  8  9  A  B  C  D  E  F
	 * ----- -----------------------------------------------
	 * 0000: 00 00 00 05 28 00 00 00 07 00 00 00 08 00 00 00
	 * 0010: ab ec 0f cc 00 00 8d 7d 05 00 00 00 10 00 00 00
	 * 0020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
	 */
	ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x2120,
		       &item->iocb, item->size);

	qla24xx_queue_purex_item(vha, item, qla2xxx_process_purls_pkt);
out:
	if (xmt_reject) {
		qla_nvme_ls_reject_iocb(vha, (*rsp)->qpair, &a, false);
		__qla_consume_iocb(vha, pkt, rsp);
	}
}
1329