xref: /linux/drivers/scsi/qla2xxx/qla_nvme.c (revision 0e2b2a76278153d1ac312b0691cb65dabb9aef3e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c) 2003-2017 QLogic Corporation
5  */
6 #include "qla_nvme.h"
7 #include <linux/scatterlist.h>
8 #include <linux/delay.h>
9 #include <linux/nvme.h>
10 #include <linux/nvme-fc.h>
11 #include <linux/blk-mq-pci.h>
12 #include <linux/blk-mq.h>
13 
14 static struct nvme_fc_port_template qla_nvme_fc_transport;
15 
16 int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
17 {
18 	struct qla_nvme_rport *rport;
19 	struct nvme_fc_port_info req;
20 	int ret;
21 
22 	if (!IS_ENABLED(CONFIG_NVME_FC))
23 		return 0;
24 
25 	if (!vha->flags.nvme_enabled) {
26 		ql_log(ql_log_info, vha, 0x2100,
27 		    "%s: Not registering target since Host NVME is not enabled\n",
28 		    __func__);
29 		return 0;
30 	}
31 
32 	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
33 		return 0;
34 
35 	if (!(fcport->nvme_prli_service_param &
36 	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
37 		(fcport->nvme_flag & NVME_FLAG_REGISTERED))
38 		return 0;
39 
40 	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
41 
42 	memset(&req, 0, sizeof(struct nvme_fc_port_info));
43 	req.port_name = wwn_to_u64(fcport->port_name);
44 	req.node_name = wwn_to_u64(fcport->node_name);
45 	req.port_role = 0;
46 	req.dev_loss_tmo = fcport->dev_loss_tmo;
47 
48 	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
49 		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
50 
51 	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
52 		req.port_role |= FC_PORT_ROLE_NVME_TARGET;
53 
54 	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
55 		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
56 
57 	req.port_id = fcport->d_id.b24;
58 
59 	ql_log(ql_log_info, vha, 0x2102,
60 	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
61 	    __func__, req.node_name, req.port_name,
62 	    req.port_id);
63 
64 	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
65 	    &fcport->nvme_remote_port);
66 	if (ret) {
67 		ql_log(ql_log_warn, vha, 0x212e,
68 		    "Failed to register remote port. Transport returned %d\n",
69 		    ret);
70 		return ret;
71 	}
72 
73 	nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
74 				       fcport->dev_loss_tmo);
75 
76 	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
77 		ql_log(ql_log_info, vha, 0x212a,
78 		       "PortID:%06x Supports SLER\n", req.port_id);
79 
80 	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
81 		ql_log(ql_log_info, vha, 0x212b,
82 		       "PortID:%06x Supports PI control\n", req.port_id);
83 
84 	rport = fcport->nvme_remote_port->private;
85 	rport->fcport = fcport;
86 
87 	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
88 	return 0;
89 }
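
/*
 * Editor's note: a minimal sketch of the PRLI service-parameter to
 * port-role mapping performed above, pulled out as a pure function.
 * The helper name is hypothetical and not part of the driver.
 *
 *	static u32 qla_prli_to_port_role(u32 sp)
 *	{
 *		u32 role = 0;
 *
 *		if (sp & NVME_PRLI_SP_INITIATOR)
 *			role |= FC_PORT_ROLE_NVME_INITIATOR;
 *		if (sp & NVME_PRLI_SP_TARGET)
 *			role |= FC_PORT_ROLE_NVME_TARGET;
 *		if (sp & NVME_PRLI_SP_DISCOVERY)
 *			role |= FC_PORT_ROLE_NVME_DISCOVERY;
 *
 *		return role;
 *	}
 */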
90 
91 /* Allocate a queue for NVMe traffic */
92 static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
93     unsigned int qidx, u16 qsize, void **handle)
94 {
95 	struct scsi_qla_host *vha;
96 	struct qla_hw_data *ha;
97 	struct qla_qpair *qpair;
98 
99 	/* Map admin queue and 1st IO queue to index 0 */
100 	if (qidx)
101 		qidx--;
102 
103 	vha = (struct scsi_qla_host *)lport->private;
104 	ha = vha->hw;
105 
106 	ql_log(ql_log_info, vha, 0x2104,
107 	    "%s: handle %p, idx=%d, qsize %d\n",
108 	    __func__, handle, qidx, qsize);
109 
110 	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
111 		ql_log(ql_log_warn, vha, 0x212f,
112 		    "%s: Illegal qidx=%d. Max=%d\n",
113 		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
114 		return -EINVAL;
115 	}
116 
117 	/* Use base qpair if max_qpairs is 0 */
118 	if (!ha->max_qpairs) {
119 		qpair = ha->base_qpair;
120 	} else {
121 		if (ha->queue_pair_map[qidx]) {
122 			*handle = ha->queue_pair_map[qidx];
123 			ql_log(ql_log_info, vha, 0x2121,
124 			       "Returning existing qpair of %p for idx=%x\n",
125 			       *handle, qidx);
126 			return 0;
127 		}
128 
129 		qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
130 		if (!qpair) {
131 			ql_log(ql_log_warn, vha, 0x2122,
132 			       "Failed to allocate qpair\n");
133 			return -EINVAL;
134 		}
135 	}
136 	*handle = qpair;
137 
138 	return 0;
139 }
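
/*
 * Editor's note on the qidx adjustment above, with an assumed example:
 * the nvme-fc transport numbers the admin queue 0 and I/O queues 1..N,
 * while the driver folds the admin queue and the first I/O queue onto
 * the same qpair.
 *
 *	transport qidx :  0 (admin)   1   2   3  ...
 *	driver index   :  0           0   1   2  ...
 */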
140 
141 static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
142 {
143 	struct srb *sp = container_of(kref, struct srb, cmd_kref);
144 	struct nvme_private *priv = (struct nvme_private *)sp->priv;
145 	struct nvmefc_fcp_req *fd;
146 	struct srb_iocb *nvme;
147 	unsigned long flags;
148 
149 	if (!priv)
150 		goto out;
151 
152 	nvme = &sp->u.iocb_cmd;
153 	fd = nvme->u.nvme.desc;
154 
155 	spin_lock_irqsave(&priv->cmd_lock, flags);
156 	priv->sp = NULL;
157 	sp->priv = NULL;
158 	if (priv->comp_status == QLA_SUCCESS) {
159 		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
160 		fd->status = NVME_SC_SUCCESS;
161 	} else {
162 		fd->rcv_rsplen = 0;
163 		fd->transferred_length = 0;
164 		fd->status = NVME_SC_INTERNAL;
165 	}
166 	spin_unlock_irqrestore(&priv->cmd_lock, flags);
167 
168 	fd->done(fd);
169 out:
170 	qla2xxx_rel_qpair_sp(sp->qpair, sp);
171 }
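
/*
 * Editor's sketch of the cmd_kref life cycle assumed by the release
 * callbacks above (illustrative, not a verbatim call sequence):
 *
 *	kref_init(&sp->cmd_kref);             // submit path, refcount = 1
 *	kref_get_unless_zero(&sp->cmd_kref);  // abort path, only if still live
 *	kref_put(&sp->cmd_kref, sp->put_fn);  // last put runs the release fn
 *
 * Both release functions clear priv->sp and sp->priv under cmd_lock so
 * that a racing abort sees a consistent view.
 */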
172 
173 static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
174 {
175 	struct srb *sp = container_of(kref, struct srb, cmd_kref);
176 	struct nvme_private *priv = (struct nvme_private *)sp->priv;
177 	struct nvmefc_ls_req *fd;
178 	unsigned long flags;
179 
180 	if (!priv)
181 		goto out;
182 
183 	spin_lock_irqsave(&priv->cmd_lock, flags);
184 	priv->sp = NULL;
185 	sp->priv = NULL;
186 	spin_unlock_irqrestore(&priv->cmd_lock, flags);
187 
188 	fd = priv->fd;
189 
190 	fd->done(fd, priv->comp_status);
191 out:
192 	qla2x00_rel_sp(sp);
193 }
194 
195 static void qla_nvme_ls_complete(struct work_struct *work)
196 {
197 	struct nvme_private *priv =
198 		container_of(work, struct nvme_private, ls_work);
199 
200 	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
201 }
202 
203 static void qla_nvme_sp_ls_done(srb_t *sp, int res)
204 {
205 	struct nvme_private *priv = sp->priv;
206 
207 	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
208 		return;
209 
210 	if (res)
211 		res = -EINVAL;
212 
213 	priv->comp_status = res;
214 	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
215 	schedule_work(&priv->ls_work);
216 }
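
/*
 * Editor's note: qla_nvme_sp_ls_done() can be called in interrupt
 * context, so the final kref_put() - whose release callback calls
 * fd->done() back into the nvme-fc transport - is deferred to process
 * context. A minimal sketch of the deferral pattern used above:
 *
 *	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
 *	schedule_work(&priv->ls_work);	// ls_complete does the kref_put()
 */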
217 
218 /* It is assumed that the qpair lock is held. */
219 static void qla_nvme_sp_done(srb_t *sp, int res)
220 {
221 	struct nvme_private *priv = sp->priv;
222 
223 	priv->comp_status = res;
224 	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
225 
228 
229 static void qla_nvme_abort_work(struct work_struct *work)
230 {
231 	struct nvme_private *priv =
232 		container_of(work, struct nvme_private, abort_work);
233 	srb_t *sp = priv->sp;
234 	fc_port_t *fcport = sp->fcport;
235 	struct qla_hw_data *ha = fcport->vha->hw;
236 	int rval, abts_done_called = 1;
237 	bool io_wait_for_abort_done;
238 	uint32_t handle;
239 
240 	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
241 	       "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
242 	       __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);
243 
244 	if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
245 		goto out;
246 
247 	if (ha->flags.host_shutting_down) {
248 		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
249 		    "%s Calling done on sp: %p, type: 0x%x\n",
250 		    __func__, sp, sp->type);
251 		sp->done(sp, 0);
252 		goto out;
253 	}
254 
255 	/*
256 	 * sp may not be valid after abort_command() if the return code is
257 	 * QLA_SUCCESS or QLA_ERR_FROM_FW, so cache the values here.
258 	 */
259 	io_wait_for_abort_done = ql2xabts_wait_nvme &&
260 					QLA_ABTS_WAIT_ENABLED(sp);
261 	handle = sp->handle;
262 
263 	rval = ha->isp_ops->abort_command(sp);
264 
265 	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
266 	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
267 	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
268 	    sp, handle, fcport, rval);
269 
270 	/*
271 	 * If async tmf is enabled, the abort callback is called only on
272 	 * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
273 	 */
274 	if (ql2xasynctmfenable &&
275 	    rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
276 		abts_done_called = 0;
277 
278 	/*
279 	 * Return before decreasing the kref so that I/O requests
280 	 * wait until the ABTS completes. The kref is decreased
281 	 * in qla24xx_abort_sp_done().
282 	 */
283 	if (abts_done_called && io_wait_for_abort_done)
284 		return;
285 out:
286 	/* kref_get was done before the work was scheduled. */
287 	kref_put(&sp->cmd_kref, sp->put_fn);
288 }
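
/*
 * Editor's summary of the reference handoff above (inferred from the
 * code, not from separate documentation): the work item owns the kref
 * taken by the abort entry point.
 *
 *	abort callback expected and waiting on ABTS -> keep the ref; it is
 *	                                               dropped later in
 *	                                               qla24xx_abort_sp_done()
 *	any other outcome                           -> kref_put() here
 */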
289 
290 static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
291     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
292 {
293 	struct nvme_private *priv = fd->private;
294 	unsigned long flags;
295 
296 	spin_lock_irqsave(&priv->cmd_lock, flags);
297 	if (!priv->sp) {
298 		spin_unlock_irqrestore(&priv->cmd_lock, flags);
299 		return;
300 	}
301 
302 	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
303 		spin_unlock_irqrestore(&priv->cmd_lock, flags);
304 		return;
305 	}
306 	spin_unlock_irqrestore(&priv->cmd_lock, flags);
307 
308 	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
309 	schedule_work(&priv->abort_work);
310 }
311 
312 static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
313     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
314 {
315 	struct qla_nvme_rport *qla_rport = rport->private;
316 	fc_port_t *fcport = qla_rport->fcport;
317 	struct srb_iocb   *nvme;
318 	struct nvme_private *priv = fd->private;
319 	struct scsi_qla_host *vha;
320 	int     rval = QLA_FUNCTION_FAILED;
321 	struct qla_hw_data *ha;
322 	srb_t           *sp;
323 
324 	if (!fcport || fcport->deleted)
325 		return rval;
326 
327 	vha = fcport->vha;
328 	ha = vha->hw;
329 
330 	if (!ha->flags.fw_started)
331 		return rval;
332 
333 	/* Alloc SRB structure */
334 	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
335 	if (!sp)
336 		return rval;
337 
338 	sp->type = SRB_NVME_LS;
339 	sp->name = "nvme_ls";
340 	sp->done = qla_nvme_sp_ls_done;
341 	sp->put_fn = qla_nvme_release_ls_cmd_kref;
342 	sp->priv = priv;
343 	priv->sp = sp;
344 	kref_init(&sp->cmd_kref);
345 	spin_lock_init(&priv->cmd_lock);
346 	nvme = &sp->u.iocb_cmd;
347 	priv->fd = fd;
348 	nvme->u.nvme.desc = fd;
349 	nvme->u.nvme.dir = 0;
350 	nvme->u.nvme.dl = 0;
351 	nvme->u.nvme.cmd_len = fd->rqstlen;
352 	nvme->u.nvme.rsp_len = fd->rsplen;
353 	nvme->u.nvme.rsp_dma = fd->rspdma;
354 	nvme->u.nvme.timeout_sec = fd->timeout;
355 	nvme->u.nvme.cmd_dma = fd->rqstdma;
356 	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
357 	    fd->rqstlen, DMA_TO_DEVICE);
358 
359 	rval = qla2x00_start_sp(sp);
360 	if (rval != QLA_SUCCESS) {
361 		ql_log(ql_log_warn, vha, 0x700e,
362 		    "qla2x00_start_sp failed = %d\n", rval);
363 		sp->priv = NULL;
364 		priv->sp = NULL;
365 		qla2x00_rel_sp(sp);
366 		return rval;
367 	}
368 
369 	return rval;
370 }
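
/*
 * Editor's note on the dma_sync_single_for_device() call above: the LS
 * request buffer at fd->rqstdma is mapped by the nvme-fc transport, so
 * the driver only has to publish its CPU-side writes before the
 * hardware reads the buffer. A hedged sketch of the handoff:
 *
 *	CPU fills request  ->  dma_sync_single_for_device(..., DMA_TO_DEVICE)
 *	                   ->  qla2x00_start_sp() makes it visible to the HBA
 */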
371 
372 static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
373     struct nvme_fc_remote_port *rport, void *hw_queue_handle,
374     struct nvmefc_fcp_req *fd)
375 {
376 	struct nvme_private *priv = fd->private;
377 	unsigned long flags;
378 
379 	spin_lock_irqsave(&priv->cmd_lock, flags);
380 	if (!priv->sp) {
381 		spin_unlock_irqrestore(&priv->cmd_lock, flags);
382 		return;
383 	}
384 	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
385 		spin_unlock_irqrestore(&priv->cmd_lock, flags);
386 		return;
387 	}
388 	spin_unlock_irqrestore(&priv->cmd_lock, flags);
389 
390 	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
391 	schedule_work(&priv->abort_work);
392 }
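
/*
 * Editor's sketch of the check-then-get idiom shared by the two abort
 * entry points above (illustrative): both the priv->sp lookup and the
 * reference grab must happen under priv->cmd_lock, because the release
 * callbacks clear priv->sp under the same lock.
 *
 *	spin_lock_irqsave(&priv->cmd_lock, flags);
 *	sp = priv->sp;
 *	if (!sp || !kref_get_unless_zero(&sp->cmd_kref))
 *		sp = NULL;			// command already completed
 *	spin_unlock_irqrestore(&priv->cmd_lock, flags);
 */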
393 
394 static inline int qla2x00_start_nvme_mq(srb_t *sp)
395 {
396 	unsigned long   flags;
397 	uint32_t        *clr_ptr;
398 	uint32_t        handle;
399 	struct cmd_nvme *cmd_pkt;
400 	uint16_t        cnt, i;
401 	uint16_t        req_cnt;
402 	uint16_t        tot_dsds;
403 	uint16_t	avail_dsds;
404 	struct dsd64	*cur_dsd;
405 	struct req_que *req = NULL;
406 	struct rsp_que *rsp = NULL;
407 	struct scsi_qla_host *vha = sp->fcport->vha;
408 	struct qla_hw_data *ha = vha->hw;
409 	struct qla_qpair *qpair = sp->qpair;
410 	struct srb_iocb *nvme = &sp->u.iocb_cmd;
411 	struct scatterlist *sgl, *sg;
412 	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
413 	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
414 	uint32_t        rval = QLA_SUCCESS;
415 
416 	/* Setup qpair pointers */
417 	req = qpair->req;
418 	rsp = qpair->rsp;
419 	tot_dsds = fd->sg_cnt;
420 
421 	/* Acquire qpair specific lock */
422 	spin_lock_irqsave(&qpair->qp_lock, flags);
423 
424 	handle = qla2xxx_get_next_handle(req);
425 	if (handle == 0) {
426 		rval = -EBUSY;
427 		goto queuing_error;
428 	}
429 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
430 
431 	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
432 	sp->iores.exch_cnt = 1;
433 	sp->iores.iocb_cnt = req_cnt;
434 	if (qla_get_fw_resources(sp->qpair, &sp->iores)) {
435 		rval = -EBUSY;
436 		goto queuing_error;
437 	}
438 
439 	if (req->cnt < (req_cnt + 2)) {
440 		if (IS_SHADOW_REG_CAPABLE(ha)) {
441 			cnt = *req->out_ptr;
442 		} else {
443 			cnt = rd_reg_dword_relaxed(req->req_q_out);
444 			if (qla2x00_check_reg16_for_disconnect(vha, cnt)) {
445 				rval = -EBUSY;
446 				goto queuing_error;
447 			}
448 		}
449 
450 		if (req->ring_index < cnt)
451 			req->cnt = cnt - req->ring_index;
452 		else
453 			req->cnt = req->length - (req->ring_index - cnt);
454 
455 		if (req->cnt < (req_cnt + 2)) {
456 			rval = -EBUSY;
457 			goto queuing_error;
458 		}
459 	}
460 
461 	if (unlikely(!fd->sqid)) {
462 		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
463 			nvme->u.nvme.aen_op = 1;
464 			atomic_inc(&ha->nvme_active_aen_cnt);
465 		}
466 	}
467 
468 	/* Build command packet. */
469 	req->current_outstanding_cmd = handle;
470 	req->outstanding_cmds[handle] = sp;
471 	sp->handle = handle;
472 	req->cnt -= req_cnt;
473 
474 	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
475 	cmd_pkt->handle = make_handle(req->id, handle);
476 
477 	/* Zero out remaining portion of packet. */
478 	clr_ptr = (uint32_t *)cmd_pkt + 2;
479 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
480 
481 	cmd_pkt->entry_status = 0;
482 
483 	/* Update entry type to indicate Command NVME IOCB */
484 	cmd_pkt->entry_type = COMMAND_NVME;
485 
486 	/* No data transfer - how do we check for buffer len == 0? */
487 	if (fd->io_dir == NVMEFC_FCP_READ) {
488 		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
489 		qpair->counters.input_bytes += fd->payload_length;
490 		qpair->counters.input_requests++;
491 	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
492 		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
493 		if ((vha->flags.nvme_first_burst) &&
494 		    (sp->fcport->nvme_prli_service_param &
495 			NVME_PRLI_SP_FIRST_BURST)) {
496 			if ((fd->payload_length <=
497 			    sp->fcport->nvme_first_burst_size) ||
498 				(sp->fcport->nvme_first_burst_size == 0))
499 				cmd_pkt->control_flags |=
500 					cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
501 		}
502 		qpair->counters.output_bytes += fd->payload_length;
503 		qpair->counters.output_requests++;
504 	} else if (fd->io_dir == 0) {
505 		cmd_pkt->control_flags = 0;
506 	}
507 
508 	if (sp->fcport->edif.enable && fd->io_dir != 0)
509 		cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
510 
511 	/* Set BIT_13 of control flags for Async event */
512 	if (vha->flags.nvme2_enabled &&
513 	    cmd->sqe.common.opcode == nvme_admin_async_event) {
514 		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
515 	}
516 
517 	/* Set NPORT-ID */
518 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
519 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
520 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
521 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
522 	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
523 
524 	/* NVME RSP IU */
525 	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
526 	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);
527 
528 	/* NVME CMND IU */
529 	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
530 	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);
531 
532 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
533 	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
534 
535 	/* One DSD is available in the Command Type NVME IOCB */
536 	avail_dsds = 1;
537 	cur_dsd = &cmd_pkt->nvme_dsd;
538 	sgl = fd->first_sgl;
539 
540 	/* Load data segments */
541 	for_each_sg(sgl, sg, tot_dsds, i) {
542 		cont_a64_entry_t *cont_pkt;
543 
544 		/* Allocate additional continuation packets? */
545 		if (avail_dsds == 0) {
546 			/*
547 			 * Five DSDs are available in the Continuation
548 			 * Type 1 IOCB.
549 			 */
550 
551 			/* Adjust ring index */
552 			req->ring_index++;
553 			if (req->ring_index == req->length) {
554 				req->ring_index = 0;
555 				req->ring_ptr = req->ring;
556 			} else {
557 				req->ring_ptr++;
558 			}
559 			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
560 			put_unaligned_le32(CONTINUE_A64_TYPE,
561 					   &cont_pkt->entry_type);
562 
563 			cur_dsd = cont_pkt->dsd;
564 			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
565 		}
566 
567 		append_dsd64(&cur_dsd, sg);
568 		avail_dsds--;
569 	}
570 
571 	/* Set total entry count. */
572 	cmd_pkt->entry_count = (uint8_t)req_cnt;
573 	wmb();
574 
575 	/* Adjust ring index. */
576 	req->ring_index++;
577 	if (req->ring_index == req->length) {
578 		req->ring_index = 0;
579 		req->ring_ptr = req->ring;
580 	} else {
581 		req->ring_ptr++;
582 	}
583 
584 	/* Don't count NVMe async commands in cmd_cnt; they have a long timeout */
585 	if (!nvme->u.nvme.aen_op)
586 		sp->qpair->cmd_cnt++;
587 
588 	/* Set chip new ring index. */
589 	wrt_reg_dword(req->req_q_in, req->ring_index);
590 
591 	if (vha->flags.process_response_queue &&
592 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
593 		qla24xx_process_response_queue(vha, rsp);
594 
595 queuing_error:
596 	if (rval)
597 		qla_put_fw_resources(sp->qpair, &sp->iores);
598 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
599 
600 	return rval;
601 }
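
/*
 * Editor's worked example for the free-slot computation in
 * qla2x00_start_nvme_mq() (numbers are made up): with a request ring
 * of req->length = 2048 entries, producer ring_index = 2000 and a
 * consumer snapshot cnt = 100:
 *
 *	ring_index >= cnt, so
 *	req->cnt = req->length - (ring_index - cnt)
 *	         = 2048 - (2000 - 100) = 148 free entries
 *
 * The "req_cnt + 2" headroom keeps the producer from ever fully
 * catching up with the consumer index.
 */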
602 
603 /* Post a command */
604 static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
605     struct nvme_fc_remote_port *rport, void *hw_queue_handle,
606     struct nvmefc_fcp_req *fd)
607 {
608 	fc_port_t *fcport;
609 	struct srb_iocb *nvme;
610 	struct scsi_qla_host *vha;
611 	struct qla_hw_data *ha;
612 	int rval;
613 	srb_t *sp;
614 	struct qla_qpair *qpair = hw_queue_handle;
615 	struct nvme_private *priv = fd->private;
616 	struct qla_nvme_rport *qla_rport = rport->private;
617 
618 	if (!priv) {
619 		/* nvme association has been torn down */
620 		return -ENODEV;
621 	}
622 
623 	fcport = qla_rport->fcport;
624 
625 	if (unlikely(!qpair || !fcport || fcport->deleted))
626 		return -EBUSY;
627 
628 	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
629 		return -ENODEV;
630 
631 	vha = fcport->vha;
632 	ha = vha->hw;
633 
634 	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
635 		return -EBUSY;
636 
637 	/*
638 	 * If we know the device is going away while the transport is still
639 	 * sending I/Os, return busy back to stall the I/O queue. This happens
640 	 * when the link goes away and the firmware hasn't notified us yet,
641 	 * but I/Os are being returned. If the device comes back quickly, we
642 	 * won't exhaust the I/O retry count at the core.
643 	 */
644 	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
645 		return -EBUSY;
646 
647 	qpair = qla_mapq_nvme_select_qpair(ha, qpair);
648 
649 	/* Alloc SRB structure */
650 	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
651 	if (!sp)
652 		return -EBUSY;
653 
654 	kref_init(&sp->cmd_kref);
655 	spin_lock_init(&priv->cmd_lock);
656 	sp->priv = priv;
657 	priv->sp = sp;
658 	sp->type = SRB_NVME_CMD;
659 	sp->name = "nvme_cmd";
660 	sp->done = qla_nvme_sp_done;
661 	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
662 	sp->qpair = qpair;
663 	sp->vha = vha;
664 	sp->cmd_sp = sp;
665 	nvme = &sp->u.iocb_cmd;
666 	nvme->u.nvme.desc = fd;
667 
668 	rval = qla2x00_start_nvme_mq(sp);
669 	if (rval != QLA_SUCCESS) {
670 		ql_log(ql_log_warn, vha, 0x212d,
671 		    "qla2x00_start_nvme_mq failed = %d\n", rval);
672 		sp->priv = NULL;
673 		priv->sp = NULL;
674 		qla2xxx_rel_qpair_sp(sp->qpair, sp);
675 	}
676 
677 	return rval;
678 }
679 
680 static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
681 		struct blk_mq_queue_map *map)
682 {
683 	struct scsi_qla_host *vha = lport->private;
684 
685 	blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
686 }
687 
688 static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
689 {
690 	struct scsi_qla_host *vha = lport->private;
691 
692 	ql_log(ql_log_info, vha, 0x210f,
693 	    "localport delete of %p completed.\n", vha->nvme_local_port);
694 	vha->nvme_local_port = NULL;
695 	complete(&vha->nvme_del_done);
696 }
697 
698 static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
699 {
700 	fc_port_t *fcport;
701 	struct qla_nvme_rport *qla_rport = rport->private;
702 
703 	fcport = qla_rport->fcport;
704 	fcport->nvme_remote_port = NULL;
705 	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
706 	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
707 	ql_log(ql_log_info, fcport->vha, 0x2110,
708 	    "remoteport_delete of %p %8phN completed.\n",
709 	    fcport, fcport->port_name);
710 	complete(&fcport->nvme_del_done);
711 }
712 
713 static struct nvme_fc_port_template qla_nvme_fc_transport = {
714 	.localport_delete = qla_nvme_localport_delete,
715 	.remoteport_delete = qla_nvme_remoteport_delete,
716 	.create_queue   = qla_nvme_alloc_queue,
717 	.delete_queue	= NULL,
718 	.ls_req		= qla_nvme_ls_req,
719 	.ls_abort	= qla_nvme_ls_abort,
720 	.fcp_io		= qla_nvme_post_cmd,
721 	.fcp_abort	= qla_nvme_fcp_abort,
722 	.map_queues	= qla_nvme_map_queues,
723 	.max_hw_queues  = DEF_NVME_HW_QUEUES,
724 	.max_sgl_segments = 1024,
725 	.max_dif_sgl_segments = 64,
726 	.dma_boundary = 0xFFFFFFFF,
727 	.local_priv_sz  = 8,
728 	.remote_priv_sz = sizeof(struct qla_nvme_rport),
729 	.lsrqst_priv_sz = sizeof(struct nvme_private),
730 	.fcprqst_priv_sz = sizeof(struct nvme_private),
731 };
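
/*
 * Editor's note on the *_priv_sz fields above (assumed nvme-fc
 * transport behavior): the transport allocates that many extra bytes
 * alongside each object and exposes them via the ->private pointers
 * this file dereferences:
 *
 *	struct scsi_qla_host *vha = lport->private;	// local_priv_sz
 *	struct qla_nvme_rport *rp = rport->private;	// remote_priv_sz
 *	struct nvme_private *priv = fd->private;	// lsrqst/fcprqst_priv_sz
 */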
732 
733 void qla_nvme_unregister_remote_port(struct fc_port *fcport)
734 {
735 	int ret;
736 
737 	if (!IS_ENABLED(CONFIG_NVME_FC))
738 		return;
739 
740 	ql_log(ql_log_warn, fcport->vha, 0x2112,
741 	    "%s: unregister remoteport on %p %8phN\n",
742 	    __func__, fcport, fcport->port_name);
743 
744 	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
745 		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
746 
747 	init_completion(&fcport->nvme_del_done);
748 	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
749 	if (ret)
750 		ql_log(ql_log_info, fcport->vha, 0x2114,
751 			"%s: Failed to unregister nvme_remote_port (%d)\n",
752 			    __func__, ret);
753 	wait_for_completion(&fcport->nvme_del_done);
754 }
755 
756 void qla_nvme_delete(struct scsi_qla_host *vha)
757 {
758 	int nv_ret;
759 
760 	if (!IS_ENABLED(CONFIG_NVME_FC))
761 		return;
762 
763 	if (vha->nvme_local_port) {
764 		init_completion(&vha->nvme_del_done);
765 		ql_log(ql_log_info, vha, 0x2116,
766 			"unregister localport=%p\n",
767 			vha->nvme_local_port);
768 		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
769 		if (nv_ret)
770 			ql_log(ql_log_info, vha, 0x2115,
771 			    "Unregister of localport failed\n");
772 		else
773 			wait_for_completion(&vha->nvme_del_done);
774 	}
775 }
776 
777 int qla_nvme_register_hba(struct scsi_qla_host *vha)
778 {
779 	struct nvme_fc_port_template *tmpl;
780 	struct qla_hw_data *ha;
781 	struct nvme_fc_port_info pinfo;
782 	int ret = -EINVAL;
783 
784 	if (!IS_ENABLED(CONFIG_NVME_FC))
785 		return ret;
786 
787 	ha = vha->hw;
788 	tmpl = &qla_nvme_fc_transport;
789 
790 	if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
791 		ql_log(ql_log_warn, vha, 0xfffd,
792 	    "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to: %d\n",
793 		    ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
794 		ql2xnvme_queues = DEF_NVME_HW_QUEUES;
795 	} else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
796 		ql_log(ql_log_warn, vha, 0xfffd,
797 		       "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
798 		       ql2xnvme_queues, (ha->max_qpairs - 1),
799 		       (ha->max_qpairs - 1));
800 	ql2xnvme_queues = ha->max_qpairs - 1;
801 	}
802 
803 	qla_nvme_fc_transport.max_hw_queues =
804 	    min((uint8_t)(ql2xnvme_queues),
805 		(uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));
806 
807 	ql_log(ql_log_info, vha, 0xfffb,
808 	       "Number of NVME queues used for this port: %d\n",
809 	       qla_nvme_fc_transport.max_hw_queues);
810 
811 	pinfo.node_name = wwn_to_u64(vha->node_name);
812 	pinfo.port_name = wwn_to_u64(vha->port_name);
813 	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
814 	pinfo.port_id = vha->d_id.b24;
815 
816 	mutex_lock(&ha->vport_lock);
817 	/*
818 	 * Check again for nvme_local_port to see if any other thread raced
819 	 * with this one and finished registration.
820 	 */
821 	if (!vha->nvme_local_port) {
822 		ql_log(ql_log_info, vha, 0xffff,
823 		    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
824 		    pinfo.node_name, pinfo.port_name, pinfo.port_id);
825 		qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
826 
827 		ret = nvme_fc_register_localport(&pinfo, tmpl,
828 						 get_device(&ha->pdev->dev),
829 						 &vha->nvme_local_port);
830 		mutex_unlock(&ha->vport_lock);
831 	} else {
832 		mutex_unlock(&ha->vport_lock);
833 		return 0;
834 	}
835 	if (ret) {
836 		ql_log(ql_log_warn, vha, 0xffff,
837 		    "register_localport failed: ret=%x\n", ret);
838 	} else {
839 		vha->nvme_local_port->private = vha;
840 	}
841 
842 	return ret;
843 }
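
/*
 * Editor's worked example for the queue clamping above (illustrative
 * numbers): with ha->max_qpairs = 9 and ql2xnvme_queues = 16, the
 * module parameter exceeds max_qpairs - 1 = 8 and is reset to 8, so
 * max_hw_queues = min(8, 8) = 8. One qpair appears to be held in
 * reserve by the "- 1", presumably the base qpair.
 */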
844 
845 void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
846 {
847 	struct qla_hw_data *ha;
848 
849 	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
850 		return;
851 
852 	ha = orig_sp->fcport->vha->hw;
853 
854 	WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
855 	/* Use Driver Specified Retry Count */
856 	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
857 	abt->drv.abts_rty_cnt = cpu_to_le16(2);
858 	/* Use specified response timeout */
859 	abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
860 	/* set it to 2 * r_a_tov in secs */
861 	abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
862 }
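
/*
 * Editor's worked example for the response timeout above (assumes
 * ha->r_a_tov is kept in 100 ms units, which is what the /10
 * conversion to seconds implies): r_a_tov = 100 means R_A_TOV = 10 s,
 * so rsp_timeout = 2 * (100 / 10) = 20 seconds.
 */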
863 
864 void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
865 {
866 	u16	comp_status;
867 	struct scsi_qla_host *vha;
868 
869 	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
870 		return;
871 
872 	vha = orig_sp->fcport->vha;
873 
874 	comp_status = le16_to_cpu(abt->comp_status);
875 	switch (comp_status) {
876 	case CS_RESET:		/* reset event aborted */
877 	case CS_ABORTED:	/* IOCB was cleaned */
878 	case CS_TIMEOUT:
879 	/* N_Port handle is not currently logged in */
880 	case CS_PORT_UNAVAILABLE:
881 	/* N_Port handle was logged out while waiting for ABTS to complete */
882 	case CS_PORT_LOGGED_OUT:
883 	/* Firmware found that the port name changed */
884 	case CS_PORT_CONFIG_CHG:
886 		ql_dbg(ql_dbg_async, vha, 0xf09d,
887 		       "Abort I/O IOCB completed with error, comp_status=%x\n",
888 		       comp_status);
889 		break;
890 
891 	/* BA_RJT was received for the ABTS */
892 	case CS_REJECT_RECEIVED:
893 		ql_dbg(ql_dbg_async, vha, 0xf09e,
894 		       "BA_RJT was received for the ABTS, rjt_vendorUnique = %u\n",
895 		       abt->fw.ba_rjt_vendorUnique);
896 		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
897 		       "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
898 		       abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
899 		break;
900 
901 	case CS_COMPLETE:
902 		ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
903 		       "IOCB request completed successfully, comp_status=%x\n",
904 		       comp_status);
905 		break;
906 
907 	case CS_IOCB_ERROR:
908 		ql_dbg(ql_dbg_async, vha, 0xf0a0,
909 		       "IOCB request failed, comp_status=%x\n", comp_status);
910 		break;
911 
912 	default:
913 		ql_dbg(ql_dbg_async, vha, 0xf0a1,
914 		       "Invalid Abort IO IOCB Completion Status %x\n",
915 		       comp_status);
916 		break;
917 	}
918 }
919 
920 inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
921 {
922 	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
923 		return;
924 	kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
925 }
926