xref: /linux/drivers/scsi/lpfc/lpfc_nvme.h (revision 02680c23d7b3febe45ea3d4f9818c2b2dc89020a)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  ********************************************************************/
23 
24 #include <linux/nvme.h>
25 #include <linux/nvme-fc-driver.h>
26 #include <linux/nvme-fc.h>
27 
28 #define LPFC_NVME_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
29 
30 #define LPFC_NVME_ERSP_LEN		0x20
31 
32 #define LPFC_NVME_WAIT_TMO              10
33 #define LPFC_NVME_EXPEDITE_XRICNT	8
34 #define LPFC_NVME_FB_SHIFT		9
35 #define LPFC_NVME_MAX_FB		(1 << 20)	/* 1M */
36 
37 #define LPFC_MAX_NVME_INFO_TMP_LEN	100
38 #define LPFC_NVME_INFO_MORE_STR		"\nCould be more info...\n"
39 
/*
 * Return the nvme remote port bound to @ndlp, or NULL when no nrport is
 * attached or the node is flagged as waiting for an unregister to
 * complete (the nrport pointer must not be used in that window).
 *
 * The @ndlp argument is fully parenthesized so the macro expands
 * correctly for any caller expression, not just a plain identifier.
 */
#define lpfc_ndlp_get_nrport(ndlp)					\
	((!(ndlp)->nrport || ((ndlp)->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)) \
	? NULL : (ndlp)->nrport)
43 
/* Per-queue handle handed back at nvme queue create time. */
44 struct lpfc_nvme_qhandle {
45 	uint32_t index;		/* WQ index to use */
46 	uint32_t qidx;		/* queue index passed to create */
47 	uint32_t cpu_id;	/* current cpu id at time of create */
48 };
49 
50 /* Declare nvme-based local and remote port definitions. */
/* Driver-private state for an nvme-fc local port, one per vport. */
51 struct lpfc_nvme_lport {
52 	struct lpfc_vport *vport;	/* owning vport */
53 	struct completion *lport_unreg_cmp;	/* completed when lport unreg finishes */
54 	/* Add stats counters here */
55 	atomic_t fc4NvmeLsRequests;
56 	atomic_t fc4NvmeLsCmpls;
57 	atomic_t xmt_fcp_noxri;
58 	atomic_t xmt_fcp_bad_ndlp;
59 	atomic_t xmt_fcp_qdepth;
60 	atomic_t xmt_fcp_wqerr;
61 	atomic_t xmt_fcp_err;
62 	atomic_t xmt_fcp_abort;
63 	atomic_t xmt_ls_abort;
64 	atomic_t xmt_ls_err;
65 	atomic_t cmpl_fcp_xb;
66 	atomic_t cmpl_fcp_err;
67 	atomic_t cmpl_ls_xb;
68 	atomic_t cmpl_ls_err;
69 };
70 
/* Driver-private state for an nvme-fc remote port. */
71 struct lpfc_nvme_rport {
72 	struct lpfc_nvme_lport *lport;	/* local port this rport hangs off */
73 	struct nvme_fc_remote_port *remoteport;	/* transport-owned remote port */
74 	struct lpfc_nodelist *ndlp;	/* backing FC node */
75 	struct completion rport_unreg_done;	/* signaled when rport unreg completes */
76 };
77 
/* Private data attached to each nvme-fc FCP request: back-pointer to the driver IO buffer. */
78 struct lpfc_nvme_fcpreq_priv {
79 	struct lpfc_io_buf *nvme_buf;
80 };
81 
82 /*
83  * Set the NVME LS request timeout to 30s. This is larger than the
84  * 2*R_A_TOV required by the spec, because the spec value appears to
85  * cause issues with some devices.
86  */
87 #define LPFC_NVME_LS_TIMEOUT		30	/* seconds */
88 
89 
90 #define LPFC_NVMET_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
91 #define LPFC_NVMET_RQE_MIN_POST		128	/* NOTE(review): RQE post counts — confirm units in lpfc_nvmet.c */
92 #define LPFC_NVMET_RQE_DEF_POST		512
93 #define LPFC_NVMET_RQE_DEF_COUNT	2048
94 #define LPFC_NVMET_SUCCESS_LEN		12
95 
96 #define LPFC_NVMET_MRQ_AUTO		0	/* let the driver pick the MRQ count */
97 #define LPFC_NVMET_MRQ_MAX		16
98 
99 #define LPFC_NVMET_WAIT_TMO		(5 * MSEC_PER_SEC)	/* milliseconds */
100 
101 /* Used for NVME Target */
102 #define LPFC_NVMET_INV_HOST_ACTIVE      1
102 
/*
 * Driver-private state for the nvmet target port: owning HBA, unregister
 * completion, hosthandle-invalidation state, and statistics counters.
 * Each counter group is tagged with the routine that updates it.
 */
103 struct lpfc_nvmet_tgtport {
104 	struct lpfc_hba *phba;	/* owning HBA */
105 	struct completion *tport_unreg_cmp;	/* completed when tport unreg finishes */
106 	atomic_t state;		/* tracks nvmet hosthandle invalidation */
107 
108 	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
109 	atomic_t rcv_ls_req_in;
110 	atomic_t rcv_ls_req_out;
111 	atomic_t rcv_ls_req_drop;
112 	atomic_t xmt_ls_abort;
113 	atomic_t xmt_ls_abort_cmpl;
114 
115 	/* Stats counters - lpfc_nvmet_xmt_ls_rsp */
116 	atomic_t xmt_ls_rsp;
117 	atomic_t xmt_ls_drop;
118 
119 	/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
120 	atomic_t xmt_ls_rsp_error;
121 	atomic_t xmt_ls_rsp_aborted;
122 	atomic_t xmt_ls_rsp_xb_set;
123 	atomic_t xmt_ls_rsp_cmpl;
124 
125 	/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
126 	atomic_t rcv_fcp_cmd_in;
127 	atomic_t rcv_fcp_cmd_out;
128 	atomic_t rcv_fcp_cmd_drop;
129 	atomic_t rcv_fcp_cmd_defer;
130 	atomic_t xmt_fcp_release;
131 
132 	/* Stats counters - lpfc_nvmet_xmt_fcp_op */
133 	atomic_t xmt_fcp_drop;
134 	atomic_t xmt_fcp_read_rsp;
135 	atomic_t xmt_fcp_read;
136 	atomic_t xmt_fcp_write;
137 	atomic_t xmt_fcp_rsp;
138 
139 	/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
140 	atomic_t xmt_fcp_rsp_xb_set;
141 	atomic_t xmt_fcp_rsp_cmpl;
142 	atomic_t xmt_fcp_rsp_error;
143 	atomic_t xmt_fcp_rsp_aborted;
144 	atomic_t xmt_fcp_rsp_drop;
145 
146 	/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
147 	atomic_t xmt_fcp_xri_abort_cqe;
148 	atomic_t xmt_fcp_abort;
149 	atomic_t xmt_fcp_abort_cmpl;
150 	atomic_t xmt_abort_sol;
151 	atomic_t xmt_abort_unsol;
152 	atomic_t xmt_abort_rsp;
153 	atomic_t xmt_abort_rsp_error;
154 
155 	/* Stats counters - defer IO */
156 	atomic_t defer_ctx;
157 	atomic_t defer_fod;
158 	atomic_t defer_wqfull;
159 };
160 
/*
 * Per-CPU (per-MRQ) free list of nvmet contexts; entries are indexed by
 * the lpfc_get_ctx_list() macro below. The next/start pointers let a CPU
 * that exhausts its own list walk the other CPUs' lists.
 */
161 struct lpfc_nvmet_ctx_info {
162 	struct list_head nvmet_ctx_list;
163 	spinlock_t	nvmet_ctx_list_lock; /* lock per CPU */
164 	struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
165 	struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
166 	uint16_t	nvmet_ctx_list_cnt;
167 	char pad[16];  /* pad to a cache-line */
168 };
169 
/*
 * Retrieve the context info associated with the specified cpu / mrq.
 * The per-CPU lists are laid out as a 2-D array flattened as
 * [cpu][mrq] with row length phba->cfg_nvmet_mrq.
 *
 * All arguments are parenthesized so arithmetic expressions passed as
 * @cpu or @mrq (e.g. "base + offset") index the correct element.
 */
#define lpfc_get_ctx_list(phba, cpu, mrq)  \
	((phba)->sli4_hba.nvmet_ctx_info + (((cpu) * (phba)->cfg_nvmet_mrq) + (mrq)))
173 
174 /* Values for state field of struct lpfc_async_xchg_ctx */
175 #define LPFC_NVME_STE_LS_RCV		1	/* LS request received */
176 #define LPFC_NVME_STE_LS_ABORT		2	/* LS exchange being aborted */
177 #define LPFC_NVME_STE_LS_RSP		3	/* LS response issued */
178 #define LPFC_NVME_STE_RCV		4	/* FCP command received */
179 #define LPFC_NVME_STE_DATA		5	/* data transfer in progress */
180 #define LPFC_NVME_STE_ABORT		6	/* exchange being aborted */
181 #define LPFC_NVME_STE_DONE		7	/* exchange complete */
182 #define LPFC_NVME_STE_FREE		0xff	/* context returned to the free pool */
183 
184 /* Values for flag field of struct lpfc_async_xchg_ctx */
185 #define LPFC_NVME_IO_INP		0x1  /* IO is in progress on exchange */
186 #define LPFC_NVME_ABORT_OP		0x2  /* Abort WQE issued on exchange */
187 #define LPFC_NVME_XBUSY			0x4  /* XB bit set on IO cmpl */
188 #define LPFC_NVME_CTX_RLS		0x8  /* ctx free requested */
189 #define LPFC_NVME_ABTS_RCV		0x10  /* ABTS received on exchange */
190 #define LPFC_NVME_CTX_REUSE_WQ		0x20  /* ctx reused via WQ */
191 #define LPFC_NVME_DEFER_WQFULL		0x40  /* Waiting on a free WQE */
192 #define LPFC_NVME_TNOTIFY		0x80  /* notify transport of abts */
193 
/*
 * Per-exchange context for asynchronously received NVME traffic
 * (LS requests/responses and target-mode FCP commands). state takes
 * LPFC_NVME_STE_* values and flag takes LPFC_NVME_* bits, both defined
 * above; flag is protected by ctxlock.
 */
194 struct lpfc_async_xchg_ctx {
195 	union {
196 		struct nvmefc_tgt_fcp_req fcp_req;	/* transport FCP request (target mode) */
197 	} hdlrctx;
198 	struct list_head list;
199 	struct lpfc_hba *phba;
200 	struct lpfc_nodelist *ndlp;
201 	struct nvmefc_ls_req *ls_req;	/* transport LS request, when this ctx carries one */
202 	struct nvmefc_ls_rsp ls_rsp;
203 	struct lpfc_iocbq *wqeq;
204 	struct lpfc_iocbq *abort_wqeq;
205 	spinlock_t ctxlock; /* protect flag access */
206 	uint32_t sid;	/* NOTE(review): presumably source N_Port ID — confirm */
207 	uint32_t offset;
208 	uint16_t oxid;	/* originator exchange id — inferred from name */
209 	uint16_t size;
210 	uint16_t entry_cnt;
211 	uint16_t cpu;
212 	uint16_t idx;
213 	uint16_t state;	/* LPFC_NVME_STE_* */
214 	uint16_t flag;	/* LPFC_NVME_* bits, under ctxlock */
215 	void *payload;
216 	struct rqb_dmabuf *rqb_buffer;
217 	struct lpfc_nvmet_ctxbuf *ctxbuf;
218 	struct lpfc_sli4_hdw_queue *hdwq;
219 
220 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
221 	uint64_t ts_isr_cmd;	/* debug-build timestamps for IO stage latency */
222 	uint64_t ts_cmd_nvme;
223 	uint64_t ts_nvme_data;
224 	uint64_t ts_data_wqput;
225 	uint64_t ts_isr_data;
226 	uint64_t ts_data_nvme;
227 	uint64_t ts_nvme_status;
228 	uint64_t ts_status_wqput;
229 	uint64_t ts_isr_status;
230 	uint64_t ts_status_nvme;
231 #endif
232 };
233 
234 
235 /* routines found in lpfc_nvme.c */
/* Issue an NVME LS request on @vport to @ndlp; @gen_req_cmp is the WQE completion handler. */
236 int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
237 		struct nvmefc_ls_req *pnvme_lsreq,
238 		void (*gen_req_cmp)(struct lpfc_hba *phba,
239 				struct lpfc_iocbq *cmdwqe,
240 				struct lpfc_wcqe_complete *wcqe));
/* Common completion handling for an LS request WQE. */
241 void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba,  struct lpfc_vport *vport,
242 		struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
/* Abort an outstanding LS request previously issued via __lpfc_nvme_ls_req(). */
243 int __lpfc_nvme_ls_abort(struct lpfc_vport *vport,
244 		struct lpfc_nodelist *ndlp, struct nvmefc_ls_req *pnvme_lsreq);
245 
246 /* routines found in lpfc_nvmet.c */
/* Issue an abort for an unsolicited LS exchange identified by @sid/@xri. */
247 int lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
248 			struct lpfc_async_xchg_ctx *ctxp, uint32_t sid,
249 			uint16_t xri);
/* Transmit an LS response on @axchg; @xmt_ls_rsp_cmp is the WQE completion handler. */
250 int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
251 			struct nvmefc_ls_rsp *ls_rsp,
252 			void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
253 				struct lpfc_iocbq *cmdwqe,
254 				struct lpfc_wcqe_complete *wcqe));
/* Common completion handling for an LS response WQE. */
255 void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba,
256 		struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
257