// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright 2021 Marvell. All rights reserved. */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/qed/qed_nvmetcp_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_nvmetcp.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_sp.h"
#include "qed_reg_addr.h"

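/* Forward a TCP ULP async event from firmware to the upper-layer driver's
 * registered callback; fail if no callback has been set.
 */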
static int qed_nvmetcp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
				   u16 echo, union event_ring_data *data,
				   u8 fw_return_code)
{
	struct qed_nvmetcp_info *p_nvmetcp = p_hwfn->p_nvmetcp_info;

	if (!p_nvmetcp->event_cb) {
		DP_NOTICE(p_hwfn, "nvmetcp async completion is not set\n");
		return -EINVAL;
	}

	return p_nvmetcp->event_cb(p_nvmetcp->event_context,
				   fw_event_code, data);
}

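/* Post the NVMETCP_RAMROD_CMD_ID_INIT_FUNC ramrod: fill the function and
 * queue init parameters from the PF params, program the TCP timers and
 * register the async event callback for PROTOCOLID_TCP_ULP.
 */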
static int qed_sp_nvmetcp_func_start(struct qed_hwfn *p_hwfn,
				     enum spq_mode comp_mode,
				     struct qed_spq_comp_cb *p_comp_addr,
				     void *event_context,
				     nvmetcp_event_cb_t async_event_cb)
{
	struct nvmetcp_init_ramrod_params *p_ramrod = NULL;
	struct qed_nvmetcp_pf_params *p_params = NULL;
	struct scsi_init_func_queues *p_queue = NULL;
	struct nvmetcp_spe_func_init *p_init = NULL;
	struct qed_sp_init_data init_data = {};
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;
	u16 val;
	u8 i;

	/* Get SPQ entry */
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_INIT_FUNC,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	/* Fill the function-init SPE from the PF NVMeTCP parameters */
	p_ramrod = &p_ent->ramrod.nvmetcp_init;
	p_init = &p_ramrod->nvmetcp_init_spe;
	p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
	p_queue = &p_init->q_params;
	p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
	p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
	p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
	p_init->ll2_rx_queue_id = RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
					p_params->ll2_ooo_queue_id;
	SET_FIELD(p_init->flags, NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE, 1);
	p_init->func_params.log_page_size = ilog2(PAGE_SIZE);
	p_init->func_params.num_tasks = cpu_to_le16(p_params->num_tasks);
	p_init->debug_flags = p_params->debug_mode;
	DMA_REGPAIR_LE(p_queue->glbl_q_params_addr,
		       p_params->glbl_q_params_addr);
	p_queue->cq_num_entries = cpu_to_le16(QED_NVMETCP_FW_CQ_SIZE);
	p_queue->num_queues = p_params->num_queues;
	val = RESC_START(p_hwfn, QED_CMDQS_CQS);
	p_queue->queue_relative_offset = cpu_to_le16((u16)val);
	p_queue->cq_sb_pi = p_params->gl_rq_pi;

	/* Bind each CQ to its IGU status block */
	for (i = 0; i < p_params->num_queues; i++) {
		val = qed_get_igu_sb_id(p_hwfn, i);
		p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
	}

	/* Command queues are not used in NVMeTCP mode */
	SET_FIELD(p_queue->q_validity,
		  SCSI_INIT_FUNC_QUEUES_CMD_VALID, 0);
	p_queue->cmdq_num_entries = 0;
	p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);

	/* TCP timers and retransmit limits */
	p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(QED_TCP_TWO_MSL_TIMER);
	p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(QED_TCP_SWS_TIMER);
	p_init->half_way_close_timeout = cpu_to_le16(QED_TCP_HALF_WAY_CLOSE_TIMEOUT);
	p_ramrod->tcp_init.max_fin_rt = QED_TCP_MAX_FIN_RT;
	SET_FIELD(p_ramrod->nvmetcp_init_spe.params,
		  NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT, QED_TCP_MAX_FIN_RT);

	/* Hook up the async event callback before posting the ramrod */
	p_hwfn->p_nvmetcp_info->event_context = event_context;
	p_hwfn->p_nvmetcp_info->event_cb = async_event_cb;
	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
				  qed_nvmetcp_async_event);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

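/* Post the NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC ramrod and unregister the
 * TCP ULP async event callback.
 */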
static int qed_sp_nvmetcp_func_stop(struct qed_hwfn *p_hwfn,
				    enum spq_mode comp_mode,
				    struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP);

	return rc;
}

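/* Report device capabilities to the upper-layer driver: common dev info
 * plus the MFW port id and number of NVMeTCP CQs of the affinitized hwfn.
 */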
static int qed_fill_nvmetcp_dev_info(struct qed_dev *cdev,
				     struct qed_dev_nvmetcp_info *info)
{
	struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
	int rc;

	memset(info, 0, sizeof(*info));
	rc = qed_fill_dev_info(cdev, &info->common);
	info->port_id = MFW_PORT(hwfn);
	info->num_cqs = FEAT_NUM(hwfn, QED_NVMETCP_CQ);

	return rc;
}

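/* Cache the upper-layer driver's callbacks and cookie on the qed device */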
static void qed_register_nvmetcp_ops(struct qed_dev *cdev,
				     struct qed_nvmetcp_cb_ops *ops,
				     void *cookie)
{
	cdev->protocol_ops.nvmetcp = ops;
	cdev->ops_cookie = cookie;
}

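/* Stop the NVMeTCP PF function: refuse while offloaded connections are
 * still outstanding, then post the destroy-function ramrod.
 */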
static int qed_nvmetcp_stop(struct qed_dev *cdev)
{
	int rc;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
		DP_NOTICE(cdev, "nvmetcp already stopped\n");

		return 0;
	}

	if (!hash_empty(cdev->connections)) {
		DP_NOTICE(cdev,
			  "Can't stop nvmetcp - not all connections were returned\n");

		return -EINVAL;
	}

	/* Stop the nvmetcp */
	rc = qed_sp_nvmetcp_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
				      NULL);
	cdev->flags &= ~QED_FLAG_STORAGE_STARTED;

	return rc;
}

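/* Start the NVMeTCP PF function and, when @tasks is provided, hand the
 * task (TID) memory layout back to the caller.
 */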
static int qed_nvmetcp_start(struct qed_dev *cdev,
			     struct qed_nvmetcp_tid *tasks,
			     void *event_context,
			     nvmetcp_event_cb_t async_event_cb)
{
	struct qed_tid_mem *tid_info;
	int rc;

	if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
		DP_NOTICE(cdev, "nvmetcp already started\n");

		return 0;
	}

	rc = qed_sp_nvmetcp_func_start(QED_AFFIN_HWFN(cdev),
				       QED_SPQ_MODE_EBLOCK, NULL,
				       event_context, async_event_cb);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start nvmetcp\n");

		return rc;
	}

	cdev->flags |= QED_FLAG_STORAGE_STARTED;
	hash_init(cdev->connections);

	if (!tasks)
		return 0;

	tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL);
	if (!tid_info) {
		qed_nvmetcp_stop(cdev);

		return -ENOMEM;
	}

	rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
	if (rc) {
		DP_NOTICE(cdev, "Failed to gather task information\n");
		qed_nvmetcp_stop(cdev);
		kfree(tid_info);

		return rc;
	}

	/* Fill task information */
	tasks->size = tid_info->tid_size;
	tasks->num_tids_per_block = tid_info->num_tids_per_block;
	memcpy(tasks->blocks, tid_info->blocks,
	       MAX_TID_BLOCKS_NVMETCP * sizeof(u8 *));
	kfree(tid_info);

	return 0;
}

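/* qed ops table handed to the NVMe/TCP offload upper-layer driver */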
static const struct qed_nvmetcp_ops qed_nvmetcp_ops_pass = {
	.common = &qed_common_ops_pass,
	.ll2 = &qed_ll2_ops_pass,
	.fill_dev_info = &qed_fill_nvmetcp_dev_info,
	.register_ops = &qed_register_nvmetcp_ops,
	.start = &qed_nvmetcp_start,
	.stop = &qed_nvmetcp_stop,

	/* Placeholder - Connection level ops */
};

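/*
 * Entry point for an upper-layer NVMe/TCP offload driver to obtain the ops
 * table. A minimal usage sketch (caller-side names are illustrative only):
 *
 *	const struct qed_nvmetcp_ops *ops = qed_get_nvmetcp_ops();
 *
 *	ops->register_ops(cdev, &my_cb_ops, my_cookie);
 *	rc = ops->start(cdev, &my_tid_info, my_ctx, my_event_cb);
 */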
const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void)
{
	return &qed_nvmetcp_ops_pass;
}
EXPORT_SYMBOL(qed_get_nvmetcp_ops);

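/* Counterpart of qed_get_nvmetcp_ops(); a no-op for now, since the static
 * ops table needs no reference counting.
 */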
void qed_put_nvmetcp_ops(void)
{
}
EXPORT_SYMBOL(qed_put_nvmetcp_ops);