// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright 2021 Marvell. All rights reserved. */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/qed/qed_nvmetcp_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_nvmetcp.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_sp.h"
#include "qed_reg_addr.h"
#include "qed_nvmetcp_fw_funcs.h"

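/* Forward a firmware async event to the callback registered by the
 * upper-layer driver at function-start time; -EINVAL if none is set.
 */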
static int qed_nvmetcp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
				   u16 echo, union event_ring_data *data,
				   u8 fw_return_code)
{
	if (p_hwfn->p_nvmetcp_info->event_cb) {
		struct qed_nvmetcp_info *p_nvmetcp = p_hwfn->p_nvmetcp_info;

		return p_nvmetcp->event_cb(p_nvmetcp->event_context,
					   fw_event_code, data);
	} else {
		DP_NOTICE(p_hwfn, "nvmetcp async completion is not set\n");

		return -EINVAL;
	}
}

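/* Post the INIT_FUNC ramrod: program the PF-global NVMeTCP parameters
 * (queue-ring sizes, LL2 OOO RX queue, TCP timers) and register the
 * async-event callback before posting to the slow path queue.
 */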
static int qed_sp_nvmetcp_func_start(struct qed_hwfn *p_hwfn,
				     enum spq_mode comp_mode,
				     struct qed_spq_comp_cb *p_comp_addr,
				     void *event_context,
				     nvmetcp_event_cb_t async_event_cb)
{
	struct nvmetcp_init_ramrod_params *p_ramrod = NULL;
	struct qed_nvmetcp_pf_params *p_params = NULL;
	struct scsi_init_func_queues *p_queue = NULL;
	struct nvmetcp_spe_func_init *p_init = NULL;
	struct qed_sp_init_data init_data = {};
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;
	u16 val;
	u8 i;

	/* Get SPQ entry */
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_INIT_FUNC,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.nvmetcp_init;
	p_init = &p_ramrod->nvmetcp_init_spe;
	p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
	p_queue = &p_init->q_params;
	p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
	p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
	p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
	p_init->ll2_rx_queue_id = RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
					p_params->ll2_ooo_queue_id;
	SET_FIELD(p_init->flags, NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE, 1);
	p_init->func_params.log_page_size = ilog2(PAGE_SIZE);
	p_init->func_params.num_tasks = cpu_to_le16(p_params->num_tasks);
	p_init->debug_flags = p_params->debug_mode;
	DMA_REGPAIR_LE(p_queue->glbl_q_params_addr,
		       p_params->glbl_q_params_addr);
	p_queue->cq_num_entries = cpu_to_le16(QED_NVMETCP_FW_CQ_SIZE);
	p_queue->num_queues = p_params->num_queues;
	val = RESC_START(p_hwfn, QED_CMDQS_CQS);
	p_queue->queue_relative_offset = cpu_to_le16((u16)val);
	p_queue->cq_sb_pi = p_params->gl_rq_pi;

	for (i = 0; i < p_params->num_queues; i++) {
		val = qed_get_igu_sb_id(p_hwfn, i);
		p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
	}

	SET_FIELD(p_queue->q_validity,
		  SCSI_INIT_FUNC_QUEUES_CMD_VALID, 0);
	p_queue->cmdq_num_entries = 0;
	p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
	p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(QED_TCP_TWO_MSL_TIMER);
	p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(QED_TCP_SWS_TIMER);
	p_init->half_way_close_timeout = cpu_to_le16(QED_TCP_HALF_WAY_CLOSE_TIMEOUT);
	p_ramrod->tcp_init.max_fin_rt = QED_TCP_MAX_FIN_RT;
	SET_FIELD(p_ramrod->nvmetcp_init_spe.params,
		  NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT, QED_TCP_MAX_FIN_RT);
	p_hwfn->p_nvmetcp_info->event_context = event_context;
	p_hwfn->p_nvmetcp_info->event_cb = async_event_cb;
	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
				  qed_nvmetcp_async_event);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

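/* Post the DESTROY_FUNC ramrod and unregister the async-event callback. */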
static int qed_sp_nvmetcp_func_stop(struct qed_hwfn *p_hwfn,
				    enum spq_mode comp_mode,
				    struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP);

	return rc;
}

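/* Fill the common device info plus the NVMeTCP-specific port id and
 * number of CQs for the affined hwfn.
 */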
static int qed_fill_nvmetcp_dev_info(struct qed_dev *cdev,
				     struct qed_dev_nvmetcp_info *info)
{
	struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
	int rc;

	memset(info, 0, sizeof(*info));
	rc = qed_fill_dev_info(cdev, &info->common);
	info->port_id = MFW_PORT(hwfn);
	info->num_cqs = FEAT_NUM(hwfn, QED_NVMETCP_CQ);

	return rc;
}

static void qed_register_nvmetcp_ops(struct qed_dev *cdev,
				     struct qed_nvmetcp_cb_ops *ops,
				     void *cookie)
{
	cdev->protocol_ops.nvmetcp = ops;
	cdev->ops_cookie = cookie;
}

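/* Stop PF NVMeTCP offload; refused while any acquired connection is
 * still present in the connection hash.
 */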
static int qed_nvmetcp_stop(struct qed_dev *cdev)
{
	int rc;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
		DP_NOTICE(cdev, "nvmetcp already stopped\n");

		return 0;
	}

	if (!hash_empty(cdev->connections)) {
		DP_NOTICE(cdev,
			  "Can't stop nvmetcp - not all connections were returned\n");

		return -EINVAL;
	}

	/* Stop the nvmetcp */
	rc = qed_sp_nvmetcp_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
				      NULL);
	cdev->flags &= ~QED_FLAG_STORAGE_STARTED;

	return rc;
}

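/* Start PF NVMeTCP offload and, if the caller passed a tasks struct,
 * return the task (TID) memory layout to it.
 */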
static int qed_nvmetcp_start(struct qed_dev *cdev,
			     struct qed_nvmetcp_tid *tasks,
			     void *event_context,
			     nvmetcp_event_cb_t async_event_cb)
{
	struct qed_tid_mem *tid_info;
	int rc;

	if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
		DP_NOTICE(cdev, "nvmetcp already started\n");

		return 0;
	}

	rc = qed_sp_nvmetcp_func_start(QED_AFFIN_HWFN(cdev),
				       QED_SPQ_MODE_EBLOCK, NULL,
				       event_context, async_event_cb);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start nvmetcp\n");

		return rc;
	}

	cdev->flags |= QED_FLAG_STORAGE_STARTED;
	hash_init(cdev->connections);

	if (!tasks)
		return 0;

	tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL);
	if (!tid_info) {
		qed_nvmetcp_stop(cdev);

		return -ENOMEM;
	}

	rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
	if (rc) {
		DP_NOTICE(cdev, "Failed to gather task information\n");
		qed_nvmetcp_stop(cdev);
		kfree(tid_info);

		return rc;
	}

	/* Fill task information */
	tasks->size = tid_info->tid_size;
	tasks->num_tids_per_block = tid_info->num_tids_per_block;
	memcpy(tasks->blocks, tid_info->blocks,
	       MAX_TID_BLOCKS_NVMETCP * sizeof(u8 *));
	kfree(tid_info);

	return 0;
}

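/* Look up an acquired connection by its icid handle in the per-device
 * connection hash; NULL if storage is not started or no match exists.
 */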
static struct qed_hash_nvmetcp_con *qed_nvmetcp_get_hash(struct qed_dev *cdev,
							 u32 handle)
{
	struct qed_hash_nvmetcp_con *hash_con = NULL;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
		return NULL;

	hash_for_each_possible(cdev->connections, hash_con, node, handle) {
		if (hash_con->con->icid == handle)
			break;
	}

	if (!hash_con || hash_con->con->icid != handle)
		return NULL;

	return hash_con;
}

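/* Post the OFFLOAD_CONN ramrod: hand the connection's queue PBLs,
 * CCCID-iTID table and full TCP tuple/options state to firmware.
 */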
static int qed_sp_nvmetcp_conn_offload(struct qed_hwfn *p_hwfn,
				       struct qed_nvmetcp_conn *p_conn,
				       enum spq_mode comp_mode,
				       struct qed_spq_comp_cb *p_comp_addr)
{
	struct nvmetcp_spe_conn_offload *p_ramrod = NULL;
	struct tcp_offload_params_opt2 *p_tcp = NULL;
	struct qed_sp_init_data init_data = { 0 };
	struct qed_spq_entry *p_ent = NULL;
	dma_addr_t r2tq_pbl_addr;
	dma_addr_t xhq_pbl_addr;
	dma_addr_t uhq_pbl_addr;
	u16 physical_q;
	int rc = 0;
	u8 i;

	/* Get SPQ entry */
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.nvmetcp_conn_offload;

	/* Transmission PQ is the first of the PF */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_conn->physical_q0 = cpu_to_le16(physical_q);
	p_ramrod->nvmetcp.physical_q0 = cpu_to_le16(physical_q);

	/* nvmetcp Pure-ACK PQ */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_conn->physical_q1 = cpu_to_le16(physical_q);
	p_ramrod->nvmetcp.physical_q1 = cpu_to_le16(physical_q);
	p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.sq_pbl_addr, p_conn->sq_pbl_addr);
	r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq);
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.r2tq_pbl_addr, r2tq_pbl_addr);
	xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq);
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.xhq_pbl_addr, xhq_pbl_addr);
	uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq);
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.uhq_pbl_addr, uhq_pbl_addr);
	p_ramrod->nvmetcp.flags = p_conn->offl_flags;
	p_ramrod->nvmetcp.default_cq = p_conn->default_cq;
	p_ramrod->nvmetcp.initial_ack = 0;
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.nvmetcp.cccid_itid_table_addr,
		       p_conn->nvmetcp_cccid_itid_table_addr);
	p_ramrod->nvmetcp.nvmetcp.cccid_max_range =
		 cpu_to_le16(p_conn->nvmetcp_cccid_max_range);
	p_tcp = &p_ramrod->tcp;
	qed_set_fw_mac_addr(&p_tcp->remote_mac_addr_hi,
			    &p_tcp->remote_mac_addr_mid,
			    &p_tcp->remote_mac_addr_lo, p_conn->remote_mac);
	qed_set_fw_mac_addr(&p_tcp->local_mac_addr_hi,
			    &p_tcp->local_mac_addr_mid,
			    &p_tcp->local_mac_addr_lo, p_conn->local_mac);
	p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
	p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
	p_tcp->ip_version = p_conn->ip_version;
	if (p_tcp->ip_version == TCP_IPV6) {
		for (i = 0; i < 4; i++) {
			p_tcp->remote_ip[i] = cpu_to_le32(p_conn->remote_ip[i]);
			p_tcp->local_ip[i] = cpu_to_le32(p_conn->local_ip[i]);
		}
	} else {
		p_tcp->remote_ip[0] = cpu_to_le32(p_conn->remote_ip[0]);
		p_tcp->local_ip[0] = cpu_to_le32(p_conn->local_ip[0]);
	}

	p_tcp->flow_label = cpu_to_le32(p_conn->flow_label);
	p_tcp->ttl = p_conn->ttl;
	p_tcp->tos_or_tc = p_conn->tos_or_tc;
	p_tcp->remote_port = cpu_to_le16(p_conn->remote_port);
	p_tcp->local_port = cpu_to_le16(p_conn->local_port);
	p_tcp->mss = cpu_to_le16(p_conn->mss);
	p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
	p_tcp->connect_mode = p_conn->connect_mode;
	p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
	p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt;
	p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
	p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
	p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

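/* Post the UPDATE_CONN ramrod with the negotiated PDU/sequence sizes
 * and digest flags.
 */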
static int qed_sp_nvmetcp_conn_update(struct qed_hwfn *p_hwfn,
				      struct qed_nvmetcp_conn *p_conn,
				      enum spq_mode comp_mode,
				      struct qed_spq_comp_cb *p_comp_addr)
{
	struct nvmetcp_conn_update_ramrod_params *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u32 dval;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_UPDATE_CONN,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.nvmetcp_conn_update;
	p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
	p_ramrod->flags = p_conn->update_flag;
	p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
	dval = p_conn->max_recv_pdu_length;
	p_ramrod->max_recv_pdu_length = cpu_to_le32(dval);
	dval = p_conn->max_send_pdu_length;
	p_ramrod->max_send_pdu_length = cpu_to_le32(dval);
	p_ramrod->first_seq_length = cpu_to_le32(p_conn->first_seq_length);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

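/* Post the TERMINATION_CONN ramrod; termination is graceful or abortive
 * depending on p_conn->abortive_dsconnect.
 */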
static int qed_sp_nvmetcp_conn_terminate(struct qed_hwfn *p_hwfn,
					 struct qed_nvmetcp_conn *p_conn,
					 enum spq_mode comp_mode,
					 struct qed_spq_comp_cb *p_comp_addr)
{
	struct nvmetcp_spe_conn_termination *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.nvmetcp_conn_terminate;
	p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
	p_ramrod->abortive = p_conn->abortive_dsconnect;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

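/* Post the CLEAR_SQ ramrod to flush the connection's send queue. */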
static int qed_sp_nvmetcp_conn_clear_sq(struct qed_hwfn *p_hwfn,
					struct qed_nvmetcp_conn *p_conn,
					enum spq_mode comp_mode,
					struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_CLEAR_SQ,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static void __iomem *qed_nvmetcp_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
	return (u8 __iomem *)p_hwfn->doorbells +
			     qed_db_addr(cid, DQ_DEMS_LEGACY);
}

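/* Reuse a connection from the free list if one is available, otherwise
 * allocate a new one together with its R2TQ, UHQ and XHQ PBL chains.
 */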
static int qed_nvmetcp_allocate_connection(struct qed_hwfn *p_hwfn,
					   struct qed_nvmetcp_conn **p_out_conn)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
	};
	struct qed_nvmetcp_pf_params *p_params = NULL;
	struct qed_nvmetcp_conn *p_conn = NULL;
	int rc = 0;

	/* Try finding a free connection that can be used */
	spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
	if (!list_empty(&p_hwfn->p_nvmetcp_info->free_list))
		p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
					  struct qed_nvmetcp_conn, list_entry);
	if (p_conn) {
		list_del(&p_conn->list_entry);
		spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
		*p_out_conn = p_conn;

		return 0;
	}
	spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);

	/* Need to allocate a new connection */
	p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
	p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
	if (!p_conn)
		return -ENOMEM;

	params.num_elems = p_params->num_r2tq_pages_in_ring *
			   QED_CHAIN_PAGE_SIZE / sizeof(struct nvmetcp_wqe);
	params.elem_size = sizeof(struct nvmetcp_wqe);
	rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params);
	if (rc)
		goto nomem_r2tq;

	params.num_elems = p_params->num_uhq_pages_in_ring *
			   QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
	params.elem_size = sizeof(struct iscsi_uhqe);
	rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params);
	if (rc)
		goto nomem_uhq;

	params.elem_size = sizeof(struct iscsi_xhqe);
	rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params);
	if (rc)
		goto nomem;

	p_conn->free_on_delete = true;
	*p_out_conn = p_conn;

	return 0;

nomem:
	qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
nomem_uhq:
	qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
nomem_r2tq:
	kfree(p_conn);

	return -ENOMEM;
}

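/* Acquire a CID from the context manager and bind it to an allocated
 * connection; the CID is released again if allocation fails.
 */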
static int qed_nvmetcp_acquire_connection(struct qed_hwfn *p_hwfn,
					  struct qed_nvmetcp_conn **p_out_conn)
{
	struct qed_nvmetcp_conn *p_conn = NULL;
	int rc = 0;
	u32 icid;

	spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid);
	spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);

	if (rc)
		return rc;

	rc = qed_nvmetcp_allocate_connection(p_hwfn, &p_conn);
	if (rc) {
		spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
		qed_cxt_release_cid(p_hwfn, icid);
		spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);

		return rc;
	}

	p_conn->icid = icid;
	p_conn->conn_id = (u16)icid;
	p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
	*p_out_conn = p_conn;

	return rc;
}

static void qed_nvmetcp_release_connection(struct qed_hwfn *p_hwfn,
					   struct qed_nvmetcp_conn *p_conn)
{
	spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
	list_add_tail(&p_conn->list_entry, &p_hwfn->p_nvmetcp_info->free_list);
	qed_cxt_release_cid(p_hwfn, p_conn->icid);
	spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
}

static void qed_nvmetcp_free_connection(struct qed_hwfn *p_hwfn,
					struct qed_nvmetcp_conn *p_conn)
{
	qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
	qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
	qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
	kfree(p_conn);
}

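/* Allocate the per-hwfn NVMeTCP info and its free-connection list. */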
int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_nvmetcp_info *p_nvmetcp_info;

	p_nvmetcp_info = kzalloc(sizeof(*p_nvmetcp_info), GFP_KERNEL);
	if (!p_nvmetcp_info)
		return -ENOMEM;

	INIT_LIST_HEAD(&p_nvmetcp_info->free_list);
	p_hwfn->p_nvmetcp_info = p_nvmetcp_info;

	return 0;
}

void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn)
{
	spin_lock_init(&p_hwfn->p_nvmetcp_info->lock);
}

void qed_nvmetcp_free(struct qed_hwfn *p_hwfn)
{
	struct qed_nvmetcp_conn *p_conn = NULL;

	if (!p_hwfn->p_nvmetcp_info)
		return;

	while (!list_empty(&p_hwfn->p_nvmetcp_info->free_list)) {
		p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
					  struct qed_nvmetcp_conn, list_entry);
		if (p_conn) {
			list_del(&p_conn->list_entry);
			qed_nvmetcp_free_connection(p_hwfn, p_conn);
		}
	}

	kfree(p_hwfn->p_nvmetcp_info);
	p_hwfn->p_nvmetcp_info = NULL;
}

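/* Protocol-ops entry point: acquire a connection, add it to the device
 * connection hash and return its handle, fw_cid and doorbell address.
 */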
static int qed_nvmetcp_acquire_conn(struct qed_dev *cdev,
				    u32 *handle,
				    u32 *fw_cid, void __iomem **p_doorbell)
{
	struct qed_hash_nvmetcp_con *hash_con;
	int rc;

	/* Allocate a hashed connection */
	hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC);
	if (!hash_con)
		return -ENOMEM;

	/* Acquire the connection */
	rc = qed_nvmetcp_acquire_connection(QED_AFFIN_HWFN(cdev),
					    &hash_con->con);
	if (rc) {
		DP_NOTICE(cdev, "Failed to acquire Connection\n");
		kfree(hash_con);

		return rc;
	}

	/* Add the connection to the hash table */
	*handle = hash_con->con->icid;
	*fw_cid = hash_con->con->fw_cid;
	hash_add(cdev->connections, &hash_con->node, *handle);
	if (p_doorbell)
		*p_doorbell = qed_nvmetcp_get_db_addr(QED_AFFIN_HWFN(cdev),
						      *handle);

	return 0;
}

static int qed_nvmetcp_release_conn(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_nvmetcp_con *hash_con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	hlist_del(&hash_con->node);
	qed_nvmetcp_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
	kfree(hash_con);

	return 0;
}

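/* Protocol-ops entry point: copy the caller-provided offload parameters
 * (FW, networking and TCP stack settings) into the connection and post
 * the offload ramrod.
 */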
static int qed_nvmetcp_offload_conn(struct qed_dev *cdev, u32 handle,
				    struct qed_nvmetcp_params_offload *conn_info)
{
	struct qed_hash_nvmetcp_con *hash_con;
	struct qed_nvmetcp_conn *con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;

	/* FW initializations */
	con->layer_code = NVMETCP_SLOW_PATH_LAYER_CODE;
	con->sq_pbl_addr = conn_info->sq_pbl_addr;
	con->nvmetcp_cccid_max_range = conn_info->nvmetcp_cccid_max_range;
	con->nvmetcp_cccid_itid_table_addr = conn_info->nvmetcp_cccid_itid_table_addr;
	con->default_cq = conn_info->default_cq;
	SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE, 0);
	SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE, 1);
	SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B, 1);

	/* Networking and TCP stack initializations */
	ether_addr_copy(con->local_mac, conn_info->src.mac);
	ether_addr_copy(con->remote_mac, conn_info->dst.mac);
	memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip));
	memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip));
	con->local_port = conn_info->src.port;
	con->remote_port = conn_info->dst.port;
	con->vlan_id = conn_info->vlan_id;

	if (conn_info->timestamp_en)
		SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, 1);

	if (conn_info->delayed_ack_en)
		SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, 1);

	if (conn_info->tcp_keep_alive_en)
		SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_KA_EN, 1);

	if (conn_info->ecn_en)
		SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_ECN_EN, 1);

	con->ip_version = conn_info->ip_version;
	con->flow_label = QED_TCP_FLOW_LABEL;
	con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt;
	con->ka_timeout = conn_info->ka_timeout;
	con->ka_interval = conn_info->ka_interval;
	con->max_rt_time = conn_info->max_rt_time;
	con->ttl = conn_info->ttl;
	con->tos_or_tc = conn_info->tos_or_tc;
	con->mss = conn_info->mss;
	con->cwnd = conn_info->cwnd;
	con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
	con->connect_mode = 0;

	return qed_sp_nvmetcp_conn_offload(QED_AFFIN_HWFN(cdev), con,
					   QED_SPQ_MODE_EBLOCK, NULL);
}

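/* Protocol-ops entry point: apply updated PDU/IO sizes and digest
 * settings to an offloaded connection.
 */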
static int qed_nvmetcp_update_conn(struct qed_dev *cdev,
				   u32 handle,
				   struct qed_nvmetcp_params_update *conn_info)
{
	struct qed_hash_nvmetcp_con *hash_con;
	struct qed_nvmetcp_conn *con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;
	SET_FIELD(con->update_flag,
		  ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, 0);
	SET_FIELD(con->update_flag,
		  ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, 1);
	if (conn_info->hdr_digest_en)
		SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, 1);

	if (conn_info->data_digest_en)
		SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, 1);

	/* Placeholder - initialize pfv, cpda, hpda */

	con->max_seq_size = conn_info->max_io_size;
	con->max_recv_pdu_length = conn_info->max_recv_pdu_length;
	con->max_send_pdu_length = conn_info->max_send_pdu_length;
	con->first_seq_length = conn_info->max_io_size;

	return qed_sp_nvmetcp_conn_update(QED_AFFIN_HWFN(cdev), con,
					  QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_nvmetcp_clear_conn_sq(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_nvmetcp_con *hash_con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	return qed_sp_nvmetcp_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con,
					    QED_SPQ_MODE_EBLOCK, NULL);
}

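/* Protocol-ops entry point: terminate an offloaded connection, either
 * gracefully or abortively depending on abrt_conn.
 */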
static int qed_nvmetcp_destroy_conn(struct qed_dev *cdev,
				    u32 handle, u8 abrt_conn)
{
	struct qed_hash_nvmetcp_con *hash_con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	hash_con->con->abortive_dsconnect = abrt_conn;

	return qed_sp_nvmetcp_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con,
					     QED_SPQ_MODE_EBLOCK, NULL);
}

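/* Ops table handed to the NVMeTCP upper-layer driver via
 * qed_get_nvmetcp_ops().
 */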
static const struct qed_nvmetcp_ops qed_nvmetcp_ops_pass = {
	.common = &qed_common_ops_pass,
	.ll2 = &qed_ll2_ops_pass,
	.fill_dev_info = &qed_fill_nvmetcp_dev_info,
	.register_ops = &qed_register_nvmetcp_ops,
	.start = &qed_nvmetcp_start,
	.stop = &qed_nvmetcp_stop,
	.acquire_conn = &qed_nvmetcp_acquire_conn,
	.release_conn = &qed_nvmetcp_release_conn,
	.offload_conn = &qed_nvmetcp_offload_conn,
	.update_conn = &qed_nvmetcp_update_conn,
	.destroy_conn = &qed_nvmetcp_destroy_conn,
	.clear_sq = &qed_nvmetcp_clear_conn_sq,
	.add_src_tcp_port_filter = &qed_llh_add_src_tcp_port_filter,
	.remove_src_tcp_port_filter = &qed_llh_remove_src_tcp_port_filter,
	.add_dst_tcp_port_filter = &qed_llh_add_dst_tcp_port_filter,
	.remove_dst_tcp_port_filter = &qed_llh_remove_dst_tcp_port_filter,
	.clear_all_filters = &qed_llh_clear_all_filters,
	.init_read_io = &init_nvmetcp_host_read_task,
	.init_write_io = &init_nvmetcp_host_write_task,
	.init_icreq_exchange = &init_nvmetcp_init_conn_req_task,
	.init_task_cleanup = &init_cleanup_task_nvmetcp
};

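/* Illustrative consumer-side sketch (not part of this driver; 'cdev',
 * 'my_cb_ops', 'my_cookie', 'my_ctx' and 'my_event_cb' are the upper
 * layer's own):
 *
 *	const struct qed_nvmetcp_ops *ops = qed_get_nvmetcp_ops();
 *	struct qed_dev_nvmetcp_info info;
 *	struct qed_nvmetcp_tid tasks;
 *	int rc;
 *
 *	ops->fill_dev_info(cdev, &info);
 *	ops->register_ops(cdev, &my_cb_ops, my_cookie);
 *	rc = ops->start(cdev, &tasks, my_ctx, my_event_cb);
 *	...
 *	ops->stop(cdev);
 *	qed_put_nvmetcp_ops();
 */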
const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void)
{
	return &qed_nvmetcp_ops_pass;
}
EXPORT_SYMBOL(qed_get_nvmetcp_ops);

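/* Currently a no-op; exported as the symmetric counterpart of
 * qed_get_nvmetcp_ops().
 */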
void qed_put_nvmetcp_ops(void)
{
}
EXPORT_SYMBOL(qed_put_nvmetcp_ops);