1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015-2017  QLogic Corporation
4  * Copyright (c) 2019-2020 Marvell International Ltd.
5  */
6 
7 #include <linux/types.h>
8 #include <asm/byteorder.h>
9 #include <linux/bitops.h>
10 #include <linux/delay.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/errno.h>
13 #include <linux/io.h>
14 #include <linux/kernel.h>
15 #include <linux/list.h>
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/pci.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/string.h>
22 #include <linux/if_vlan.h>
23 #include "qed.h"
24 #include "qed_cxt.h"
25 #include "qed_dcbx.h"
26 #include "qed_hsi.h"
27 #include "qed_hw.h"
28 #include "qed_init_ops.h"
29 #include "qed_int.h"
30 #include "qed_ll2.h"
31 #include "qed_mcp.h"
32 #include "qed_reg_addr.h"
33 #include <linux/qed/qed_rdma_if.h>
34 #include "qed_rdma.h"
35 #include "qed_roce.h"
36 #include "qed_sp.h"
37 
38 static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
39 
40 static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
41 				__le16 echo, union event_ring_data *data,
42 				u8 fw_return_code)
43 {
44 	struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
45 	union rdma_eqe_data *rdata = &data->rdma_data;
46 
47 	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
48 		u16 icid = (u16)le32_to_cpu(rdata->rdma_destroy_qp_data.cid);
49 
50 		/* icid release in this async event can occur only if the icid
51 		 * was offloaded to the FW. In case it wasn't offloaded this is
52 		 * handled in qed_roce_sp_destroy_qp.
53 		 */
54 		qed_roce_free_real_icid(p_hwfn, icid);
55 	} else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
56 		   fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
57 		u16 srq_id = (u16)le32_to_cpu(rdata->async_handle.lo);
58 
59 		events.affiliated_event(events.context, fw_event_code,
60 					&srq_id);
61 	} else {
62 		events.affiliated_event(events.context, fw_event_code,
63 					(void *)&rdata->async_handle);
64 	}
65 
66 	return 0;
67 }
68 
69 void qed_roce_stop(struct qed_hwfn *p_hwfn)
70 {
71 	struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
72 	int wait_count = 0;
73 
74 	/* when destroying a RoCE QP the control is returned to the user after
75 	 * the synchronous part. The asynchronous part may take a little longer.
76 	 * We delay for a short while if an async destroy QP is still expected.
77 	 * Beyond the added delay we clear the bitmap anyway.
78 	 */
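	/* Note: the loop below gives up after roughly 20 x 100 ms (~2 s). */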
79 	while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) {
80 		/* If the HW device is in recovery, all resources are
81 		 * immediately reset without receiving a per-cid indication
82 		 * from HW. In this case we don't expect the cid bitmap to be
83 		 * cleared.
84 		 */
85 		if (p_hwfn->cdev->recov_in_prog)
86 			return;
87 
88 		msleep(100);
89 		if (wait_count++ > 20) {
90 			DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
91 			break;
92 		}
93 	}
94 }
95 
96 static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
97 			       __le32 *dst_gid)
98 {
99 	u32 i;
100 
101 	if (qp->roce_mode == ROCE_V2_IPV4) {
102 		/* The IPv4 addresses shall be aligned to the highest word.
103 		 * The lower words must be zero.
104 		 */
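		/* Resulting GID layout in dwords: { 0, 0, 0, ipv4_addr }. */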
105 		memset(src_gid, 0, sizeof(union qed_gid));
106 		memset(dst_gid, 0, sizeof(union qed_gid));
107 		src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
108 		dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
109 	} else {
110 		/* GIDs and IPv6 addresses coincide in location and size */
111 		for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
112 			src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
113 			dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
114 		}
115 	}
116 }
117 
118 static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
119 {
120 	switch (roce_mode) {
121 	case ROCE_V1:
122 		return PLAIN_ROCE;
123 	case ROCE_V2_IPV4:
124 		return RROCE_IPV4;
125 	case ROCE_V2_IPV6:
126 		return RROCE_IPV6;
127 	default:
128 		return MAX_ROCE_FLAVOR;
129 	}
130 }
131 
132 static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
133 {
134 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
135 	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
136 	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
137 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
138 }
139 
140 int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
141 {
142 	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
143 	u32 responder_icid;
144 	u32 requester_icid;
145 	int rc;
146 
147 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
148 	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
149 				    &responder_icid);
150 	if (rc) {
151 		spin_unlock_bh(&p_rdma_info->lock);
152 		return rc;
153 	}
154 
155 	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
156 				    &requester_icid);
157 
158 	spin_unlock_bh(&p_rdma_info->lock);
159 	if (rc)
160 		goto err;
161 
162 	/* the two icids should be adjacent */
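	/* The even icid serves the responder and the odd icid (responder
	 * icid + 1) serves the requester; see qed_roce_free_real_icid().
	 */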
163 	if ((requester_icid - responder_icid) != 1) {
164 		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
165 		rc = -EINVAL;
166 		goto err;
167 	}
168 
169 	responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
170 						      p_rdma_info->proto);
171 	requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
172 						      p_rdma_info->proto);
173 
174 	/* If these icids require a new ILT line, allocate a DMA-able context for
175 	 * an ILT page.
176 	 */
177 	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
178 	if (rc)
179 		goto err;
180 
181 	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
182 	if (rc)
183 		goto err;
184 
185 	*cid = (u16)responder_icid;
186 	return rc;
187 
188 err:
189 	spin_lock_bh(&p_rdma_info->lock);
190 	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
191 	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
192 
193 	spin_unlock_bh(&p_rdma_info->lock);
194 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
195 		   "Allocate CID - failed, rc = %d\n", rc);
196 	return rc;
197 }
198 
199 static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
200 {
201 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
202 	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
203 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
204 }
205 
206 static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
207 {
208 	u8 pri, tc = 0;
209 
210 	if (qp->vlan_id) {
211 		pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
212 		tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
213 	}
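	/* Example with hypothetical values: vlan_id 0x6005 carries PCP
	 * (0x6005 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT = 3, which DCBX may map
	 * to, say, TC 1; a QP without a VLAN always resolves to TC 0.
	 */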
214 
215 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
216 		   "qp icid %u tc: %u (vlan priority %s)\n",
217 		   qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");
218 
219 	return tc;
220 }
221 
222 static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
223 					struct qed_rdma_qp *qp)
224 {
225 	struct roce_create_qp_resp_ramrod_data *p_ramrod;
226 	u16 regular_latency_queue, low_latency_queue;
227 	struct qed_sp_init_data init_data;
228 	struct qed_spq_entry *p_ent;
229 	enum protocol_type proto;
230 	u32 flags = 0;
231 	int rc;
232 	u8 tc;
233 
234 	if (!qp->has_resp)
235 		return 0;
236 
237 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
238 
239 	/* Allocate DMA-able memory for IRQ */
240 	qp->irq_num_pages = 1;
241 	qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
242 				     RDMA_RING_PAGE_SIZE,
243 				     &qp->irq_phys_addr, GFP_KERNEL);
244 	if (!qp->irq) {
245 		rc = -ENOMEM;
246 		DP_NOTICE(p_hwfn,
247 			  "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
248 			  rc);
249 		return rc;
250 	}
251 
252 	/* Get SPQ entry */
253 	memset(&init_data, 0, sizeof(init_data));
254 	init_data.cid = qp->icid;
255 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
256 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
257 
258 	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
259 				 PROTOCOLID_ROCE, &init_data);
260 	if (rc)
261 		goto err;
262 
263 	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR,
264 		  qed_roce_mode_to_flavor(qp->roce_mode));
265 
266 	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
267 		  qp->incoming_rdma_read_en);
268 
269 	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
270 		  qp->incoming_rdma_write_en);
271 
272 	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
273 		  qp->incoming_atomic_en);
274 
275 	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
276 		  qp->e2e_flow_control_en);
277 
278 	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
279 
280 	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
281 		  qp->fmr_and_reserved_lkey);
282 
283 	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
284 		  qp->min_rnr_nak_timer);
285 
286 	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
287 		  qed_rdma_is_xrc_qp(qp));
288 
289 	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
290 	p_ramrod->flags = cpu_to_le32(flags);
291 	p_ramrod->max_ird = qp->max_rd_atomic_resp;
292 	p_ramrod->traffic_class = qp->traffic_class_tos;
293 	p_ramrod->hop_limit = qp->hop_limit_ttl;
294 	p_ramrod->irq_num_pages = qp->irq_num_pages;
295 	p_ramrod->p_key = cpu_to_le16(qp->pkey);
296 	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
297 	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
298 	p_ramrod->mtu = cpu_to_le16(qp->mtu);
299 	p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
300 	p_ramrod->pd = cpu_to_le16(qp->pd);
301 	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
302 	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
303 	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
304 	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
305 	p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
306 	p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
307 	p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
308 	p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
309 	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
310 				       qp->rq_cq_id);
311 	p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id);
312 
313 	tc = qed_roce_get_qp_tc(p_hwfn, qp);
314 	regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
315 	low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
316 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
317 		   "qp icid %u pqs: regular_latency %u low_latency %u\n",
318 		   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
319 		   low_latency_queue - CM_TX_PQ_BASE);
320 	p_ramrod->regular_latency_phy_queue =
321 	    cpu_to_le16(regular_latency_queue);
322 	p_ramrod->low_latency_phy_queue =
323 	    cpu_to_le16(low_latency_queue);
324 
325 	p_ramrod->dpi = cpu_to_le16(qp->dpi);
326 
327 	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
328 	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
329 
330 	p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
331 	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
332 	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
333 	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
334 
335 	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
336 				     qp->stats_queue;
337 
338 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
339 	if (rc)
340 		goto err;
341 
342 	qp->resp_offloaded = true;
343 	qp->cq_prod = 0;
344 
345 	proto = p_hwfn->p_rdma_info->proto;
346 	qed_roce_set_real_cid(p_hwfn, qp->icid -
347 			      qed_cxt_get_proto_cid_start(p_hwfn, proto));
348 
349 	return rc;
350 
351 err:
352 	DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
353 	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
354 			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
355 			  qp->irq, qp->irq_phys_addr);
356 
357 	return rc;
358 }
359 
360 static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
361 					struct qed_rdma_qp *qp)
362 {
363 	struct roce_create_qp_req_ramrod_data *p_ramrod;
364 	u16 regular_latency_queue, low_latency_queue;
365 	struct qed_sp_init_data init_data;
366 	struct qed_spq_entry *p_ent;
367 	enum protocol_type proto;
368 	u16 flags = 0;
369 	int rc;
370 	u8 tc;
371 
372 	if (!qp->has_req)
373 		return 0;
374 
375 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
376 
377 	/* Allocate DMA-able memory for ORQ */
378 	qp->orq_num_pages = 1;
379 	qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
380 				     RDMA_RING_PAGE_SIZE,
381 				     &qp->orq_phys_addr, GFP_KERNEL);
382 	if (!qp->orq) {
383 		rc = -ENOMEM;
384 		DP_NOTICE(p_hwfn,
385 			  "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
386 			  rc);
387 		return rc;
388 	}
389 
390 	/* Get SPQ entry */
391 	memset(&init_data, 0, sizeof(init_data));
392 	init_data.cid = qp->icid + 1;
393 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
394 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
395 
396 	rc = qed_sp_init_request(p_hwfn, &p_ent,
397 				 ROCE_RAMROD_CREATE_QP,
398 				 PROTOCOLID_ROCE, &init_data);
399 	if (rc)
400 		goto err;
401 
402 	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR,
403 		  qed_roce_mode_to_flavor(qp->roce_mode));
404 
405 	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
406 		  qp->fmr_and_reserved_lkey);
407 
408 	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP,
409 		  qp->signal_all);
410 
411 	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
412 		  qp->retry_cnt);
413 
414 	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
415 		  qp->rnr_retry_cnt);
416 
417 	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
418 		  qed_rdma_is_xrc_qp(qp));
419 
420 	p_ramrod = &p_ent->ramrod.roce_create_qp_req;
421 	p_ramrod->flags = cpu_to_le16(flags);
422 
423 	SET_FIELD(p_ramrod->flags2, ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE,
424 		  qp->edpm_mode);
425 
426 	p_ramrod->max_ord = qp->max_rd_atomic_req;
427 	p_ramrod->traffic_class = qp->traffic_class_tos;
428 	p_ramrod->hop_limit = qp->hop_limit_ttl;
429 	p_ramrod->orq_num_pages = qp->orq_num_pages;
430 	p_ramrod->p_key = cpu_to_le16(qp->pkey);
431 	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
432 	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
433 	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
434 	p_ramrod->mtu = cpu_to_le16(qp->mtu);
435 	p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
436 	p_ramrod->pd = cpu_to_le16(qp->pd);
437 	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
438 	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
439 	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
440 	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
441 	p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
442 	p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
443 	p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
444 	p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
445 	p_ramrod->cq_cid =
446 	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
447 
448 	tc = qed_roce_get_qp_tc(p_hwfn, qp);
449 	regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
450 	low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
451 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
452 		   "qp icid %u pqs: regular_latency %u low_latency %u\n",
453 		   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
454 		   low_latency_queue - CM_TX_PQ_BASE);
455 	p_ramrod->regular_latency_phy_queue =
456 	    cpu_to_le16(regular_latency_queue);
457 	p_ramrod->low_latency_phy_queue =
458 	    cpu_to_le16(low_latency_queue);
459 
460 	p_ramrod->dpi = cpu_to_le16(qp->dpi);
461 
462 	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
463 	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
464 
465 	p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
466 	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
467 	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
468 				     qp->stats_queue;
469 
470 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
471 	if (rc)
472 		goto err;
473 
474 	qp->req_offloaded = true;
475 	proto = p_hwfn->p_rdma_info->proto;
476 	qed_roce_set_real_cid(p_hwfn,
477 			      qp->icid + 1 -
478 			      qed_cxt_get_proto_cid_start(p_hwfn, proto));
479 
480 	return rc;
481 
482 err:
483 	DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
484 	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
485 			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
486 			  qp->orq, qp->orq_phys_addr);
487 	return rc;
488 }
489 
490 static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
491 					struct qed_rdma_qp *qp,
492 					bool move_to_err, u32 modify_flags)
493 {
494 	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
495 	struct qed_sp_init_data init_data;
496 	struct qed_spq_entry *p_ent;
497 	u16 flags = 0;
498 	int rc;
499 
500 	if (!qp->has_resp)
501 		return 0;
502 
503 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
504 
505 	if (move_to_err && !qp->resp_offloaded)
506 		return 0;
507 
508 	/* Get SPQ entry */
509 	memset(&init_data, 0, sizeof(init_data));
510 	init_data.cid = qp->icid;
511 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
512 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
513 
514 	rc = qed_sp_init_request(p_hwfn, &p_ent,
515 				 ROCE_EVENT_MODIFY_QP,
516 				 PROTOCOLID_ROCE, &init_data);
517 	if (rc) {
518 		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
519 		return rc;
520 	}
521 
522 	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG,
523 		  !!move_to_err);
524 
525 	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
526 		  qp->incoming_rdma_read_en);
527 
528 	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
529 		  qp->incoming_rdma_write_en);
530 
531 	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
532 		  qp->incoming_atomic_en);
533 
534 	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
535 		  qp->e2e_flow_control_en);
536 
537 	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
538 		  GET_FIELD(modify_flags,
539 			    QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
540 
541 	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
542 		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
543 
544 	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
545 		  GET_FIELD(modify_flags,
546 			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
547 
548 	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
549 		  GET_FIELD(modify_flags,
550 			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
551 
552 	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
553 		  GET_FIELD(modify_flags,
554 			    QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
555 
556 	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
557 	p_ramrod->flags = cpu_to_le16(flags);
558 
559 	p_ramrod->fields = 0;
560 	SET_FIELD(p_ramrod->fields,
561 		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
562 		  qp->min_rnr_nak_timer);
563 
564 	p_ramrod->max_ird = qp->max_rd_atomic_resp;
565 	p_ramrod->traffic_class = qp->traffic_class_tos;
566 	p_ramrod->hop_limit = qp->hop_limit_ttl;
567 	p_ramrod->p_key = cpu_to_le16(qp->pkey);
568 	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
569 	p_ramrod->mtu = cpu_to_le16(qp->mtu);
570 	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
571 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
572 
573 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
574 	return rc;
575 }
576 
577 static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
578 					struct qed_rdma_qp *qp,
579 					bool move_to_sqd,
580 					bool move_to_err, u32 modify_flags)
581 {
582 	struct roce_modify_qp_req_ramrod_data *p_ramrod;
583 	struct qed_sp_init_data init_data;
584 	struct qed_spq_entry *p_ent;
585 	u16 flags = 0;
586 	int rc;
587 
588 	if (!qp->has_req)
589 		return 0;
590 
591 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
592 
593 	if (move_to_err && !(qp->req_offloaded))
594 		return 0;
595 
596 	/* Get SPQ entry */
597 	memset(&init_data, 0, sizeof(init_data));
598 	init_data.cid = qp->icid + 1;
599 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
600 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
601 
602 	rc = qed_sp_init_request(p_hwfn, &p_ent,
603 				 ROCE_EVENT_MODIFY_QP,
604 				 PROTOCOLID_ROCE, &init_data);
605 	if (rc) {
606 		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
607 		return rc;
608 	}
609 
610 	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG,
611 		  !!move_to_err);
612 
613 	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG,
614 		  !!move_to_sqd);
615 
616 	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
617 		  qp->sqd_async);
618 
619 	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
620 		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
621 
622 	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
623 		  GET_FIELD(modify_flags,
624 			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
625 
626 	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
627 		  GET_FIELD(modify_flags,
628 			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
629 
630 	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
631 		  GET_FIELD(modify_flags,
632 			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
633 
634 	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
635 		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
636 
637 	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
638 		  GET_FIELD(modify_flags,
639 			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
640 
641 	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
642 	p_ramrod->flags = cpu_to_le16(flags);
643 
644 	p_ramrod->fields = 0;
645 	SET_FIELD(p_ramrod->fields,
646 		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
647 	SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
648 		  qp->rnr_retry_cnt);
649 
650 	p_ramrod->max_ord = qp->max_rd_atomic_req;
651 	p_ramrod->traffic_class = qp->traffic_class_tos;
652 	p_ramrod->hop_limit = qp->hop_limit_ttl;
653 	p_ramrod->p_key = cpu_to_le16(qp->pkey);
654 	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
655 	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
656 	p_ramrod->mtu = cpu_to_le16(qp->mtu);
657 	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
658 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
659 
660 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
661 	return rc;
662 }
663 
664 static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
665 					    struct qed_rdma_qp *qp,
666 					    u32 *cq_prod)
667 {
668 	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
669 	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
670 	struct qed_sp_init_data init_data;
671 	struct qed_spq_entry *p_ent;
672 	dma_addr_t ramrod_res_phys;
673 	int rc;
674 
675 	if (!qp->has_resp) {
676 		*cq_prod = 0;
677 		return 0;
678 	}
679 
680 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
681 	*cq_prod = qp->cq_prod;
682 
683 	if (!qp->resp_offloaded) {
684 		/* If a responder was never offloaded, we need to free the cids
685 		 * allocated in create_qp, as a FW async event will never arrive.
686 		 */
687 		u32 cid;
688 
689 		cid = qp->icid -
690 		      qed_cxt_get_proto_cid_start(p_hwfn,
691 						  p_hwfn->p_rdma_info->proto);
692 		qed_roce_free_cid_pair(p_hwfn, (u16)cid);
693 
694 		return 0;
695 	}
696 
697 	/* Get SPQ entry */
698 	memset(&init_data, 0, sizeof(init_data));
699 	init_data.cid = qp->icid;
700 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
701 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
702 
703 	rc = qed_sp_init_request(p_hwfn, &p_ent,
704 				 ROCE_RAMROD_DESTROY_QP,
705 				 PROTOCOLID_ROCE, &init_data);
706 	if (rc)
707 		return rc;
708 
709 	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
710 
711 	p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
712 					  sizeof(*p_ramrod_res),
713 					  &ramrod_res_phys, GFP_KERNEL);
714 
715 	if (!p_ramrod_res) {
716 		rc = -ENOMEM;
717 		DP_NOTICE(p_hwfn,
718 			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
719 			  rc);
720 		qed_sp_destroy_request(p_hwfn, p_ent);
721 		return rc;
722 	}
723 
724 	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
725 
726 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
727 	if (rc)
728 		goto err;
729 
730 	*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
731 	qp->cq_prod = *cq_prod;
732 
733 	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
734 	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
735 			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
736 			  qp->irq, qp->irq_phys_addr);
737 
738 	qp->resp_offloaded = false;
739 
740 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
741 
742 err:
743 	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
744 			  sizeof(struct roce_destroy_qp_resp_output_params),
745 			  p_ramrod_res, ramrod_res_phys);
746 
747 	return rc;
748 }
749 
750 static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
751 					    struct qed_rdma_qp *qp)
752 {
753 	struct roce_destroy_qp_req_output_params *p_ramrod_res;
754 	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
755 	struct qed_sp_init_data init_data;
756 	struct qed_spq_entry *p_ent;
757 	dma_addr_t ramrod_res_phys;
758 	int rc = -ENOMEM;
759 
760 	if (!qp->has_req)
761 		return 0;
762 
763 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
764 
765 	if (!qp->req_offloaded)
766 		return 0;
767 
768 	p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
769 					  sizeof(*p_ramrod_res),
770 					  &ramrod_res_phys, GFP_KERNEL);
771 	if (!p_ramrod_res) {
772 		DP_NOTICE(p_hwfn,
773 			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
774 		return rc;
775 	}
776 
777 	/* Get SPQ entry */
778 	memset(&init_data, 0, sizeof(init_data));
779 	init_data.cid = qp->icid + 1;
780 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
781 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
782 
783 	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
784 				 PROTOCOLID_ROCE, &init_data);
785 	if (rc)
786 		goto err;
787 
788 	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
789 	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
790 
791 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
792 	if (rc)
793 		goto err;
794 
795 	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
796 	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
797 			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
798 			  qp->orq, qp->orq_phys_addr);
799 
800 	qp->req_offloaded = false;
801 
802 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
803 
804 err:
805 	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
806 			  p_ramrod_res, ramrod_res_phys);
807 
808 	return rc;
809 }
810 
811 int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
812 		      struct qed_rdma_qp *qp,
813 		      struct qed_rdma_query_qp_out_params *out_params)
814 {
815 	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
816 	struct roce_query_qp_req_output_params *p_req_ramrod_res;
817 	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
818 	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
819 	struct qed_sp_init_data init_data;
820 	dma_addr_t resp_ramrod_res_phys;
821 	dma_addr_t req_ramrod_res_phys;
822 	struct qed_spq_entry *p_ent;
823 	bool rq_err_state;
824 	bool sq_err_state;
825 	bool sq_draining;
826 	int rc = -ENOMEM;
827 
828 	if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
829 		/* We can't send ramrod to the fw since this qp wasn't offloaded
830 		 * to the fw yet
831 		 */
832 		out_params->draining = false;
833 		out_params->rq_psn = qp->rq_psn;
834 		out_params->sq_psn = qp->sq_psn;
835 		out_params->state = qp->cur_state;
836 
837 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP not offloaded - returning SW state\n");
838 		return 0;
839 	}
840 
841 	if (!(qp->resp_offloaded)) {
842 		DP_NOTICE(p_hwfn,
843 			  "The responder's qp should be offloaded before requester's\n");
844 		return -EINVAL;
845 	}
846 
847 	/* Send a query responder ramrod to FW to get RQ-PSN and state */
848 	p_resp_ramrod_res =
849 		dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
850 				   sizeof(*p_resp_ramrod_res),
851 				   &resp_ramrod_res_phys, GFP_KERNEL);
852 	if (!p_resp_ramrod_res) {
853 		DP_NOTICE(p_hwfn,
854 			  "qed query qp failed: cannot allocate memory (ramrod)\n");
855 		return rc;
856 	}
857 
858 	/* Get SPQ entry */
859 	memset(&init_data, 0, sizeof(init_data));
860 	init_data.cid = qp->icid;
861 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
862 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
863 	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
864 				 PROTOCOLID_ROCE, &init_data);
865 	if (rc)
866 		goto err_resp;
867 
868 	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
869 	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
870 
871 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
872 	if (rc)
873 		goto err_resp;
874 
875 	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
876 	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
877 				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
878 
879 	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
880 			  p_resp_ramrod_res, resp_ramrod_res_phys);
881 
882 	if (!(qp->req_offloaded)) {
883 		/* Don't send query qp for the requester */
884 		out_params->sq_psn = qp->sq_psn;
885 		out_params->draining = false;
886 
887 		if (rq_err_state)
888 			qp->cur_state = QED_ROCE_QP_STATE_ERR;
889 
890 		out_params->state = qp->cur_state;
891 
892 		return 0;
893 	}
894 
895 	/* Send a query requester ramrod to FW to get SQ-PSN and state */
896 	p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
897 					      sizeof(*p_req_ramrod_res),
898 					      &req_ramrod_res_phys,
899 					      GFP_KERNEL);
900 	if (!p_req_ramrod_res) {
901 		rc = -ENOMEM;
902 		DP_NOTICE(p_hwfn,
903 			  "qed query qp failed: cannot allocate memory (ramrod)\n");
904 		return rc;
905 	}
906 
907 	/* Get SPQ entry */
908 	init_data.cid = qp->icid + 1;
909 	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
910 				 PROTOCOLID_ROCE, &init_data);
911 	if (rc)
912 		goto err_req;
913 
914 	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
915 	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
916 
917 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
918 	if (rc)
919 		goto err_req;
920 
921 	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
922 	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
923 				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
924 	sq_draining =
925 		GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
926 			  ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
927 
928 	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
929 			  p_req_ramrod_res, req_ramrod_res_phys);
930 
931 	out_params->draining = false;
932 
933 	if (rq_err_state || sq_err_state)
934 		qp->cur_state = QED_ROCE_QP_STATE_ERR;
935 	else if (sq_draining)
936 		out_params->draining = true;
937 	out_params->state = qp->cur_state;
938 
939 	return 0;
940 
941 err_req:
942 	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
943 			  p_req_ramrod_res, req_ramrod_res_phys);
944 	return rc;
945 err_resp:
946 	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
947 			  p_resp_ramrod_res, resp_ramrod_res_phys);
948 	return rc;
949 }
950 
951 int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
952 {
953 	u32 cq_prod;
954 	int rc;
955 
956 	/* Destroys the specified QP */
957 	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
958 	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
959 	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
960 		DP_NOTICE(p_hwfn,
961 			  "QP must be in error, reset or init state before destroying it\n");
962 		return -EINVAL;
963 	}
964 
965 	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
966 		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
967 						      &cq_prod);
968 		if (rc)
969 			return rc;
970 
971 		/* Send destroy requester ramrod */
972 		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
973 		if (rc)
974 			return rc;
975 	}
976 
977 	return 0;
978 }
979 
980 int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
981 		       struct qed_rdma_qp *qp,
982 		       enum qed_roce_qp_state prev_state,
983 		       struct qed_rdma_modify_qp_in_params *params)
984 {
985 	int rc = 0;
986 
987 	/* Perform additional operations according to the current state and the
988 	 * next state
989 	 */
990 	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
991 	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
992 	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
993 		/* Init->RTR or Reset->RTR */
994 		rc = qed_roce_sp_create_responder(p_hwfn, qp);
995 		return rc;
996 	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
997 		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
998 		/* RTR-> RTS */
999 		rc = qed_roce_sp_create_requester(p_hwfn, qp);
1000 		if (rc)
1001 			return rc;
1002 
1003 		/* Send modify responder ramrod */
1004 		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
1005 						  params->modify_flags);
1006 		return rc;
1007 	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
1008 		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
1009 		/* RTS->RTS */
1010 		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
1011 						  params->modify_flags);
1012 		if (rc)
1013 			return rc;
1014 
1015 		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
1016 						  params->modify_flags);
1017 		return rc;
1018 	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
1019 		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
1020 		/* RTS->SQD */
1021 		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
1022 						  params->modify_flags);
1023 		return rc;
1024 	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
1025 		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
1026 		/* SQD->SQD */
1027 		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
1028 						  params->modify_flags);
1029 		if (rc)
1030 			return rc;
1031 
1032 		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
1033 						  params->modify_flags);
1034 		return rc;
1035 	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
1036 		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
1037 		/* SQD->RTS */
1038 		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
1039 						  params->modify_flags);
1040 		if (rc)
1041 			return rc;
1042 
1043 		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
1044 						  params->modify_flags);
1045 
1046 		return rc;
1047 	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
1048 		/* ->ERR */
1049 		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
1050 						  params->modify_flags);
1051 		if (rc)
1052 			return rc;
1053 
1054 		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
1055 						  params->modify_flags);
1056 		return rc;
1057 	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
1058 		/* Any state -> RESET */
1059 		u32 cq_prod;
1060 
1061 		/* Send destroy responder ramrod */
1062 		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
1063 						      qp,
1064 						      &cq_prod);
1065 
1066 		if (rc)
1067 			return rc;
1068 
1069 		qp->cq_prod = cq_prod;
1070 
1071 		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
1072 	} else {
1073 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No ramrod needed for this transition\n");
1074 	}
1075 
1076 	return rc;
1077 }
1078 
1079 static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
1080 {
1081 	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
1082 	u32 start_cid, cid, xcid;
1083 
1084 	/* an even icid belongs to a responder while an odd icid belongs to a
1085 	 * requester. The 'cid' received as an input can be either. We calculate
1086 	 * the "partner" icid and call it xcid. Only if both are free can the
1087 	 * "cid" map be cleared.
1088 	 */
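	/* e.g. cid 6 (responder) pairs with xcid 7 (requester), and vice versa. */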
1089 	start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
1090 	cid = icid - start_cid;
1091 	xcid = cid ^ 1;
1092 
1093 	spin_lock_bh(&p_rdma_info->lock);
1094 
1095 	qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
1096 	if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
1097 		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
1098 		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
1099 	}
1100 
1101 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1102 }
1103 
1104 void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1105 {
1106 	u8 val;
1107 
1108 	/* If any QPs are already active, we want to disable DPM, since their
1109 	 * context information predates the latest DCBx update. Otherwise,
1110 	 * enable it.
1111 	 */
1112 	val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
1113 	p_hwfn->dcbx_no_edpm = (u8)val;
1114 
1115 	qed_rdma_dpm_conf(p_hwfn, p_ptt);
1116 }
1117 
1118 int qed_roce_setup(struct qed_hwfn *p_hwfn)
1119 {
1120 	return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
1121 					 qed_roce_async_event);
1122 }
1123 
1124 int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1125 {
1126 	u32 ll2_ethertype_en;
1127 
1128 	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
1129 
1130 	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
1131 
1132 	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
1133 	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
1134 	       (ll2_ethertype_en | 0x01));
1135 
1136 	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
1137 		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
1138 		return -EINVAL;
1139 	}
1140 
1141 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
1142 	return 0;
1143 }
1144