xref: /freebsd/sys/dev/bnxt/bnxt_re/qplib_sp.c (revision bbe42332e5b2cbe16a885360fad4462d13c7b357)
/*
 * Copyright (c) 2015-2024, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Slow Path Operators
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/if_ether.h>
#include <linux/printk.h>

#include "hsi_struct_def.h"
#include "qplib_tlv.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"

const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
						     0, 0, 0, 0, 0, 0, 0, 0 }};

/* Device */
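/*
 * Check whether PCIe atomics can be advertised: requires Gen P5/P7
 * silicon and the AtomicOp Requester Enable bit set in the PCIe
 * Device Control 2 register.
 */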
static u8 bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
{
	u16 pcie_ctl2 = 0;

	if (!_is_chip_gen_p5_p7(rcfw->res->cctx))
		return false;
	pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
	return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}

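/*
 * Retrieve the firmware version (major/minor/build/reserved bytes)
 * through a QUERY_VERSION slow-path command into fw_ver[0..3].
 */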
static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw, char *fw_ver)
{
	struct creq_query_version_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_query_version req = {};
	int rc = 0;

	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_VERSION,
				 sizeof(req));
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		dev_err(&rcfw->pdev->dev, "QPLIB: Failed to query version\n");
		return;
	}
	fw_ver[0] = resp.fw_maj;
	fw_ver[1] = resp.fw_minor;
	fw_ver[2] = resp.fw_bld;
	fw_ver[3] = resp.fw_rsvd;
}

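/*
 * Query the device capabilities (QP/CQ/MR/SRQ/AH limits, inline data,
 * page sizes, TQM allocation requests, etc.) via QUERY_FUNC and cache
 * them in rcfw->res->dattr, clamping FW-reported values to the limits
 * the driver supports for the detected chip generation.
 */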
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
{
	struct creq_query_func_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct creq_query_func_resp_sb *sb;
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct bnxt_qplib_dev_attr *attr;
	struct bnxt_qplib_chip_ctx *cctx;
	struct cmdq_query_func req = {};
	u8 *tqm_alloc;
	int i, rc = 0;
	u32 temp;
	u8 chip_gen = BNXT_RE_DEFAULT;

	cctx = rcfw->res->cctx;
	attr = rcfw->res->dattr;

	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_FUNC,
				 sizeof(req));

	sbuf.size = sizeof(*sb);
	sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size,
				       &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;

	sb = sbuf.sb;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	chip_gen = _get_chip_gen_p5_type(cctx);
	attr->max_qp = le32_to_cpu(sb->max_qp);
	attr->max_qp = min_t(u32, attr->max_qp, BNXT_RE_MAX_QP_SUPPORTED(chip_gen));
	/* The max_qp value reported by FW does not include QP1 */
	attr->max_qp += 1;
	attr->max_qp_rd_atom =
		sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
	attr->max_qp_init_rd_atom =
		sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
	/* Report one less than the max_qp_wqes reported by FW, as the
	 * driver adds one extra entry while creating the QP.
	 */
	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
	/* Adjust max_qp_wqes for variable-size WQEs */
	if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) {
		attr->max_qp_wqes = (BNXT_MAX_SQ_SIZE) /
			(BNXT_MAX_VAR_WQE_SIZE / BNXT_SGE_SIZE) - 1;
	}
	if (!_is_chip_gen_p5_p7(cctx)) {
		/*
		 * 128 WQEs need to be reserved for the HW (8916). Prevent
		 * reporting the max number for gen-p4 only.
		 */
		attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
	}
	attr->max_qp_sges = sb->max_sge;
	if (_is_chip_gen_p5_p7(cctx) &&
	    cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		attr->max_qp_sges = sb->max_sge_var_wqe;
	attr->max_cq = le32_to_cpu(sb->max_cq);
	attr->max_cq = min_t(u32, attr->max_cq, BNXT_RE_MAX_CQ_SUPPORTED(chip_gen));

	attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
	attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes);

	attr->max_cq_sges = attr->max_qp_sges;
	attr->max_mr = le32_to_cpu(sb->max_mr);
	attr->max_mr = min_t(u32, attr->max_mr, BNXT_RE_MAX_MRW_SUPPORTED(chip_gen));
	attr->max_mw = le32_to_cpu(sb->max_mw);
	attr->max_mw = min_t(u32, attr->max_mw, BNXT_RE_MAX_MRW_SUPPORTED(chip_gen));

	attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
	attr->max_pd = BNXT_QPLIB_MAX_PD;
	attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
	attr->max_ah = le32_to_cpu(sb->max_ah);
	attr->max_ah = min_t(u32, attr->max_ah, BNXT_RE_MAX_AH_SUPPORTED(chip_gen));

	attr->max_fmr = le32_to_cpu(sb->max_fmr);
	attr->max_map_per_fmr = sb->max_map_per_fmr;

	attr->max_srq = le16_to_cpu(sb->max_srq);
	attr->max_srq = min_t(u32, attr->max_srq, BNXT_RE_MAX_SRQ_SUPPORTED(chip_gen));
	attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
	attr->max_srq_sges = sb->max_srq_sge;
	attr->max_pkey = 1;

	attr->max_inline_data = !cctx->modes.wqe_mode ?
				le32_to_cpu(sb->max_inline_data) :
				le16_to_cpu(sb->max_inline_data_var_wqe);
	if (!_is_chip_p7(cctx)) {
		attr->l2_db_size = (sb->l2_db_space_size + 1) *
				    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
	}
	attr->max_sgid = le32_to_cpu(sb->max_gid);

	/* TODO: remove this hack for statically allocated gid_map */
	bnxt_re_set_max_gid(&attr->max_sgid);

	attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
	attr->page_size_cap = BIT_ULL(28) | BIT_ULL(21) | BIT_ULL(12);

	bnxt_qplib_query_version(rcfw, attr->fw_ver);

	for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
		temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
		tqm_alloc = (u8 *)&temp;
		attr->tqm_alloc_reqs[i * 4] = *tqm_alloc;
		attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc);
		attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
	}

	if (rcfw->res->cctx->hwrm_intf_ver >= HWRM_VERSION_DEV_ATTR_MAX_DPI)
		attr->max_dpi = le32_to_cpu(sb->max_dpi);

	attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
				  sbuf.sb, sbuf.dma_addr);
	return rc;
}

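/*
 * Distribute the PF's resource pools and per-VF limits to the firmware
 * through a SET_FUNC_RESOURCES command.
 */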
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res)
{
	struct creq_set_func_resources_resp resp = {};
	struct cmdq_set_func_resources req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw *rcfw;
	struct bnxt_qplib_ctx *hctx;
	int rc = 0;

	rcfw = res->rcfw;
	hctx = res->hctx;
	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES,
				 sizeof(req));

	req.number_of_qp = cpu_to_le32(hctx->qp_ctx.max);
	req.number_of_mrw = cpu_to_le32(hctx->mrw_ctx.max);
	req.number_of_srq = cpu_to_le32(hctx->srq_ctx.max);
	req.number_of_cq = cpu_to_le32(hctx->cq_ctx.max);

	req.max_qp_per_vf = cpu_to_le32(hctx->vf_res.max_qp);
	req.max_mrw_per_vf = cpu_to_le32(hctx->vf_res.max_mrw);
	req.max_srq_per_vf = cpu_to_le32(hctx->vf_res.max_srq);
	req.max_cq_per_vf = cpu_to_le32(hctx->vf_res.max_cq);
	req.max_gid_per_vf = cpu_to_le32(hctx->vf_res.max_gid);

	/* Keep the old stats context id of the PF */
	req.stat_ctx_id = cpu_to_le32(hctx->stats.fw_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		dev_err(&res->pdev->dev,
			"QPLIB: Failed to set function resources\n");

	return rc;
}

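/*
 * Modify an existing GID entry in the hardware table: the GID and the
 * source MAC are passed in network byte order, keyed by gid_idx.
 */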
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			   struct bnxt_qplib_gid *gid, u16 gid_idx, const u8 *smac)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_gid_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_gid req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_MODIFY_GID,
				 sizeof(req));

	req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
	req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
	req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
	req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
	if (res->prio) {
		req.vlan |= cpu_to_le16(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
			CMDQ_ADD_GID_VLAN_VLAN_EN);
	}

	/* MAC in network format */
	req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
	req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
	req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
	req.gid_index = cpu_to_le16(gid_idx);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		dev_err(&res->pdev->dev,
			"QPLIB: update SGID table failed\n");
		return rc;
	}
	return 0;
}

/* SGID */
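/*
 * Read a GID entry from the driver's shadow SGID table into *gid.
 */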
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
			struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
			struct bnxt_qplib_gid *gid)
{
	if (index >= sgid_tbl->max) {
		dev_err(&res->pdev->dev,
			"QPLIB: Index %d exceeded SGID table max (%d)\n",
			index, sgid_tbl->max);
		return -EINVAL;
	}
	memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
	return 0;
}

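/*
 * Remove a (GID, vlan_id) entry from the shadow table and, when update
 * is set, also issue a DELETE_GID command to the firmware.
 */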
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			struct bnxt_qplib_gid *gid,
			u16 vlan_id, bool update)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	int index;

	if (sgid_tbl == NULL) {
		dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated\n");
		return -EINVAL;
	}
	/* Do we need a sgid_lock here? */
	if (!sgid_tbl->active) {
		dev_err(&res->pdev->dev,
			"QPLIB: SGID table has no active entries\n");
		return -ENOMEM;
	}
	for (index = 0; index < sgid_tbl->max; index++) {
		if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
		    vlan_id == sgid_tbl->tbl[index].vlan_id)
			break;
	}
	if (index == sgid_tbl->max) {
		dev_warn(&res->pdev->dev, "GID not found in the SGID table\n");
		return 0;
	}

	if (update) {
		struct creq_delete_gid_resp resp = {};
		struct bnxt_qplib_cmdqmsg msg = {};
		struct cmdq_delete_gid req = {};
		int rc;

		bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DELETE_GID,
					 sizeof(req));
		if (sgid_tbl->hw_id[index] == 0xFFFF) {
			dev_err(&res->pdev->dev,
				"QPLIB: GID entry contains an invalid HW id\n");
			return -EINVAL;
		}
		req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
		bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
					sizeof(resp), 0);
		rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
		if (rc)
			return rc;
	}
	memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
	       sizeof(bnxt_qplib_gid_zero));
	sgid_tbl->tbl[index].vlan_id = 0xFFFF;
	sgid_tbl->vlan[index] = false;
	sgid_tbl->active--;
	dev_dbg(&res->pdev->dev,
		 "QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x\n",
		 index, sgid_tbl->hw_id[index], sgid_tbl->active);
	sgid_tbl->hw_id[index] = (u16)-1;

	return 0;
}

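/*
 * Insert a (GID, vlan_id) entry into the first free slot of the shadow
 * table; when update is set, an ADD_GID command programs the entry into
 * the hardware and the returned xid is cached as the HW id.
 */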
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			const union ib_gid *gid, const u8 *smac, u16 vlan_id,
			bool update, u32 *index)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	int i, free_idx;

	if (sgid_tbl == NULL) {
		dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated\n");
		return -EINVAL;
	}
	/* Do we need a sgid_lock here? */
	if (sgid_tbl->active == sgid_tbl->max) {
		dev_err(&res->pdev->dev, "QPLIB: SGID table is full\n");
		return -ENOMEM;
	}
	free_idx = sgid_tbl->max;
	for (i = 0; i < sgid_tbl->max; i++) {
		if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
		    sgid_tbl->tbl[i].vlan_id == vlan_id) {
			dev_dbg(&res->pdev->dev,
				"QPLIB: SGID entry already exists in entry %d!\n",
				i);
			*index = i;
			return -EALREADY;
		} else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
				   sizeof(bnxt_qplib_gid_zero)) &&
			   free_idx == sgid_tbl->max) {
			free_idx = i;
		}
	}
	if (free_idx == sgid_tbl->max) {
		dev_err(&res->pdev->dev,
			"QPLIB: SGID table is FULL but count is not MAX??\n");
		return -ENOMEM;
	}
	if (update) {
		struct creq_add_gid_resp resp = {};
		struct bnxt_qplib_cmdqmsg msg = {};
		struct cmdq_add_gid req = {};
		int rc;

		bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_ADD_GID,
					 sizeof(req));

		req.gid[0] = cpu_to_be32(((u32 *)gid->raw)[3]);
		req.gid[1] = cpu_to_be32(((u32 *)gid->raw)[2]);
		req.gid[2] = cpu_to_be32(((u32 *)gid->raw)[1]);
		req.gid[3] = cpu_to_be32(((u32 *)gid->raw)[0]);
		/* The driver should ensure that all RoCE traffic is always
		 * VLAN tagged if it runs on a non-zero VLAN ID or on a
		 * non-zero priority.
		 */
		if ((vlan_id != 0xFFFF) || res->prio) {
			if (vlan_id != 0xFFFF)
				req.vlan = cpu_to_le16(vlan_id &
						CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
			req.vlan |=
				cpu_to_le16(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
					    CMDQ_ADD_GID_VLAN_VLAN_EN);
		}

		/* MAC in network format */
		req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
		req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
		req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

		bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
					sizeof(resp), 0);
		rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
		if (rc)
			return rc;
		sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
	}

	if (vlan_id != 0xFFFF)
		sgid_tbl->vlan[free_idx] = true;

	memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
	sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
	sgid_tbl->active++;
	dev_dbg(&res->pdev->dev,
		 "QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x\n",
		 free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);

	*index = free_idx;
	/* unlock */
	return 0;
}

/* AH */
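/*
 * Create an address handle in the firmware from the fields of *ah and
 * store the returned AH id in ah->id.
 */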
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
			 bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_ah_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_ah req = {};
	u32 temp32[4];
	u16 temp16[3];
	int rc;

	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_AH,
				 sizeof(req));

	memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
	req.dgid[0] = cpu_to_le32(temp32[0]);
	req.dgid[1] = cpu_to_le32(temp32[1]);
	req.dgid[2] = cpu_to_le32(temp32[2]);
	req.dgid[3] = cpu_to_le32(temp32[3]);

	req.type = ah->nw_type;
	req.hop_limit = ah->hop_limit;
	req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]);
	req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label &
					CMDQ_CREATE_AH_FLOW_LABEL_MASK) |
					CMDQ_CREATE_AH_DEST_VLAN_ID_MASK);
	req.pd_id = cpu_to_le32(ah->pd->id);
	req.traffic_class = ah->traffic_class;

	/* MAC in network format */
	memcpy(temp16, ah->dmac, ETH_ALEN);
	req.dest_mac[0] = cpu_to_le16(temp16[0]);
	req.dest_mac[1] = cpu_to_le16(temp16[1]);
	req.dest_mac[2] = cpu_to_le16(temp16[2]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), block);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	ah->id = le32_to_cpu(resp.xid);
	/* for Cu/Wh AHID 0 is not valid */
	if (!_is_chip_gen_p5_p7(res->cctx) && !ah->id)
		rc = -EINVAL;

	return rc;
}

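/*
 * Tear down the firmware state for an address handle by its id.
 */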
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
			  bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_ah_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_ah req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_AH,
				 sizeof(req));

	req.ah_cid = cpu_to_le32(ah->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), block);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}

/* MRW */
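/*
 * Deallocate an MR or MW key in the firmware and release the backing
 * PBL hwq, if one was allocated.
 */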
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
	struct creq_deallocate_key_resp resp = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_deallocate_key req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	int rc;

	if (mrw->lkey == 0xFFFFFFFF) {
		dev_info(&res->pdev->dev,
			 "QPLIB: SP: Free a reserved lkey MRW\n");
		return 0;
	}

	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DEALLOCATE_KEY,
				 sizeof(req));

	req.mrw_flags = mrw->type;

	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1)  ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
		req.key = cpu_to_le32(mrw->rkey);
	else
		req.key = cpu_to_le32(mrw->lkey);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	if (mrw->hwq.max_elements)
		bnxt_qplib_free_hwq(res, &mrw->hwq);

	return 0;
}

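/*
 * Allocate an MR or MW in the firmware; the returned xid becomes the
 * rkey for memory windows and the lkey otherwise.
 */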
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_allocate_mrw_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_allocate_mrw req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_ALLOCATE_MRW,
				 sizeof(req));

	req.pd_id = cpu_to_le32(mrw->pd->id);
	req.mrw_flags = mrw->type;
	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
	     mrw->flags & BNXT_QPLIB_FR_PMR) ||
	    mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
	    mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
		req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
	req.mrw_handle = cpu_to_le64((uintptr_t)mrw);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1)  ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
		mrw->rkey = le32_to_cpu(resp.xid);
	else
		mrw->lkey = le32_to_cpu(resp.xid);

	return 0;
}

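/*
 * Deregister an MR in the firmware and release its PBL hwq along with
 * the cached VA and size.
 */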
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
			 bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_deregister_mr_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_deregister_mr req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DEREGISTER_MR,
				 sizeof(req));

	req.lkey = cpu_to_le32(mrw->lkey);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), block);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	if (mrw->hwq.max_elements) {
		mrw->va = 0;
		mrw->total_size = 0;
		bnxt_qplib_free_hwq(res, &mrw->hwq);
	}

	return 0;
}

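/*
 * Register (or re-register) an MR: build the PBL hwq from the caller's
 * sglist when pages are supplied, then issue REGISTER_MR with the page
 * size, access flags, VA and length.
 */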
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res,
		      struct bnxt_qplib_mrinfo *mrinfo,
		      bool block)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_register_mr_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_register_mr req = {};
	struct bnxt_qplib_mrw *mr;
	u32 buf_pg_size;
	u32 pg_size;
	u16 level;
	u16 flags;
	int rc;

	mr = mrinfo->mrw;
	buf_pg_size = 0x01ULL << mrinfo->sg.pgshft;
	if (mrinfo->sg.npages) {
		/* Free the hwq if it already exists; this must be a re-reg */
		if (mr->hwq.max_elements)
			bnxt_qplib_free_hwq(res, &mr->hwq);
		/* Use system PAGE_SIZE */
		hwq_attr.res = res;
		hwq_attr.depth = mrinfo->sg.npages;
		hwq_attr.stride = PAGE_SIZE;
		hwq_attr.type = HWQ_TYPE_MR;
		hwq_attr.sginfo = &mrinfo->sg;
		rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
		if (rc) {
			dev_err(&res->pdev->dev,
				"SP: Reg MR memory allocation failed\n");
			return -ENOMEM;
		}
	}

	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_REGISTER_MR,
				 sizeof(req));
	/* Configure the request */
	if (mrinfo->is_dma) {
		/* No PBL provided, just use system PAGE_SIZE */
		level = 0;
		req.pbl = 0;
		pg_size = PAGE_SIZE;
	} else {
		level = mr->hwq.level;
		req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
	}

	pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
	req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
			       ((ilog2(pg_size) <<
				 CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
				CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
	req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
				 CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
				CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
	req.access = (mr->flags & 0xFFFF);
	req.va = cpu_to_le64(mr->va);
	req.key = cpu_to_le32(mr->lkey);
	if (_is_alloc_mr_unified(res->dattr)) {
		flags = 0;
		req.key = cpu_to_le32(mr->pd->id);
		flags |= CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
		req.flags = cpu_to_le16(flags);
	}
	req.mr_size = cpu_to_le64(mr->total_size);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), block);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	if (_is_alloc_mr_unified(res->dattr)) {
		mr->lkey = le32_to_cpu(resp.xid);
		mr->rkey = mr->lkey;
	}

	return 0;
fail:
	if (mr->hwq.max_elements)
		bnxt_qplib_free_hwq(res, &mr->hwq);
	return rc;
}

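/*
 * Allocate the page list used by fast-register work requests; the
 * requested depth is rounded up to a power of two to fit the HWQ
 * allocation model.
 */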
int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
					struct bnxt_qplib_frpl *frpl,
					int max_pg_ptrs)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int pg_ptrs, rc;

	/* Re-calculate the max to fit the HWQ allocation model */
	pg_ptrs = roundup_pow_of_two(max_pg_ptrs);

	sginfo.pgsize = PAGE_SIZE;
	sginfo.nopte = true;

	hwq_attr.res = res;
	hwq_attr.depth = pg_ptrs;
	hwq_attr.stride = PAGE_SIZE;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&frpl->hwq, &hwq_attr);
	if (!rc)
		frpl->max_pg_ptrs = pg_ptrs;

	return rc;
}

void bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
					struct bnxt_qplib_frpl *frpl)
{
	bnxt_qplib_free_hwq(res, &frpl->hwq);
}

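/*
 * Program the firmware mapping of traffic classes to the two CoS
 * queue ids supplied in cids[].
 */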
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_map_tc_to_cos_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_map_tc_to_cos req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_MAP_TC_TO_COS,
				 sizeof(req));
	req.cos0 = cpu_to_le16(cids[0]);
	req.cos1 = cpu_to_le16(cids[1]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}

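/*
 * Copy the extended (gen P5+) congestion control parameters into the
 * MODIFY_ROCE_CC_GEN1 TLV, converting multi-byte fields to LE.
 */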
static void bnxt_qplib_fill_cc_gen1(struct cmdq_modify_roce_cc_gen1_tlv *ext_req,
			     struct bnxt_qplib_cc_param_ext *cc_ext)
{
	ext_req->modify_mask = cpu_to_le64(cc_ext->ext_mask);
	cc_ext->ext_mask = 0;
	ext_req->inactivity_th_hi = cpu_to_le16(cc_ext->inact_th_hi);
	ext_req->min_time_between_cnps = cpu_to_le16(cc_ext->min_delta_cnp);
	ext_req->init_cp = cpu_to_le16(cc_ext->init_cp);
	ext_req->tr_update_mode = cc_ext->tr_update_mode;
	ext_req->tr_update_cycles = cc_ext->tr_update_cyls;
	ext_req->fr_num_rtts = cc_ext->fr_rtt;
	ext_req->ai_rate_increase = cc_ext->ai_rate_incr;
	ext_req->reduction_relax_rtts_th = cpu_to_le16(cc_ext->rr_rtt_th);
	ext_req->additional_relax_cr_th = cpu_to_le16(cc_ext->ar_cr_th);
	ext_req->cr_min_th = cpu_to_le16(cc_ext->cr_min_th);
	ext_req->bw_avg_weight = cc_ext->bw_avg_weight;
	ext_req->actual_cr_factor = cc_ext->cr_factor;
	ext_req->max_cp_cr_th = cpu_to_le16(cc_ext->cr_th_max_cp);
	ext_req->cp_bias_en = cc_ext->cp_bias_en;
	ext_req->cp_bias = cc_ext->cp_bias;
	ext_req->cnp_ecn = cc_ext->cnp_ecn;
	ext_req->rtt_jitter_en = cc_ext->rtt_jitter_en;
	ext_req->link_bytes_per_usec = cpu_to_le16(cc_ext->bytes_per_usec);
	ext_req->reset_cc_cr_th = cpu_to_le16(cc_ext->cc_cr_reset_th);
	ext_req->cr_width = cc_ext->cr_width;
	ext_req->quota_period_min = cc_ext->min_quota;
	ext_req->quota_period_max = cc_ext->max_quota;
	ext_req->quota_period_abs_max = cc_ext->abs_max_quota;
	ext_req->tr_lower_bound = cpu_to_le16(cc_ext->tr_lb);
	ext_req->cr_prob_factor = cc_ext->cr_prob_fac;
	ext_req->tr_prob_factor = cc_ext->tr_prob_fac;
	ext_req->fairness_cr_th = cpu_to_le16(cc_ext->fair_cr_th);
	ext_req->red_div = cc_ext->red_div;
	ext_req->cnp_ratio_th = cc_ext->cnp_ratio_th;
	ext_req->exp_ai_rtts = cpu_to_le16(cc_ext->ai_ext_rtt);
	ext_req->exp_ai_cr_cp_ratio = cc_ext->exp_crcp_ratio;
	ext_req->use_rate_table = cc_ext->low_rate_en;
	ext_req->cp_exp_update_th = cpu_to_le16(cc_ext->cpcr_update_th);
	ext_req->high_exp_ai_rtts_th1 = cpu_to_le16(cc_ext->ai_rtt_th1);
	ext_req->high_exp_ai_rtts_th2 = cpu_to_le16(cc_ext->ai_rtt_th2);
	ext_req->actual_cr_cong_free_rtts_th = cpu_to_le16(cc_ext->cf_rtt_th);
	ext_req->severe_cong_cr_th1 = cpu_to_le16(cc_ext->sc_cr_th1);
	ext_req->severe_cong_cr_th2 = cpu_to_le16(cc_ext->sc_cr_th2);
	ext_req->link64B_per_rtt = cpu_to_le32(cc_ext->l64B_per_rtt);
	ext_req->cc_ack_bytes = cc_ext->cc_ack_bytes;
	ext_req->reduce_init_cong_free_rtts_th = cpu_to_le16(cc_ext->reduce_cf_rtt_th);
}

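/*
 * Push the congestion control settings to the firmware. On gen P5/P7
 * the base command is wrapped in a TLV together with the gen1 extended
 * parameters; older chips get only the base command.
 */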
int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_cc_param *cc_param)
{
	struct bnxt_qplib_tlv_modify_cc_req tlv_req = {};
	struct creq_modify_roce_cc_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_roce_cc *req;
	int req_size;
	void *cmd;
	int rc;

	/* Prepare the older base command */
	req = &tlv_req.base_req;
	cmd = req;
	req_size = sizeof(*req);
	bnxt_qplib_rcfw_cmd_prep(req, CMDQ_BASE_OPCODE_MODIFY_ROCE_CC,
				 sizeof(*req));
	req->modify_mask = cpu_to_le32(cc_param->mask);
	req->enable_cc = cc_param->enable;
	req->g = cc_param->g;
	req->num_phases_per_state = cc_param->nph_per_state;
	req->time_per_phase = cc_param->time_pph;
	req->pkts_per_phase = cc_param->pkts_pph;
	req->init_cr = cpu_to_le16(cc_param->init_cr);
	req->init_tr = cpu_to_le16(cc_param->init_tr);
	req->tos_dscp_tos_ecn = (cc_param->tos_dscp <<
				 CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT) |
				 (cc_param->tos_ecn &
				 CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK);
	req->alt_vlan_pcp = cc_param->alt_vlan_pcp;
	req->alt_tos_dscp = cpu_to_le16(cc_param->alt_tos_dscp);
	req->rtt = cpu_to_le16(cc_param->rtt);
	req->tcp_cp = cpu_to_le16(cc_param->tcp_cp);
	req->cc_mode = cc_param->cc_mode;
	req->inactivity_th = cpu_to_le16(cc_param->inact_th);

	/* For chip gen P5 onwards, fill the extended command and header */
	if (_is_chip_gen_p5_p7(res->cctx)) {
		struct roce_tlv *hdr;
		u32 payload;
		u32 chunks;

		cmd = &tlv_req;
		req_size = sizeof(tlv_req);
		/* Prepare the primary tlv header */
		hdr = &tlv_req.tlv_hdr;
		chunks = CHUNKS(sizeof(struct bnxt_qplib_tlv_modify_cc_req));
		payload = sizeof(struct cmdq_modify_roce_cc);
		ROCE_1ST_TLV_PREP(hdr, chunks, payload, true);
		/* Prepare the secondary tlv header */
		hdr = (struct roce_tlv *)&tlv_req.ext_req;
		payload = sizeof(struct cmdq_modify_roce_cc_gen1_tlv) -
			  sizeof(struct roce_tlv);
		ROCE_EXT_TLV_PREP(hdr, TLV_TYPE_MODIFY_ROCE_CC_GEN1, payload,
				  false, true);
		bnxt_qplib_fill_cc_gen1(&tlv_req.ext_req, &cc_param->cc_ext);
	}

	bnxt_qplib_fill_cmdqmsg(&msg, cmd, &resp, NULL, req_size,
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
	return rc;
}

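/*
 * Inverse of bnxt_qplib_fill_cc_gen1(): unpack the extended congestion
 * control parameters from the gen1 query response side buffer.
 */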
static void bnxt_qplib_read_cc_gen1(struct bnxt_qplib_cc_param_ext *cc_ext,
			     struct creq_query_roce_cc_gen1_resp_sb_tlv *sb)
{
	cc_ext->inact_th_hi = le16_to_cpu(sb->inactivity_th_hi);
	cc_ext->min_delta_cnp = le16_to_cpu(sb->min_time_between_cnps);
	cc_ext->init_cp = le16_to_cpu(sb->init_cp);
	cc_ext->tr_update_mode = sb->tr_update_mode;
	cc_ext->tr_update_cyls = sb->tr_update_cycles;
	cc_ext->fr_rtt = sb->fr_num_rtts;
	cc_ext->ai_rate_incr = sb->ai_rate_increase;
	cc_ext->rr_rtt_th = le16_to_cpu(sb->reduction_relax_rtts_th);
	cc_ext->ar_cr_th = le16_to_cpu(sb->additional_relax_cr_th);
	cc_ext->cr_min_th = le16_to_cpu(sb->cr_min_th);
	cc_ext->bw_avg_weight = sb->bw_avg_weight;
	cc_ext->cr_factor = sb->actual_cr_factor;
	cc_ext->cr_th_max_cp = le16_to_cpu(sb->max_cp_cr_th);
	cc_ext->cp_bias_en = sb->cp_bias_en;
	cc_ext->cp_bias = sb->cp_bias;
	cc_ext->cnp_ecn = sb->cnp_ecn;
	cc_ext->rtt_jitter_en = sb->rtt_jitter_en;
	cc_ext->bytes_per_usec = le16_to_cpu(sb->link_bytes_per_usec);
	cc_ext->cc_cr_reset_th = le16_to_cpu(sb->reset_cc_cr_th);
	cc_ext->cr_width = sb->cr_width;
	cc_ext->min_quota = sb->quota_period_min;
	cc_ext->max_quota = sb->quota_period_max;
	cc_ext->abs_max_quota = sb->quota_period_abs_max;
	cc_ext->tr_lb = le16_to_cpu(sb->tr_lower_bound);
	cc_ext->cr_prob_fac = sb->cr_prob_factor;
	cc_ext->tr_prob_fac = sb->tr_prob_factor;
	cc_ext->fair_cr_th = le16_to_cpu(sb->fairness_cr_th);
	cc_ext->red_div = sb->red_div;
	cc_ext->cnp_ratio_th = sb->cnp_ratio_th;
	cc_ext->ai_ext_rtt = le16_to_cpu(sb->exp_ai_rtts);
	cc_ext->exp_crcp_ratio = sb->exp_ai_cr_cp_ratio;
	cc_ext->low_rate_en = sb->use_rate_table;
	cc_ext->cpcr_update_th = le16_to_cpu(sb->cp_exp_update_th);
	cc_ext->ai_rtt_th1 = le16_to_cpu(sb->high_exp_ai_rtts_th1);
	cc_ext->ai_rtt_th2 = le16_to_cpu(sb->high_exp_ai_rtts_th2);
	cc_ext->cf_rtt_th = le16_to_cpu(sb->actual_cr_cong_free_rtts_th);
	cc_ext->sc_cr_th1 = le16_to_cpu(sb->severe_cong_cr_th1);
	cc_ext->sc_cr_th2 = le16_to_cpu(sb->severe_cong_cr_th2);
	cc_ext->l64B_per_rtt = le32_to_cpu(sb->link64B_per_rtt);
	cc_ext->cc_ack_bytes = sb->cc_ack_bytes;
	cc_ext->reduce_cf_rtt_th = le16_to_cpu(sb->reduce_init_cong_free_rtts_th);
}

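/*
 * Read the current congestion control settings from the chip into
 * *cc_param, including the gen1 extension on P5/P7 silicon.
 */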
int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
			      struct bnxt_qplib_cc_param *cc_param)
{
	struct creq_query_roce_cc_gen1_resp_sb_tlv *gen1_sb;
	struct bnxt_qplib_tlv_query_rcc_sb *ext_sb;
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_roce_cc_resp resp = {};
	struct creq_query_roce_cc_resp_sb *sb;
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_query_roce_cc req = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	size_t resp_size;
	int rc;

	/* Query the parameters from the chip */
	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_ROCE_CC,
				 sizeof(req));
	if (_is_chip_gen_p5_p7(res->cctx))
		resp_size = sizeof(*ext_sb);
	else
		resp_size = sizeof(*sb);
	sbuf.size = ALIGN(resp_size, BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size,
				       &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;

	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
	if (rc) {
		dev_dbg(&res->pdev->dev, "%s:Query CC param failed:0x%x\n",
			__func__, rc);
		goto out;
	}

	ext_sb = sbuf.sb;
	gen1_sb = &ext_sb->gen1_sb;
	sb = _is_chip_gen_p5_p7(res->cctx) ? &ext_sb->base_sb :
		(struct creq_query_roce_cc_resp_sb *)ext_sb;

	cc_param->enable = sb->enable_cc & CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC;
	cc_param->tos_ecn = (sb->tos_dscp_tos_ecn &
			     CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK) >>
			     CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT;
	cc_param->tos_dscp = (sb->tos_dscp_tos_ecn &
			      CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK) >>
			      CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT;
	cc_param->alt_tos_dscp = sb->alt_tos_dscp;
	cc_param->alt_vlan_pcp = sb->alt_vlan_pcp;

	cc_param->g = sb->g;
	cc_param->nph_per_state = sb->num_phases_per_state;
	cc_param->init_cr = le16_to_cpu(sb->init_cr);
	cc_param->init_tr = le16_to_cpu(sb->init_tr);
	cc_param->cc_mode = sb->cc_mode;
	cc_param->inact_th = le16_to_cpu(sb->inactivity_th);
	cc_param->rtt = le16_to_cpu(sb->rtt);
	cc_param->tcp_cp = le16_to_cpu(sb->tcp_cp);
	cc_param->time_pph = sb->time_per_phase;
	cc_param->pkts_pph = sb->pkts_per_phase;
	if (_is_chip_gen_p5_p7(res->cctx))
		bnxt_qplib_read_cc_gen1(&cc_param->cc_ext, gen1_sb);
out:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
				  sbuf.sb, sbuf.dma_addr);
	return rc;
}


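/*
 * Collect the RoCE error and resource counters through a
 * QUERY_ROCE_STATS side buffer; the out-of-sequence drop count is
 * accumulated as a delta against the previously sampled value.
 */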
int bnxt_qplib_get_roce_error_stats(struct bnxt_qplib_rcfw *rcfw,
				    struct bnxt_qplib_roce_stats *stats,
				    struct bnxt_qplib_query_stats_info *sinfo)
{
	struct creq_query_roce_stats_resp resp = {};
	struct creq_query_roce_stats_resp_sb *sb;
	struct cmdq_query_roce_stats req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	u16 cmd_flags = 0;
	u32 fn_id = 0;
	int rc = 0;

	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_ROCE_STATS,
				 sizeof(req));

	sbuf.size = sizeof(*sb);
	sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size,
				       &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	if (rcfw->res->cctx->hwrm_intf_ver >= HWRM_VERSION_ROCE_STATS_FN_ID) {
		if (sinfo->function_id != 0xFFFFFFFF) {
			cmd_flags = CMDQ_QUERY_ROCE_STATS_FLAGS_FUNCTION_ID;
			if (sinfo->vf_valid) {
				fn_id = CMDQ_QUERY_ROCE_STATS_VF_VALID;
				fn_id |= (sinfo->function_id <<
					  CMDQ_QUERY_ROCE_STATS_VF_NUM_SFT) &
					  CMDQ_QUERY_ROCE_STATS_VF_NUM_MASK;
			} else {
				fn_id = sinfo->function_id &
					CMDQ_QUERY_ROCE_STATS_PF_NUM_MASK;
			}
		}

		if (sinfo->collection_id != 0xFF) {
			cmd_flags |= CMDQ_QUERY_ROCE_STATS_FLAGS_COLLECTION_ID;
			req.collection_id = sinfo->collection_id;
		}

		/* Program the flags only after all of them are accumulated */
		req.flags = cpu_to_le16(cmd_flags);
		req.function_id = cpu_to_le32(fn_id);
	} else {
		/* For older HWRM versions the command length has to be
		 * adjusted: the newer command is 8 bytes larger, so
		 * subtract those 8 bytes here. The command-unit count is
		 * adjusted inside bnxt_qplib_rcfw_send_message.
		 */
		req.cmd_size -= 8;
	}

	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	stats->to_retransmits = le64_to_cpu(sb->to_retransmits);
	stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
	stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded);
	stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
	stats->missing_resp = le64_to_cpu(sb->missing_resp);
	stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err);
	stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err);
	stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err);
	stats->local_protection_err = le64_to_cpu(sb->local_protection_err);
	stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err);
	stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err);
	stats->remote_access_err = le64_to_cpu(sb->remote_access_err);
	stats->remote_op_err = le64_to_cpu(sb->remote_op_err);
	stats->dup_req = le64_to_cpu(sb->dup_req);
	stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max);
	stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch);
	stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe);
	stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err);
	stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey);
	stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err);
	stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm);
	stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err);
	stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey);
	stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err);
	stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm);
	stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err);
	stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow);
	stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode);
	stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic);
	stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err);
	stats->res_mem_error = le64_to_cpu(sb->res_mem_error);
	stats->res_srq_err = le64_to_cpu(sb->res_srq_err);
	stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err);
	stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey);
	stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err);
	stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err);
	stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
	stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
	stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);

	if (!rcfw->init_oos_stats) {
		rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
		rcfw->init_oos_stats = true;
	} else {
		stats->res_oos_drop_count += (le64_to_cpu(sb->res_oos_drop_count) -
					      rcfw->oos_prev) &
					     BNXT_QPLIB_OOS_COUNT_MASK;
		rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
	}

	stats->active_qp_count_p0 = le64_to_cpu(sb->active_qp_count_p0);
	stats->active_qp_count_p1 = le64_to_cpu(sb->active_qp_count_p1);
	stats->active_qp_count_p2 = le64_to_cpu(sb->active_qp_count_p2);
	stats->active_qp_count_p3 = le64_to_cpu(sb->active_qp_count_p3);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
				  sbuf.sb, sbuf.dma_addr);
	return rc;
}

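/*
 * Configure link aggregation (mode, member and active port maps, stats
 * context) for bond-style deployments via SET_LINK_AGGR_MODE.
 */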
int bnxt_qplib_set_link_aggr_mode(struct bnxt_qplib_res *res,
				  u8 aggr_mode, u8 member_port_map,
				  u8 active_port_map, bool aggr_en,
				  u32 stats_fw_id)
{
	struct creq_set_link_aggr_mode_resources_resp resp = {};
	struct cmdq_set_link_aggr_mode_cc req = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_cmdqmsg msg = {};
	int rc = 0;

	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE,
				 sizeof(req));

	req.aggr_enable = aggr_en;
	req.active_port_map = active_port_map;
	req.member_port_map = member_port_map;
	req.link_aggr_mode = aggr_mode;

	/* Only the second port's stats context id needs to be set for now */
	req.stat_ctx_id[1] = cpu_to_le16((u16)(stats_fw_id));

	req.modify_mask =
		cpu_to_le32(CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_EN |
			    CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_ACTIVE_PORT_MAP |
			    CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_MEMBER_PORT_MAP |
			    CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_MODE |
			    CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_STAT_CTX_ID);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		dev_err(&res->pdev->dev,
			"QPLIB: Failed to set link aggr mode, %#x\n", rc);

	return rc;
}

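/*
 * Query the extended per-function RoCE statistics (packet/byte and
 * congestion counters) into *estat for the given function id.
 */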
int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
			 struct bnxt_qplib_ext_stat *estat,
			 struct bnxt_qplib_query_stats_info *sinfo)
{
	struct creq_query_roce_stats_ext_resp resp = {};
	struct creq_query_roce_stats_ext_resp_sb *sb;
	struct cmdq_query_roce_stats_ext req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	int rc;

	sbuf.size = sizeof(*sb);
	sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size,
				       &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: SP: QUERY_ROCE_STATS_EXT alloc sb failed\n");
		return -ENOMEM;
	}
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep(&req,
			CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS,
			sizeof(req));
	req.resp_size = sbuf.size;
	req.resp_addr = cpu_to_le64(sbuf.dma_addr);
	req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
	if (_is_chip_p7(rcfw->res->cctx) && rcfw->res->is_vf) {
		if (sinfo->vf_valid)
			req.function_id =
				cpu_to_le32(CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID |
					    (fid << CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT));
		else
			req.flags = cpu_to_le16(0);
	} else {
		req.function_id = cpu_to_le32(fid);
	}

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;

	/* dump when dyndbg is enabled */
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, sb, sizeof(*sb));
	estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts);
	estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts);
	estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts);
	estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts);
	estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts);
	estat->tx_roce_pkts = le64_to_cpu(sb->tx_roce_pkts);
	estat->tx_roce_bytes = le64_to_cpu(sb->tx_roce_bytes);
	estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts);
	estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts);
	estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts);
	estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts);
	estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts);
	estat->rx_roce_pkts = le64_to_cpu(sb->rx_roce_pkts);
	estat->rx_roce_bytes = le64_to_cpu(sb->rx_roce_bytes);
	estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts);
	estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes);
	estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts);
	estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts);
	estat->tx_cnp = le64_to_cpu(sb->tx_cnp_pkts);
	estat->rx_cnp = le64_to_cpu(sb->rx_cnp_pkts);
	estat->rx_ecn_marked = le64_to_cpu(sb->rx_ecn_marked_pkts);
	estat->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
	estat->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
	estat->missing_resp = le64_to_cpu(sb->missing_resp);
	estat->to_retransmits = le64_to_cpu(sb->to_retransmit);
	estat->dup_req = le64_to_cpu(sb->dup_req);
	estat->rx_dcn_payload_cut = le64_to_cpu(sb->rx_dcn_payload_cut);
	estat->te_bypassed = le64_to_cpu(sb->te_bypassed);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
				  sbuf.sb, sbuf.dma_addr);
	return rc;
}