// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Broadcom.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/bnxt/hsi.h>
#include <linux/if_vlan.h>
#include <net/netdev_queues.h>

#include "bnge.h"
#include "bnge_hwrm.h"
#include "bnge_hwrm_lib.h"
#include "bnge_rmem.h"
#include "bnge_resc.h"

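/* Query HWRM interface and firmware versions from the device, cache the
 * VER_GET response in bd->ver_resp, and derive request sizing, command
 * timeouts and short command/Kong/CFA capability flags from it.
 */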
int bnge_hwrm_ver_get(struct bnge_dev *bd)
{
	u32 dev_caps_cfg, hwrm_ver, hwrm_spec_code;
	u16 fw_maj, fw_min, fw_bld, fw_rsv;
	struct hwrm_ver_get_output *resp;
	struct hwrm_ver_get_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VER_GET);
	if (rc)
		return rc;

	bnge_hwrm_req_flags(bd, req, BNGE_HWRM_FULL_WAIT);
	bd->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req->hwrm_intf_min = HWRM_VERSION_MINOR;
	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (rc)
		goto hwrm_ver_get_exit;

	memcpy(&bd->ver_resp, resp, sizeof(struct hwrm_ver_get_output));

	hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
			 resp->hwrm_intf_min_8b << 8 |
			 resp->hwrm_intf_upd_8b;
	hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
			HWRM_VERSION_UPDATE;

	if (hwrm_spec_code > hwrm_ver)
		snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
			 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
			 HWRM_VERSION_UPDATE);
	else
		snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
			 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
			 resp->hwrm_intf_upd_8b);

	fw_maj = le16_to_cpu(resp->hwrm_fw_major);
	fw_min = le16_to_cpu(resp->hwrm_fw_minor);
	fw_bld = le16_to_cpu(resp->hwrm_fw_build);
	fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);

	bd->fw_ver_code = BNGE_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
	snprintf(bd->fw_ver_str, FW_VER_STR_LEN, "%d.%d.%d.%d",
		 fw_maj, fw_min, fw_bld, fw_rsv);

	if (strlen(resp->active_pkg_name)) {
		int fw_ver_len = strlen(bd->fw_ver_str);

		snprintf(bd->fw_ver_str + fw_ver_len,
			 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
			 resp->active_pkg_name);
		bd->fw_cap |= BNGE_FW_CAP_PKG_VER;
	}

	bd->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
	if (!bd->hwrm_cmd_timeout)
		bd->hwrm_cmd_timeout = BNGE_DFLT_HWRM_CMD_TIMEOUT;
	bd->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
	if (!bd->hwrm_cmd_max_timeout)
		bd->hwrm_cmd_max_timeout = BNGE_HWRM_CMD_MAX_TIMEOUT;
	else if (bd->hwrm_cmd_max_timeout > BNGE_HWRM_CMD_MAX_TIMEOUT)
		dev_warn(bd->dev, "Default HWRM commands max timeout increased to %d seconds\n",
			 bd->hwrm_cmd_max_timeout / 1000);

	bd->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
	bd->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);

	if (bd->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
		bd->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

	bd->chip_num = le16_to_cpu(resp->chip_num);
	bd->chip_rev = resp->chip_rev;

	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
		bd->fw_cap |= BNGE_FW_CAP_SHORT_CMD;

	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
		bd->fw_cap |= BNGE_FW_CAP_KONG_MB_CHNL;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
		bd->fw_cap |= BNGE_FW_CAP_CFA_ADV_FLOW;

hwrm_ver_get_exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

int
bnge_hwrm_nvm_dev_info(struct bnge_dev *bd,
		       struct hwrm_nvm_get_dev_info_output *nvm_info)
{
	struct hwrm_nvm_get_dev_info_output *resp;
	struct hwrm_nvm_get_dev_info_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_NVM_GET_DEV_INFO);
	if (rc)
		return rc;

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (!rc)
		memcpy(nvm_info, resp, sizeof(*resp));
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

int bnge_hwrm_func_reset(struct bnge_dev *bd)
{
	struct hwrm_func_reset_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESET);
	if (rc)
		return rc;

	req->enables = 0;
	bnge_hwrm_req_timeout(bd, req, BNGE_HWRM_RESET_TIMEOUT);
	return bnge_hwrm_req_send(bd, req);
}

int bnge_hwrm_fw_set_time(struct bnge_dev *bd)
{
	struct hwrm_fw_set_time_input *req;
	struct tm tm;
	int rc;

	time64_to_tm(ktime_get_real_seconds(), 0, &tm);

	rc = bnge_hwrm_req_init(bd, req, HWRM_FW_SET_TIME);
	if (rc)
		return rc;

	req->year = cpu_to_le16(1900 + tm.tm_year);
	req->month = 1 + tm.tm_mon;
	req->day = tm.tm_mday;
	req->hour = tm.tm_hour;
	req->minute = tm.tm_min;
	req->second = tm.tm_sec;
	return bnge_hwrm_req_send(bd, req);
}

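/* Register the driver with the firmware, advertising OS type, driver
 * version and async event forwarding.  On success the
 * BNGE_STATE_DRV_REGISTERED bit is set and BNGE_FW_CAP_IF_CHANGE is
 * recorded if the response advertises it.
 */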
int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd)
{
	struct hwrm_func_drv_rgtr_output *resp;
	struct hwrm_func_drv_rgtr_input *req;
	u32 flags;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_RGTR);
	if (rc)
		return rc;

	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
				   FUNC_DRV_RGTR_REQ_ENABLES_VER |
				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;

	req->flags = cpu_to_le32(flags);
	req->ver_maj_8b = DRV_VER_MAJ;
	req->ver_min_8b = DRV_VER_MIN;
	req->ver_upd_8b = DRV_VER_UPD;
	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
	req->ver_min = cpu_to_le16(DRV_VER_MIN);
	req->ver_upd = cpu_to_le16(DRV_VER_UPD);

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (!rc) {
		set_bit(BNGE_STATE_DRV_REGISTERED, &bd->state);
		if (resp->flags &
		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
			bd->fw_cap |= BNGE_FW_CAP_IF_CHANGE;
	}
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd)
{
	struct hwrm_func_drv_unrgtr_input *req;
	int rc;

	if (!test_and_clear_bit(BNGE_STATE_DRV_REGISTERED, &bd->state))
		return 0;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_UNRGTR);
	if (rc)
		return rc;
	return bnge_hwrm_req_send(bd, req);
}

static void bnge_init_ctx_initializer(struct bnge_ctx_mem_type *ctxm,
				      u8 init_val, u8 init_offset,
				      bool init_mask_set)
{
	ctxm->init_value = init_val;
	ctxm->init_offset = BNGE_CTX_INIT_INVALID_OFFSET;
	if (init_mask_set)
		ctxm->init_offset = init_offset * 4;
	else
		ctxm->init_value = 0;
}

static int bnge_alloc_all_ctx_pg_info(struct bnge_dev *bd, int ctx_max)
{
	struct bnge_ctx_mem_info *ctx = bd->ctx;
	u16 type;

	for (type = 0; type < ctx_max; type++) {
		struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
		int n = 1;

		if (!ctxm->max_entries)
			continue;

		if (ctxm->instance_bmap)
			n = hweight32(ctxm->instance_bmap);
		ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
		if (!ctxm->pg_info)
			return -ENOMEM;
	}

	return 0;
}

#define BNGE_CTX_INIT_VALID(flags)	\
	(!!((flags) &			\
	    FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))

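/* Walk the backing store context types reported by
 * FUNC_BACKING_STORE_QCAPS_V2, following next_valid_type up to
 * BNGE_CTX_V2_MAX, caching entry size, limits, split entries and
 * initializer info for each valid type, then allocate the per-type
 * page info arrays.
 */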
int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd)
{
	struct hwrm_func_backing_store_qcaps_v2_output *resp;
	struct hwrm_func_backing_store_qcaps_v2_input *req;
	struct bnge_ctx_mem_info *ctx;
	u16 type;
	int rc;

	if (bd->ctx)
		return 0;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
	if (rc)
		return rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	bd->ctx = ctx;

	resp = bnge_hwrm_req_hold(bd, req);

	for (type = 0; type < BNGE_CTX_V2_MAX; ) {
		struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
		u8 init_val, init_off, i;
		__le32 *p;
		u32 flags;

		req->type = cpu_to_le16(type);
		rc = bnge_hwrm_req_send(bd, req);
		if (rc)
			goto ctx_done;
		flags = le32_to_cpu(resp->flags);
		type = le16_to_cpu(resp->next_valid_type);
		if (!(flags &
		      FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
			continue;

		ctxm->type = le16_to_cpu(resp->type);
		ctxm->entry_size = le16_to_cpu(resp->entry_size);
		ctxm->flags = flags;
		ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
		ctxm->entry_multiple = resp->entry_multiple;
		ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
		ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
		init_val = resp->ctx_init_value;
		init_off = resp->ctx_init_offset;
		bnge_init_ctx_initializer(ctxm, init_val, init_off,
					  BNGE_CTX_INIT_VALID(flags));
		ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
					      BNGE_MAX_SPLIT_ENTRY);
		for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
		     i++, p++)
			ctxm->split[i] = le32_to_cpu(*p);
	}
	rc = bnge_alloc_all_ctx_pg_info(bd, BNGE_CTX_V2_MAX);

ctx_done:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

static void bnge_hwrm_set_pg_attr(struct bnge_ring_mem_info *rmem, u8 *pg_attr,
				  __le64 *pg_dir)
{
	if (!rmem->nr_pages)
		return;

	BNGE_SET_CTX_PAGE_ATTR(*pg_attr);
	if (rmem->depth >= 1) {
		if (rmem->depth == 2)
			*pg_attr |= 2;
		else
			*pg_attr |= 1;
		*pg_dir = cpu_to_le64(rmem->dma_pg_tbl);
	} else {
		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
	}
}

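/* Configure one backing store context type in the firmware with
 * FUNC_BACKING_STORE_CFG_V2.  A request is sent for each instance set in
 * ctxm->instance_bmap that has entries, and BNGE_BS_CFG_ALL_DONE is set
 * on the final instance when @last is true.
 */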
int bnge_hwrm_func_backing_store(struct bnge_dev *bd,
				 struct bnge_ctx_mem_type *ctxm,
				 bool last)
{
	struct hwrm_func_backing_store_cfg_v2_input *req;
	u32 instance_bmap = ctxm->instance_bmap;
	int i, j, rc = 0, n = 1;
	__le32 *p;

	if (!(ctxm->flags & BNGE_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
		return 0;

	if (instance_bmap)
		n = hweight32(ctxm->instance_bmap);
	else
		instance_bmap = 1;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
	if (rc)
		return rc;
	bnge_hwrm_req_hold(bd, req);
	req->type = cpu_to_le16(ctxm->type);
	req->entry_size = cpu_to_le16(ctxm->entry_size);
	req->subtype_valid_cnt = ctxm->split_entry_cnt;
	for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
		p[i] = cpu_to_le32(ctxm->split[i]);
	for (i = 0, j = 0; j < n && !rc; i++) {
		struct bnge_ctx_pg_info *ctx_pg;

		if (!(instance_bmap & (1 << i)))
			continue;
		req->instance = cpu_to_le16(i);
		ctx_pg = &ctxm->pg_info[j++];
		if (!ctx_pg->entries)
			continue;
		req->num_entries = cpu_to_le32(ctx_pg->entries);
		bnge_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req->page_size_pbl_level,
				      &req->page_dir);
		if (last && j == n)
			req->flags =
				cpu_to_le32(BNGE_BS_CFG_ALL_DONE);
		rc = bnge_hwrm_req_send(bd, req);
	}
	bnge_hwrm_req_drop(bd, req);

	return rc;
}

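/* Read back the ring resources currently allocated to this function via
 * FUNC_QCFG and record them in bd->hw_resc.  If fewer completion rings
 * than RX plus TX rings were granted, trim the RX/TX counts with
 * bnge_fix_rings_count() to fit the available completion rings.
 */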
static int bnge_hwrm_get_rings(struct bnge_dev *bd)
{
	struct bnge_hw_resc *hw_resc = &bd->hw_resc;
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	u16 cp, stats;
	u16 rx, tx;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (rc) {
		bnge_hwrm_req_drop(bd, req);
		return rc;
	}

	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
	hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
	hw_resc->resv_hw_ring_grps =
		le32_to_cpu(resp->alloc_hw_ring_grps);
	hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
	hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
	cp = le16_to_cpu(resp->alloc_cmpl_rings);
	stats = le16_to_cpu(resp->alloc_stat_ctx);
	hw_resc->resv_irqs = cp;
	rx = hw_resc->resv_rx_rings;
	tx = hw_resc->resv_tx_rings;
	if (bnge_is_agg_reqd(bd))
		rx >>= 1;
	if (cp < (rx + tx)) {
		rc = bnge_fix_rings_count(&rx, &tx, cp, false);
		if (rc)
			goto get_rings_exit;
		if (bnge_is_agg_reqd(bd))
			rx <<= 1;
		hw_resc->resv_rx_rings = rx;
		hw_resc->resv_tx_rings = tx;
	}
	hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
	hw_resc->resv_hw_ring_grps = rx;
	hw_resc->resv_cp_rings = cp;
	hw_resc->resv_stat_ctxs = stats;

get_rings_exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

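/* Build a FUNC_CFG request reserving the ring, VNIC, RSS context, stats
 * context and MSI-X counts given in @hwr.  Only resources with a
 * non-zero count get their enables bit set; the caller must send or
 * drop the returned request.
 */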
static struct hwrm_func_cfg_input *
__bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
{
	struct hwrm_func_cfg_input *req;
	u32 enables = 0;

	if (bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG))
		return NULL;

	req->fid = cpu_to_le16(0xffff);
	enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	req->num_tx_rings = cpu_to_le16(hwr->tx);

	enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
	enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
	enables |= hwr->nq ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
	enables |= hwr->cmpl ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
	enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
	enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;

	req->num_rx_rings = cpu_to_le16(hwr->rx);
	req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
	req->num_cmpl_rings = cpu_to_le16(hwr->cmpl);
	req->num_msix = cpu_to_le16(hwr->nq);
	req->num_stat_ctxs = cpu_to_le16(hwr->stat);
	req->num_vnics = cpu_to_le16(hwr->vnic);
	req->enables = cpu_to_le32(enables);

	return req;
}

static int
bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	req = __bnge_hwrm_reserve_pf_rings(bd, hwr);
	if (!req)
		return -ENOMEM;

	if (!req->enables) {
		bnge_hwrm_req_drop(bd, req);
		return 0;
	}

	rc = bnge_hwrm_req_send(bd, req);
	if (rc)
		return rc;

	return bnge_hwrm_get_rings(bd);
}

int bnge_hwrm_reserve_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
{
	return bnge_hwrm_reserve_pf_rings(bd, hwr);
}

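/* Query function configuration for the maximum configured MTU and, if
 * not already known, the L2 doorbell BAR offset and size.  The doorbell
 * size falls back to the PCI BAR 2 length when the reported value is
 * missing or inconsistent.
 */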
int bnge_hwrm_func_qcfg(struct bnge_dev *bd)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (rc)
		goto func_qcfg_exit;

	bd->max_mtu = le16_to_cpu(resp->max_mtu_configured);
	if (!bd->max_mtu)
		bd->max_mtu = BNGE_MAX_MTU;

	if (bd->db_size)
		goto func_qcfg_exit;

	bd->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
	bd->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
			1024);
	if (!bd->db_size || bd->db_size > pci_resource_len(bd->pdev, 2) ||
	    bd->db_size <= bd->db_offset)
		bd->db_size = pci_resource_len(bd->pdev, 2);

func_qcfg_exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

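/* Query the min/max resource ranges (rings, VNICs, RSS/stats/L2
 * contexts, MSI-X) available to this function and cache them in
 * bd->hw_resc for later ring reservations.
 */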
int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd)
{
	struct hwrm_func_resource_qcaps_output *resp;
	struct bnge_hw_resc *hw_resc = &bd->hw_resc;
	struct hwrm_func_resource_qcaps_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESOURCE_QCAPS);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send_silent(bd, req);
	if (rc)
		goto hwrm_func_resc_qcaps_exit;

	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	hw_resc->max_nqs = le16_to_cpu(resp->max_msix);
	hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;

hwrm_func_resc_qcaps_exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

int bnge_hwrm_func_qcaps(struct bnge_dev *bd)
{
	struct hwrm_func_qcaps_output *resp;
	struct hwrm_func_qcaps_input *req;
	struct bnge_pf_info *pf = &bd->pf;
	u32 flags;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCAPS);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (rc)
		goto hwrm_func_qcaps_exit;

	flags = le32_to_cpu(resp->flags);
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
		bd->flags |= BNGE_EN_ROCE_V1;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
		bd->flags |= BNGE_EN_ROCE_V2;

	pf->fw_fid = le16_to_cpu(resp->fid);
	pf->port_id = le16_to_cpu(resp->port_id);
	memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);

	bd->tso_max_segs = le16_to_cpu(resp->max_tso_segs);

hwrm_func_qcaps_exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

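/* Query VNIC capabilities and translate the response flags into
 * BNGE_FW_CAP_* and BNGE_RSS_CAP_* bits (VLAN stripping, RSS hash type
 * delta, TCAM RSS profiles, tunnel TPA, IPsec AH/ESP SPI hashing).  The
 * per-ring stats block size is also chosen based on TPA v2 support.
 */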
int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd)
{
	struct hwrm_vnic_qcaps_output *resp;
	struct hwrm_vnic_qcaps_input *req;
	int rc;

	bd->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
	bd->rss_cap &= ~BNGE_RSS_CAP_NEW_RSS_CAP;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_QCAPS);
	if (rc)
		return rc;

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (!rc) {
		u32 flags = le32_to_cpu(resp->flags);

		if (flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP)
			bd->fw_cap |= BNGE_FW_CAP_VLAN_RX_STRIP;
		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
			bd->rss_cap |= BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA;
		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
			bd->rss_cap |= BNGE_RSS_CAP_RSS_TCAM;
		bd->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
		if (bd->max_tpa_v2)
			bd->hw_ring_stats_size = BNGE_RING_STATS_SIZE;
		if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
			bd->fw_cap |= BNGE_FW_CAP_VNIC_TUNNEL_TPA;
		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
			bd->rss_cap |= BNGE_RSS_CAP_AH_V4_RSS_CAP;
		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
			bd->rss_cap |= BNGE_RSS_CAP_AH_V6_RSS_CAP;
		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
			bd->rss_cap |= BNGE_RSS_CAP_ESP_V4_RSS_CAP;
		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
			bd->rss_cap |= BNGE_RSS_CAP_ESP_V6_RSS_CAP;
	}
	bnge_hwrm_req_drop(bd, req);

	return rc;
}

#define BNGE_CNPQ(q_profile)	\
		((q_profile) ==	\
		 QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)

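/* Discover the configurable hardware queues and their service profiles.
 * CNP queues are not counted as traffic classes when RoCE is enabled,
 * and max_tc is forced to 1 if the firmware reports an asymmetric queue
 * configuration.
 */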
int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd)
{
	struct hwrm_queue_qportcfg_output *resp;
	struct hwrm_queue_qportcfg_input *req;
	u8 i, j, *qptr;
	bool no_rdma;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_QUEUE_QPORTCFG);
	if (rc)
		return rc;

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (rc)
		goto qportcfg_exit;

	if (!resp->max_configurable_queues) {
		rc = -EINVAL;
		goto qportcfg_exit;
	}
	bd->max_tc = resp->max_configurable_queues;
	bd->max_lltc = resp->max_configurable_lossless_queues;
	if (bd->max_tc > BNGE_MAX_QUEUE)
		bd->max_tc = BNGE_MAX_QUEUE;

	no_rdma = !bnge_is_roce_en(bd);
	qptr = &resp->queue_id0;
	for (i = 0, j = 0; i < bd->max_tc; i++) {
		bd->q_info[j].queue_id = *qptr;
		bd->q_ids[i] = *qptr++;
		bd->q_info[j].queue_profile = *qptr++;
		bd->tc_to_qidx[j] = j;
		if (!BNGE_CNPQ(bd->q_info[j].queue_profile) || no_rdma)
			j++;
	}
	bd->max_q = bd->max_tc;
	bd->max_tc = max_t(u8, j, 1);

	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
		bd->max_tc = 1;

	if (bd->max_lltc > bd->max_tc)
		bd->max_lltc = bd->max_tc;

qportcfg_exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

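/* Program VNIC placement modes: jumbo placement using the current RX
 * buffer size, plus IPv4/IPv6 header-data split with the pending
 * hds_thresh from the netdev queue config when aggregation rings are
 * required.
 */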
int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic)
{
	u16 hds_thresh = (u16)bn->netdev->cfg_pending->hds_thresh;
	struct hwrm_vnic_plcmodes_cfg_input *req;
	struct bnge_dev *bd = bn->bd;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_PLCMODES_CFG);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
	req->enables = cpu_to_le32(BNGE_PLC_EN_JUMBO_THRES_VALID);
	req->jumbo_thresh = cpu_to_le16(bn->rx_buf_use_size);

	if (bnge_is_agg_reqd(bd)) {
		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
		req->enables |=
			cpu_to_le32(BNGE_PLC_EN_HDS_THRES_VALID);
		req->hds_threshold = cpu_to_le16(hds_thresh);
	}
	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return bnge_hwrm_req_send(bd, req);
}

int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
			     struct bnge_vnic_info *vnic, u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
	if (rc)
		return rc;

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (!rc)
		vnic->fw_rss_cos_lb_ctx[ctx_idx] =
			le16_to_cpu(resp->rss_cos_lb_ctx_id);
	bnge_hwrm_req_drop(bd, req);

	return rc;
}

static void
__bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
			 struct hwrm_vnic_rss_cfg_input *req,
			 struct bnge_vnic_info *vnic)
{
	struct bnge_dev *bd = bn->bd;

	bnge_fill_hw_rss_tbl(bn, vnic);
	req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;

	req->hash_type = cpu_to_le32(bd->rss_hash_cfg);
	req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
	req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
	req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
}

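/* Configure (or clear, when @set_rss is false) RSS for a VNIC.  The
 * indirection table is programmed one RSS context at a time, advancing
 * the ring group table address by BNGE_RSS_TABLE_SIZE per ring table
 * pair index.
 */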
int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
			   struct bnge_vnic_info *vnic, bool set_rss)
{
	struct hwrm_vnic_rss_cfg_input *req;
	struct bnge_dev *bd = bn->bd;
	dma_addr_t ring_tbl_map;
	u32 i, nr_ctxs;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_CFG);
	if (rc)
		return rc;

	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	if (!set_rss)
		return bnge_hwrm_req_send(bd, req);

	__bnge_hwrm_vnic_set_rss(bn, req, vnic);
	ring_tbl_map = vnic->rss_table_dma_addr;
	nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);

	bnge_hwrm_req_hold(bd, req);
	for (i = 0; i < nr_ctxs; ring_tbl_map += BNGE_RSS_TABLE_SIZE, i++) {
		req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
		req->ring_table_pair_index = i;
		req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
		rc = bnge_hwrm_req_send(bd, req);
		if (rc)
			goto exit;
	}

exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

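/* Configure a VNIC with its default RX and completion rings, an MRU
 * derived from the netdev MTU, optional VLAN stripping, and RoCE dual
 * mode on the default VNIC when the aux device is registered.
 */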
int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic)
{
	struct bnge_rx_ring_info *rxr = &bn->rx_ring[0];
	struct hwrm_vnic_cfg_input *req;
	struct bnge_dev *bd = bn->bd;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_CFG);
	if (rc)
		return rc;

	req->default_rx_ring_id =
		cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
	req->default_cmpl_ring_id =
		cpu_to_le16(bnge_cp_ring_for_rx(rxr));
	req->enables =
		cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
			    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
	vnic->mru = bd->netdev->mtu + ETH_HLEN + VLAN_HLEN;
	req->mru = cpu_to_le16(vnic->mru);

	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);

	if (bd->flags & BNGE_EN_STRIP_VLAN)
		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
	if (vnic->vnic_id == BNGE_VNIC_DEFAULT && bnge_aux_registered(bd))
		req->flags |= cpu_to_le32(BNGE_VNIC_CFG_ROCE_DUAL_MODE);

	return bnge_hwrm_req_send(bd, req);
}

void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct hwrm_vnic_rss_qcfg_output *resp;
	struct hwrm_vnic_rss_qcfg_input *req;
	struct bnge_dev *bd = bn->bd;

	if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_QCFG))
		return;

	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	/* all contexts configured to same hash_type, zero always exists */
	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	resp = bnge_hwrm_req_hold(bd, req);
	if (!bnge_hwrm_req_send(bd, req))
		bd->rss_hash_cfg =
			le32_to_cpu(resp->hash_type) ?: bd->rss_hash_cfg;
	bnge_hwrm_req_drop(bd, req);
}

int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
{
	struct hwrm_cfa_l2_filter_free_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_FREE);
	if (rc)
		return rc;

	req->l2_filter_id = fltr->base.filter_id;
	return bnge_hwrm_req_send(bd, req);
}

int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
{
	struct hwrm_cfa_l2_filter_alloc_output *resp;
	struct hwrm_cfa_l2_filter_alloc_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_ALLOC);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);

	req->flags |= cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
	req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
	req->enables =
		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
	ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
	eth_broadcast_addr(req->l2_addr_mask);

	if (fltr->l2_key.vlan) {
		req->enables |=
			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
				CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
				CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
		req->num_vlans = 1;
		req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
		req->l2_ivlan_mask = cpu_to_le16(0xfff);
	}

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (!rc)
		fltr->base.filter_id = resp->l2_filter_id;

	bnge_hwrm_req_drop(bd, req);
	return rc;
}

int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
				 struct bnge_vnic_info *vnic)
{
	struct hwrm_cfa_l2_set_rx_mask_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_SET_RX_MASK);
	if (rc)
		return rc;

	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
		req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
		req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
	}
	req->mask = cpu_to_le32(vnic->rx_mask);
	return bnge_hwrm_req_send_silent(bd, req);
}

int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
			 unsigned int nr_rings)
{
	struct hwrm_vnic_alloc_output *resp;
	struct hwrm_vnic_alloc_input *req;
	unsigned int i;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_ALLOC);
	if (rc)
		return rc;

	for (i = 0; i < BNGE_MAX_CTX_PER_VNIC; i++)
		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
	if (vnic->vnic_id == BNGE_VNIC_DEFAULT)
		req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (!rc)
		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic)
{
	if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
		struct hwrm_vnic_free_input *req;

		if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_FREE))
			return;

		req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);

		bnge_hwrm_req_send(bd, req);
		vnic->fw_vnic_id = INVALID_HW_RING_ID;
	}
}

void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
				 struct bnge_vnic_info *vnic, u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;

	if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
		return;

	req->rss_cos_lb_ctx_id =
		cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);

	bnge_hwrm_req_send(bd, req);
	vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}

void bnge_hwrm_stat_ctx_free(struct bnge_net *bn)
{
	struct hwrm_stat_ctx_free_input *req;
	struct bnge_dev *bd = bn->bd;
	int i;

	if (bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_FREE))
		return;

	bnge_hwrm_req_hold(bd, req);
	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;

		if (nqr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req->stat_ctx_id = cpu_to_le32(nqr->hw_stats_ctx_id);
			bnge_hwrm_req_send(bd, req);

			nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	bnge_hwrm_req_drop(bd, req);
}

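/* Allocate one firmware statistics context per NQ ring, pointing each
 * at that ring's stats DMA buffer, and record the returned context IDs
 * in the NQ ring info and ring group table.
 */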
int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn)
{
	struct hwrm_stat_ctx_alloc_output *resp;
	struct hwrm_stat_ctx_alloc_input *req;
	struct bnge_dev *bd = bn->bd;
	int rc, i;

	rc = bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_ALLOC);
	if (rc)
		return rc;

	req->stats_dma_length = cpu_to_le16(bd->hw_ring_stats_size);
	req->update_period_ms = cpu_to_le32(bn->stats_coal_ticks / 1000);

	resp = bnge_hwrm_req_hold(bd, req);
	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;

		req->stats_dma_addr = cpu_to_le64(nqr->stats.hw_stats_map);

		rc = bnge_hwrm_req_send(bd, req);
		if (rc)
			break;

		nqr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
		bn->grp_info[i].fw_stats_ctx = nqr->hw_stats_ctx_id;
	}
	bnge_hwrm_req_drop(bd, req);
	return rc;
}

int hwrm_ring_free_send_msg(struct bnge_net *bn,
			    struct bnge_ring_struct *ring,
			    u32 ring_type, int cmpl_ring_id)
{
	struct hwrm_ring_free_input *req;
	struct bnge_dev *bd = bn->bd;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_RING_FREE);
	if (rc)
		goto exit;

	req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
	req->ring_type = ring_type;
	req->ring_id = cpu_to_le16(ring->fw_ring_id);

	bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	bnge_hwrm_req_drop(bd, req);
exit:
	if (rc) {
		netdev_err(bd->netdev, "hwrm_ring_free type %d failed. rc:%d\n", ring_type, rc);
		return -EIO;
	}
	return 0;
}

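/* Allocate a hardware ring of the given type (TX, RX, RX aggregation,
 * L2 completion or NQ).  The ring memory is described by a single page
 * or a page table, type-specific fields tie the ring to its completion
 * ring, stats context, RX ring or NQ, and the firmware ring ID is
 * stored in ring->fw_ring_id on success.
 */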
int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
			     struct bnge_ring_struct *ring,
			     u32 ring_type, u32 map_index)
{
	struct bnge_ring_mem_info *rmem = &ring->ring_mem;
	struct bnge_ring_grp_info *grp_info;
	struct hwrm_ring_alloc_output *resp;
	struct hwrm_ring_alloc_input *req;
	struct bnge_dev *bd = bn->bd;
	u16 ring_id, flags = 0;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_RING_ALLOC);
	if (rc)
		goto exit;

	req->enables = 0;
	if (rmem->nr_pages > 1) {
		req->page_tbl_addr = cpu_to_le64(rmem->dma_pg_tbl);
		/* Page size is in log2 units */
		req->page_size = BNGE_PAGE_SHIFT;
		req->page_tbl_depth = 1;
	} else {
		req->page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
	}
	req->fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req->logical_id = cpu_to_le16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX: {
		struct bnge_tx_ring_info *txr;

		txr = container_of(ring, struct bnge_tx_ring_info,
				   tx_ring_struct);
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		/* Association of transmit ring with completion ring */
		grp_info = &bn->grp_info[ring->grp_idx];
		req->cmpl_ring_id = cpu_to_le16(bnge_cp_ring_for_tx(txr));
		req->length = cpu_to_le32(bn->tx_ring_mask + 1);
		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
		req->queue_id = cpu_to_le16(ring->queue_id);
		req->flags = cpu_to_le16(flags);
		break;
	}
	case HWRM_RING_ALLOC_RX:
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req->length = cpu_to_le32(bn->rx_ring_mask + 1);

		/* Association of rx ring with stats context */
		grp_info = &bn->grp_info[ring->grp_idx];
		req->rx_buf_size = cpu_to_le16(bn->rx_buf_use_size);
		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
		req->enables |=
			cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
		if (NET_IP_ALIGN == 2)
			flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
		req->flags = cpu_to_le16(flags);
		break;
	case HWRM_RING_ALLOC_AGG:
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
		/* Association of agg ring with rx ring */
		grp_info = &bn->grp_info[ring->grp_idx];
		req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
		req->rx_buf_size = cpu_to_le16(BNGE_RX_PAGE_SIZE);
		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
		req->enables |=
			cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
				    RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
		req->length = cpu_to_le32(bn->rx_agg_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_CMPL:
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
		req->length = cpu_to_le32(bn->cp_ring_mask + 1);
		/* Association of cp ring with nq */
		grp_info = &bn->grp_info[map_index];
		req->nq_ring_id = cpu_to_le16(grp_info->nq_fw_ring_id);
		req->cq_handle = cpu_to_le64(ring->handle);
		req->enables |=
			cpu_to_le32(RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
		break;
	case HWRM_RING_ALLOC_NQ:
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
		req->length = cpu_to_le32(bn->cp_ring_mask + 1);
		req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		break;
	default:
		netdev_err(bn->netdev, "hwrm alloc invalid ring type %d\n", ring_type);
		return -EINVAL;
	}

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	ring_id = le16_to_cpu(resp->ring_id);
	bnge_hwrm_req_drop(bd, req);

exit:
	if (rc) {
		netdev_err(bd->netdev, "hwrm_ring_alloc type %d failed. rc:%d\n", ring_type, rc);
		return -EIO;
	}
	ring->fw_ring_id = ring_id;
	return rc;
}

int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
	req->async_event_cr = cpu_to_le16(idx);
	return bnge_hwrm_req_send(bd, req);
}