1 /*-
2  * Broadcom NetXtreme-C/E network driver.
3  *
4  * Copyright (c) 2016 Broadcom, All Rights Reserved.
5  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include <sys/endian.h>
31 #include <linux/pci.h>
32 
33 #include "bnxt.h"
34 #include "bnxt_hwrm.h"
35 #include "hsi_struct_def.h"
36 
37 static int bnxt_hwrm_err_map(uint16_t err);
38 static inline int _is_valid_ether_addr(uint8_t *);
39 static inline void get_random_ether_addr(uint8_t *);
40 static void	bnxt_hwrm_set_link_common(struct bnxt_softc *softc,
41 		    struct hwrm_port_phy_cfg_input *req);
42 static void	bnxt_hwrm_set_pause_common(struct bnxt_softc *softc,
43 		    struct hwrm_port_phy_cfg_input *req);
44 static void	bnxt_hwrm_set_eee(struct bnxt_softc *softc,
45 		    struct hwrm_port_phy_cfg_input *req);
46 
47 /* NVRAM operations have a five minute timeout */
48 #define BNXT_NVM_TIMEO	(5 * 60 * 1000)
49 
50 #define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
51 	BNXT_RX_STATS_EXT_OFFSET(counter##_cos0)
52 
53 #define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
54 	 BNXT_TX_STATS_EXT_OFFSET(counter##_cos0)
55 
56 #define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
57 	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
58 	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
59 	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
60 	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
61 	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
62 	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
63 	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
64 	BNXT_RX_STATS_PRI_ENTRY(counter, 7)
65 
66 #define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
67 	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
68 	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
69 	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
70 	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
71 	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
72 	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
73 	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
74 	BNXT_TX_STATS_PRI_ENTRY(counter, 7)
75 
76 
77 long bnxt_rx_bytes_pri_arr_base_off[] = {BNXT_RX_STATS_PRI_ENTRIES(rx_bytes)};
78 long bnxt_rx_pkts_pri_arr_base_off[] = {BNXT_RX_STATS_PRI_ENTRIES(rx_packets)};
79 long bnxt_tx_bytes_pri_arr_base_off[] = {BNXT_TX_STATS_PRI_ENTRIES(tx_bytes)};
80 long bnxt_tx_pkts_pri_arr_base_off[] = {BNXT_TX_STATS_PRI_ENTRIES(tx_packets)};
81 
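/*
 * Map an HWRM firmware error code to a FreeBSD errno value.  Unknown or
 * firmware-internal errors map to EDOOFUS.
 */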
82 static int
83 bnxt_hwrm_err_map(uint16_t err)
84 {
85 	int rc;
86 
87 	switch (err) {
88 	case HWRM_ERR_CODE_SUCCESS:
89 		return 0;
90 	case HWRM_ERR_CODE_INVALID_PARAMS:
91 	case HWRM_ERR_CODE_INVALID_FLAGS:
92 	case HWRM_ERR_CODE_INVALID_ENABLES:
93 		return EINVAL;
94 	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
95 		return EACCES;
96 	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
97 		return ENOMEM;
98 	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
99 		return ENOSYS;
100 	case HWRM_ERR_CODE_FAIL:
101 		return EIO;
102 	case HWRM_ERR_CODE_HWRM_ERROR:
103 	case HWRM_ERR_CODE_UNKNOWN_ERR:
104 	default:
105 		return EDOOFUS;
106 	}
107 
108 	return rc;
109 }
110 
111 int
112 bnxt_alloc_hwrm_dma_mem(struct bnxt_softc *softc)
113 {
114 	int rc;
115 
116 	rc = iflib_dma_alloc(softc->ctx, PAGE_SIZE, &softc->hwrm_cmd_resp,
117 	    BUS_DMA_NOWAIT);
118 	return rc;
119 }
120 
121 void
122 bnxt_free_hwrm_dma_mem(struct bnxt_softc *softc)
123 {
124 	if (softc->hwrm_cmd_resp.idi_vaddr)
125 		iflib_dma_free(&softc->hwrm_cmd_resp);
126 	softc->hwrm_cmd_resp.idi_vaddr = NULL;
127 	return;
128 }
129 
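/*
 * Fill in the common fields of an HWRM request: the request type, a
 * completion ring and target id of 0xffff, and the DMA address of the
 * shared response buffer.
 */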
130 void
131 bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
132     uint16_t req_type)
133 {
134 	struct input *req = request;
135 
136 	req->req_type = htole16(req_type);
137 	req->cmpl_ring = 0xffff;
138 	req->target_id = 0xffff;
139 	req->resp_addr = htole64(softc->hwrm_cmd_resp.idi_paddr);
140 }
141 
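/*
 * Send an HWRM request to firmware with the HWRM lock already held.  The
 * request (or a short-command descriptor pointing at it) is written into
 * the HWRM channel BAR, the doorbell at offset 0x100 is rung, and the
 * routine then polls for the response length and trailing valid byte for
 * up to hwrm_cmd_timeo milliseconds before mapping any firmware error
 * code to an errno.
 *
 * Typical caller pattern (sketch):
 *
 *	struct hwrm_func_reset_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
 *	rc = hwrm_send_message(softc, &req, sizeof(req));
 */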
142 int
143 _hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
144 {
145 	struct input *req = msg;
146 	struct hwrm_err_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
147 	uint32_t *data = msg;
148 	int i;
149 	uint8_t *valid;
150 	uint16_t err;
151 	uint16_t max_req_len = BNXT_HWRM_MAX_REQ_LEN;
152 	struct hwrm_short_input short_input = {0};
153 
154 	/* TODO: DMASYNC in here. */
155 	req->seq_id = htole16(softc->hwrm_cmd_seq++);
156 	memset(resp, 0, PAGE_SIZE);
157 
158 	if (BNXT_NO_FW_ACCESS(softc) &&
159 	    (req->req_type != HWRM_FUNC_RESET && req->req_type != HWRM_VER_GET))
160 		return -EINVAL;
161 
162 	if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
163 	    msg_len > BNXT_HWRM_MAX_REQ_LEN) {
164 		void *short_cmd_req = softc->hwrm_short_cmd_req_addr.idi_vaddr;
165                 uint16_t max_msg_len;
166 
167                 /* Set boundary for maximum extended request length for short
168                  * cmd format. If passed up from device use the max supported
169                  * internal req length.
170 		 */
171 
172 		max_msg_len = softc->hwrm_max_ext_req_len;
173 
174 
175 		memcpy(short_cmd_req, req, msg_len);
176                 if (msg_len < max_msg_len)
177 			memset((uint8_t *) short_cmd_req + msg_len, 0,
178 				max_msg_len - msg_len);
179 
180 		short_input.req_type = req->req_type;
181 		short_input.signature =
182 		    htole16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
183 		short_input.size = htole16(msg_len);
184 		short_input.req_addr =
185 		    htole64(softc->hwrm_short_cmd_req_addr.idi_paddr);
186 
187 		data = (uint32_t *)&short_input;
188 		msg_len = sizeof(short_input);
189 
190 		/* Sync memory write before updating doorbell */
191 		wmb();
192 
193 		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
194 	}
195 
196 	/* Write request msg to hwrm channel */
197 	for (i = 0; i < msg_len; i += 4) {
198 		bus_space_write_4(softc->hwrm_bar.tag,
199 				  softc->hwrm_bar.handle,
200 				  i, *data);
201 		data++;
202 	}
203 
204 	/* Clear to the end of the request buffer */
205 	for (i = msg_len; i < max_req_len; i += 4)
206 		bus_space_write_4(softc->hwrm_bar.tag, softc->hwrm_bar.handle,
207 		    i, 0);
208 
209 	/* Ring channel doorbell */
210 	bus_space_write_4(softc->hwrm_bar.tag,
211 			  softc->hwrm_bar.handle,
212 			  0x100, htole32(1));
213 
214 	/* Check if response len is updated */
215 	for (i = 0; i < softc->hwrm_cmd_timeo; i++) {
216 		if (resp->resp_len && resp->resp_len <= 4096)
217 			break;
218 		DELAY(1000);
219 	}
220 	if (i >= softc->hwrm_cmd_timeo) {
221 		device_printf(softc->dev,
222 		    "Timeout sending %s: (timeout: %u) seq: %d\n",
223 		    GET_HWRM_REQ_TYPE(req->req_type), softc->hwrm_cmd_timeo,
224 		    le16toh(req->seq_id));
225 		return ETIMEDOUT;
226 	}
227 	/* Last byte of resp contains the valid key */
228 	valid = (uint8_t *)resp + resp->resp_len - 1;
229 	for (i = 0; i < softc->hwrm_cmd_timeo; i++) {
230 		if (*valid == HWRM_RESP_VALID_KEY)
231 			break;
232 		DELAY(1000);
233 	}
234 	if (i >= softc->hwrm_cmd_timeo) {
235 		device_printf(softc->dev, "Timeout sending %s: "
236 		    "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n",
237 		    GET_HWRM_REQ_TYPE(req->req_type),
238 		    softc->hwrm_cmd_timeo, le16toh(req->req_type),
239 		    le16toh(req->seq_id), msg_len,
240 		    *valid);
241 		return ETIMEDOUT;
242 	}
243 
244 	err = le16toh(resp->error_code);
245 	if (err) {
246 		/* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */
247 		if (err != HWRM_ERR_CODE_FAIL) {
248 			device_printf(softc->dev,
249 			    "%s command returned %s error.\n",
250 			    GET_HWRM_REQ_TYPE(req->req_type),
251 			    GET_HWRM_ERROR_CODE(err));
252 		}
253 		return bnxt_hwrm_err_map(err);
254 	}
255 
256 	return 0;
257 }
258 
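/*
 * Locked wrapper around _hwrm_send_message() for callers that do not
 * already hold the HWRM lock.
 */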
259 int
260 hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
261 {
262 	int rc;
263 
264 	BNXT_HWRM_LOCK(softc);
265 	rc = _hwrm_send_message(softc, msg, msg_len);
266 	BNXT_HWRM_UNLOCK(softc);
267 	return rc;
268 }
269 
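/*
 * Query the firmware CoS queue configuration and record the queue ids and
 * profiles for the TX or RX direction, along with the maximum TC and
 * lossless queue counts.
 */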
270 int
271 bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc, uint32_t path_dir)
272 {
273 	int rc = 0;
274 	struct hwrm_queue_qportcfg_input req = {0};
275 	struct hwrm_queue_qportcfg_output *resp =
276 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
277 	uint8_t max_tc, max_lltc, *max_q;
278 	uint8_t queue_profile, queue_id;
279 	struct bnxt_queue_info *q_info;
280 	uint8_t i, j, *qptr, *q_ids;
281 	bool no_rdma;
282 
283 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG);
284 
285 	BNXT_HWRM_LOCK(softc);
286 	rc = _hwrm_send_message(softc, &req, sizeof(req));
287 	if (rc)
288 		goto qportcfg_exit;
289 
290 	if (!resp->max_configurable_queues) {
291 		rc = -EINVAL;
292 		goto qportcfg_exit;
293 	}
294 
295 	if (resp->queue_cfg_info & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG) {
296 		softc->is_asym_q = true;
297 		/* bnxt_init_cosq_names(softc, path_dir); */
298 	} else {
299 		softc->is_asym_q = false;
300 		/* bnxt_free_stats_cosqnames_mem(softc); */
301 	}
302 
303 	max_tc = min_t(uint8_t, resp->max_configurable_queues, BNXT_MAX_QUEUE);
304 	max_lltc = resp->max_configurable_lossless_queues;
305 
306 	/*
307 	 * No RDMA support yet.
308 	 * no_rdma = !(softc->flags & BNXT_FLAG_ROCE_CAP);
309 	 */
310 	no_rdma = true;
311 	qptr = &resp->queue_id0;
312 
313 	if (path_dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
314 		q_info = softc->tx_q_info;
315 		q_ids = softc->tx_q_ids;
316 		max_q = &softc->tx_max_q;
317 	} else {
318 		q_info = softc->rx_q_info;
319 		q_ids = softc->rx_q_ids;
320 		max_q = &softc->rx_max_q;
321 	}
322 
323 	for (i = 0, j = 0; i < max_tc; i++) {
324 		queue_id = *qptr;
325 		qptr++;
326 
327 		queue_profile = *qptr;
328 		qptr++;
329 
330 		q_info[j].queue_id = queue_id;
331 		q_info[j].queue_profile = queue_profile;
332 		q_ids[i] = queue_id;
333 
334 		softc->tc_to_qidx[j] = j;
335 
336 		if (!BNXT_CNPQ(q_info[j].queue_profile) ||
337 		    (no_rdma && BNXT_PF(softc)))
338 			j++;
339 	}
340 	*max_q = max_tc;
341 	max_tc = max_t(uint8_t, j, 1);
342 	softc->max_tc = softc->max_tc ? min(softc->max_tc, max_tc) : max_tc;
343 	softc->max_lltc = softc->max_lltc ? min(softc->max_lltc, max_lltc) : max_lltc;
344 
345 	if (softc->max_lltc > softc->max_tc)
346 		softc->max_lltc = softc->max_tc;
347 
348 qportcfg_exit:
349 	BNXT_HWRM_UNLOCK(softc);
350 	return rc;
351 }
352 
353 static int bnxt_alloc_all_ctx_pg_info(struct bnxt_softc *softc, int ctx_max)
354 {
355 	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
356 	u16 type;
357 
358 	for (type = 0; type < ctx_max; type++) {
359 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
360 		int n = 1;
361 
362 		if (!ctxm->max_entries || ctxm->pg_info)
363 			continue;
364 
365 		if (ctxm->instance_bmap)
366 			n = hweight32(ctxm->instance_bmap);
367 		ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_ATOMIC);
368 		if (!ctxm->pg_info)
369 			return -ENOMEM;
370 	}
371 	return 0;
372 }
373 
374 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
375 				      u8 init_val, u8 init_offset,
376 				      bool init_mask_set)
377 {
378 	ctxm->init_value = init_val;
379 	ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
380 	if (init_mask_set)
381 		ctxm->init_offset = init_offset * 4;
382 	else
383 		ctxm->init_value = 0;
384 }
385 
386 #define BNXT_CTX_INIT_VALID(flags)      \
387         (!!((flags) &                   \
388             HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_ENABLE_CTX_KIND_INIT))
389 
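/*
 * Walk the backing store types reported by the V2 QCAPS interface and
 * record each type's entry size, min/max entries, instance bitmap and
 * context initializer before allocating the per-type page info arrays.
 */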
390 static int
391 bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt_softc *softc)
392 {
393 	struct hwrm_func_backing_store_qcaps_v2_input req = {0};
394 	struct hwrm_func_backing_store_qcaps_v2_output *resp =
395 		(void *)softc->hwrm_cmd_resp.idi_vaddr;
396 	struct bnxt_ctx_mem_info *ctx = NULL;
397 	u16 type;
398 	int rc;
399 
400 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
401 
402 	ctx = malloc(sizeof(*ctx), M_DEVBUF, M_NOWAIT | M_ZERO);
403 	if (!ctx)
404 		return -ENOMEM;
405 
406 	softc->ctx_mem = ctx;
407 
408 	BNXT_HWRM_LOCK(softc);
409 	for (type = 0; type < BNXT_CTX_V2_MAX; ) {
410 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
411 		u8 init_val, init_off, i;
412 		__le32 *p;
413 		u32 flags;
414 
415 		req.type = cpu_to_le16(type);
416 		rc = _hwrm_send_message(softc, &req, sizeof(req));
417 		if (rc)
418 			goto ctx_done;
419 		flags = le32_to_cpu(resp->flags);
420 		type = le16_to_cpu(resp->next_valid_type);
421 		if (!(flags & HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_TYPE_VALID))
422 			continue;
423 
424 		ctxm->type = le16_to_cpu(resp->type);
425 		ctxm->flags = flags;
426 
427 		ctxm->entry_size = le16_to_cpu(resp->entry_size);
428 		ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
429 		ctxm->entry_multiple = resp->entry_multiple;
430 		ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
431 		ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
432 		init_val = resp->ctx_init_value;
433 		init_off = resp->ctx_init_offset;
434 		bnxt_init_ctx_initializer(ctxm, init_val, init_off,
435 					  BNXT_CTX_INIT_VALID(flags));
436 		ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
437 					      BNXT_MAX_SPLIT_ENTRY);
438 		for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
439 		     i++, p++)
440 			ctxm->split[i] = le32_to_cpu(*p);
441 	}
442 	rc = bnxt_alloc_all_ctx_pg_info(softc, BNXT_CTX_V2_MAX);
443 
444 ctx_done:
445 	BNXT_HWRM_UNLOCK(softc);
446 	return rc;
447 }
448 
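/*
 * Query legacy backing store capabilities and populate the context memory
 * table (QP, SRQ, CQ, VNIC, STAT, TQM, MRAV, TIM) used to size host
 * backing store for the device.
 */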
449 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt_softc *softc)
450 {
451 	struct hwrm_func_backing_store_qcaps_input req = {0};
452 	struct hwrm_func_backing_store_qcaps_output *resp =
453 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
454 	int rc;
455 
456 	if (softc->hwrm_spec_code < 0x10902 || softc->ctx_mem)
457 		return 0;
458 
459 	if (BNXT_CHIP_P7(softc)) {
460 		if (softc->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
461 			return bnxt_hwrm_func_backing_store_qcaps_v2(softc);
462 	}
463 
464 	if (BNXT_VF(softc))
465 		return 0;
466 
467 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_QCAPS);
468 	BNXT_HWRM_LOCK(softc);
469 	rc = _hwrm_send_message(softc, &req, sizeof(req));
470 	if (!rc) {
471 		struct bnxt_ctx_mem_type *ctxm;
472 		struct bnxt_ctx_mem_info *ctx;
473 		u8 init_val, init_idx = 0;
474 		u16 init_mask;
475 
476 		ctx = softc->ctx_mem;
477 		if (!ctx) {
478 			ctx = malloc(sizeof(*ctx), M_DEVBUF, M_NOWAIT | M_ZERO);
479 			if (!ctx) {
480 				rc = -ENOMEM;
481 				goto ctx_err;
482 			}
483 			softc->ctx_mem = ctx;
484 		}
485 		init_val = resp->ctx_kind_initializer;
486 		init_mask = le16_to_cpu(resp->ctx_init_mask);
487 
488 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
489 		ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
490 		ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
491 		ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
492 		ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
493 		bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
494 					  (init_mask & (1 << init_idx++)) != 0);
495 
496 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
497 		ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
498 		ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
499 		ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
500 		bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
501 					  (init_mask & (1 << init_idx++)) != 0);
502 
503 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
504 		ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
505 		ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
506 		ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
507 		bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
508 					  (init_mask & (1 << init_idx++)) != 0);
509 
510 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
511 		ctxm->vnic_entries = le32_to_cpu(resp->vnic_max_vnic_entries);
512 		ctxm->max_entries = ctxm->vnic_entries +
513 			le16_to_cpu(resp->vnic_max_ring_table_entries);
514 		ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
515 		bnxt_init_ctx_initializer(ctxm, init_val,
516 					  resp->vnic_init_offset,
517 					  (init_mask & (1 << init_idx++)) != 0);
518 
519 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
520 		ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
521 		ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
522 		bnxt_init_ctx_initializer(ctxm, init_val,
523 					  resp->stat_init_offset,
524 					  (init_mask & (1 << init_idx++)) != 0);
525 
526 		ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
527 		ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
528 		ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
529 		ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
530 		ctxm->entry_multiple = resp->tqm_entries_multiple;
531 		if (!ctxm->entry_multiple)
532 			ctxm->entry_multiple = 1;
533 
534 		memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
535 
536 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
537 		ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
538 		ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
539 		ctxm->mrav_num_entries_units =
540 			le16_to_cpu(resp->mrav_num_entries_units);
541 		bnxt_init_ctx_initializer(ctxm, init_val,
542 					  resp->mrav_init_offset,
543 					  (init_mask & (1 << init_idx++)) != 0);
544 
545 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
546 		ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
547 		ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
548 
549 		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
550 		if (!ctx->tqm_fp_rings_count)
551 			ctx->tqm_fp_rings_count = softc->tx_max_q;
552 		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_LEGACY_RINGS)
553 			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_LEGACY_RINGS;
554 		if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
555 		    softc->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
556 			ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
557 			if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
558 				ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
559 		}
560 		ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
561 		memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
562 		ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
563 
564 		rc = bnxt_alloc_all_ctx_pg_info(softc, BNXT_CTX_MAX);
565 	} else {
566 		rc = 0;
567 	}
568 ctx_err:
569 	BNXT_HWRM_UNLOCK(softc);
570 	return rc;
571 }
572 
573 #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES                 \
574         (HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |                \
575          HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ |               \
576          HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ |                \
577          HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC |              \
578          HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
579 
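/*
 * Encode the page size and indirection level for a context memory ring and
 * point the page directory at either the page table or the single data
 * page, depending on the ring depth.
 */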
580 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, uint8_t *pg_attr,
581 				  uint64_t *pg_dir)
582 {
583 	if (!rmem->nr_pages)
584 		return;
585 
586 	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
587 
588 	if (rmem->depth >= 1) {
589 		if (rmem->depth == 2)
590 			*pg_attr |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2;
591 		else
592 			*pg_attr |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1;
593 		*pg_dir = htole64(rmem->pg_tbl.idi_paddr);
594 	} else {
595 		*pg_dir = htole64(rmem->pg_arr[0].idi_paddr);
596 	}
597 }
598 
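/*
 * Push the host backing store configuration (entry counts, entry sizes and
 * page directories) selected by 'enables' to firmware.
 */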
599 int bnxt_hwrm_func_backing_store_cfg(struct bnxt_softc *softc, uint32_t enables)
600 {
601 	struct hwrm_func_backing_store_cfg_input req = {0};
602 	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
603 	struct bnxt_ctx_pg_info *ctx_pg;
604 	struct bnxt_ctx_mem_type *ctxm;
605 	u32 req_len = sizeof(req);
606 	__le32 *num_entries;
607 	u32 ena, flags = 0;
608 	__le64 *pg_dir;
609 	u8 *pg_attr;
610 	int i;
611 
612 	if (!ctx)
613 		return 0;
614 
615 	if (req_len > softc->hwrm_max_ext_req_len)
616 		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
617 
618 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_CFG);
619 	req.enables = htole32(enables);
620 
621 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
622 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
623 		ctx_pg = ctxm->pg_info;
624 		req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
625 		req.qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
626 		req.qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
627 		req.qp_entry_size = cpu_to_le16(ctxm->entry_size);
628 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
629 				&req.qpc_pg_size_qpc_lvl,
630 				&req.qpc_page_dir);
631 	}
632 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
633 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
634 		ctx_pg = ctxm->pg_info;
635 		req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
636 		req.srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
637 		req.srq_entry_size = cpu_to_le16(ctxm->entry_size);
638 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
639 				&req.srq_pg_size_srq_lvl,
640 				&req.srq_page_dir);
641 	}
642 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
643 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
644 		ctx_pg = ctxm->pg_info;
645 		req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
646 		req.cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
647 		req.cq_entry_size = cpu_to_le16(ctxm->entry_size);
648 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
649 				      &req.cq_pg_size_cq_lvl,
650 				&req.cq_page_dir);
651 	}
652 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV) {
653 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
654 		ctx_pg = ctxm->pg_info;
655 		req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
656 		if (ctxm->mrav_num_entries_units)
657 			flags |=
658 			HWRM_FUNC_BACKING_STORE_CFG_INPUT_FLAGS_MRAV_RESERVATION_SPLIT;
659 		req.mrav_entry_size = cpu_to_le16(ctxm->entry_size);
660 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
661 				&req.mrav_pg_size_mrav_lvl,
662 				&req.mrav_page_dir);
663 	}
664 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM) {
665 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
666 		ctx_pg = ctxm->pg_info;
667 		req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
668 		req.tim_entry_size = cpu_to_le16(ctxm->entry_size);
669 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
670 				&req.tim_pg_size_tim_lvl,
671 				&req.tim_page_dir);
672 	}
673 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
674 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
675 		ctx_pg = ctxm->pg_info;
676 		req.vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
677 		req.vnic_num_ring_table_entries =
678 			cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
679 		req.vnic_entry_size = cpu_to_le16(ctxm->entry_size);
680 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
681 				&req.vnic_pg_size_vnic_lvl,
682 				&req.vnic_page_dir);
683 	}
684 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
685 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
686 		ctx_pg = ctxm->pg_info;
687 		req.stat_num_entries = cpu_to_le32(ctxm->max_entries);
688 		req.stat_entry_size = cpu_to_le16(ctxm->entry_size);
689 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
690 				&req.stat_pg_size_stat_lvl,
691 				&req.stat_page_dir);
692 	}
693 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
694 	for (i = 0, num_entries = &req.tqm_sp_num_entries,
695 			pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
696 			pg_dir = &req.tqm_sp_page_dir,
697 	     ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP,
698 	     ctx_pg = ctxm->pg_info;
699 	     i < BNXT_MAX_TQM_LEGACY_RINGS;
700 	     ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
701 	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
702 		if (!(enables & ena))
703 			continue;
704 
705 		req.tqm_entry_size = cpu_to_le16(ctxm->entry_size);
706 		*num_entries = cpu_to_le32(ctx_pg->entries);
707 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
708 	}
709 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
710 		pg_attr = &req.tqm_ring8_pg_size_tqm_ring_lvl;
711 		pg_dir = &req.tqm_ring8_page_dir;
712 		ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[8];
713 		req.tqm_ring8_num_entries = cpu_to_le32(ctx_pg->entries);
714 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
715 	}
716 	req.flags = cpu_to_le32(flags);
717 	return hwrm_send_message(softc, &req, req_len);
718 }
719 
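/*
 * Query the min/max resource counts (rings, ring groups, L2 contexts,
 * VNICs, stat contexts) available to this function.
 */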
720 int bnxt_hwrm_func_resc_qcaps(struct bnxt_softc *softc, bool all)
721 {
722 	struct hwrm_func_resource_qcaps_output *resp =
723 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
724 	struct hwrm_func_resource_qcaps_input req = {0};
725 	struct bnxt_hw_resc *hw_resc = &softc->hw_resc;
726 	int rc;
727 
728 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESOURCE_QCAPS);
729 	req.fid = htole16(0xffff);
730 
731 	BNXT_HWRM_LOCK(softc);
732 	rc = _hwrm_send_message(softc, &req, sizeof(req));
733 	if (rc) {
734 		rc = -EIO;
735 		goto hwrm_func_resc_qcaps_exit;
736 	}
737 
738 	hw_resc->max_tx_sch_inputs = le16toh(resp->max_tx_scheduler_inputs);
739 	if (!all)
740 		goto hwrm_func_resc_qcaps_exit;
741 
742 	hw_resc->min_rsscos_ctxs = le16toh(resp->min_rsscos_ctx);
743 	hw_resc->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
744 	hw_resc->min_cp_rings = le16toh(resp->min_cmpl_rings);
745 	hw_resc->max_cp_rings = le16toh(resp->max_cmpl_rings);
746 	hw_resc->min_tx_rings = le16toh(resp->min_tx_rings);
747 	hw_resc->max_tx_rings = le16toh(resp->max_tx_rings);
748 	hw_resc->min_rx_rings = le16toh(resp->min_rx_rings);
749 	hw_resc->max_rx_rings = le16toh(resp->max_rx_rings);
750 	hw_resc->min_hw_ring_grps = le16toh(resp->min_hw_ring_grps);
751 	hw_resc->max_hw_ring_grps = le16toh(resp->max_hw_ring_grps);
752 	hw_resc->min_l2_ctxs = le16toh(resp->min_l2_ctxs);
753 	hw_resc->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
754 	hw_resc->min_vnics = le16toh(resp->min_vnics);
755 	hw_resc->max_vnics = le16toh(resp->max_vnics);
756 	hw_resc->min_stat_ctxs = le16toh(resp->min_stat_ctx);
757 	hw_resc->max_stat_ctxs = le16toh(resp->max_stat_ctx);
758 
759 	if (BNXT_CHIP_P5_PLUS(softc)) {
760 		hw_resc->max_nqs = le16toh(resp->max_msix);
761 		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
762 	}
763 
764 hwrm_func_resc_qcaps_exit:
765 	BNXT_HWRM_UNLOCK(softc);
766 	return rc;
767 }
768 
769 int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt_softc *softc,
770 					struct bnxt_ctx_mem_type *ctxm,
771 					bool last)
772 {
773 	struct hwrm_func_backing_store_cfg_v2_input req = {0};
774 	u32 instance_bmap = ctxm->instance_bmap;
775 	int i, j, rc = 0, n = 1;
776 	__le32 *p;
777 
778 	if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
779 		return 0;
780 
781 	if (instance_bmap)
782 		n = hweight32(ctxm->instance_bmap);
783 	else
784 		instance_bmap = 1;
785 
786 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_CFG_V2);
787 
788 	BNXT_HWRM_LOCK(softc);
789 	req.type = cpu_to_le16(ctxm->type);
790 	req.entry_size = cpu_to_le16(ctxm->entry_size);
791 	req.subtype_valid_cnt = ctxm->split_entry_cnt;
792 	for (i = 0, p = &req.split_entry_0; i < ctxm->split_entry_cnt; i++)
793 		p[i] = cpu_to_le32(ctxm->split[i]);
794 	for (i = 0, j = 0; j < n && !rc; i++) {
795 		struct bnxt_ctx_pg_info *ctx_pg;
796 
797 		if (!(instance_bmap & (1 << i)))
798 			continue;
799 		req.instance = cpu_to_le16(i);
800 		ctx_pg = &ctxm->pg_info[j++];
801 		if (!ctx_pg->entries)
802 			continue;
803 		req.num_entries = cpu_to_le32(ctx_pg->entries);
804 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
805 				      &req.page_size_pbl_level,
806 				      &req.page_dir);
807 		if (last && j == n)
808 			req.flags =
809 				cpu_to_le32(HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_FLAGS_BS_CFG_ALL_DONE);
810 		rc = _hwrm_send_message(softc, &req, sizeof(req));
811 	}
812 	BNXT_HWRM_UNLOCK(softc);
813 	return rc;
814 }
815 
816 int
817 bnxt_hwrm_passthrough(struct bnxt_softc *softc, void *req, uint32_t req_len,
818 		void *resp, uint32_t resp_len, uint32_t app_timeout)
819 {
820 	int rc = 0;
821 	void *output = (void *)softc->hwrm_cmd_resp.idi_vaddr;
822 	struct input *input = req;
823 	uint32_t old_timeo;
824 
825 	input->resp_addr = htole64(softc->hwrm_cmd_resp.idi_paddr);
826 	BNXT_HWRM_LOCK(softc);
827 	old_timeo = softc->hwrm_cmd_timeo;
828 	if (input->req_type == HWRM_NVM_INSTALL_UPDATE)
829 		softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
830 	else
831 		softc->hwrm_cmd_timeo = max(app_timeout, softc->hwrm_cmd_timeo);
832 	rc = _hwrm_send_message(softc, req, req_len);
833 	softc->hwrm_cmd_timeo = old_timeo;
834 	if (rc) {
835 		device_printf(softc->dev, "%s: %s command failed with rc: 0x%x\n",
836 			      __FUNCTION__, GET_HWRM_REQ_TYPE(input->req_type), rc);
837 		goto fail;
838 	}
839 
840 	memcpy(resp, output, resp_len);
841 fail:
842 	BNXT_HWRM_UNLOCK(softc);
843 	return rc;
844 }
845 
846 
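/*
 * Query firmware and HWRM interface versions, record device capability
 * flags, and size the request channel (maximum request length, short
 * command support, default command timeout).
 */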
847 int
848 bnxt_hwrm_ver_get(struct bnxt_softc *softc)
849 {
850 	struct hwrm_ver_get_input	req = {0};
851 	struct hwrm_ver_get_output	*resp =
852 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
853 	int				rc;
854 	const char nastr[] = "<not installed>";
855 	const char naver[] = "<N/A>";
856 	uint32_t dev_caps_cfg;
857 	uint16_t fw_maj, fw_min, fw_bld, fw_rsv, len;
858 
859 	softc->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
860 	softc->hwrm_cmd_timeo = 1000;
861 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET);
862 
863 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
864 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
865 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
866 
867 	BNXT_HWRM_LOCK(softc);
868 	rc = _hwrm_send_message(softc, &req, sizeof(req));
869 	if (rc)
870 		goto fail;
871 
872 	snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
873 	    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, resp->hwrm_intf_upd_8b);
874 	softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj_8b;
875 	softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min_8b;
876 	softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd_8b;
877 	strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
878 	    BNXT_VERSTR_SIZE);
879 	strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
880 	    BNXT_NAME_SIZE);
881 
882 	 softc->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
883                              resp->hwrm_intf_min_8b << 8 |
884                              resp->hwrm_intf_upd_8b;
885 	if (resp->hwrm_intf_maj_8b < 1) {
886 		 device_printf(softc->dev, "HWRM interface %d.%d.%d is older "
887 			       "than 1.0.0.\n", resp->hwrm_intf_maj_8b,
888 			       resp->hwrm_intf_min_8b, resp->hwrm_intf_upd_8b);
889 		 device_printf(softc->dev, "Please update firmware with HWRM "
890 				"interface 1.0.0 or newer.\n");
891 	 }
892 	if (resp->mgmt_fw_major == 0 && resp->mgmt_fw_minor == 0 &&
893 	    resp->mgmt_fw_build == 0) {
894 		strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE);
895 		strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
896 	}
897 	else {
898 		snprintf(softc->ver_info->mgmt_fw_ver, FW_VER_STR_LEN,
899 		    "%d.%d.%d.%d", resp->mgmt_fw_major, resp->mgmt_fw_minor,
900 		    resp->mgmt_fw_build, resp->mgmt_fw_patch);
901 		strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
902 		    BNXT_NAME_SIZE);
903 	}
904 	if (resp->netctrl_fw_major == 0 && resp->netctrl_fw_minor == 0 &&
905 	    resp->netctrl_fw_build == 0) {
906 		strlcpy(softc->ver_info->netctrl_fw_ver, naver,
907 		    BNXT_VERSTR_SIZE);
908 		strlcpy(softc->ver_info->netctrl_fw_name, nastr,
909 		    BNXT_NAME_SIZE);
910 	}
911 	else {
912 		snprintf(softc->ver_info->netctrl_fw_ver, FW_VER_STR_LEN,
913 		    "%d.%d.%d.%d", resp->netctrl_fw_major, resp->netctrl_fw_minor,
914 		    resp->netctrl_fw_build, resp->netctrl_fw_patch);
915 		strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
916 		    BNXT_NAME_SIZE);
917 	}
918 	if (resp->roce_fw_major == 0 && resp->roce_fw_minor == 0 &&
919 	    resp->roce_fw_build == 0) {
920 		strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE);
921 		strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE);
922 	}
923 	else {
924 		snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
925 		    "%d.%d.%d.%d", resp->roce_fw_major, resp->roce_fw_minor,
926 		    resp->roce_fw_build, resp->roce_fw_patch);
927 		strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
928 		    BNXT_NAME_SIZE);
929 	}
930 
931 	fw_maj = le32toh(resp->hwrm_fw_major);
932 	if (softc->hwrm_spec_code > 0x10803 && fw_maj) {
933 		fw_min = le16toh(resp->hwrm_fw_minor);
934 		fw_bld = le16toh(resp->hwrm_fw_build);
935 		fw_rsv = le16toh(resp->hwrm_fw_patch);
936 		len = FW_VER_STR_LEN;
937 	} else {
938 		fw_maj = resp->hwrm_fw_maj_8b;
939 		fw_min = resp->hwrm_fw_min_8b;
940 		fw_bld = resp->hwrm_fw_bld_8b;
941 		fw_rsv = resp->hwrm_fw_rsvd_8b;
942 		len = BC_HWRM_STR_LEN;
943 	}
944 
945 	softc->ver_info->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
946 	snprintf (softc->ver_info->fw_ver_str, len, "%d.%d.%d.%d",
947 			fw_maj, fw_min, fw_bld, fw_rsv);
948 
949 	if (strlen(resp->active_pkg_name)) {
950 		int fw_ver_len = strlen (softc->ver_info->fw_ver_str);
951 
952 		snprintf(softc->ver_info->fw_ver_str + fw_ver_len,
953 				FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
954 				resp->active_pkg_name);
955 		softc->fw_cap |= BNXT_FW_CAP_PKG_VER;
956 	}
957 
958 	softc->ver_info->chip_num = le16toh(resp->chip_num);
959 	softc->ver_info->chip_rev = resp->chip_rev;
960 	softc->ver_info->chip_metal = resp->chip_metal;
961 	softc->ver_info->chip_bond_id = resp->chip_bond_id;
962 	softc->ver_info->chip_type = resp->chip_platform_type;
963 
964 	if (resp->hwrm_intf_maj_8b >= 1) {
965 		softc->hwrm_max_req_len = le16toh(resp->max_req_win_len);
966 		softc->hwrm_max_ext_req_len = le16toh(resp->max_ext_req_len);
967 	}
968 	softc->hwrm_cmd_timeo = le16toh(resp->def_req_timeout);
969 	if (!softc->hwrm_cmd_timeo)
970 		softc->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
971 
972 	dev_caps_cfg = le32toh(resp->dev_caps_cfg);
973 	if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
974 	    (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
975 		softc->flags |= BNXT_FLAG_SHORT_CMD;
976 
977 	if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
978 	    (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
979 		softc->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
980 
981 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
982 		softc->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
983 
984 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
985 		softc->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
986 
987 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
988 		softc->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
989 
990 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
991 		softc->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
992 
993 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_EEM_SUPPORTED)
994 		softc->fw_cap |= BNXT_FW_CAP_CFA_EEM;
995 
996 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED)
997 		softc->fw_cap |= BNXT_FW_CAP_TRUFLOW_EN;
998 
999 fail:
1000 	BNXT_HWRM_UNLOCK(softc);
1001 	return rc;
1002 }
1003 
1004 static const u16 bnxt_async_events_arr[] = {
1005 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
1006 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
1007 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
1008 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
1009 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
1010 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
1011 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
1012 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
1013 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
1014 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
1015 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE,
1016 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
1017 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
1018 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
1019 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
1020 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
1021 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
1022 };
1023 
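/*
 * Register the driver with firmware, advertising the OS type, driver HWRM
 * version and the set of async events the driver wants forwarded.
 */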
1024 int bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *bp, unsigned long *bmap, int bmap_size,
1025 			    bool async_only)
1026 {
1027 	DECLARE_BITMAP(async_events_bmap, 256);
1028 	u32 *events = (u32 *)async_events_bmap;
1029 	struct hwrm_func_drv_rgtr_output *resp =
1030 		(void *)bp->hwrm_cmd_resp.idi_vaddr;
1031 	struct hwrm_func_drv_rgtr_input req = {0};
1032 	u32 flags = 0;
1033 	int rc;
1034 	int i;
1035 
1036 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR);
1037 	req.ver_maj = HWRM_VERSION_MAJOR;
1038 	req.ver_min = HWRM_VERSION_MINOR;
1039 	req.ver_upd = HWRM_VERSION_UPDATE;
1040 
1041 	req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE |
1042 				   HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
1043 				   HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
1044 
1045 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
1046 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
1047 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
1048 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT |
1049 			 HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
1050 	if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
1051 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_NPAR_1_2_SUPPORT;
1052 	flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ASYM_QUEUE_CFG_SUPPORT;
1053 	req.flags = htole32(flags);
1054 	req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);
1055 
1056 	if (BNXT_PF(bp)) {
1057 		req.enables |=
1058 			htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
1059 	}
1060 
1061 	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
1062 		req.flags |= cpu_to_le32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FLOW_HANDLE_64BIT_MODE);
1063 
1064 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
1065 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
1066 		u16 event_id = bnxt_async_events_arr[i];
1067 
1068 		if (event_id == HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
1069 		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
1070 			continue;
1071 		}
1072 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
1073 	}
1074 	if (bmap && bmap_size) {
1075 		for (i = 0; i < bmap_size; i++) {
1076 			if (test_bit(i, bmap))
1077 				__set_bit(i, async_events_bmap);
1078 		}
1079 	}
1080 	for (i = 0; i < 8; i++)
1081 		req.async_event_fwd[i] |= htole32(events[i]);
1082 
1083 	if (async_only)
1084 		req.enables =
1085 			htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
1086 
1087 	rc = hwrm_send_message(bp, &req, sizeof(req));
1088 
1089 	if (!rc) {
1090 		if (resp->flags &
1091 		    le32toh(HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED))
1092 			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
1093 	}
1094 
1095 
1096 	return rc;
1097 }
1098 
1099 int
1100 bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown)
1101 {
1102 	struct hwrm_func_drv_unrgtr_input req = {0};
1103 
1104 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR);
1105 	if (shutdown == true)
1106 		req.flags |=
1107 		    HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN;
1108 	return hwrm_send_message(softc, &req, sizeof(req));
1109 }
1110 
1111 static inline int
1112 _is_valid_ether_addr(uint8_t *addr)
1113 {
1114 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
1115 
1116 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
1117 		return (FALSE);
1118 
1119 	return (TRUE);
1120 }
1121 
1122 static inline void
1123 get_random_ether_addr(uint8_t *addr)
1124 {
1125 	uint8_t temp[ETHER_ADDR_LEN];
1126 
1127 	arc4rand(&temp, sizeof(temp), 0);
1128 	temp[0] &= 0xFE;
1129 	temp[0] |= 0x02;
1130 	bcopy(temp, addr, sizeof(temp));
1131 }
1132 
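/*
 * Query function capabilities and record the firmware capability flags,
 * MAC address and per-function resource maxima in the softc.
 */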
1133 int
1134 bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
1135 {
1136 	int rc = 0;
1137 	struct hwrm_func_qcaps_input req = {0};
1138 	struct hwrm_func_qcaps_output *resp =
1139 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
1140 	struct bnxt_func_info *func = &softc->func;
1141 	uint32_t flags, flags_ext, flags_ext2;
1142 
1143 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS);
1144 	req.fid = htole16(0xffff);
1145 
1146 	BNXT_HWRM_LOCK(softc);
1147 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1148 	if (rc)
1149 		goto fail;
1150 
1151 	flags = htole32(resp->flags);
1152 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED)
1153 		softc->flags |= BNXT_FLAG_WOL_CAP;
1154 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
1155 		softc->flags |= BNXT_FLAG_FW_CAP_EXT_STATS;
1156 
1157 	/* Enable RoCE only on Thor devices */
1158 	if (BNXT_CHIP_P5_PLUS(softc)) {
1159 		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED)
1160 			softc->flags |= BNXT_FLAG_ROCEV1_CAP;
1161 		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED)
1162 			softc->flags |= BNXT_FLAG_ROCEV2_CAP;
1163 	}
1164 
1165 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
1166 		softc->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
1167 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED)
1168 		softc->fw_cap |= BNXT_FW_CAP_ADMIN_PF;
1169 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
1170 		softc->fw_cap |= BNXT_FW_CAP_HOT_RESET;
1171 	if (flags &  HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE)
1172 		softc->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
1173 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PCIE_STATS_SUPPORTED)
1174 		softc->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
1175 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
1176 		softc->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
1177 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
1178 		softc->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
1179 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED)
1180 		softc->fw_cap |= BNXT_FW_CAP_VF_VNIC_NOTIFY;
1181 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_CRASHDUMP_CMD_SUPPORTED)
1182 		softc->fw_cap |= BNXT_FW_CAP_CRASHDUMP;
1183 	if (!(flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
1184 		softc->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
1185 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
1186 		softc->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
1187 
1188 	flags_ext = htole32(resp->flags_ext);
1189 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
1190 		softc->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
1191 	if (BNXT_PF(softc) && (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_ECN_STATS_SUPPORTED))
1192 		softc->fw_cap |= BNXT_FW_CAP_ECN_STATS;
1193 
1194 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_PTP_PPS_SUPPORTED)
1195 		softc->fw_cap |= BNXT_FW_CAP_PTP_PPS;
1196 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_PTP_PTM_SUPPORTED)
1197 		softc->fw_cap |= BNXT_FW_CAP_PTP_PTM;
1198 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
1199 		softc->fw_cap |= BNXT_FW_CAP_PTP_RTC;
1200 	if (BNXT_PF(softc) && (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
1201 		softc->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
1202 	if (BNXT_PF(softc) && (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
1203 		softc->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
1204 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_NPAR_1_2_SUPPORTED)
1205 		softc->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
1206 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_BS_V2_SUPPORTED)
1207 		softc->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
1208 	if (BNXT_PF(softc) &&
1209 	    (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED))
1210 		softc->fw_cap |= BNXT_FW_CAP_VF_CFG_FOR_PF;
1211 
1212 	flags_ext2 = htole32(resp->flags_ext2);
1213 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
1214 		softc->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
1215 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED)
1216 		softc->fw_cap |= BNXT_FW_CAP_DBR_SUPPORTED;
1217 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ||
1218 	    flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_DBR_PACING_SUPPORTED)
1219 		softc->fw_cap |= BNXT_FW_CAP_DBR_PACING_SUPPORTED;
1220 
1221 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
1222 		softc->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
1223 
1224 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_GENERIC_STATS_SUPPORTED)
1225 		softc->fw_cap |= BNXT_FW_CAP_GENERIC_STATS;
1226 	func->fw_fid = le16toh(resp->fid);
1227 	memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
1228 	func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
1229 	func->max_cp_rings = le16toh(resp->max_cmpl_rings);
1230 	func->max_tx_rings = le16toh(resp->max_tx_rings);
1231 	func->max_rx_rings = le16toh(resp->max_rx_rings);
1232 	func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps);
1233 	if (!func->max_hw_ring_grps)
1234 		func->max_hw_ring_grps = func->max_tx_rings;
1235 	func->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
1236 	func->max_vnics = le16toh(resp->max_vnics);
1237 	func->max_stat_ctxs = le16toh(resp->max_stat_ctx);
1238 	if (BNXT_PF(softc)) {
1239 		struct bnxt_pf_info *pf = &softc->pf;
1240 
1241 		pf->port_id = le16toh(resp->port_id);
1242 		pf->first_vf_id = le16toh(resp->first_vf_id);
1243 		pf->max_vfs = le16toh(resp->max_vfs);
1244 		pf->max_encap_records = le32toh(resp->max_encap_records);
1245 		pf->max_decap_records = le32toh(resp->max_decap_records);
1246 		pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows);
1247 		pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows);
1248 		pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows);
1249 		pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows);
1250 	}
1251 	if (!_is_valid_ether_addr(func->mac_addr)) {
1252 		device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n");
1253 		get_random_ether_addr(func->mac_addr);
1254 	}
1255 
1256 fail:
1257 	BNXT_HWRM_UNLOCK(softc);
1258 	return rc;
1259 }
1260 
1261 int
1262 bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
1263 {
1264 	struct hwrm_func_qcfg_input req = {0};
1265 	struct hwrm_func_qcfg_output *resp =
1266 		(void *)softc->hwrm_cmd_resp.idi_vaddr;
1267 	struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg;
1268 	uint32_t min_db_offset = 0;
1269 	uint16_t flags;
1270 	int rc;
1271 
1272 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
1273 	req.fid = htole16(0xffff);
1274 	BNXT_HWRM_LOCK(softc);
1275 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1276 	if (rc)
1277 		goto end;
1278 
1279 	fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
1280 	fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
1281 	fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
1282 	fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
1283 
1284 	switch (resp->port_partition_type) {
1285 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1286 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_2:
1287 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1288 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1289 		softc->port_partition_type = resp->port_partition_type;
1290 		break;
1291 	}
1292 
1293 	flags = le16toh(resp->flags);
1294 	if (flags & (HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED |
1295 		     HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED)) {
1296 		softc->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
1297 		if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED)
1298 			softc->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
1299 	}
1300 	if (BNXT_PF(softc) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
1301 		softc->flags |= BNXT_FLAG_MULTI_HOST;
1302 	if (BNXT_PF(softc) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_ROOT))
1303 		softc->flags |= BNXT_FLAG_MULTI_ROOT;
1304 	if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_SECURE_MODE_ENABLED)
1305 		softc->fw_cap |= BNXT_FW_CAP_SECURE_MODE;
1306 	if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_RING_MONITOR_ENABLED)
1307 		softc->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
1308 	if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_ENABLE_RDMA_SRIOV)
1309 		softc->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
1310 
1311 	if (softc->db_size)
1312 		goto end;
1313 
1314 	softc->legacy_db_size = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
1315 	softc->db_offset = le16toh(resp->legacy_l2_db_size_kb) * 1024;
1316 
1317 	if (BNXT_CHIP_P5(softc)) {
1318 		if (BNXT_PF(softc))
1319 			min_db_offset = DB_PF_OFFSET_P5;
1320 		else
1321 			min_db_offset = DB_VF_OFFSET_P5;
1322 		softc->legacy_db_size = min_db_offset;
1323 		softc->db_offset = min_db_offset;
1324 	}
1325 
1326 	softc->db_size = roundup2(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
1327 			1024, PAGE_SIZE);
1328 	if (!softc->db_size || softc->db_size > pci_resource_len(softc->pdev, 2) ||
1329 			softc->db_size <= min_db_offset)
1330 		softc->db_size = pci_resource_len(softc->pdev, 2);
1331 
1332 	end:
1333 	BNXT_HWRM_UNLOCK(softc);
1334 	return rc;
1335 }
1336 
1337 int
1338 bnxt_hwrm_func_reset(struct bnxt_softc *softc)
1339 {
1340 	struct hwrm_func_reset_input req = {0};
1341 
1342 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
1343 	req.enables = 0;
1344 
1345 	return hwrm_send_message(softc, &req, sizeof(req));
1346 }
1347 
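/*
 * Fill in the speed/autoneg portion of a port_phy_cfg request from the
 * cached link settings.
 */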
1348 static void
1349 bnxt_hwrm_set_link_common(struct bnxt_softc *softc,
1350     struct hwrm_port_phy_cfg_input *req)
1351 {
1352 	struct bnxt_link_info *link_info = &softc->link_info;
1353 	uint8_t autoneg = softc->link_info.autoneg;
1354 	uint16_t fw_link_speed = softc->link_info.req_link_speed;
1355 
1356 	if (autoneg & BNXT_AUTONEG_SPEED) {
1357 		uint8_t phy_type = get_phy_type(softc);
1358 
1359 		if (phy_type == HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET ||
1360 		    phy_type == HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
1361 		    phy_type == HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE) {
1362 
1363 			req->auto_mode |= htole32(HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK);
1364 			if (link_info->advertising) {
1365 				req->enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK);
1366 				req->auto_link_speed_mask = htole16(link_info->advertising);
1367 			}
1368 		} else {
1369 			req->auto_mode |= HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1370 		}
1371 
1372 		req->enables |=
1373 		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE);
1374 		req->flags |=
1375 		    htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
1376 	} else {
1377 
1378 		if (link_info->force_speed2_nrz ||
1379 		    link_info->force_pam4_56_speed2 ||
1380 		    link_info->force_pam4_112_speed2) {
1381 			req->force_link_speeds2 = htole16(fw_link_speed);
1382 			req->enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_LINK_SPEEDS2);
1383 			link_info->force_speed2_nrz = false;
1384 			link_info->force_pam4_56_speed2 = false;
1385 			link_info->force_pam4_112_speed2 = false;
1386 		} else if (link_info->force_pam4_speed) {
1387 			req->force_pam4_link_speed = htole16(fw_link_speed);
1388 			req->enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAM4_LINK_SPEED);
1389 			link_info->force_pam4_speed = false;
1390 		} else {
1391 			req->force_link_speed = htole16(fw_link_speed);
1392 		}
1393 
1394 		req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
1395 	}
1396 
1397 	/* tell chimp that the setting takes effect immediately */
1398 	req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
1399 }
1400 
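/*
 * Fill in the pause (flow control) portion of a port_phy_cfg request,
 * using either autonegotiated or forced pause settings.
 */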
1401 static void
1402 bnxt_hwrm_set_pause_common(struct bnxt_softc *softc,
1403     struct hwrm_port_phy_cfg_input *req)
1404 {
1405 	struct bnxt_link_info *link_info = &softc->link_info;
1406 
1407 	if (link_info->flow_ctrl.autoneg) {
1408 		req->auto_pause =
1409 		    HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE;
1410 		if (link_info->flow_ctrl.rx)
1411 			req->auto_pause |=
1412 			    HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1413 		if (link_info->flow_ctrl.tx)
1414 			req->auto_pause |=
1415 			    HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1416 		req->enables |=
1417 		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE);
1418 	} else {
1419 		if (link_info->flow_ctrl.rx)
1420 			req->force_pause |=
1421 			    HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1422 		if (link_info->flow_ctrl.tx)
1423 			req->force_pause |=
1424 			    HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1425 		req->enables |=
1426 			htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE);
1427 		req->auto_pause = req->force_pause;
1428 		req->enables |=
1429 		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE);
1430 	}
1431 }
1432 
1433 /* JFV this needs interface connection */
1434 static void
1435 bnxt_hwrm_set_eee(struct bnxt_softc *softc, struct hwrm_port_phy_cfg_input *req)
1436 {
1437 	/* struct ethtool_eee *eee = &softc->eee; */
1438 	bool	eee_enabled = false;
1439 
1440 	if (eee_enabled) {
1441 #if 0
1442 		uint16_t eee_speeds;
1443 		uint32_t flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE;
1444 
1445 		if (eee->tx_lpi_enabled)
1446 			flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI;
1447 
1448 		req->flags |= htole32(flags);
1449 		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
1450 		req->eee_link_speed_mask = htole16(eee_speeds);
1451 		req->tx_lpi_timer = htole32(eee->tx_lpi_timer);
1452 #endif
1453 	} else {
1454 		req->flags |=
1455 		    htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE);
1456 	}
1457 }
1458 
1459 int
1460 bnxt_hwrm_set_link_setting(struct bnxt_softc *softc, bool set_pause,
1461     bool set_eee, bool set_link)
1462 {
1463 	struct hwrm_port_phy_cfg_input req = {0};
1464 	int rc;
1465 
1466 	if (softc->flags & BNXT_FLAG_NPAR)
1467 		return ENOTSUP;
1468 
1469 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_CFG);
1470 
1471 	if (set_pause) {
1472 		bnxt_hwrm_set_pause_common(softc, &req);
1473 
1474 		if (softc->link_info.flow_ctrl.autoneg)
1475 			set_link = true;
1476 	}
1477 
1478 	if (set_link)
1479 		bnxt_hwrm_set_link_common(softc, &req);
1480 
1481 	if (set_eee)
1482 		bnxt_hwrm_set_eee(softc, &req);
1483 
1484 	BNXT_HWRM_LOCK(softc);
1485 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1486 
1487 	if (!rc) {
1488 		if (set_pause) {
1489 			/* Since changing the 'force pause' setting doesn't
1490 			 * trigger any link change event, the driver needs to
1491 			 * update the current pause result upon successful
1492 			 * return of the phy_cfg command. */
1493 			if (!softc->link_info.flow_ctrl.autoneg)
1494 				bnxt_report_link(softc);
1495 		}
1496 	}
1497 	BNXT_HWRM_UNLOCK(softc);
1498 	return rc;
1499 }
1500 
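/*
 * Set the VNIC buffer placement mode via VNIC_PLCMODES_CFG (jumbo
 * placement).  Only needed on P5 and newer chips.
 */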
1501 int
1502 bnxt_hwrm_vnic_set_hds(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1503 {
1504 	struct hwrm_vnic_plcmodes_cfg_input req = {0};
1505 
1506 	if (!BNXT_CHIP_P5_PLUS(softc))
1507 		return 0;
1508 
1509 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_PLCMODES_CFG);
1510 
1511 	req.flags = htole32(HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1512 	req.vnic_id = htole16(vnic->id);
1513 
1514 	return hwrm_send_message(softc, &req, sizeof(req));
1515 }
1516 
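/*
 * Configure a VNIC: default/BD-stall/VLAN-strip flags, the default RX and
 * completion rings on P5+ chips (ring group and RSS rule on older chips),
 * plus the RSS/CoS/loopback rules and MRU.
 */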
1517 int
1518 bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1519 {
1520 	struct hwrm_vnic_cfg_input req = {0};
1521 
1522 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG);
1523 
1524 	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
1525 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1526 	if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL)
1527 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1528 	if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP)
1529 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1530 	if (BNXT_CHIP_P5_PLUS(softc)) {
1531 		req.default_rx_ring_id =
1532 			htole16(softc->rx_rings[0].phys_id);
1533 		req.default_cmpl_ring_id =
1534 			htole16(softc->rx_cp_rings[0].ring.phys_id);
1535 		req.enables |=
1536 			htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1537 			    HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID);
1538 		req.vnic_id = htole16(vnic->id);
1539 	} else {
1540 		req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
1541 				HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE);
1542 		req.vnic_id = htole16(vnic->id);
1543 		req.dflt_ring_grp = htole16(vnic->def_ring_grp);
1544 	}
1545 	req.rss_rule = htole16(vnic->rss_id);
1546 	req.cos_rule = htole16(vnic->cos_rule);
1547 	req.lb_rule = htole16(vnic->lb_rule);
1548 	req.enables |= htole32(HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
1549 	req.mru = htole16(vnic->mru);
1550 
1551 	return hwrm_send_message(softc, &req, sizeof(req));
1552 }
1553 
1554 int
1555 bnxt_hwrm_vnic_free(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1556 {
1557 	struct hwrm_vnic_free_input req = {0};
1558 	int rc = 0;
1559 
1560 	if (vnic->id == (uint16_t)HWRM_NA_SIGNATURE)
1561 		return rc;
1562 
1563 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_FREE);
1564 
1565 	req.vnic_id = htole32(vnic->id);
1566 
1567 	BNXT_HWRM_LOCK(softc);
1568 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1569 	if (rc)
1570 		goto fail;
1571 
1572 fail:
1573 	BNXT_HWRM_UNLOCK(softc);
1574 	return (rc);
1575 }
1576 
1577 int
1578 bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1579 {
1580 	struct hwrm_vnic_alloc_input req = {0};
1581 	struct hwrm_vnic_alloc_output *resp =
1582 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
1583 	int rc;
1584 
1585 	if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE) {
1586 		device_printf(softc->dev,
1587 		    "Attempt to re-allocate vnic %04x\n", vnic->id);
1588 		return EDOOFUS;
1589 	}
1590 
1591 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC);
1592 
1593 	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
1594 		req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1595 
1596 	BNXT_HWRM_LOCK(softc);
1597 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1598 	if (rc)
1599 		goto fail;
1600 
1601 	vnic->id = le32toh(resp->vnic_id);
1602 
1603 fail:
1604 	BNXT_HWRM_UNLOCK(softc);
1605 	return (rc);
1606 }
1607 
1608 int
1609 bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *softc, uint16_t ctx_id)
1610 {
1611 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
1612 	int rc = 0;
1613 
1614 	if (ctx_id == (uint16_t)HWRM_NA_SIGNATURE)
1615 		return rc;
1616 
1617 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE);
1618 	req.rss_cos_lb_ctx_id = htole16(ctx_id);
1619 	BNXT_HWRM_LOCK(softc);
1620 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1621 	if (rc)
1622 		goto fail;
1623 
1624 fail:
1625 	BNXT_HWRM_UNLOCK(softc);
1626 	return rc;
1627 }
1628 
1629 int
1630 bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id)
1631 {
1632 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
1633 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1634 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
1635 	int rc;
1636 
1637 	if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE) {
1638 		device_printf(softc->dev,
1639 		    "Attempt to re-allocate vnic ctx %04x\n", *ctx_id);
1640 		return EDOOFUS;
1641 	}
1642 
1643 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
1644 
1645 	BNXT_HWRM_LOCK(softc);
1646 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1647 	if (rc)
1648 		goto fail;
1649 
1650 	*ctx_id = le32toh(resp->rss_cos_lb_ctx_id);
1651 
1652 fail:
1653 	BNXT_HWRM_UNLOCK(softc);
1654 	return (rc);
1655 }
1656 
1657 int
1658 bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
1659 {
1660 	struct hwrm_ring_grp_alloc_input req = {0};
1661 	struct hwrm_ring_grp_alloc_output *resp;
1662 	int rc = 0;
1663 
1664 	if (grp->grp_id != (uint16_t)HWRM_NA_SIGNATURE) {
1665 		device_printf(softc->dev,
1666 		    "Attempt to re-allocate ring group %04x\n", grp->grp_id);
1667 		return EDOOFUS;
1668 	}
1669 
1670 	if (BNXT_CHIP_P5_PLUS(softc))
1671 		return 0;
1672 
1673 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1674 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC);
1675 	req.cr = htole16(grp->cp_ring_id);
1676 	req.rr = htole16(grp->rx_ring_id);
1677 	req.ar = htole16(grp->ag_ring_id);
1678 	req.sc = htole16(grp->stats_ctx);
1679 
1680 	BNXT_HWRM_LOCK(softc);
1681 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1682 	if (rc)
1683 		goto fail;
1684 
1685 	grp->grp_id = le32toh(resp->ring_group_id);
1686 
1687 fail:
1688 	BNXT_HWRM_UNLOCK(softc);
1689 	return rc;
1690 }
1691 
1692 int
1693 bnxt_hwrm_ring_grp_free(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
1694 {
1695 	struct hwrm_ring_grp_free_input req = {0};
1696 	int rc = 0;
1697 
1698 	if (grp->grp_id == (uint16_t)HWRM_NA_SIGNATURE)
1699 		return 0;
1700 
1701 	if (BNXT_CHIP_P5_PLUS(softc))
1702 		return 0;
1703 
1704 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_FREE);
1705 
1706 	req.ring_group_id = htole32(grp->grp_id);
1707 
1708 	BNXT_HWRM_LOCK(softc);
1709 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1710 	if (rc)
1711 		goto fail;
1712 
1713 fail:
1714 	BNXT_HWRM_UNLOCK(softc);
1715 	return rc;
1716 }
1717 
1718 int bnxt_hwrm_ring_free(struct bnxt_softc *softc, uint32_t ring_type,
1719 		struct bnxt_ring *ring, int cmpl_ring_id)
1720 {
1721 	struct hwrm_ring_free_input req = {0};
1722 	struct hwrm_ring_free_output *resp;
1723 	int rc = 0;
1724 	uint16_t error_code;
1725 
1726 	if (ring->phys_id == (uint16_t)HWRM_NA_SIGNATURE)
1727 		return 0;
1728 
1729 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1730 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_FREE);
1731 	req.cmpl_ring = htole16(cmpl_ring_id);
1732 	req.ring_type = ring_type;
1733 	req.ring_id = htole16(ring->phys_id);
1734 
1735 	BNXT_HWRM_LOCK(softc);
1736 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1737 	error_code = le16toh(resp->error_code);
1738 
1739 	if (rc || error_code) {
1740 		device_printf(softc->dev, "hwrm_ring_free type %d failed. "
1741 				"rc:%x err:%x\n", ring_type, rc, error_code);
1742 		if (!rc)
1743 			rc = -EIO;
1744 	}
1745 
1746 	BNXT_HWRM_UNLOCK(softc);
1747 	return rc;
1748 }
1749 
1750 /*
1751  * Ring allocation message to the firmware
1752  */
1753 int
1754 bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
1755                      struct bnxt_ring *ring)
1756 {
1757 	struct hwrm_ring_alloc_input req = {0};
1758 	struct hwrm_ring_alloc_output *resp;
1759 	uint16_t idx = ring->idx;
1760 	struct bnxt_cp_ring *cp_ring;
1761 	int rc;
1762 
1763 	if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE) {
1764 		device_printf(softc->dev,
1765 		    "Attempt to re-allocate ring %04x\n", ring->phys_id);
1766 		return EDOOFUS;
1767 	}
1768 
1769 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1770 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC);
1771 	req.enables = htole32(0);
1772 	req.fbo = htole32(0);
1773 	req.ring_type = type;
1774 	req.page_tbl_addr = htole64(ring->paddr);
1775 	req.logical_id = htole16(ring->id);
1776 	req.length = htole32(ring->ring_size);
1777 
1778 	switch (type) {
1779 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1780 		cp_ring = &softc->tx_cp_rings[idx];
1781 
1782 		req.cmpl_ring_id = htole16(cp_ring->ring.phys_id);
1783 		/* queue_id - what CoS queue the TX ring is associated with */
1784 		req.queue_id = htole16(softc->tx_q_info[0].queue_id);
1785 
1786 		req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
1787 		req.enables |= htole32(
1788 		    HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
1789 		break;
1790 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1791 		if (!BNXT_CHIP_P5_PLUS(softc))
1792 			break;
1793 
1794 		cp_ring = &softc->rx_cp_rings[idx];
1795 
1796 		req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
1797 		req.rx_buf_size = htole16(softc->rx_buf_size);
1798 		req.enables |= htole32(
1799 			HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1800 			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
1801 		break;
1802 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1803 		if (!BNXT_CHIP_P5_PLUS(softc)) {
1804 			req.ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
1805 			break;
1806 		}
1807 
1808 		cp_ring = &softc->rx_cp_rings[idx];
1809 
1810 		req.rx_ring_id = htole16(softc->rx_rings[idx].phys_id);
1811 		req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
1812 		req.rx_buf_size = htole16(softc->rx_buf_size);
1813 		req.enables |= htole32(
1814 			HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1815 			HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1816 			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
1817 		break;
1818 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1819 		if (!BNXT_CHIP_P5_PLUS(softc)) {
1820 			req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1821 			break;
1822 		}
1823 
1824 		req.cq_handle = htole64(ring->id);
1825 		req.nq_ring_id = htole16(softc->nq_rings[idx].ring.phys_id);
1826 		req.enables |= htole32(
1827 			HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID);
1828 		break;
1829 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1830 		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1831 		break;
1832 	default:
1833 		device_printf(softc->dev,
1834 			      "hwrm alloc invalid ring type %d\n", type);
1835 		return -1;
1836 	}
1837 
1838 	BNXT_HWRM_LOCK(softc);
1839 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1840 	if (rc)
1841 		goto fail;
1842 
1843 	ring->phys_id = le16toh(resp->ring_id);
1844 
1845 fail:
1846 	BNXT_HWRM_UNLOCK(softc);
1847 	return rc;
1848 }
1849 
1850 int
1851 bnxt_hwrm_stat_ctx_free(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
1852 {
1853 	struct hwrm_stat_ctx_free_input req = {0};
1854 	int rc = 0;
1855 
1856 	if (cpr->stats_ctx_id == HWRM_NA_SIGNATURE)
1857 		return rc;
1858 
1859 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_FREE);
1860 
1861 	req.stat_ctx_id = htole16(cpr->stats_ctx_id);
1862 	BNXT_HWRM_LOCK(softc);
1863 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1864 	if (rc)
1865 		goto fail;
1866 
1867 fail:
1868 	BNXT_HWRM_UNLOCK(softc);
1869 
1870 	return rc;
1871 }
1872 
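/*
 * Allocate a statistics context for a completion ring.  The DMA length
 * depends on the chip generation: full extended stats on P7, extended stats
 * minus the last 8 bytes on P5, and the legacy ctx_hw_stats size otherwise.
 */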
1873 int
1874 bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
1875     uint64_t paddr)
1876 {
1877 	struct hwrm_stat_ctx_alloc_input req = {0};
1878 	struct hwrm_stat_ctx_alloc_output *resp;
1879 	int rc = 0;
1880 
1881 	if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE) {
1882 		device_printf(softc->dev,
1883 		    "Attempt to re-allocate stats ctx %08x\n",
1884 		    cpr->stats_ctx_id);
1885 		return EDOOFUS;
1886 	}
1887 
1888 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1889 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC);
1890 
1891 	req.update_period_ms = htole32(1000);
1892 	req.stats_dma_addr = htole64(paddr);
1893 
1894 	if (BNXT_CHIP_P7(softc))
1895 		req.stats_dma_length = htole16(sizeof(struct ctx_hw_stats_ext));
1896 	else if (BNXT_CHIP_P5(softc))
1897 		req.stats_dma_length = htole16(sizeof(struct ctx_hw_stats_ext) - 8);
1898 	else
1899 		req.stats_dma_length = htole16(sizeof(struct ctx_hw_stats));
1900 
1901 	BNXT_HWRM_LOCK(softc);
1902 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1903 	if (rc)
1904 		goto fail;
1905 
1906 	cpr->stats_ctx_id = le32toh(resp->stat_ctx_id);
1907 
1908 fail:
1909 	BNXT_HWRM_UNLOCK(softc);
1910 
1911 	return rc;
1912 }
1913 
1914 int
1915 bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
1916 {
1917 	struct hwrm_port_qstats_input req = {0};
1918 	int rc = 0;
1919 
1920 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS);
1921 
1922 	req.port_id = htole16(softc->pf.port_id);
1923 	req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr);
1924 	req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr);
1925 
1926 	BNXT_HWRM_LOCK(softc);
1927 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1928 	BNXT_HWRM_UNLOCK(softc);
1929 
1930 	return rc;
1931 }
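/*
 * Query the priority-to-CoS-queue mapping (QUEUE_PRI2COS_QCFG) for the given
 * path and translate each per-port queue ID into a per-port queue index
 * (per-port queue IDs are assigned starting at 0, 10, 20, ...).
 */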
1932 static int bnxt_hwrm_pri2cos_idx(struct bnxt_softc *softc, uint32_t path_dir)
1933 {
1934 	struct hwrm_queue_pri2cos_qcfg_input req = {0};
1935 	struct hwrm_queue_pri2cos_qcfg_output *resp;
1936 	uint8_t *pri2cos_idx, *q_ids, max_q;
1937 	int rc, i, j;
1938 	uint8_t *pri2cos;
1939 
1940 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_PRI2COS_QCFG);
1941 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1942 
1943 	req.flags = htole32(HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN |
1944 			    path_dir);
1945 	rc = hwrm_send_message(softc, &req, sizeof(req));
1946 
1947 	if (rc)
1948 		return rc;
1949 
1950 	if (path_dir == HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX) {
1951 		pri2cos_idx = softc->tx_pri2cos_idx;
1952 		q_ids = softc->tx_q_ids;
1953 		max_q = softc->tx_max_q;
1954 	} else {
1955 		pri2cos_idx = softc->rx_pri2cos_idx;
1956 		q_ids = softc->rx_q_ids;
1957 		max_q = softc->rx_max_q;
1958 	}
1959 
1960 	pri2cos = &resp->pri0_cos_queue_id;
1961 
1962 	for (i = 0; i < BNXT_MAX_QUEUE; i++) {
1963 		uint8_t queue_id = pri2cos[i];
1964 		uint8_t queue_idx;
1965 
1966 		/* Per port queue IDs start from 0, 10, 20, etc */
1967 		queue_idx = queue_id % 10;
1968 		if (queue_idx > BNXT_MAX_QUEUE) {
1969 			softc->pri2cos_valid = false;
1970 			rc = -EINVAL;
1971 			return rc;
1972 		}
1973 
1974 		for (j = 0; j < max_q; j++) {
1975 			if (q_ids[j] == queue_id)
1976 				pri2cos_idx[i] = queue_idx;
1977 		}
1978 	}
1979 
1980 	softc->pri2cos_valid = true;
1981 
1982 	return rc;
1983 }
1984 
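/*
 * Collect extended port statistics.  Records the RX/TX stat sizes reported
 * by firmware, refreshes the pri2cos mapping, and copies the per-priority
 * byte and packet counters out of the DMAed stat blocks.
 */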
1985 int
1986 bnxt_hwrm_port_qstats_ext(struct bnxt_softc *softc)
1987 {
1988 	struct hwrm_port_qstats_ext_input req = {0};
1989 	struct hwrm_port_qstats_ext_output *resp;
1990 	int rc = 0, i;
1991 	uint32_t tx_stat_size;
1992 
1993 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS_EXT);
1994 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1995 
1996 	tx_stat_size = sizeof(struct tx_port_stats_ext);
1997 	req.port_id = htole16(softc->pf.port_id);
1998 	req.tx_stat_size = htole16(tx_stat_size);
1999 	req.rx_stat_size = htole16(sizeof(struct rx_port_stats_ext));
2000 	req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats_ext.idi_paddr);
2001 	req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats_ext.idi_paddr);
2002 
2003 	rc = hwrm_send_message(softc, &req, sizeof(req));
2004 
2005 	if (!rc) {
2006 		softc->fw_rx_stats_ext_size =
2007 			le16toh(resp->rx_stat_size) / 8;
2008 		if (BNXT_FW_MAJ(softc) < 220 && !BNXT_CHIP_P7(softc) &&
2009 		    softc->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
2010 			softc->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
2011 
2012 		softc->fw_tx_stats_ext_size = tx_stat_size ?
2013 			le16toh(resp->tx_stat_size) / 8 : 0;
2014 	} else {
2015 		softc->fw_rx_stats_ext_size = 0;
2016 		softc->fw_tx_stats_ext_size = 0;
2017 	}
2018 
2019 	if (softc->fw_tx_stats_ext_size <=
2020 	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
2021 		softc->pri2cos_valid = false;
2022 		return rc;
2023 	}
2024 
2025 	rc = bnxt_hwrm_pri2cos_idx(softc, HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX);
2026 	if (rc)
2027 		return rc;
2028 
2029 	if (softc->is_asym_q) {
2030 		rc = bnxt_hwrm_pri2cos_idx(softc, HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX);
2031 		if (rc)
2032 			return rc;
2033 	} else {
2034 		memcpy(softc->rx_pri2cos_idx, softc->tx_pri2cos_idx, sizeof(softc->rx_pri2cos_idx));
2035 	}
2036 
2037 	u64 *rx_port_stats_ext = (u64 *)softc->hw_rx_port_stats_ext.idi_vaddr;
2038 	u64 *tx_port_stats_ext = (u64 *)softc->hw_tx_port_stats_ext.idi_vaddr;
2039 
2040 	if (softc->pri2cos_valid) {
2041 		for (i = 0; i < 8; i++) {
2042 			long n = bnxt_rx_bytes_pri_arr_base_off[i] +
2043 				 softc->rx_pri2cos_idx[i];
2044 
2045 			softc->rx_bytes_pri[i] = *(rx_port_stats_ext + n);
2046 		}
2047 		for (i = 0; i < 8; i++) {
2048 			long n = bnxt_rx_pkts_pri_arr_base_off[i] +
2049 				 softc->rx_pri2cos_idx[i];
2050 
2051 			softc->rx_packets_pri[i] = *(rx_port_stats_ext + n);
2052 		}
2053 		for (i = 0; i < 8; i++) {
2054 			long n = bnxt_tx_bytes_pri_arr_base_off[i] +
2055 				 softc->tx_pri2cos_idx[i];
2056 
2057 			softc->tx_bytes_pri[i] = *(tx_port_stats_ext + n);
2058 		}
2059 		for (i = 0; i < 8; i++) {
2060 			long n = bnxt_tx_pkts_pri_arr_base_off[i] +
2061 				 softc->tx_pri2cos_idx[i];
2062 
2063 			softc->tx_packets_pri[i] = *(tx_port_stats_ext + n);
2064 		}
2065 	}
2066 
2067 	return rc;
2068 }
2069 
2070 int
2071 bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc,
2072     struct bnxt_vnic_info *vnic)
2073 {
2074 	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
2075 	uint32_t mask = vnic->rx_mask;
2076 
2077 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK);
2078 
2079 	req.vnic_id = htole32(vnic->id);
2080 	req.mask = htole32(mask);
2081 	req.mc_tbl_addr = htole64(vnic->mc_list.idi_paddr);
2082 	req.num_mc_entries = htole32(vnic->mc_list_count);
2083 	return hwrm_send_message(softc, &req, sizeof(req));
2084 }
2085 
2086 int
2087 bnxt_hwrm_l2_filter_free(struct bnxt_softc *softc, uint64_t filter_id)
2088 {
2089 	struct hwrm_cfa_l2_filter_free_input	req = {0};
2090 	int rc = 0;
2091 
2092 	if (filter_id == -1)
2093 		return rc;
2094 
2095 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_FREE);
2096 
2097 	req.l2_filter_id = htole64(filter_id);
2098 
2099 	BNXT_HWRM_LOCK(softc);
2100 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2101 	if (rc)
2102 		goto fail;
2103 
2104 fail:
2105 	BNXT_HWRM_UNLOCK(softc);
2106 	return (rc);
2107 }
2108 
2109 int
2110 bnxt_hwrm_free_filter(struct bnxt_softc *softc)
2111 {
2112 	struct bnxt_vnic_info *vnic = &softc->vnic_info;
2113 	struct bnxt_vlan_tag *tag;
2114 	int rc = 0;
2115 
2116 	rc = bnxt_hwrm_l2_filter_free(softc, softc->vnic_info.filter_id);
2117 	if (rc)
2118 		goto end;
2119 
2120 	SLIST_FOREACH(tag, &vnic->vlan_tags, next) {
2121 		rc = bnxt_hwrm_l2_filter_free(softc, tag->filter_id);
2122 		if (rc)
2123 			goto end;
2124 		tag->filter_id = -1;
2125 	}
2126 
2127 end:
2128 	return rc;
2129 }
2130 
2131 int
2132 bnxt_hwrm_l2_filter_alloc(struct bnxt_softc *softc, uint16_t vlan_tag,
2133 		uint64_t *filter_id)
2134 {
2135 	struct hwrm_cfa_l2_filter_alloc_input	req = {0};
2136 	struct hwrm_cfa_l2_filter_alloc_output	*resp;
2137 	struct bnxt_vnic_info *vnic = &softc->vnic_info;
2138 	uint32_t enables = 0;
2139 	int rc = 0;
2140 
2141 	if (*filter_id != -1) {
2142 		device_printf(softc->dev, "Attempt to re-allocate l2 ctx "
2143 		    "filter (fid: 0x%jx)\n", (uintmax_t)*filter_id);
2144 		return EDOOFUS;
2145 	}
2146 
2147 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
2148 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC);
2149 
2150 	req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX);
2151 	enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
2152 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
2153 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
2154 
2155 	if (vlan_tag != 0xffff) {
2156 		enables |=
2157 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
2158 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK |
2159 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_NUM_VLANS;
2160 		req.l2_ivlan_mask = 0xffff;
2161 		req.l2_ivlan = vlan_tag;
2162 		req.num_vlans = 1;
2163 	}
2164 
2165 	req.enables = htole32(enables);
2166 	req.dst_id = htole16(vnic->id);
2167 	memcpy(req.l2_addr, if_getlladdr(iflib_get_ifp(softc->ctx)),
2168 	    ETHER_ADDR_LEN);
2169 	memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask));
2170 
2171 	BNXT_HWRM_LOCK(softc);
2172 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2173 	if (rc)
2174 		goto fail;
2175 
2176 	*filter_id = le64toh(resp->l2_filter_id);
2177 fail:
2178 	BNXT_HWRM_UNLOCK(softc);
2179 	return (rc);
2180 }
2181 
2182 int
2183 bnxt_hwrm_set_filter(struct bnxt_softc *softc)
2184 {
2185 	struct bnxt_vnic_info *vnic = &softc->vnic_info;
2186 	struct bnxt_vlan_tag *tag;
2187 	int rc = 0;
2188 
2189 	rc = bnxt_hwrm_l2_filter_alloc(softc, 0xffff, &vnic->filter_id);
2190 	if (rc)
2191 		goto end;
2192 
2193 	SLIST_FOREACH(tag, &vnic->vlan_tags, next) {
2194 		rc = bnxt_hwrm_l2_filter_alloc(softc, tag->tag,
2195 				&tag->filter_id);
2196 		if (rc)
2197 			goto end;
2198 	}
2199 
2200 end:
2201 	return rc;
2202 }
2203 
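/*
 * Program RSS for a VNIC: hash type, indirection (ring group) table, and
 * hash key.  P5+ chips additionally take the VNIC ID and the ring table
 * pair index.
 */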
2204 int
2205 bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
2206     uint32_t hash_type)
2207 {
2208 	struct hwrm_vnic_rss_cfg_input	req = {0};
2209 
2210 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);
2211 
2212 	if (BNXT_CHIP_P7(softc))
2213 		req.flags |= HWRM_VNIC_RSS_CFG_INPUT_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
2214 
2215 	req.hash_type = htole32(hash_type);
2216 	req.ring_grp_tbl_addr = htole64(vnic->rss_grp_tbl.idi_paddr);
2217 	req.hash_key_tbl_addr = htole64(vnic->rss_hash_key_tbl.idi_paddr);
2218 	req.rss_ctx_idx = htole16(vnic->rss_id);
2219 	req.hash_mode_flags = HWRM_FUNC_SPD_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
2220 	if (BNXT_CHIP_P5_PLUS(softc)) {
2221 		req.vnic_id = htole16(vnic->id);
2222 		req.ring_table_pair_index = 0x0;
2223 	}
2224 
2225 	return hwrm_send_message(softc, &req, sizeof(req));
2226 }
2227 
2228 int
2229 bnxt_hwrm_reserve_pf_rings(struct bnxt_softc *softc)
2230 {
2231 	struct hwrm_func_cfg_input req = {0};
2232 
2233 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
2234 
2235 	req.fid = htole16(0xffff);
2236 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS);
2237 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS);
2238 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS);
2239 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS);
2240 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS);
2241 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX);
2242 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS);
2243 	req.num_msix = htole16(BNXT_MAX_NUM_QUEUES);
2244 	req.num_rsscos_ctxs = htole16(0x8);
2245 	req.num_cmpl_rings = htole16(BNXT_MAX_NUM_QUEUES * 2);
2246 	req.num_tx_rings = htole16(BNXT_MAX_NUM_QUEUES);
2247 	req.num_rx_rings = htole16(BNXT_MAX_NUM_QUEUES);
2248 	req.num_vnics = htole16(BNXT_MAX_NUM_QUEUES);
2249 	req.num_stat_ctxs = htole16(BNXT_MAX_NUM_QUEUES * 2);
2250 
2251 	return hwrm_send_message(softc, &req, sizeof(req));
2252 }
2253 
2254 int
2255 bnxt_cfg_async_cr(struct bnxt_softc *softc)
2256 {
2257 	int rc = 0;
2258 	struct hwrm_func_cfg_input req = {0};
2259 
2260 	if (!BNXT_PF(softc))
2261 		return 0;
2262 
2263 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
2264 
2265 	req.fid = htole16(0xffff);
2266 	req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2267 	if (BNXT_CHIP_P5_PLUS(softc))
2268 		req.async_event_cr = htole16(softc->nq_rings[0].ring.phys_id);
2269 	else
2270 		req.async_event_cr = htole16(softc->def_cp_ring.ring.phys_id);
2271 
2272 	rc = hwrm_send_message(softc, &req, sizeof(req));
2273 
2274 	return rc;
2275 }
2276 
2277 void
2278 bnxt_validate_hw_lro_settings(struct bnxt_softc *softc)
2279 {
2280 	softc->hw_lro.enable = min(softc->hw_lro.enable, 1);
2281 
2282 	softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1);
2283 
2284 	softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs,
2285 		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX);
2286 
2287 	softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs,
2288 		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
2289 
2290 	softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len, BNXT_MAX_MTU);
2291 }
2292 
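/*
 * Configure hardware LRO (TPA) on the default VNIC using the validated
 * hw_lro settings: GRO vs. RSC mode plus the aggregation segment and length
 * limits.  When hw_lro is disabled the request is sent with the TPA flags
 * clear.
 */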
2293 int
2294 bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc)
2295 {
2296 	struct hwrm_vnic_tpa_cfg_input req = {0};
2297 	uint32_t flags;
2298 
2299 	if (softc->vnic_info.id == (uint16_t) HWRM_NA_SIGNATURE) {
2300 		return 0;
2301 	}
2302 
2303 	if (!(softc->flags & BNXT_FLAG_TPA))
2304 		return 0;
2305 
2306 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG);
2307 
2308 	if (softc->hw_lro.enable) {
2309 		flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2310 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2311 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2312 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ;
2313 
2314 		if (softc->hw_lro.is_mode_gro)
2315 			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
2316 		else
2317 			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE;
2318 
2319 		req.flags = htole32(flags);
2320 
2321 		req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2322 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2323 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2324 
2325 		req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs);
2326 		req.max_aggs = htole16(softc->hw_lro.max_aggs);
2327 		req.min_agg_len = htole32(softc->hw_lro.min_agg_len);
2328 	}
2329 
2330 	req.vnic_id = htole16(softc->vnic_info.id);
2331 
2332 	return hwrm_send_message(softc, &req, sizeof(req));
2333 }
2334 
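/*
 * Look up an NVM directory entry by type/ordinal/extension (or directly by
 * index) and return its data length, item length and firmware version.
 * NVM commands use the longer BNXT_NVM_TIMEO timeout.
 */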
2335 int
2336 bnxt_hwrm_nvm_find_dir_entry(struct bnxt_softc *softc, uint16_t type,
2337     uint16_t *ordinal, uint16_t ext, uint16_t *index, bool use_index,
2338     uint8_t search_opt, uint32_t *data_length, uint32_t *item_length,
2339     uint32_t *fw_ver)
2340 {
2341 	struct hwrm_nvm_find_dir_entry_input req = {0};
2342 	struct hwrm_nvm_find_dir_entry_output *resp =
2343 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2344 	int	rc = 0;
2345 	uint32_t old_timeo;
2346 
2347 	MPASS(ordinal);
2348 
2349 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_FIND_DIR_ENTRY);
2350 	if (use_index) {
2351 		req.enables = htole32(
2352 		    HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID);
2353 		req.dir_idx = htole16(*index);
2354 	}
2355 	req.dir_type = htole16(type);
2356 	req.dir_ordinal = htole16(*ordinal);
2357 	req.dir_ext = htole16(ext);
2358 	req.opt_ordinal = search_opt;
2359 
2360 	BNXT_HWRM_LOCK(softc);
2361 	old_timeo = softc->hwrm_cmd_timeo;
2362 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2363 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2364 	softc->hwrm_cmd_timeo = old_timeo;
2365 	if (rc)
2366 		goto exit;
2367 
2368 	if (item_length)
2369 		*item_length = le32toh(resp->dir_item_length);
2370 	if (data_length)
2371 		*data_length = le32toh(resp->dir_data_length);
2372 	if (fw_ver)
2373 		*fw_ver = le32toh(resp->fw_ver);
2374 	*ordinal = le16toh(resp->dir_ordinal);
2375 	if (index)
2376 		*index = le16toh(resp->dir_idx);
2377 
2378 exit:
2379 	BNXT_HWRM_UNLOCK(softc);
2380 	return (rc);
2381 }
2382 
2383 int
2384 bnxt_hwrm_nvm_read(struct bnxt_softc *softc, uint16_t index, uint32_t offset,
2385     uint32_t length, struct iflib_dma_info *data)
2386 {
2387 	struct hwrm_nvm_read_input req = {0};
2388 	int rc;
2389 	uint32_t old_timeo;
2390 
2391 	if (length > data->idi_size) {
2392 		rc = EINVAL;
2393 		goto exit;
2394 	}
2395 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_READ);
2396 	req.host_dest_addr = htole64(data->idi_paddr);
2397 	req.dir_idx = htole16(index);
2398 	req.offset = htole32(offset);
2399 	req.len = htole32(length);
2400 	BNXT_HWRM_LOCK(softc);
2401 	old_timeo = softc->hwrm_cmd_timeo;
2402 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2403 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2404 	softc->hwrm_cmd_timeo = old_timeo;
2405 	BNXT_HWRM_UNLOCK(softc);
2406 	if (rc)
2407 		goto exit;
2408 	bus_dmamap_sync(data->idi_tag, data->idi_map, BUS_DMASYNC_POSTREAD);
2409 
2410 	goto exit;
2411 
2412 exit:
2413 	return rc;
2414 }
2415 
2416 int
2417 bnxt_hwrm_nvm_modify(struct bnxt_softc *softc, uint16_t index, uint32_t offset,
2418     void *data, bool cpyin, uint32_t length)
2419 {
2420 	struct hwrm_nvm_modify_input req = {0};
2421 	struct iflib_dma_info dma_data;
2422 	int rc;
2423 	uint32_t old_timeo;
2424 
2425 	if (length == 0 || !data)
2426 		return EINVAL;
2427 	rc = iflib_dma_alloc(softc->ctx, length, &dma_data,
2428 	    BUS_DMA_NOWAIT);
2429 	if (rc)
2430 		return ENOMEM;
2431 	if (cpyin) {
2432 		rc = copyin(data, dma_data.idi_vaddr, length);
2433 		if (rc)
2434 			goto exit;
2435 	}
2436 	else
2437 		memcpy(dma_data.idi_vaddr, data, length);
2438 	bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map,
2439 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2440 
2441 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_MODIFY);
2442 	req.host_src_addr = htole64(dma_data.idi_paddr);
2443 	req.dir_idx = htole16(index);
2444 	req.offset = htole32(offset);
2445 	req.len = htole32(length);
2446 	BNXT_HWRM_LOCK(softc);
2447 	old_timeo = softc->hwrm_cmd_timeo;
2448 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2449 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2450 	softc->hwrm_cmd_timeo = old_timeo;
2451 	BNXT_HWRM_UNLOCK(softc);
2452 
2453 exit:
2454 	iflib_dma_free(&dma_data);
2455 	return rc;
2456 }
2457 
2458 int
2459 bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
2460     uint8_t *selfreset)
2461 {
2462 	struct hwrm_fw_reset_input req = {0};
2463 	struct hwrm_fw_reset_output *resp =
2464 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2465 	int rc;
2466 
2467 	MPASS(selfreset);
2468 
2469 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET);
2470 	req.embedded_proc_type = processor;
2471 	req.selfrst_status = *selfreset;
2472 
2473 	BNXT_HWRM_LOCK(softc);
2474 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2475 	if (rc)
2476 		goto exit;
2477 	*selfreset = resp->selfrst_status;
2478 
2479 exit:
2480 	BNXT_HWRM_UNLOCK(softc);
2481 	return rc;
2482 }
2483 
2484 int
2485 bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset)
2486 {
2487 	struct hwrm_fw_qstatus_input req = {0};
2488 	struct hwrm_fw_qstatus_output *resp =
2489 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2490 	int rc;
2491 
2492 	MPASS(selfreset);
2493 
2494 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS);
2495 	req.embedded_proc_type = type;
2496 
2497 	BNXT_HWRM_LOCK(softc);
2498 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2499 	if (rc)
2500 		goto exit;
2501 	*selfreset = resp->selfrst_status;
2502 
2503 exit:
2504 	BNXT_HWRM_UNLOCK(softc);
2505 	return rc;
2506 }
2507 
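/*
 * Write an NVM directory entry.  The payload is staged in a DMA buffer
 * (copied from user space when cpyin is set) and the resulting directory
 * index and item length are returned to the caller.
 */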
2508 int
2509 bnxt_hwrm_nvm_write(struct bnxt_softc *softc, void *data, bool cpyin,
2510     uint16_t type, uint16_t ordinal, uint16_t ext, uint16_t attr,
2511     uint16_t option, uint32_t data_length, bool keep, uint32_t *item_length,
2512     uint16_t *index)
2513 {
2514 	struct hwrm_nvm_write_input req = {0};
2515 	struct hwrm_nvm_write_output *resp =
2516 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2517 	struct iflib_dma_info dma_data;
2518 	int rc;
2519 	uint32_t old_timeo;
2520 
2521 	if (data_length) {
2522 		rc = iflib_dma_alloc(softc->ctx, data_length, &dma_data,
2523 		    BUS_DMA_NOWAIT);
2524 		if (rc)
2525 			return ENOMEM;
2526 		if (cpyin) {
2527 			rc = copyin(data, dma_data.idi_vaddr, data_length);
2528 			if (rc)
2529 				goto early_exit;
2530 		}
2531 		else
2532 			memcpy(dma_data.idi_vaddr, data, data_length);
2533 		bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map,
2534 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2535 	}
2536 	else
2537 		dma_data.idi_paddr = 0;
2538 
2539 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_WRITE);
2540 
2541 	req.host_src_addr = htole64(dma_data.idi_paddr);
2542 	req.dir_type = htole16(type);
2543 	req.dir_ordinal = htole16(ordinal);
2544 	req.dir_ext = htole16(ext);
2545 	req.dir_attr = htole16(attr);
2546 	req.dir_data_length = htole32(data_length);
2547 	req.option = htole16(option);
2548 	if (keep) {
2549 		req.flags =
2550 		    htole16(HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG);
2551 	}
2552 	if (item_length)
2553 		req.dir_item_length = htole32(*item_length);
2554 
2555 	BNXT_HWRM_LOCK(softc);
2556 	old_timeo = softc->hwrm_cmd_timeo;
2557 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2558 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2559 	softc->hwrm_cmd_timeo = old_timeo;
2560 	if (rc)
2561 		goto exit;
2562 	if (item_length)
2563 		*item_length = le32toh(resp->dir_item_length);
2564 	if (index)
2565 		*index = le16toh(resp->dir_idx);
2566 
2567 exit:
2568 	BNXT_HWRM_UNLOCK(softc);
2569 early_exit:
2570 	if (data_length)
2571 		iflib_dma_free(&dma_data);
2572 	return rc;
2573 }
2574 
2575 int
2576 bnxt_hwrm_nvm_erase_dir_entry(struct bnxt_softc *softc, uint16_t index)
2577 {
2578 	struct hwrm_nvm_erase_dir_entry_input req = {0};
2579 	uint32_t old_timeo;
2580 	int rc;
2581 
2582 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_ERASE_DIR_ENTRY);
2583 	req.dir_idx = htole16(index);
2584 	BNXT_HWRM_LOCK(softc);
2585 	old_timeo = softc->hwrm_cmd_timeo;
2586 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2587 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2588 	softc->hwrm_cmd_timeo = old_timeo;
2589 	BNXT_HWRM_UNLOCK(softc);
2590 	return rc;
2591 }
2592 
2593 int
2594 bnxt_hwrm_nvm_get_dir_info(struct bnxt_softc *softc, uint32_t *entries,
2595     uint32_t *entry_length)
2596 {
2597 	struct hwrm_nvm_get_dir_info_input req = {0};
2598 	struct hwrm_nvm_get_dir_info_output *resp =
2599 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2600 	int rc;
2601 	uint32_t old_timeo;
2602 
2603 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_INFO);
2604 
2605 	BNXT_HWRM_LOCK(softc);
2606 	old_timeo = softc->hwrm_cmd_timeo;
2607 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2608 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2609 	softc->hwrm_cmd_timeo = old_timeo;
2610 	if (rc)
2611 		goto exit;
2612 
2613 	if (entries)
2614 		*entries = le32toh(resp->entries);
2615 	if (entry_length)
2616 		*entry_length = le32toh(resp->entry_length);
2617 
2618 exit:
2619 	BNXT_HWRM_UNLOCK(softc);
2620 	return rc;
2621 }
2622 
2623 int
2624 bnxt_hwrm_nvm_get_dir_entries(struct bnxt_softc *softc, uint32_t *entries,
2625     uint32_t *entry_length, struct iflib_dma_info *dma_data)
2626 {
2627 	struct hwrm_nvm_get_dir_entries_input req = {0};
2628 	uint32_t ent;
2629 	uint32_t ent_len;
2630 	int rc;
2631 	uint32_t old_timeo;
2632 
2633 	if (!entries)
2634 		entries = &ent;
2635 	if (!entry_length)
2636 		entry_length = &ent_len;
2637 
2638 	rc = bnxt_hwrm_nvm_get_dir_info(softc, entries, entry_length);
2639 	if (rc)
2640 		goto exit;
2641 	if (*entries * *entry_length > dma_data->idi_size) {
2642 		rc = EINVAL;
2643 		goto exit;
2644 	}
2645 
2646 	/*
2647 	 * TODO: There's a race condition here that could blow up DMA memory...
2648 	 *	 we need to allocate the max size, not the currently in use
2649 	 *	 size.  The command should totally have a max size here.
2650 	 */
2651 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_ENTRIES);
2652 	req.host_dest_addr = htole64(dma_data->idi_paddr);
2653 	BNXT_HWRM_LOCK(softc);
2654 	old_timeo = softc->hwrm_cmd_timeo;
2655 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2656 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2657 	softc->hwrm_cmd_timeo = old_timeo;
2658 	BNXT_HWRM_UNLOCK(softc);
2659 	if (rc)
2660 		goto exit;
2661 	bus_dmamap_sync(dma_data->idi_tag, dma_data->idi_map,
2662 	    BUS_DMASYNC_POSTWRITE);
2663 
2664 exit:
2665 	return rc;
2666 }
2667 
2668 int
2669 bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
2670     uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
2671     uint32_t *reserved_size, uint32_t *available_size)
2672 {
2673 	struct hwrm_nvm_get_dev_info_input req = {0};
2674 	struct hwrm_nvm_get_dev_info_output *resp =
2675 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2676 	int rc;
2677 	uint32_t old_timeo;
2678 
2679 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO);
2680 
2681 	BNXT_HWRM_LOCK(softc);
2682 	old_timeo = softc->hwrm_cmd_timeo;
2683 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2684 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2685 	softc->hwrm_cmd_timeo = old_timeo;
2686 	if (rc)
2687 		goto exit;
2688 
2689 	if (mfg_id)
2690 		*mfg_id = le16toh(resp->manufacturer_id);
2691 	if (device_id)
2692 		*device_id = le16toh(resp->device_id);
2693 	if (sector_size)
2694 		*sector_size = le32toh(resp->sector_size);
2695 	if (nvram_size)
2696 		*nvram_size = le32toh(resp->nvram_size);
2697 	if (reserved_size)
2698 		*reserved_size = le32toh(resp->reserved_size);
2699 	if (available_size)
2700 		*available_size = le32toh(resp->available_size);
2701 
2702 exit:
2703 	BNXT_HWRM_UNLOCK(softc);
2704 	return rc;
2705 }
2706 
2707 int
2708 bnxt_hwrm_nvm_install_update(struct bnxt_softc *softc,
2709     uint32_t install_type, uint64_t *installed_items, uint8_t *result,
2710     uint8_t *problem_item, uint8_t *reset_required)
2711 {
2712 	struct hwrm_nvm_install_update_input req = {0};
2713 	struct hwrm_nvm_install_update_output *resp =
2714 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2715 	int rc;
2716 	uint32_t old_timeo;
2717 
2718 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_INSTALL_UPDATE);
2719 	req.install_type = htole32(install_type);
2720 
2721 	BNXT_HWRM_LOCK(softc);
2722 	old_timeo = softc->hwrm_cmd_timeo;
2723 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2724 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2725 	softc->hwrm_cmd_timeo = old_timeo;
2726 	if (rc)
2727 		goto exit;
2728 
2729 	if (installed_items)
2730 		*installed_items = le32toh(resp->installed_items);
2731 	if (result)
2732 		*result = resp->result;
2733 	if (problem_item)
2734 		*problem_item = resp->problem_item;
2735 	if (reset_required)
2736 		*reset_required = resp->reset_required;
2737 
2738 exit:
2739 	BNXT_HWRM_UNLOCK(softc);
2740 	return rc;
2741 }
2742 
2743 int
2744 bnxt_hwrm_nvm_verify_update(struct bnxt_softc *softc, uint16_t type,
2745     uint16_t ordinal, uint16_t ext)
2746 {
2747 	struct hwrm_nvm_verify_update_input req = {0};
2748 	uint32_t old_timeo;
2749 	int rc;
2750 
2751 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_VERIFY_UPDATE);
2752 
2753 	req.dir_type = htole16(type);
2754 	req.dir_ordinal = htole16(ordinal);
2755 	req.dir_ext = htole16(ext);
2756 
2757 	BNXT_HWRM_LOCK(softc);
2758 	old_timeo = softc->hwrm_cmd_timeo;
2759 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2760 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2761 	softc->hwrm_cmd_timeo = old_timeo;
2762 	BNXT_HWRM_UNLOCK(softc);
2763 	return rc;
2764 }
2765 
2766 int
2767 bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year, uint8_t *month,
2768     uint8_t *day, uint8_t *hour, uint8_t *minute, uint8_t *second,
2769     uint16_t *millisecond, uint16_t *zone)
2770 {
2771 	struct hwrm_fw_get_time_input req = {0};
2772 	struct hwrm_fw_get_time_output *resp =
2773 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2774 	int rc;
2775 
2776 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME);
2777 
2778 	BNXT_HWRM_LOCK(softc);
2779 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2780 	if (rc)
2781 		goto exit;
2782 
2783 	if (year)
2784 		*year = le16toh(resp->year);
2785 	if (month)
2786 		*month = resp->month;
2787 	if (day)
2788 		*day = resp->day;
2789 	if (hour)
2790 		*hour = resp->hour;
2791 	if (minute)
2792 		*minute = resp->minute;
2793 	if (second)
2794 		*second = resp->second;
2795 	if (millisecond)
2796 		*millisecond = le16toh(resp->millisecond);
2797 	if (zone)
2798 		*zone = le16toh(resp->zone);
2799 
2800 exit:
2801 	BNXT_HWRM_UNLOCK(softc);
2802 	return rc;
2803 }
2804 
2805 int
2806 bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year, uint8_t month,
2807     uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
2808     uint16_t millisecond, uint16_t zone)
2809 {
2810 	struct hwrm_fw_set_time_input req = {0};
2811 
2812 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME);
2813 
2814 	req.year = htole16(year);
2815 	req.month = month;
2816 	req.day = day;
2817 	req.hour = hour;
2818 	req.minute = minute;
2819 	req.second = second;
2820 	req.millisecond = htole16(millisecond);
2821 	req.zone = htole16(zone);
2822 	return hwrm_send_message(softc, &req, sizeof(req));
2823 }
2824 
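/*
 * Read SFP module EEPROM contents through PORT_PHY_I2C_READ, transferring at
 * most BNXT_MAX_PHY_I2C_RESP_SIZE bytes per request until data_length is
 * satisfied or an error occurs.
 */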
2825 int bnxt_read_sfp_module_eeprom_info(struct bnxt_softc *softc, uint16_t i2c_addr,
2826     uint16_t page_number, uint8_t bank, bool bank_sel_en, uint16_t start_addr,
2827     uint16_t data_length, uint8_t *buf)
2828 {
2829 	struct hwrm_port_phy_i2c_read_output *output =
2830 			(void *)softc->hwrm_cmd_resp.idi_vaddr;
2831 	struct hwrm_port_phy_i2c_read_input req = {0};
2832 	int rc = 0, byte_offset = 0;
2833 
2834 	BNXT_HWRM_LOCK(softc);
2835 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_I2C_READ);
2836 
2837 	req.i2c_slave_addr = i2c_addr;
2838 	req.page_number = htole16(page_number);
2839 	req.port_id = htole16(softc->pf.port_id);
2840 	do {
2841 		uint16_t xfer_size;
2842 
2843 		xfer_size = min_t(uint16_t, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
2844 		data_length -= xfer_size;
2845 		req.page_offset = htole16(start_addr + byte_offset);
2846 		req.data_length = xfer_size;
2847 		req.bank_number = bank;
2848 		req.enables = htole32((start_addr + byte_offset ?
2849 				HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET : 0) |
2850 				(bank_sel_en ?
2851 				HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_BANK_NUMBER : 0));
2852 		rc = _hwrm_send_message(softc, &req, sizeof(req));
2853 		if (!rc)
2854 			memcpy(buf + byte_offset, output->data, xfer_size);
2855 		byte_offset += xfer_size;
2856 	} while (!rc && data_length > 0);
2857 
2858 	BNXT_HWRM_UNLOCK(softc);
2859 
2860 	return rc;
2861 }
2862 
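/*
 * Query the current PHY configuration and refresh the cached link_info:
 * link state, speeds, flow control, media/PHY identification and the
 * PAM4/speeds2 fields used by newer PHYs.
 */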
2863 int
2864 bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc)
2865 {
2866 	struct bnxt_link_info *link_info = &softc->link_info;
2867 	struct hwrm_port_phy_qcfg_input req = {0};
2868 	struct hwrm_port_phy_qcfg_output *resp =
2869 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2870 	int rc = 0;
2871 
2872 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);
2873 
2874 	BNXT_HWRM_LOCK(softc);
2875 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2876 	if (rc)
2877 		goto exit;
2878 
2879 	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
2880 	link_info->phy_link_status = resp->link;
2881 	link_info->duplex =  resp->duplex_cfg;
2882 	link_info->auto_mode = resp->auto_mode;
2883 
2884 	/*
2885 	 * When AUTO_PAUSE_AUTONEG_PAUSE bit is set to 1,
2886 	 * the advertisement of pause is enabled.
2887 	 * 1. When the auto_mode is not set to none and this flag is set to 1,
2888 	 *    then the auto_pause bits on this port are being advertised and
2889 	 *    autoneg pause results are being interpreted.
2890 	 * 2. When the auto_mode is not set to none and this flag is set to 0,
2891 	 *    the pause is forced as indicated in force_pause, and also
2892 	 *    advertised as auto_pause bits, but the autoneg results are not
2893 	 *    interpreted since the pause configuration is being forced.
2894 	 * 3. When the auto_mode is set to none and this flag is set to 1,
2895 	 *    auto_pause bits should be ignored and should be set to 0.
2896 	 */
2897 
2898 	link_info->flow_ctrl.autoneg = false;
2899 	link_info->flow_ctrl.tx = false;
2900 	link_info->flow_ctrl.rx = false;
2901 
2902 	if ((resp->auto_mode) &&
2903 	    (resp->auto_pause & BNXT_AUTO_PAUSE_AUTONEG_PAUSE)) {
2904 		link_info->flow_ctrl.autoneg = true;
2905 	}
2906 
2907 	if (link_info->flow_ctrl.autoneg) {
2908 		if (resp->auto_pause & BNXT_PAUSE_TX)
2909 			link_info->flow_ctrl.tx = true;
2910 		if (resp->auto_pause & BNXT_PAUSE_RX)
2911 			link_info->flow_ctrl.rx = true;
2912 	} else {
2913 		if (resp->force_pause & BNXT_PAUSE_TX)
2914 			link_info->flow_ctrl.tx = true;
2915 		if (resp->force_pause & BNXT_PAUSE_RX)
2916 			link_info->flow_ctrl.rx = true;
2917 	}
2918 
2919 	link_info->duplex_setting = resp->duplex_cfg;
2920 	if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
2921 		link_info->link_speed = le16toh(resp->link_speed);
2922 		if (softc->phy_flags & BNXT_PHY_FL_SPEEDS2)
2923 			link_info->active_lanes = resp->active_lanes;
2924 	} else {
2925 		link_info->link_speed = 0;
2926 		link_info->active_lanes = 0;
2927 	}
2928 	link_info->force_link_speed = le16toh(resp->force_link_speed);
2929 	link_info->auto_link_speeds = le16toh(resp->auto_link_speed);
2930 	link_info->support_speeds = le16toh(resp->support_speeds);
2931 	link_info->auto_link_speeds = le16toh(resp->auto_link_speed_mask);
2932 	link_info->preemphasis = le32toh(resp->preemphasis);
2933 	link_info->phy_ver[0] = resp->phy_maj;
2934 	link_info->phy_ver[1] = resp->phy_min;
2935 	link_info->phy_ver[2] = resp->phy_bld;
2936 	snprintf(softc->ver_info->phy_ver, sizeof(softc->ver_info->phy_ver),
2937 	    "%d.%d.%d", link_info->phy_ver[0], link_info->phy_ver[1],
2938 	    link_info->phy_ver[2]);
2939 	strlcpy(softc->ver_info->phy_vendor, resp->phy_vendor_name,
2940 	    BNXT_NAME_SIZE);
2941 	strlcpy(softc->ver_info->phy_partnumber, resp->phy_vendor_partnumber,
2942 	    BNXT_NAME_SIZE);
2943 	link_info->media_type = resp->media_type;
2944 	link_info->phy_type = resp->phy_type;
2945 	link_info->transceiver = resp->xcvr_pkg_type;
2946 	link_info->phy_addr = resp->eee_config_phy_addr &
2947 	    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK;
2948 	link_info->module_status = resp->module_status;
2949 	link_info->support_pam4_speeds = le16toh(resp->support_pam4_speeds);
2950 	link_info->auto_pam4_link_speeds = le16toh(resp->auto_pam4_link_speed_mask);
2951 	link_info->force_pam4_link_speed = le16toh(resp->force_pam4_link_speed);
2952 
2953 	if (softc->hwrm_spec_code >= 0x10504)
2954 		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
2955 
2956 	link_info->support_speeds2 = le16toh(resp->support_speeds2);
2957 	link_info->auto_link_speeds2 = le16toh(resp->auto_link_speeds2);
2958 	link_info->force_link_speeds2 = le16toh(resp->force_link_speeds2);
2959 
2960 exit:
2961 	BNXT_HWRM_UNLOCK(softc);
2962 	return rc;
2963 }
2964 
2965 static bool
2966 bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
2967 {
2968 	if (!resp->supported_speeds_auto_mode &&
2969 	    !resp->supported_speeds_force_mode &&
2970 	    !resp->supported_pam4_speeds_auto_mode &&
2971 	    !resp->supported_pam4_speeds_force_mode &&
2972 	    !resp->supported_speeds2_auto_mode &&
2973 	    !resp->supported_speeds2_force_mode)
2974 		return true;
2975 
2976 	return false;
2977 }
2978 
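/*
 * Query PHY capabilities (PORT_PHY_QCAPS): cache the PHY flags, EEE LPI
 * timers and the supported auto/forced speed masks (including the PAM4 and
 * speeds2 variants), and detect when the link has been administratively
 * disabled or re-enabled.
 */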
2979 int bnxt_hwrm_phy_qcaps(struct bnxt_softc *softc)
2980 {
2981 	struct bnxt_link_info *link_info = &softc->link_info;
2982 	struct hwrm_port_phy_qcaps_output *resp =
2983 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2984 	struct hwrm_port_phy_qcaps_input req = {};
2985 	int rc;
2986 
2987 	if (softc->hwrm_spec_code < 0x10201)
2988 		return 0;
2989 
2990 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCAPS);
2991 
2992 	BNXT_HWRM_LOCK(softc);
2993 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2994 	if (rc)
2995 		goto exit;
2996 
2997 	softc->phy_flags = resp->flags | (resp->flags2 << 8);
2998 	if (resp->flags & HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EEE_SUPPORTED) {
2999 
3000 		softc->lpi_tmr_lo = le32toh(resp->tx_lpi_timer_low) &
3001 					    HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_MASK;
3002 		softc->lpi_tmr_hi = le32toh(resp->valid_tx_lpi_timer_high) &
3003 					    HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_MASK;
3004 	}
3005 
3006 	if (softc->hwrm_spec_code >= 0x10a01) {
3007 		if (bnxt_phy_qcaps_no_speed(resp)) {
3008 			link_info->phy_state = BNXT_PHY_STATE_DISABLED;
3009 			device_printf(softc->dev, "Ethernet link disabled\n");
3010 		} else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
3011 			link_info->phy_state = BNXT_PHY_STATE_ENABLED;
3012 			device_printf(softc->dev, "Ethernet link enabled\n");
3013 			/* Phy re-enabled, reprobe the speeds */
3014 			link_info->support_auto_speeds = 0;
3015 			link_info->support_pam4_auto_speeds = 0;
3016 			link_info->support_auto_speeds2 = 0;
3017 		}
3018 	}
3019 	if (resp->supported_speeds_auto_mode)
3020 		link_info->support_auto_speeds =
3021 			le16toh(resp->supported_speeds_auto_mode);
3022 	if (resp->supported_speeds_force_mode)
3023 		link_info->support_force_speeds =
3024 			le16toh(resp->supported_speeds_force_mode);
3025 	if (resp->supported_pam4_speeds_auto_mode)
3026 		link_info->support_pam4_auto_speeds =
3027 			le16toh(resp->supported_pam4_speeds_auto_mode);
3028 	if (resp->supported_pam4_speeds_force_mode)
3029 		link_info->support_pam4_force_speeds =
3030 			le16toh(resp->supported_pam4_speeds_force_mode);
3031 
3032 	if (resp->supported_speeds2_auto_mode)
3033 		link_info->support_auto_speeds2 =
3034 			le16toh(resp->supported_speeds2_auto_mode);
3035 
3036 	if (resp->supported_speeds2_force_mode)
3037 		link_info->support_force_speeds2 =
3038 			le16toh(resp->supported_speeds2_force_mode);
3039 
3040 exit:
3041 	BNXT_HWRM_UNLOCK(softc);
3042 	return rc;
3043 }
3044 
3045 uint16_t
3046 bnxt_hwrm_get_wol_fltrs(struct bnxt_softc *softc, uint16_t handle)
3047 {
3048 	struct hwrm_wol_filter_qcfg_input req = {0};
3049 	struct hwrm_wol_filter_qcfg_output *resp =
3050 			(void *)softc->hwrm_cmd_resp.idi_vaddr;
3051 	uint16_t next_handle = 0;
3052 	int rc;
3053 
3054 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_QCFG);
3055 	req.port_id = htole16(softc->pf.port_id);
3056 	req.handle = htole16(handle);
3057 	rc = hwrm_send_message(softc, &req, sizeof(req));
3058 	if (!rc) {
3059 		next_handle = le16toh(resp->next_handle);
3060 		if (next_handle != 0) {
3061 			if (resp->wol_type ==
3062 				HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT) {
3063 				softc->wol = 1;
3064 				softc->wol_filter_id = resp->wol_filter_id;
3065 			}
3066 		}
3067 	}
3068 	return next_handle;
3069 }
3070 
3071 int
3072 bnxt_hwrm_alloc_wol_fltr(struct bnxt_softc *softc)
3073 {
3074 	struct hwrm_wol_filter_alloc_input req = {0};
3075 	struct hwrm_wol_filter_alloc_output *resp =
3076 		(void *)softc->hwrm_cmd_resp.idi_vaddr;
3077 	int rc;
3078 
3079 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_ALLOC);
3080 	req.port_id = htole16(softc->pf.port_id);
3081 	req.wol_type = HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT;
3082 	req.enables =
3083 		htole32(HWRM_WOL_FILTER_ALLOC_INPUT_ENABLES_MAC_ADDRESS);
3084 	memcpy(req.mac_address, softc->func.mac_addr, ETHER_ADDR_LEN);
3085 	rc = hwrm_send_message(softc, &req, sizeof(req));
3086 	if (!rc)
3087 		softc->wol_filter_id = resp->wol_filter_id;
3088 
3089 	return rc;
3090 }
3091 
3092 int
3093 bnxt_hwrm_free_wol_fltr(struct bnxt_softc *softc)
3094 {
3095 	struct hwrm_wol_filter_free_input req = {0};
3096 
3097 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_FREE);
3098 	req.port_id = htole16(softc->pf.port_id);
3099 	req.enables =
3100 		htole32(HWRM_WOL_FILTER_FREE_INPUT_ENABLES_WOL_FILTER_ID);
3101 	req.wol_filter_id = softc->wol_filter_id;
3102 	return hwrm_send_message(softc, &req, sizeof(req));
3103 }
3104 
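/*
 * Interrupt coalescing helper: the low 16 bits of max_frames/buf_tmrs hold
 * the normal values and the high 16 bits the during-interrupt values used
 * to fill a RING_CMPL_RING_CFG_AGGINT_PARAMS request.
 */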
3105 static void bnxt_hwrm_set_coal_params(struct bnxt_softc *softc, uint32_t max_frames,
3106 	uint32_t buf_tmrs, uint16_t flags,
3107 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3108 {
3109 	req->flags = htole16(flags);
3110 	req->num_cmpl_dma_aggr = htole16((uint16_t)max_frames);
3111 	req->num_cmpl_dma_aggr_during_int = htole16(max_frames >> 16);
3112 	req->cmpl_aggr_dma_tmr = htole16((uint16_t)buf_tmrs);
3113 	req->cmpl_aggr_dma_tmr_during_int = htole16(buf_tmrs >> 16);
3114 	/* Minimum time between 2 interrupts set to buf_tmr x 2 */
3115 	req->int_lat_tmr_min = htole16((uint16_t)buf_tmrs * 2);
3116 	req->int_lat_tmr_max = htole16((uint16_t)buf_tmrs * 4);
3117 	req->num_cmpl_aggr_int = htole16((uint16_t)max_frames * 4);
3118 }
3119 
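/*
 * Program interrupt coalescing for the completion rings.  Two request
 * templates are built from the RX and TX coalescing settings in the softc,
 * but only the RX template is currently sent, once per RX queue's
 * completion ring.
 */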
3120 int bnxt_hwrm_set_coal(struct bnxt_softc *softc)
3121 {
3122 	int i, rc = 0;
3123 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
3124 							   req_tx = {0}, *req;
3125 	uint16_t max_buf, max_buf_irq;
3126 	uint16_t buf_tmr, buf_tmr_irq;
3127 	uint32_t flags;
3128 
3129 	bnxt_hwrm_cmd_hdr_init(softc, &req_rx,
3130 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
3131 	bnxt_hwrm_cmd_hdr_init(softc, &req_tx,
3132 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
3133 
3134 	/* Each rx completion (2 records) should be DMAed immediately.
3135 	 * DMA 1/4 of the completion buffers at a time.
3136 	 */
3137 	max_buf = min_t(uint16_t, softc->rx_coal_frames / 4, 2);
3138 	/* max_buf must not be zero */
3139 	max_buf = clamp_t(uint16_t, max_buf, 1, 63);
3140 	max_buf_irq = clamp_t(uint16_t, softc->rx_coal_frames_irq, 1, 63);
3141 	buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs);
3142 	/* buf timer set to 1/4 of interrupt timer */
3143 	buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
3144 	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs_irq);
3145 	buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
3146 
3147 	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
3148 
3149 	/* RING_IDLE generates more IRQs for lower latency.  Enable it only
3150 	 * if coal_usecs is less than 25 us.
3151 	 */
3152 	if (softc->rx_coal_usecs < 25)
3153 		flags |= HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3154 
3155 	bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
3156 				  buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
3157 
3158 	/* max_buf must not be zero */
3159 	max_buf = clamp_t(uint16_t, softc->tx_coal_frames, 1, 63);
3160 	max_buf_irq = clamp_t(uint16_t, softc->tx_coal_frames_irq, 1, 63);
3161 	buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs);
3162 	/* buf timer set to 1/4 of interrupt timer */
3163 	buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
3164 	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs_irq);
3165 	buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
3166 	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
3167 	bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
3168 				  buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
3169 
3170 	for (i = 0; i < softc->nrxqsets; i++) {
3171 
3172 		req = &req_rx;
3173 		req->ring_id = htole16(softc->grp_info[i].cp_ring_id);
3174 
3175 		rc = hwrm_send_message(softc, req, sizeof(*req));
3176 		if (rc)
3177 			break;
3178 	}
3179 	return rc;
3180 }
3181 
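/*
 * Debug helper: query the firmware for the current producer and consumer
 * indices of the given ring.  Both are reported as 0xffffffff if the query
 * fails.
 */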
3182 void bnxt_hwrm_ring_info_get(struct bnxt_softc *softc, uint8_t ring_type,
3183 			     uint32_t ring_id, uint32_t *prod, uint32_t *cons)
3184 {
3185 	hwrm_dbg_ring_info_get_input_t req = {0};
3186 	hwrm_dbg_ring_info_get_output_t *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
3187 	int rc = 0;
3188 
3189 	*prod = *cons = 0xffffffff;
3190 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_DBG_RING_INFO_GET);
3191 	req.ring_type = ring_type;
3192 	req.fw_ring_id = htole32(ring_id);
3193 	rc = hwrm_send_message(softc, &req, sizeof(req));
3194 	if (!rc) {
3195 		*prod = resp->producer_index;
3196 		*cons = resp->consumer_index;
3197 	}
3198 
3199 	return;
3200 }
3201