xref: /freebsd/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c (revision fec0e2064818f991867c9851a837012ea31774da)
1 /*-
2  * Broadcom NetXtreme-C/E network driver.
3  *
4  * Copyright (c) 2016 Broadcom, All Rights Reserved.
5  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include <sys/endian.h>
31 #include <linux/pci.h>
32 
33 #include "bnxt.h"
34 #include "bnxt_hwrm.h"
35 #include "hsi_struct_def.h"
36 
37 static int bnxt_hwrm_err_map(uint16_t err);
38 static inline int _is_valid_ether_addr(uint8_t *);
39 static inline void get_random_ether_addr(uint8_t *);
40 static void	bnxt_hwrm_set_link_common(struct bnxt_softc *softc,
41 		    struct hwrm_port_phy_cfg_input *req);
42 static void	bnxt_hwrm_set_pause_common(struct bnxt_softc *softc,
43 		    struct hwrm_port_phy_cfg_input *req);
44 static void	bnxt_hwrm_set_eee(struct bnxt_softc *softc,
45 		    struct hwrm_port_phy_cfg_input *req);
46 
47 /* NVRAM operations have a five minute timeout */
48 #define BNXT_NVM_TIMEO	(5 * 60 * 1000)
49 
50 #define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
51 	BNXT_RX_STATS_EXT_OFFSET(counter##_cos0)
52 
53 #define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
54 	 BNXT_TX_STATS_EXT_OFFSET(counter##_cos0)
55 
56 #define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
57 	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
58 	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
59 	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
60 	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
61 	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
62 	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
63 	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
64 	BNXT_RX_STATS_PRI_ENTRY(counter, 7)
65 
66 #define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
67 	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
68 	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
69 	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
70 	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
71 	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
72 	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
73 	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
74 	BNXT_TX_STATS_PRI_ENTRY(counter, 7)
75 
76 
77 long bnxt_rx_bytes_pri_arr_base_off[] = {BNXT_RX_STATS_PRI_ENTRIES(rx_bytes)};
78 long bnxt_rx_pkts_pri_arr_base_off[] = {BNXT_RX_STATS_PRI_ENTRIES(rx_packets)};
79 long bnxt_tx_bytes_pri_arr_base_off[] = {BNXT_TX_STATS_PRI_ENTRIES(tx_bytes)};
80 long bnxt_tx_pkts_pri_arr_base_off[] = {BNXT_TX_STATS_PRI_ENTRIES(tx_packets)};
81 
82 static int
83 bnxt_hwrm_err_map(uint16_t err)
84 {
85 	int rc;
86 
87 	switch (err) {
88 	case HWRM_ERR_CODE_SUCCESS:
89 		return 0;
90 	case HWRM_ERR_CODE_INVALID_PARAMS:
91 	case HWRM_ERR_CODE_INVALID_FLAGS:
92 	case HWRM_ERR_CODE_INVALID_ENABLES:
93 		return EINVAL;
94 	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
95 		return EACCES;
96 	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
97 		return ENOMEM;
98 	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
99 		return ENOSYS;
100 	case HWRM_ERR_CODE_FAIL:
101 		return EIO;
102 	case HWRM_ERR_CODE_HWRM_ERROR:
103 	case HWRM_ERR_CODE_UNKNOWN_ERR:
104 	default:
105 		return EDOOFUS;
106 	}
107 
108 	return rc;
109 }
110 
111 int
112 bnxt_alloc_hwrm_dma_mem(struct bnxt_softc *softc)
113 {
114 	int rc;
115 
116 	rc = iflib_dma_alloc(softc->ctx, PAGE_SIZE, &softc->hwrm_cmd_resp,
117 	    BUS_DMA_NOWAIT);
118 	return rc;
119 }
120 
121 void
122 bnxt_free_hwrm_dma_mem(struct bnxt_softc *softc)
123 {
124 	if (softc->hwrm_cmd_resp.idi_vaddr)
125 		iflib_dma_free(&softc->hwrm_cmd_resp);
126 	softc->hwrm_cmd_resp.idi_vaddr = NULL;
127 	return;
128 }
129 
130 void
131 bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
132     uint16_t req_type)
133 {
134 	struct input *req = request;
135 
136 	req->req_type = htole16(req_type);
137 	req->cmpl_ring = 0xffff;
138 	req->target_id = 0xffff;
139 	req->resp_addr = htole64(softc->hwrm_cmd_resp.idi_paddr);
140 }
141 
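/*
 * Illustrative sketch (not part of the driver): every HWRM command in this
 * file follows the same pattern -- zero a stack request structure, fill in
 * the common header with bnxt_hwrm_cmd_hdr_init(), then pass it to
 * hwrm_send_message() (or to _hwrm_send_message() while holding
 * BNXT_HWRM_LOCK).  For example, bnxt_hwrm_func_reset() below reduces to
 * roughly:
 *
 *	struct hwrm_func_reset_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
 *	return hwrm_send_message(softc, &req, sizeof(req));
 */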
142 int
143 _hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
144 {
145 	struct input *req = msg;
146 	struct hwrm_err_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
147 	uint32_t *data = msg;
148 	int i;
149 	uint8_t *valid;
150 	uint16_t err;
151 	uint16_t max_req_len = BNXT_HWRM_MAX_REQ_LEN;
152 	struct hwrm_short_input short_input = {0};
153 
154 	/* TODO: DMASYNC in here. */
155 	req->seq_id = htole16(softc->hwrm_cmd_seq++);
156 	memset(resp, 0, PAGE_SIZE);
157 
158 	if (BNXT_NO_FW_ACCESS(softc) &&
159 	    (req->req_type != HWRM_FUNC_RESET && req->req_type != HWRM_VER_GET))
160 		return -EINVAL;
161 
162 	if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
163 	    msg_len > BNXT_HWRM_MAX_REQ_LEN) {
164 		void *short_cmd_req = softc->hwrm_short_cmd_req_addr.idi_vaddr;
165 		uint16_t max_msg_len;
166 
167 		/* Set boundary for maximum extended request length for short
168 		 * cmd format. If passed up from device use the max supported
169 		 * internal req length.
170 		 */
171 
172 		max_msg_len = softc->hwrm_max_ext_req_len;
173 
174 
175 		memcpy(short_cmd_req, req, msg_len);
176 		if (msg_len < max_msg_len)
177 			memset((uint8_t *) short_cmd_req + msg_len, 0,
178 				max_msg_len - msg_len);
179 
180 		short_input.req_type = req->req_type;
181 		short_input.signature =
182 		    htole16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
183 		short_input.size = htole16(msg_len);
184 		short_input.req_addr =
185 		    htole64(softc->hwrm_short_cmd_req_addr.idi_paddr);
186 
187 		data = (uint32_t *)&short_input;
188 		msg_len = sizeof(short_input);
189 
190 		/* Sync memory write before updating doorbell */
191 		wmb();
192 
193 		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
194 	}
195 
196 	/* Write request msg to hwrm channel */
197 	for (i = 0; i < msg_len; i += 4) {
198 		bus_space_write_4(softc->hwrm_bar.tag,
199 				  softc->hwrm_bar.handle,
200 				  i, *data);
201 		data++;
202 	}
203 
204 	/* Clear to the end of the request buffer */
205 	for (i = msg_len; i < max_req_len; i += 4)
206 		bus_space_write_4(softc->hwrm_bar.tag, softc->hwrm_bar.handle,
207 		    i, 0);
208 
209 	/* Ring channel doorbell */
210 	bus_space_write_4(softc->hwrm_bar.tag,
211 			  softc->hwrm_bar.handle,
212 			  0x100, htole32(1));
213 
214 	/* Check if response len is updated */
215 	for (i = 0; i < softc->hwrm_cmd_timeo; i++) {
216 		if (resp->resp_len && resp->resp_len <= 4096)
217 			break;
218 		DELAY(1000);
219 	}
220 	if (i >= softc->hwrm_cmd_timeo) {
221 		device_printf(softc->dev,
222 		    "Timeout sending %s: (timeout: %u) seq: %d\n",
223 		    GET_HWRM_REQ_TYPE(req->req_type), softc->hwrm_cmd_timeo,
224 		    le16toh(req->seq_id));
225 		return ETIMEDOUT;
226 	}
227 	/* Last byte of resp contains the valid key */
228 	valid = (uint8_t *)resp + resp->resp_len - 1;
229 	for (i = 0; i < softc->hwrm_cmd_timeo; i++) {
230 		if (*valid == HWRM_RESP_VALID_KEY)
231 			break;
232 		DELAY(1000);
233 	}
234 	if (i >= softc->hwrm_cmd_timeo) {
235 		device_printf(softc->dev, "Timeout sending %s: "
236 		    "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n",
237 		    GET_HWRM_REQ_TYPE(req->req_type),
238 		    softc->hwrm_cmd_timeo, le16toh(req->req_type),
239 		    le16toh(req->seq_id), msg_len,
240 		    *valid);
241 		return ETIMEDOUT;
242 	}
243 
244 	err = le16toh(resp->error_code);
245 	if (err) {
246 		/* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */
247 		if (err != HWRM_ERR_CODE_FAIL) {
248 			device_printf(softc->dev,
249 			    "%s command returned %s error.\n",
250 			    GET_HWRM_REQ_TYPE(req->req_type),
251 			    GET_HWRM_ERROR_CODE(err));
252 		}
253 		return bnxt_hwrm_err_map(err);
254 	}
255 
256 	return 0;
257 }
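/*
 * Summary of the transaction above (descriptive only): the request is copied
 * into the HWRM communication channel BAR 32 bits at a time, the rest of the
 * channel is zeroed, and the doorbell at offset 0x100 is rung.  Completion is
 * then detected by polling resp_len and finally the "valid" byte at the end
 * of the response, each in 1 ms DELAY() steps bounded by hwrm_cmd_timeo.
 */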
258 
259 int
260 hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
261 {
262 	int rc;
263 
264 	BNXT_HWRM_LOCK(softc);
265 	rc = _hwrm_send_message(softc, msg, msg_len);
266 	BNXT_HWRM_UNLOCK(softc);
267 	return rc;
268 }
269 
270 int
271 bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc, uint32_t path_dir)
272 {
273 	int rc = 0;
274 	struct hwrm_queue_qportcfg_input req = {0};
275 	struct hwrm_queue_qportcfg_output *resp =
276 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
277 	uint8_t max_tc, max_lltc, *max_q;
278 	uint8_t queue_profile, queue_id;
279 	struct bnxt_queue_info *q_info;
280 	uint8_t i, j, *qptr, *q_ids;
281 	bool no_rdma;
282 
283 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG);
284 
285 	BNXT_HWRM_LOCK(softc);
286 	rc = _hwrm_send_message(softc, &req, sizeof(req));
287 	if (rc)
288 		goto qportcfg_exit;
289 
290 	if (!resp->max_configurable_queues) {
291 		rc = -EINVAL;
292 		goto qportcfg_exit;
293 	}
294 
295 	if (resp->queue_cfg_info & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG) {
296 		softc->is_asym_q = true;
297 		/* bnxt_init_cosq_names(softc, path_dir); */
298 	} else {
299 		softc->is_asym_q = false;
300 		/* bnxt_free_stats_cosqnames_mem(softc); */
301 	}
302 
303 	max_tc = min_t(uint8_t, resp->max_configurable_queues, BNXT_MAX_QUEUE);
304 	max_lltc = resp->max_configurable_lossless_queues;
305 
306 	/*
307 	 * No RDMA support yet.
308 	 * no_rdma = !(softc->flags & BNXT_FLAG_ROCE_CAP);
309 	 */
310 	no_rdma = true;
311 	qptr = &resp->queue_id0;
312 
313 	if (path_dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
314 		q_info = softc->tx_q_info;
315 		q_ids = softc->tx_q_ids;
316 		max_q = &softc->tx_max_q;
317 	} else {
318 		q_info = softc->rx_q_info;
319 		q_ids = softc->rx_q_ids;
320 		max_q = &softc->rx_max_q;
321 	}
322 
323 	for (i = 0, j = 0; i < max_tc; i++) {
324 		queue_id = *qptr;
325 		qptr++;
326 
327 		queue_profile = *qptr;
328 		qptr++;
329 
330 		q_info[j].queue_id = queue_id;
331 		q_info[j].queue_profile = queue_profile;
332 		q_ids[i] = queue_id;
333 
334 		softc->tc_to_qidx[j] = j;
335 
336 		if (!BNXT_CNPQ(q_info[j].queue_profile) ||
337 		    (no_rdma && BNXT_PF(softc)))
338 			j++;
339 	}
340 	*max_q = max_tc;
341 	max_tc = max_t(uint8_t, j, 1);
342 	softc->max_tc = softc->max_tc ? min(softc->max_tc, max_tc) : max_tc;
343 	softc->max_lltc = softc->max_lltc ? min(softc->max_lltc, max_lltc) : max_lltc;
344 
345 	if (softc->max_lltc > softc->max_tc)
346 		softc->max_lltc = softc->max_tc;
347 
348 qportcfg_exit:
349 	BNXT_HWRM_UNLOCK(softc);
350 	return rc;
351 }
352 
353 static int bnxt_alloc_all_ctx_pg_info(struct bnxt_softc *softc, int ctx_max)
354 {
355 	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
356 	u16 type;
357 
358 	for (type = 0; type < ctx_max; type++) {
359 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
360 		int n = 1;
361 
362 		if (!ctxm->max_entries || ctxm->pg_info)
363 			continue;
364 
365 		if (ctxm->instance_bmap)
366 			n = hweight32(ctxm->instance_bmap);
367 		ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_ATOMIC);
368 		if (!ctxm->pg_info)
369 			return -ENOMEM;
370 	}
371 	return 0;
372 }
373 
374 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
375 				      u8 init_val, u8 init_offset,
376 				      bool init_mask_set)
377 {
378 	ctxm->init_value = init_val;
379 	ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
380 	if (init_mask_set)
381 		ctxm->init_offset = init_offset * 4;
382 	else
383 		ctxm->init_value = 0;
384 }
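/*
 * Note: the firmware is assumed to report the context-init offset in 4-byte
 * words (hence the "* 4" above); when the init-valid bit is not set, the
 * offset stays at BNXT_CTX_INIT_INVALID_OFFSET and the init value is forced
 * to zero.
 */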
385 
386 #define BNXT_CTX_INIT_VALID(flags)      \
387         (!!((flags) &                   \
388             HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_ENABLE_CTX_KIND_INIT))
389 
390 static int
391 bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt_softc *softc)
392 {
393 	struct hwrm_func_backing_store_qcaps_v2_input req = {0};
394 	struct hwrm_func_backing_store_qcaps_v2_output *resp =
395 		(void *)softc->hwrm_cmd_resp.idi_vaddr;
396 	struct bnxt_ctx_mem_info *ctx = NULL;
397 	u16 type;
398 	int rc;
399 
400 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
401 
402 	ctx = malloc(sizeof(*ctx), M_DEVBUF, M_NOWAIT | M_ZERO);
403 	if (!ctx)
404 		return -ENOMEM;
405 
406 	softc->ctx_mem = ctx;
407 
408 	BNXT_HWRM_LOCK(softc);
409 	for (type = 0; type < BNXT_CTX_V2_MAX; ) {
410 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
411 		u8 init_val, init_off, i;
412 		__le32 *p;
413 		u32 flags;
414 
415 		req.type = cpu_to_le16(type);
416 		rc = _hwrm_send_message(softc, &req, sizeof(req));
417 		if (rc)
418 			goto ctx_done;
419 		flags = le32_to_cpu(resp->flags);
420 		type = le16_to_cpu(resp->next_valid_type);
421 		if (!(flags & HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_TYPE_VALID))
422 			continue;
423 
424 		ctxm->type = le16_to_cpu(resp->type);
425 		ctxm->flags = flags;
426 
427 		ctxm->entry_size = le16_to_cpu(resp->entry_size);
428 		ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
429 		ctxm->entry_multiple = resp->entry_multiple;
430 		ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
431 		ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
432 		init_val = resp->ctx_init_value;
433 		init_off = resp->ctx_init_offset;
434 		bnxt_init_ctx_initializer(ctxm, init_val, init_off,
435 					  BNXT_CTX_INIT_VALID(flags));
436 		ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
437 					      BNXT_MAX_SPLIT_ENTRY);
438 		for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
439 		     i++, p++)
440 			ctxm->split[i] = le32_to_cpu(*p);
441 	}
442 	rc = bnxt_alloc_all_ctx_pg_info(softc, BNXT_CTX_V2_MAX);
443 
444 ctx_done:
445 	BNXT_HWRM_UNLOCK(softc);
446 	return rc;
447 }
448 
449 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt_softc *softc)
450 {
451 	struct hwrm_func_backing_store_qcaps_input req = {0};
452 	struct hwrm_func_backing_store_qcaps_output *resp =
453 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
454 	int rc;
455 
456 	if (softc->hwrm_spec_code < 0x10902 || softc->ctx_mem)
457 		return 0;
458 
459 	if (BNXT_CHIP_P7(softc)) {
460 		if (softc->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
461 			return bnxt_hwrm_func_backing_store_qcaps_v2(softc);
462 	}
463 
464 	if (BNXT_VF(softc))
465 		return 0;
466 
467 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_QCAPS);
468 	BNXT_HWRM_LOCK(softc);
469 	rc = _hwrm_send_message(softc, &req, sizeof(req));
470 	if (!rc) {
471 		struct bnxt_ctx_mem_type *ctxm;
472 		struct bnxt_ctx_mem_info *ctx;
473 		u8 init_val, init_idx = 0;
474 		u16 init_mask;
475 
476 		ctx = softc->ctx_mem;
477 		if (!ctx) {
478 			ctx = malloc(sizeof(*ctx), M_DEVBUF, M_NOWAIT | M_ZERO);
479 			if (!ctx) {
480 				rc = -ENOMEM;
481 				goto ctx_err;
482 			}
483 			softc->ctx_mem = ctx;
484 		}
485 		init_val = resp->ctx_kind_initializer;
486 		init_mask = le16_to_cpu(resp->ctx_init_mask);
487 
488 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
489 		ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
490 		ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
491 		ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
492 		ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
493 		bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
494 					  (init_mask & (1 << init_idx++)) != 0);
495 
496 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
497 		ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
498 		ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
499 		ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
500 		bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
501 					  (init_mask & (1 << init_idx++)) != 0);
502 
503 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
504 		ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
505 		ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
506 		ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
507 		bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
508 					  (init_mask & (1 << init_idx++)) != 0);
509 
510 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
511 		ctxm->vnic_entries = le32_to_cpu(resp->vnic_max_vnic_entries);
512 		ctxm->max_entries = ctxm->vnic_entries +
513 			le16_to_cpu(resp->vnic_max_ring_table_entries);
514 		ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
515 		bnxt_init_ctx_initializer(ctxm, init_val,
516 					  resp->vnic_init_offset,
517 					  (init_mask & (1 << init_idx++)) != 0);
518 
519 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
520 		ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
521 		ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
522 		bnxt_init_ctx_initializer(ctxm, init_val,
523 					  resp->stat_init_offset,
524 					  (init_mask & (1 << init_idx++)) != 0);
525 
526 		ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
527 		ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
528 		ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
529 		ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
530 		ctxm->entry_multiple = resp->tqm_entries_multiple;
531 		if (!ctxm->entry_multiple)
532 			ctxm->entry_multiple = 1;
533 
534 		memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
535 
536 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
537 		ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
538 		ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
539 		ctxm->mrav_num_entries_units =
540 			le16_to_cpu(resp->mrav_num_entries_units);
541 		bnxt_init_ctx_initializer(ctxm, init_val,
542 					  resp->mrav_init_offset,
543 					  (init_mask & (1 << init_idx++)) != 0);
544 
545 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
546 		ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
547 		ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
548 
549 		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
550 		if (!ctx->tqm_fp_rings_count)
551 			ctx->tqm_fp_rings_count = softc->tx_max_q;
552 		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_LEGACY_RINGS)
553 			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_LEGACY_RINGS;
554 		if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
555 		    softc->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
556 			ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
557 			if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
558 				ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
559 		}
560 		ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
561 		memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
562 		ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
563 
564 		rc = bnxt_alloc_all_ctx_pg_info(softc, BNXT_CTX_MAX);
565 	} else {
566 		rc = 0;
567 	}
568 ctx_err:
569 	BNXT_HWRM_UNLOCK(softc);
570 	return rc;
571 }
572 
573 #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES                 \
574         (HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |                \
575          HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ |               \
576          HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ |                \
577          HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC |              \
578          HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
579 
580 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, uint8_t *pg_attr,
581 				  uint64_t *pg_dir)
582 {
583 	if (!rmem->nr_pages)
584 		return;
585 
586 	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
587 
588 	if (rmem->depth >= 1) {
589 		if (rmem->depth == 2)
590 			*pg_attr |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2;
591 		else
592 			*pg_attr |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1;
593 		*pg_dir = htole64(rmem->pg_tbl.idi_paddr);
594 	} else {
595 		*pg_dir = htole64(rmem->pg_arr[0].idi_paddr);
596 	}
597 }
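/*
 * Interpretation of the depth encoding above (a reading of this helper, not
 * a normative HWRM statement): depth 0 passes the address of the first data
 * page directly, depth 1 points the page directory at a one-level page
 * table, and depth 2 adds a second level of indirection; the LVL_1/LVL_2
 * bits in the attribute byte tell the firmware how to walk the structure.
 */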
598 
599 int bnxt_hwrm_func_backing_store_cfg(struct bnxt_softc *softc, uint32_t enables)
600 {
601 	struct hwrm_func_backing_store_cfg_input req = {0};
602 	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
603 	struct bnxt_ctx_pg_info *ctx_pg;
604 	struct bnxt_ctx_mem_type *ctxm;
605 	u32 req_len = sizeof(req);
606 	__le32 *num_entries;
607 	u32 ena, flags = 0;
608 	__le64 *pg_dir;
609 	u8 *pg_attr;
610 	int i;
611 
612 	if (!ctx)
613 		return 0;
614 
615 	if (req_len > softc->hwrm_max_ext_req_len)
616 		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
617 
618 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_CFG);
619 	req.enables = htole32(enables);
620 
621 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
622 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
623 		ctx_pg = ctxm->pg_info;
624 		req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
625 		req.qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
626 		req.qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
627 		req.qp_entry_size = cpu_to_le16(ctxm->entry_size);
628 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
629 				&req.qpc_pg_size_qpc_lvl,
630 				&req.qpc_page_dir);
631 	}
632 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
633 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
634 		ctx_pg = ctxm->pg_info;
635 		req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
636 		req.srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
637 		req.srq_entry_size = cpu_to_le16(ctxm->entry_size);
638 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
639 				&req.srq_pg_size_srq_lvl,
640 				&req.srq_page_dir);
641 	}
642 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
643 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
644 		ctx_pg = ctxm->pg_info;
645 		req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
646 		req.cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
647 		req.cq_entry_size = cpu_to_le16(ctxm->entry_size);
648 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
649 				      &req.cq_pg_size_cq_lvl,
650 				&req.cq_page_dir);
651 	}
652 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV) {
653 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
654 		ctx_pg = ctxm->pg_info;
655 		req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
656 		if (ctxm->mrav_num_entries_units)
657 			flags |=
658 			HWRM_FUNC_BACKING_STORE_CFG_INPUT_FLAGS_MRAV_RESERVATION_SPLIT;
659 		req.mrav_entry_size = cpu_to_le16(ctxm->entry_size);
660 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
661 				&req.mrav_pg_size_mrav_lvl,
662 				&req.mrav_page_dir);
663 	}
664 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM) {
665 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
666 		ctx_pg = ctxm->pg_info;
667 		req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
668 		req.tim_entry_size = cpu_to_le16(ctxm->entry_size);
669 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
670 				&req.tim_pg_size_tim_lvl,
671 				&req.tim_page_dir);
672 	}
673 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
674 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
675 		ctx_pg = ctxm->pg_info;
676 		req.vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
677 		req.vnic_num_ring_table_entries =
678 			cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
679 		req.vnic_entry_size = cpu_to_le16(ctxm->entry_size);
680 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
681 				&req.vnic_pg_size_vnic_lvl,
682 				&req.vnic_page_dir);
683 	}
684 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
685 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
686 		ctx_pg = ctxm->pg_info;
687 		req.stat_num_entries = cpu_to_le32(ctxm->max_entries);
688 		req.stat_entry_size = cpu_to_le16(ctxm->entry_size);
689 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
690 				&req.stat_pg_size_stat_lvl,
691 				&req.stat_page_dir);
692 	}
693 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
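	/*
	 * The loop below assumes the tqm_sp_* and tqm_ring0..7 fields are
	 * laid out consecutively in struct hwrm_func_backing_store_cfg_input,
	 * so num_entries/pg_attr/pg_dir can be advanced one slot per ring.
	 */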
694 	for (i = 0, num_entries = &req.tqm_sp_num_entries,
695 			pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
696 			pg_dir = &req.tqm_sp_page_dir,
697 	     ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP,
698 	     ctx_pg = ctxm->pg_info;
699 	     i < BNXT_MAX_TQM_LEGACY_RINGS;
700 	     ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
701 	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
702 		if (!(enables & ena))
703 			continue;
704 
705 		req.tqm_entry_size = cpu_to_le16(ctxm->entry_size);
706 		*num_entries = cpu_to_le32(ctx_pg->entries);
707 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
708 	}
709 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
710 		pg_attr = &req.tqm_ring8_pg_size_tqm_ring_lvl;
711 		pg_dir = &req.tqm_ring8_page_dir;
712 		ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[8];
713 		req.tqm_ring8_num_entries = cpu_to_le32(ctx_pg->entries);
714 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
715 	}
716 	req.flags = cpu_to_le32(flags);
717 	return hwrm_send_message(softc, &req, sizeof(req));
718 }
719 
720 int bnxt_hwrm_func_resc_qcaps(struct bnxt_softc *softc, bool all)
721 {
722 	struct hwrm_func_resource_qcaps_output *resp =
723 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
724 	struct hwrm_func_resource_qcaps_input req = {0};
725 	struct bnxt_hw_resc *hw_resc = &softc->hw_resc;
726 	int rc;
727 
728 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESOURCE_QCAPS);
729 	req.fid = htole16(0xffff);
730 
731 	BNXT_HWRM_LOCK(softc);
732 	rc = _hwrm_send_message(softc, &req, sizeof(req));
733 	if (rc) {
734 		rc = -EIO;
735 		goto hwrm_func_resc_qcaps_exit;
736 	}
737 
738 	hw_resc->max_tx_sch_inputs = le16toh(resp->max_tx_scheduler_inputs);
739 	if (!all)
740 		goto hwrm_func_resc_qcaps_exit;
741 
742 	hw_resc->min_rsscos_ctxs = le16toh(resp->min_rsscos_ctx);
743 	hw_resc->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
744 	hw_resc->min_cp_rings = le16toh(resp->min_cmpl_rings);
745 	hw_resc->max_cp_rings = le16toh(resp->max_cmpl_rings);
746 	hw_resc->min_tx_rings = le16toh(resp->min_tx_rings);
747 	hw_resc->max_tx_rings = le16toh(resp->max_tx_rings);
748 	hw_resc->min_rx_rings = le16toh(resp->min_rx_rings);
749 	hw_resc->max_rx_rings = le16toh(resp->max_rx_rings);
750 	hw_resc->min_hw_ring_grps = le16toh(resp->min_hw_ring_grps);
751 	hw_resc->max_hw_ring_grps = le16toh(resp->max_hw_ring_grps);
752 	hw_resc->min_l2_ctxs = le16toh(resp->min_l2_ctxs);
753 	hw_resc->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
754 	hw_resc->min_vnics = le16toh(resp->min_vnics);
755 	hw_resc->max_vnics = le16toh(resp->max_vnics);
756 	hw_resc->min_stat_ctxs = le16toh(resp->min_stat_ctx);
757 	hw_resc->max_stat_ctxs = le16toh(resp->max_stat_ctx);
758 
759 	if (BNXT_CHIP_P5_PLUS(softc)) {
760 		hw_resc->max_nqs = le16toh(resp->max_msix);
761 		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
762 	}
763 
764 hwrm_func_resc_qcaps_exit:
765 	BNXT_HWRM_UNLOCK(softc);
766 	return rc;
767 }
768 
769 int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt_softc *softc,
770 					struct bnxt_ctx_mem_type *ctxm,
771 					bool last)
772 {
773 	struct hwrm_func_backing_store_cfg_v2_input req = {0};
774 	u32 instance_bmap = ctxm->instance_bmap;
775 	int i, j, rc = 0, n = 1;
776 	__le32 *p;
777 
778 	if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
779 		return 0;
780 
781 	if (instance_bmap)
782 		n = hweight32(ctxm->instance_bmap);
783 	else
784 		instance_bmap = 1;
785 
786 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_CFG_V2);
787 
788 	BNXT_HWRM_LOCK(softc);
789 	req.type = cpu_to_le16(ctxm->type);
790 	req.entry_size = cpu_to_le16(ctxm->entry_size);
791 	req.subtype_valid_cnt = ctxm->split_entry_cnt;
792 	for (i = 0, p = &req.split_entry_0; i < ctxm->split_entry_cnt; i++)
793 		p[i] = cpu_to_le32(ctxm->split[i]);
794 	for (i = 0, j = 0; j < n && !rc; i++) {
795 		struct bnxt_ctx_pg_info *ctx_pg;
796 
797 		if (!(instance_bmap & (1 << i)))
798 			continue;
799 		req.instance = cpu_to_le16(i);
800 		ctx_pg = &ctxm->pg_info[j++];
801 		if (!ctx_pg->entries)
802 			continue;
803 		req.num_entries = cpu_to_le32(ctx_pg->entries);
804 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
805 				      &req.page_size_pbl_level,
806 				      &req.page_dir);
807 		if (last && j == n)
808 			req.flags =
809 				cpu_to_le32(HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_FLAGS_BS_CFG_ALL_DONE);
810 		rc = _hwrm_send_message(softc, &req, sizeof(req));
811 	}
812 	BNXT_HWRM_UNLOCK(softc);
813 	return rc;
814 }
815 
816 int
817 bnxt_hwrm_passthrough(struct bnxt_softc *softc, void *req, uint32_t req_len,
818 		void *resp, uint32_t resp_len, uint32_t app_timeout)
819 {
820 	int rc = 0;
821 	void *output = (void *)softc->hwrm_cmd_resp.idi_vaddr;
822 	struct input *input = req;
823 	uint32_t old_timeo;
824 
825 	input->resp_addr = htole64(softc->hwrm_cmd_resp.idi_paddr);
826 	BNXT_HWRM_LOCK(softc);
827 	old_timeo = softc->hwrm_cmd_timeo;
828 	if (input->req_type == HWRM_NVM_INSTALL_UPDATE)
829 		softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
830 	else
831 		softc->hwrm_cmd_timeo = max(app_timeout, softc->hwrm_cmd_timeo);
832 	rc = _hwrm_send_message(softc, req, req_len);
833 	softc->hwrm_cmd_timeo = old_timeo;
834 	if (rc) {
835 		device_printf(softc->dev, "%s: %s command failed with rc: 0x%x\n",
836 			      __FUNCTION__, GET_HWRM_REQ_TYPE(input->req_type), rc);
837 		goto fail;
838 	}
839 
840 	memcpy(resp, output, resp_len);
841 fail:
842 	BNXT_HWRM_UNLOCK(softc);
843 	return rc;
844 }
845 
846 
847 int
848 bnxt_hwrm_ver_get(struct bnxt_softc *softc)
849 {
850 	struct hwrm_ver_get_input	req = {0};
851 	struct hwrm_ver_get_output	*resp =
852 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
853 	int				rc;
854 	const char nastr[] = "<not installed>";
855 	const char naver[] = "<N/A>";
856 	uint32_t dev_caps_cfg;
857 	uint16_t fw_maj, fw_min, fw_bld, fw_rsv, len;
858 
859 	softc->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
860 	softc->hwrm_cmd_timeo = 1000;
861 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET);
862 
863 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
864 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
865 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
866 
867 	BNXT_HWRM_LOCK(softc);
868 	rc = _hwrm_send_message(softc, &req, sizeof(req));
869 	if (rc)
870 		goto fail;
871 
872 	snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
873 	    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, resp->hwrm_intf_upd_8b);
874 	softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj_8b;
875 	softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min_8b;
876 	softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd_8b;
877 	strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
878 	    BNXT_VERSTR_SIZE);
879 	strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
880 	    BNXT_NAME_SIZE);
881 
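	/*
	 * hwrm_spec_code packs the interface version as maj << 16 | min << 8 |
	 * upd, e.g. HWRM 1.9.2 becomes 0x10902 (the form compared against in
	 * bnxt_hwrm_func_backing_store_qcaps()).
	 */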
882 	softc->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
883 	    resp->hwrm_intf_min_8b << 8 |
884 	    resp->hwrm_intf_upd_8b;
885 	if (resp->hwrm_intf_maj_8b < 1) {
886 		device_printf(softc->dev, "HWRM interface %d.%d.%d is older "
887 		    "than 1.0.0.\n", resp->hwrm_intf_maj_8b,
888 		    resp->hwrm_intf_min_8b, resp->hwrm_intf_upd_8b);
889 		device_printf(softc->dev, "Please update firmware with HWRM "
890 		    "interface 1.0.0 or newer.\n");
891 	}
892 	if (resp->mgmt_fw_major == 0 && resp->mgmt_fw_minor == 0 &&
893 	    resp->mgmt_fw_build == 0) {
894 		strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE);
895 		strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
896 	}
897 	else {
898 		snprintf(softc->ver_info->mgmt_fw_ver, FW_VER_STR_LEN,
899 		    "%d.%d.%d.%d", resp->mgmt_fw_major, resp->mgmt_fw_minor,
900 		    resp->mgmt_fw_build, resp->mgmt_fw_patch);
901 		strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
902 		    BNXT_NAME_SIZE);
903 	}
904 	if (resp->netctrl_fw_major == 0 && resp->netctrl_fw_minor == 0 &&
905 	    resp->netctrl_fw_build == 0) {
906 		strlcpy(softc->ver_info->netctrl_fw_ver, naver,
907 		    BNXT_VERSTR_SIZE);
908 		strlcpy(softc->ver_info->netctrl_fw_name, nastr,
909 		    BNXT_NAME_SIZE);
910 	}
911 	else {
912 		snprintf(softc->ver_info->netctrl_fw_ver, FW_VER_STR_LEN,
913 		    "%d.%d.%d.%d", resp->netctrl_fw_major, resp->netctrl_fw_minor,
914 		    resp->netctrl_fw_build, resp->netctrl_fw_patch);
915 		strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
916 		    BNXT_NAME_SIZE);
917 	}
918 	if (resp->roce_fw_major == 0 && resp->roce_fw_minor == 0 &&
919 	    resp->roce_fw_build == 0) {
920 		strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE);
921 		strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE);
922 	}
923 	else {
924 		snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
925 		    "%d.%d.%d.%d", resp->roce_fw_major, resp->roce_fw_minor,
926 		    resp->roce_fw_build, resp->roce_fw_patch);
927 		strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
928 		    BNXT_NAME_SIZE);
929 	}
930 
931 	fw_maj = le32toh(resp->hwrm_fw_major);
932 	if (softc->hwrm_spec_code > 0x10803 && fw_maj) {
933 		fw_min = le16toh(resp->hwrm_fw_minor);
934 		fw_bld = le16toh(resp->hwrm_fw_build);
935 		fw_rsv = le16toh(resp->hwrm_fw_patch);
936 		len = FW_VER_STR_LEN;
937 	} else {
938 		fw_maj = resp->hwrm_fw_maj_8b;
939 		fw_min = resp->hwrm_fw_min_8b;
940 		fw_bld = resp->hwrm_fw_bld_8b;
941 		fw_rsv = resp->hwrm_fw_rsvd_8b;
942 		len = BC_HWRM_STR_LEN;
943 	}
944 
945 	softc->ver_info->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
946 	snprintf(softc->ver_info->fw_ver_str, len, "%d.%d.%d.%d",
947 			fw_maj, fw_min, fw_bld, fw_rsv);
948 
949 	if (strlen(resp->active_pkg_name)) {
950 		int fw_ver_len = strlen(softc->ver_info->fw_ver_str);
951 
952 		snprintf(softc->ver_info->fw_ver_str + fw_ver_len,
953 				FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
954 				resp->active_pkg_name);
955 		softc->fw_cap |= BNXT_FW_CAP_PKG_VER;
956 	}
957 
958 	softc->ver_info->chip_num = le16toh(resp->chip_num);
959 	softc->ver_info->chip_rev = resp->chip_rev;
960 	softc->ver_info->chip_metal = resp->chip_metal;
961 	softc->ver_info->chip_bond_id = resp->chip_bond_id;
962 	softc->ver_info->chip_type = resp->chip_platform_type;
963 
964 	if (resp->hwrm_intf_maj_8b >= 1) {
965 		softc->hwrm_max_req_len = le16toh(resp->max_req_win_len);
966 		softc->hwrm_max_ext_req_len = le16toh(resp->max_ext_req_len);
967 	}
968 	softc->hwrm_cmd_timeo = le16toh(resp->def_req_timeout);
969 	if (!softc->hwrm_cmd_timeo)
970 		softc->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
971 
972 	dev_caps_cfg = le32toh(resp->dev_caps_cfg);
973 	if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
974 	    (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
975 		softc->flags |= BNXT_FLAG_SHORT_CMD;
976 
977 	if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
978 	    (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
979 		softc->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
980 
981 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
982 		softc->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
983 
984 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
985 		softc->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
986 
987 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
988 		softc->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
989 
990 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
991 		softc->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
992 
993 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_EEM_SUPPORTED)
994 		softc->fw_cap |= BNXT_FW_CAP_CFA_EEM;
995 
996 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED)
997 		softc->fw_cap |= BNXT_FW_CAP_TRUFLOW_EN;
998 
999 fail:
1000 	BNXT_HWRM_UNLOCK(softc);
1001 	return rc;
1002 }
1003 
1004 static const u16 bnxt_async_events_arr[] = {
1005 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
1006 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
1007 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
1008 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
1009 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
1010 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
1011 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
1012 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
1013 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
1014 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
1015 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE,
1016 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
1017 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
1018 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
1019 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
1020 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
1021 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
1022 };
1023 
1024 int bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *bp, unsigned long *bmap, int bmap_size,
1025 			    bool async_only)
1026 {
1027 	DECLARE_BITMAP(async_events_bmap, 256);
1028 	u32 *events = (u32 *)async_events_bmap;
1029 	struct hwrm_func_drv_rgtr_output *resp =
1030 		(void *)bp->hwrm_cmd_resp.idi_vaddr;
1031 	struct hwrm_func_drv_rgtr_input req = {0};
1032 	u32 flags = 0;
1033 	int rc;
1034 	int i;
1035 
1036 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR);
1037 	req.ver_maj = HWRM_VERSION_MAJOR;
1038 	req.ver_min = HWRM_VERSION_MINOR;
1039 	req.ver_upd = HWRM_VERSION_UPDATE;
1040 
1041 	req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE |
1042 				   HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
1043 				   HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
1044 
1045 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
1046 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
1047 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
1048 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT |
1049 			 HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
1050 	if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
1051 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_NPAR_1_2_SUPPORT;
1052 	flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ASYM_QUEUE_CFG_SUPPORT;
1053 	req.flags = htole32(flags);
1054 	req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);
1055 
1056 	if (BNXT_PF(bp)) {
1057 		req.enables |=
1058 			htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
1059 	}
1060 
1061 	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
1062 		req.flags |= cpu_to_le32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FLOW_HANDLE_64BIT_MODE);
1063 
1064 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
1065 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
1066 		u16 event_id = bnxt_async_events_arr[i];
1067 
1068 		if (event_id == HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
1069 		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
1070 			continue;
1071 		}
1072 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
1073 	}
1074 	if (bmap && bmap_size) {
1075 		for (i = 0; i < bmap_size; i++) {
1076 			if (test_bit(i, bmap))
1077 				__set_bit(i, async_events_bmap);
1078 		}
1079 	}
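	/*
	 * async_events_bmap is 256 bits wide; forward it to the firmware as
	 * eight little-endian 32-bit words in async_event_fwd[].
	 */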
1080 	for (i = 0; i < 8; i++)
1081 		req.async_event_fwd[i] |= htole32(events[i]);
1082 
1083 	if (async_only)
1084 		req.enables =
1085 			htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
1086 
1087 	rc = hwrm_send_message(bp, &req, sizeof(req));
1088 
1089 	if (!rc) {
1090 		if (resp->flags &
1091 		    le32toh(HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED))
1092 			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
1093 	}
1094 
1095 
1096 	return rc;
1097 }
1098 
1099 int
1100 bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown)
1101 {
1102 	struct hwrm_func_drv_unrgtr_input req = {0};
1103 
1104 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR);
1105 	if (shutdown == true)
1106 		req.flags |=
1107 		    HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN;
1108 	return hwrm_send_message(softc, &req, sizeof(req));
1109 }
1110 
1111 static inline int
1112 _is_valid_ether_addr(uint8_t *addr)
1113 {
1114 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
1115 
1116 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
1117 		return (FALSE);
1118 
1119 	return (TRUE);
1120 }
1121 
1122 static inline void
1123 get_random_ether_addr(uint8_t *addr)
1124 {
1125 	uint8_t temp[ETHER_ADDR_LEN];
1126 
1127 	arc4rand(&temp, sizeof(temp), 0);
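	/* Clear the multicast bit and set the locally administered bit. */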
1128 	temp[0] &= 0xFE;
1129 	temp[0] |= 0x02;
1130 	bcopy(temp, addr, sizeof(temp));
1131 }
1132 
1133 int
1134 bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
1135 {
1136 	int rc = 0;
1137 	struct hwrm_func_qcaps_input req = {0};
1138 	struct hwrm_func_qcaps_output *resp =
1139 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
1140 	struct bnxt_func_info *func = &softc->func;
1141 	uint32_t flags, flags_ext, flags_ext2;
1142 
1143 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS);
1144 	req.fid = htole16(0xffff);
1145 
1146 	BNXT_HWRM_LOCK(softc);
1147 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1148 	if (rc)
1149 		goto fail;
1150 
1151 	flags = htole32(resp->flags);
1152 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED)
1153 		softc->flags |= BNXT_FLAG_WOL_CAP;
1154 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
1155 		softc->flags |= BNXT_FLAG_FW_CAP_EXT_STATS;
1156 
1157 	/* Enable RoCE only on Thor devices */
1158 	if (BNXT_CHIP_P5_PLUS(softc)) {
1159 		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED)
1160 			softc->flags |= BNXT_FLAG_ROCEV1_CAP;
1161 		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED)
1162 			softc->flags |= BNXT_FLAG_ROCEV2_CAP;
1163 	}
1164 
1165 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
1166 		softc->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
1167 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED)
1168 		softc->fw_cap |= BNXT_FW_CAP_ADMIN_PF;
1169 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
1170 		softc->fw_cap |= BNXT_FW_CAP_HOT_RESET;
1171 	if (flags &  HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE)
1172 		softc->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
1173 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PCIE_STATS_SUPPORTED)
1174 		softc->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
1175 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
1176 		softc->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
1177 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
1178 		softc->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
1179 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED)
1180 		softc->fw_cap |= BNXT_FW_CAP_VF_VNIC_NOTIFY;
1181 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_CRASHDUMP_CMD_SUPPORTED)
1182 		softc->fw_cap |= BNXT_FW_CAP_CRASHDUMP;
1183 	if (!(flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
1184 		softc->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
1185 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
1186 		softc->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
1187 
1188 	flags_ext = htole32(resp->flags_ext);
1189 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
1190 		softc->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
1191 	if (BNXT_PF(softc) && (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_ECN_STATS_SUPPORTED))
1192 		softc->fw_cap |= BNXT_FW_CAP_ECN_STATS;
1193 
1194 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_PTP_PPS_SUPPORTED)
1195 		softc->fw_cap |= BNXT_FW_CAP_PTP_PPS;
1196 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_PTP_PTM_SUPPORTED)
1197 		softc->fw_cap |= BNXT_FW_CAP_PTP_PTM;
1198 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
1199 		softc->fw_cap |= BNXT_FW_CAP_PTP_RTC;
1200 	if (BNXT_PF(softc) && (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
1201 		softc->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
1202 	if (BNXT_PF(softc) && (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
1203 		softc->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
1204 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_NPAR_1_2_SUPPORTED)
1205 		softc->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
1206 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_BS_V2_SUPPORTED)
1207 		softc->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
1208 	if (BNXT_PF(softc) &&
1209 	    (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED))
1210 		softc->fw_cap |= BNXT_FW_CAP_VF_CFG_FOR_PF;
1211 
1212 	flags_ext2 = htole32(resp->flags_ext2);
1213 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
1214 		softc->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
1215 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED)
1216 		softc->fw_cap |= BNXT_FW_CAP_DBR_SUPPORTED;
1217 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ||
1218 	    flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_DBR_PACING_SUPPORTED)
1219 		softc->fw_cap |= BNXT_FW_CAP_DBR_PACING_SUPPORTED;
1220 
1221 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_GENERIC_STATS_SUPPORTED)
1222 		softc->fw_cap |= BNXT_FW_CAP_GENERIC_STATS;
1223 	func->fw_fid = le16toh(resp->fid);
1224 	memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
1225 	func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
1226 	func->max_cp_rings = le16toh(resp->max_cmpl_rings);
1227 	func->max_tx_rings = le16toh(resp->max_tx_rings);
1228 	func->max_rx_rings = le16toh(resp->max_rx_rings);
1229 	func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps);
1230 	if (!func->max_hw_ring_grps)
1231 		func->max_hw_ring_grps = func->max_tx_rings;
1232 	func->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
1233 	func->max_vnics = le16toh(resp->max_vnics);
1234 	func->max_stat_ctxs = le16toh(resp->max_stat_ctx);
1235 	if (BNXT_PF(softc)) {
1236 		struct bnxt_pf_info *pf = &softc->pf;
1237 
1238 		pf->port_id = le16toh(resp->port_id);
1239 		pf->first_vf_id = le16toh(resp->first_vf_id);
1240 		pf->max_vfs = le16toh(resp->max_vfs);
1241 		pf->max_encap_records = le32toh(resp->max_encap_records);
1242 		pf->max_decap_records = le32toh(resp->max_decap_records);
1243 		pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows);
1244 		pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows);
1245 		pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows);
1246 		pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows);
1247 	}
1248 	if (!_is_valid_ether_addr(func->mac_addr)) {
1249 		device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n");
1250 		get_random_ether_addr(func->mac_addr);
1251 	}
1252 
1253 fail:
1254 	BNXT_HWRM_UNLOCK(softc);
1255 	return rc;
1256 }
1257 
1258 int
1259 bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
1260 {
1261 	struct hwrm_func_qcfg_input req = {0};
1262 	struct hwrm_func_qcfg_output *resp =
1263 		(void *)softc->hwrm_cmd_resp.idi_vaddr;
1264 	struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg;
1265 	uint32_t min_db_offset = 0;
1266 	uint16_t flags;
1267 	int rc;
1268 
1269 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
1270 	req.fid = htole16(0xffff);
1271 	BNXT_HWRM_LOCK(softc);
1272 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1273 	if (rc)
1274 		goto end;
1275 
1276 	fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
1277 	fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
1278 	fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
1279 	fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
1280 
1281 	switch (resp->port_partition_type) {
1282 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1283 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_2:
1284 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1285 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1286 		softc->port_partition_type = resp->port_partition_type;
1287 		break;
1288 	}
1289 
1290 	flags = le16toh(resp->flags);
1291 	if (flags & (HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED |
1292 		     HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED)) {
1293 		softc->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
1294 		if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED)
1295 			softc->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
1296 	}
1297 	if (BNXT_PF(softc) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
1298 		softc->flags |= BNXT_FLAG_MULTI_HOST;
1299 	if (BNXT_PF(softc) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_ROOT))
1300 		softc->flags |= BNXT_FLAG_MULTI_ROOT;
1301 	if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_SECURE_MODE_ENABLED)
1302 		softc->fw_cap |= BNXT_FW_CAP_SECURE_MODE;
1303 	if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_RING_MONITOR_ENABLED)
1304 		softc->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
1305 	if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_ENABLE_RDMA_SRIOV)
1306 		softc->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
1307 
1308 	if (softc->db_size)
1309 		goto end;
1310 
1311 	softc->legacy_db_size = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
1312 
1313 	if (BNXT_CHIP_P5(softc)) {
1314 		if (BNXT_PF(softc))
1315 			min_db_offset = DB_PF_OFFSET_P5;
1316 		else
1317 			min_db_offset = DB_VF_OFFSET_P5;
1318 		softc->legacy_db_size = min_db_offset;
1319 	}
1320 
1321 	softc->db_size = roundup2(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
1322 			1024, PAGE_SIZE);
1323 	if (!softc->db_size || softc->db_size > pci_resource_len(softc->pdev, 2) ||
1324 			softc->db_size <= min_db_offset)
1325 		softc->db_size = pci_resource_len(softc->pdev, 2);
1326 
1327 	end:
1328 	BNXT_HWRM_UNLOCK(softc);
1329 	return rc;
1330 }
1331 
1332 int
1333 bnxt_hwrm_func_reset(struct bnxt_softc *softc)
1334 {
1335 	struct hwrm_func_reset_input req = {0};
1336 
1337 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
1338 	req.enables = 0;
1339 
1340 	return hwrm_send_message(softc, &req, sizeof(req));
1341 }
1342 
1343 static void
1344 bnxt_hwrm_set_link_common(struct bnxt_softc *softc,
1345     struct hwrm_port_phy_cfg_input *req)
1346 {
1347 	struct bnxt_link_info *link_info = &softc->link_info;
1348 	uint8_t autoneg = softc->link_info.autoneg;
1349 	uint16_t fw_link_speed = softc->link_info.req_link_speed;
1350 
1351 	if (autoneg & BNXT_AUTONEG_SPEED) {
1352 		uint8_t phy_type = get_phy_type(softc);
1353 
1354 		if (phy_type == HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET ||
1355 		    phy_type == HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
1356 		    phy_type == HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE) {
1357 
1358 			req->auto_mode |= htole32(HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK);
1359 			if (link_info->advertising) {
1360 				req->enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK);
1361 				req->auto_link_speed_mask = htole16(link_info->advertising);
1362 			}
1363 		} else {
1364 			req->auto_mode |= HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1365 		}
1366 
1367 		req->enables |=
1368 		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE);
1369 		req->flags |=
1370 		    htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
1371 	} else {
1372 
1373 		if (link_info->force_speed2_nrz ||
1374 		    link_info->force_pam4_56_speed2 ||
1375 		    link_info->force_pam4_112_speed2) {
1376 			req->force_link_speeds2 = htole16(fw_link_speed);
1377 			req->enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_LINK_SPEEDS2);
1378 			link_info->force_speed2_nrz = false;
1379 			link_info->force_pam4_56_speed2 = false;
1380 			link_info->force_pam4_112_speed2 = false;
1381 		} else if (link_info->force_pam4_speed) {
1382 			req->force_pam4_link_speed = htole16(fw_link_speed);
1383 			req->enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAM4_LINK_SPEED);
1384 			link_info->force_pam4_speed = false;
1385 		} else {
1386 			req->force_link_speed = htole16(fw_link_speed);
1387 		}
1388 
1389 		req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
1390 	}
1391 
1392 	/* tell chimp that the setting takes effect immediately */
1393 	req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
1394 }
1395 
1396 static void
1397 bnxt_hwrm_set_pause_common(struct bnxt_softc *softc,
1398     struct hwrm_port_phy_cfg_input *req)
1399 {
1400 	struct bnxt_link_info *link_info = &softc->link_info;
1401 
1402 	if (link_info->flow_ctrl.autoneg) {
1403 		req->auto_pause =
1404 		    HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE;
1405 		if (link_info->flow_ctrl.rx)
1406 			req->auto_pause |=
1407 			    HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1408 		if (link_info->flow_ctrl.tx)
1409 			req->auto_pause |=
1410 			    HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1411 		req->enables |=
1412 		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE);
1413 	} else {
1414 		if (link_info->flow_ctrl.rx)
1415 			req->force_pause |=
1416 			    HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1417 		if (link_info->flow_ctrl.tx)
1418 			req->force_pause |=
1419 			    HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1420 		req->enables |=
1421 			htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE);
1422 		req->auto_pause = req->force_pause;
1423 		req->enables |=
1424 		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE);
1425 	}
1426 }
1427 
1428 /* JFV this needs interface connection */
1429 static void
1430 bnxt_hwrm_set_eee(struct bnxt_softc *softc, struct hwrm_port_phy_cfg_input *req)
1431 {
1432 	/* struct ethtool_eee *eee = &softc->eee; */
1433 	bool	eee_enabled = false;
1434 
1435 	if (eee_enabled) {
1436 #if 0
1437 		uint16_t eee_speeds;
1438 		uint32_t flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE;
1439 
1440 		if (eee->tx_lpi_enabled)
1441 			flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI;
1442 
1443 		req->flags |= htole32(flags);
1444 		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
1445 		req->eee_link_speed_mask = htole16(eee_speeds);
1446 		req->tx_lpi_timer = htole32(eee->tx_lpi_timer);
1447 #endif
1448 	} else {
1449 		req->flags |=
1450 		    htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE);
1451 	}
1452 }
1453 
1454 int
1455 bnxt_hwrm_set_link_setting(struct bnxt_softc *softc, bool set_pause,
1456     bool set_eee, bool set_link)
1457 {
1458 	struct hwrm_port_phy_cfg_input req = {0};
1459 	int rc;
1460 
1461 	if (softc->flags & BNXT_FLAG_NPAR)
1462 		return ENOTSUP;
1463 
1464 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_CFG);
1465 
1466 	if (set_pause) {
1467 		bnxt_hwrm_set_pause_common(softc, &req);
1468 
1469 		if (softc->link_info.flow_ctrl.autoneg)
1470 			set_link = true;
1471 	}
1472 
1473 	if (set_link)
1474 		bnxt_hwrm_set_link_common(softc, &req);
1475 
1476 	if (set_eee)
1477 		bnxt_hwrm_set_eee(softc, &req);
1478 
1479 	BNXT_HWRM_LOCK(softc);
1480 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1481 
1482 	if (!rc) {
1483 		if (set_pause) {
1484 			/* Since changing the 'force pause' setting doesn't
1485 			 * trigger any link change event, the driver needs to
1486 			 * update the current pause result upon successful
1487 			 * return of the phy_cfg command. */
1488 			if (!softc->link_info.flow_ctrl.autoneg)
1489 				bnxt_report_link(softc);
1490 		}
1491 	}
1492 	BNXT_HWRM_UNLOCK(softc);
1493 	return rc;
1494 }
1495 
1496 int
1497 bnxt_hwrm_vnic_set_hds(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1498 {
1499 	struct hwrm_vnic_plcmodes_cfg_input req = {0};
1500 
1501 	if (!BNXT_CHIP_P5_PLUS(softc))
1502 		return 0;
1503 
1504 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_PLCMODES_CFG);
1505 
1506 	req.flags = htole32(HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1507 	req.vnic_id = htole16(vnic->id);
1508 
1509 	return hwrm_send_message(softc, &req, sizeof(req));
1510 }
1511 
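/*
 * Configure an already allocated VNIC.  On P5 and newer chips the
 * default RX and completion rings are programmed directly; on older
 * chips the VNIC points at a default ring group and RSS rule instead.
 * The MRU, CoS rule and loopback rule are set in either case.
 */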
1512 int
1513 bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1514 {
1515 	struct hwrm_vnic_cfg_input req = {0};
1516 
1517 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG);
1518 
1519 	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
1520 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1521 	if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL)
1522 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1523 	if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP)
1524 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1525 	if (BNXT_CHIP_P5_PLUS(softc)) {
1526 		req.default_rx_ring_id =
1527 			htole16(softc->rx_rings[0].phys_id);
1528 		req.default_cmpl_ring_id =
1529 			htole16(softc->rx_cp_rings[0].ring.phys_id);
1530 		req.enables |=
1531 			htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1532 			    HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID);
1533 		req.vnic_id = htole16(vnic->id);
1534 	} else {
1535 		req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
1536 				HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE);
1537 		req.vnic_id = htole16(vnic->id);
1538 		req.dflt_ring_grp = htole16(vnic->def_ring_grp);
1539 	}
1540 	req.rss_rule = htole16(vnic->rss_id);
1541 	req.cos_rule = htole16(vnic->cos_rule);
1542 	req.lb_rule = htole16(vnic->lb_rule);
1543 	req.enables |= htole32(HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
1544 	req.mru = htole16(vnic->mru);
1545 
1546 	return hwrm_send_message(softc, &req, sizeof(req));
1547 }
1548 
1549 int
1550 bnxt_hwrm_vnic_free(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1551 {
1552 	struct hwrm_vnic_free_input req = {0};
1553 	int rc = 0;
1554 
1555 	if (vnic->id == (uint16_t)HWRM_NA_SIGNATURE)
1556 		return rc;
1557 
1558 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_FREE);
1559 
1560 	req.vnic_id = htole32(vnic->id);
1561 
1562 	BNXT_HWRM_LOCK(softc);
1563 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1564 	if (rc)
1565 		goto fail;
1566 
1567 fail:
1568 	BNXT_HWRM_UNLOCK(softc);
1569 	return (rc);
1570 }
1571 
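/*
 * Allocate a VNIC in firmware.  vnic->id must still be
 * HWRM_NA_SIGNATURE on entry; on success it is replaced with the
 * firmware-assigned VNIC id from the response.
 */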
1572 int
1573 bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1574 {
1575 	struct hwrm_vnic_alloc_input req = {0};
1576 	struct hwrm_vnic_alloc_output *resp =
1577 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
1578 	int rc;
1579 
1580 	if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE) {
1581 		device_printf(softc->dev,
1582 		    "Attempt to re-allocate vnic %04x\n", vnic->id);
1583 		return EDOOFUS;
1584 	}
1585 
1586 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC);
1587 
1588 	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
1589 		req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1590 
1591 	BNXT_HWRM_LOCK(softc);
1592 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1593 	if (rc)
1594 		goto fail;
1595 
1596 	vnic->id = le32toh(resp->vnic_id);
1597 
1598 fail:
1599 	BNXT_HWRM_UNLOCK(softc);
1600 	return (rc);
1601 }
1602 
1603 int
1604 bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *softc, uint16_t ctx_id)
1605 {
1606 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
1607 	int rc = 0;
1608 
1609 	if (ctx_id == (uint16_t)HWRM_NA_SIGNATURE)
1610 		return rc;
1611 
1612 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE);
1613 	req.rss_cos_lb_ctx_id = htole16(ctx_id);
1614 	BNXT_HWRM_LOCK(softc);
1615 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1616 	if (rc)
1617 		goto fail;
1618 
1619 fail:
1620 	BNXT_HWRM_UNLOCK(softc);
1621 	return rc;
1622 }
1623 
1624 int
1625 bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id)
1626 {
1627 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
1628 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1629 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
1630 	int rc;
1631 
1632 	if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE) {
1633 		device_printf(softc->dev,
1634 		    "Attempt to re-allocate vnic ctx %04x\n", *ctx_id);
1635 		return EDOOFUS;
1636 	}
1637 
1638 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
1639 
1640 	BNXT_HWRM_LOCK(softc);
1641 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1642 	if (rc)
1643 		goto fail;
1644 
1645 	*ctx_id = le32toh(resp->rss_cos_lb_ctx_id);
1646 
1647 fail:
1648 	BNXT_HWRM_UNLOCK(softc);
1649 	return (rc);
1650 }
1651 
1652 int
1653 bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
1654 {
1655 	struct hwrm_ring_grp_alloc_input req = {0};
1656 	struct hwrm_ring_grp_alloc_output *resp;
1657 	int rc = 0;
1658 
1659 	if (grp->grp_id != (uint16_t)HWRM_NA_SIGNATURE) {
1660 		device_printf(softc->dev,
1661 		    "Attempt to re-allocate ring group %04x\n", grp->grp_id);
1662 		return EDOOFUS;
1663 	}
1664 
1665 	if (BNXT_CHIP_P5_PLUS(softc))
1666 		return 0;
1667 
1668 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1669 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC);
1670 	req.cr = htole16(grp->cp_ring_id);
1671 	req.rr = htole16(grp->rx_ring_id);
1672 	req.ar = htole16(grp->ag_ring_id);
1673 	req.sc = htole16(grp->stats_ctx);
1674 
1675 	BNXT_HWRM_LOCK(softc);
1676 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1677 	if (rc)
1678 		goto fail;
1679 
1680 	grp->grp_id = le32toh(resp->ring_group_id);
1681 
1682 fail:
1683 	BNXT_HWRM_UNLOCK(softc);
1684 	return rc;
1685 }
1686 
1687 int
1688 bnxt_hwrm_ring_grp_free(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
1689 {
1690 	struct hwrm_ring_grp_free_input req = {0};
1691 	int rc = 0;
1692 
1693 	if (grp->grp_id == (uint16_t)HWRM_NA_SIGNATURE)
1694 		return 0;
1695 
1696 	if (BNXT_CHIP_P5_PLUS(softc))
1697 		return 0;
1698 
1699 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_FREE);
1700 
1701 	req.ring_group_id = htole32(grp->grp_id);
1702 
1703 	BNXT_HWRM_LOCK(softc);
1704 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1705 	if (rc)
1706 		goto fail;
1707 
1708 fail:
1709 	BNXT_HWRM_UNLOCK(softc);
1710 	return rc;
1711 }
1712 
1713 int bnxt_hwrm_ring_free(struct bnxt_softc *softc, uint32_t ring_type,
1714 		struct bnxt_ring *ring, int cmpl_ring_id)
1715 {
1716 	struct hwrm_ring_free_input req = {0};
1717 	struct hwrm_ring_free_output *resp;
1718 	int rc = 0;
1719 	uint16_t error_code;
1720 
1721 	if (ring->phys_id == (uint16_t)HWRM_NA_SIGNATURE)
1722 		return 0;
1723 
1724 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1725 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_FREE);
1726 	req.cmpl_ring = htole16(cmpl_ring_id);
1727 	req.ring_type = ring_type;
1728 	req.ring_id = htole16(ring->phys_id);
1729 
1730 	BNXT_HWRM_LOCK(softc);
1731 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1732 	error_code = le16toh(resp->error_code);
1733 
1734 	if (rc || error_code) {
1735 		device_printf(softc->dev, "hwrm_ring_free type %d failed. "
1736 				"rc:%x err:%x\n", ring_type, rc, error_code);
1737 		if (!rc)
1738 			rc = -EIO;
1739 	}
1740 
1741 	BNXT_HWRM_UNLOCK(softc);
1742 	return rc;
1743 }
1744 
1745 /*
1746  * Ring allocation message to the firmware
1747  */
1748 int
1749 bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
1750                      struct bnxt_ring *ring)
1751 {
1752 	struct hwrm_ring_alloc_input req = {0};
1753 	struct hwrm_ring_alloc_output *resp;
1754 	uint16_t idx = ring->idx;
1755 	struct bnxt_cp_ring *cp_ring;
1756 	int rc;
1757 
1758 	if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE) {
1759 		device_printf(softc->dev,
1760 		    "Attempt to re-allocate ring %04x\n", ring->phys_id);
1761 		return EDOOFUS;
1762 	}
1763 
1764 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1765 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC);
1766 	req.enables = htole32(0);
1767 	req.fbo = htole32(0);
1768 	req.ring_type = type;
1769 	req.page_tbl_addr = htole64(ring->paddr);
1770 	req.logical_id = htole16(ring->id);
1771 	req.length = htole32(ring->ring_size);
1772 
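	/*
	 * Ring-type specific fields.  TX rings are tied to a completion
	 * ring, CoS queue and stats context.  On P5 and newer chips RX
	 * and AGG rings also carry a stats context and buffer size, and
	 * completion rings are bound to an NQ; on older chips an AGG
	 * ring is allocated as a plain RX ring and completion rings use
	 * MSI-X interrupt mode directly.
	 */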
1773 	switch (type) {
1774 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1775 		cp_ring = &softc->tx_cp_rings[idx];
1776 
1777 		req.cmpl_ring_id = htole16(cp_ring->ring.phys_id);
1778 		/* queue_id - what CoS queue the TX ring is associated with */
1779 		req.queue_id = htole16(softc->tx_q_info[0].queue_id);
1780 
1781 		req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
1782 		req.enables |= htole32(
1783 		    HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
1784 		break;
1785 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1786 		if (!BNXT_CHIP_P5_PLUS(softc))
1787 			break;
1788 
1789 		cp_ring = &softc->rx_cp_rings[idx];
1790 
1791 		req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
1792 		req.rx_buf_size = htole16(softc->rx_buf_size);
1793 		req.enables |= htole32(
1794 			HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1795 			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
1796 		break;
1797 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1798 		if (!BNXT_CHIP_P5_PLUS(softc)) {
1799 			req.ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
1800 			break;
1801 		}
1802 
1803 		cp_ring = &softc->rx_cp_rings[idx];
1804 
1805 		req.rx_ring_id = htole16(softc->rx_rings[idx].phys_id);
1806 		req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
1807 		req.rx_buf_size = htole16(softc->rx_buf_size);
1808 		req.enables |= htole32(
1809 			HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1810 			HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1811 			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
1812 		break;
1813 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1814 		if (!BNXT_CHIP_P5_PLUS(softc)) {
1815 			req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1816 			break;
1817 		}
1818 
1819 		req.cq_handle = htole64(ring->id);
1820 		req.nq_ring_id = htole16(softc->nq_rings[idx].ring.phys_id);
1821 		req.enables |= htole32(
1822 			HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID);
1823 		break;
1824 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1825 		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1826 		break;
1827 	default:
1828 		device_printf(softc->dev,
1829 			      "hwrm alloc invalid ring type %d\n", type);
1830 		return -1;
1831 	}
1832 
1833 	BNXT_HWRM_LOCK(softc);
1834 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1835 	if (rc)
1836 		goto fail;
1837 
1838 	ring->phys_id = le16toh(resp->ring_id);
1839 
1840 fail:
1841 	BNXT_HWRM_UNLOCK(softc);
1842 	return rc;
1843 }
1844 
1845 int
1846 bnxt_hwrm_stat_ctx_free(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
1847 {
1848 	struct hwrm_stat_ctx_free_input req = {0};
1849 	int rc = 0;
1850 
1851 	if (cpr->stats_ctx_id == HWRM_NA_SIGNATURE)
1852 		return rc;
1853 
1854 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_FREE);
1855 
1856 	req.stat_ctx_id = htole16(cpr->stats_ctx_id);
1857 	BNXT_HWRM_LOCK(softc);
1858 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1859 	if (rc)
1860 		goto fail;
1861 
1862 fail:
1863 	BNXT_HWRM_UNLOCK(softc);
1864 
1865 	return rc;
1866 }
1867 
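/*
 * Allocate a firmware statistics context that DMAs counters to paddr;
 * the update period is set to 1000 ms.  The DMA length depends on the
 * chip generation: the full extended stats structure on P7, the
 * extended structure minus the last 8 bytes on P5, and the legacy
 * structure otherwise.
 */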
1868 int
1869 bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
1870     uint64_t paddr)
1871 {
1872 	struct hwrm_stat_ctx_alloc_input req = {0};
1873 	struct hwrm_stat_ctx_alloc_output *resp;
1874 	int rc = 0;
1875 
1876 	if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE) {
1877 		device_printf(softc->dev,
1878 		    "Attempt to re-allocate stats ctx %08x\n",
1879 		    cpr->stats_ctx_id);
1880 		return EDOOFUS;
1881 	}
1882 
1883 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1884 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC);
1885 
1886 	req.update_period_ms = htole32(1000);
1887 	req.stats_dma_addr = htole64(paddr);
1888 
1889 	if (BNXT_CHIP_P7(softc))
1890 		req.stats_dma_length = htole16(sizeof(struct ctx_hw_stats_ext));
1891 	else if (BNXT_CHIP_P5(softc))
1892 		req.stats_dma_length = htole16(sizeof(struct ctx_hw_stats_ext) - 8);
1893 	else
1894 		req.stats_dma_length = htole16(sizeof(struct ctx_hw_stats));
1895 
1896 	BNXT_HWRM_LOCK(softc);
1897 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1898 	if (rc)
1899 		goto fail;
1900 
1901 	cpr->stats_ctx_id = le32toh(resp->stat_ctx_id);
1902 
1903 fail:
1904 	BNXT_HWRM_UNLOCK(softc);
1905 
1906 	return rc;
1907 }
1908 
1909 int
1910 bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
1911 {
1912 	struct hwrm_port_qstats_input req = {0};
1913 	int rc = 0;
1914 
1915 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS);
1916 
1917 	req.port_id = htole16(softc->pf.port_id);
1918 	req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr);
1919 	req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr);
1920 
1921 	BNXT_HWRM_LOCK(softc);
1922 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1923 	BNXT_HWRM_UNLOCK(softc);
1924 
1925 	return rc;
1926 }
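/*
 * Query the priority to CoS queue mapping for one direction (TX or RX)
 * and convert the per-port queue IDs returned by firmware into queue
 * indices.  Per-port queue IDs step by 10 (0, 10, 20, ...), so the low
 * digit of the ID is the index within the port.
 */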
1927 static int bnxt_hwrm_pri2cos_idx(struct bnxt_softc *softc, uint32_t path_dir)
1928 {
1929 	struct hwrm_queue_pri2cos_qcfg_input req = {0};
1930 	struct hwrm_queue_pri2cos_qcfg_output *resp;
1931 	uint8_t *pri2cos_idx, *q_ids, max_q;
1932 	int rc, i, j;
1933 	uint8_t *pri2cos;
1934 
1935 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_PRI2COS_QCFG);
1936 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1937 
1938 	req.flags = htole32(HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN |
1939 			    path_dir);
1940 	rc = hwrm_send_message(softc, &req, sizeof(req));
1941 
1942 	if (rc)
1943 		return rc;
1944 
1945 	if (path_dir == HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX) {
1946 		pri2cos_idx = softc->tx_pri2cos_idx;
1947 		q_ids = softc->tx_q_ids;
1948 		max_q = softc->tx_max_q;
1949 	} else {
1950 		pri2cos_idx = softc->rx_pri2cos_idx;
1951 		q_ids = softc->rx_q_ids;
1952 		max_q = softc->rx_max_q;
1953 	}
1954 
1955 	pri2cos = &resp->pri0_cos_queue_id;
1956 
1957 	for (i = 0; i < BNXT_MAX_QUEUE; i++) {
1958 		uint8_t queue_id = pri2cos[i];
1959 		uint8_t queue_idx;
1960 
1961 		/* Per port queue IDs start from 0, 10, 20, etc */
1962 		queue_idx = queue_id % 10;
1963 		if (queue_idx > BNXT_MAX_QUEUE) {
1964 			softc->pri2cos_valid = false;
1965 			rc = -EINVAL;
1966 			return rc;
1967 		}
1968 
1969 		for (j = 0; j < max_q; j++) {
1970 			if (q_ids[j] == queue_id)
1971 				pri2cos_idx[i] = queue_idx;
1972 		}
1973 	}
1974 
1975 	softc->pri2cos_valid = true;
1976 
1977 	return rc;
1978 }
1979 
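/*
 * Fetch the extended port statistics into the pre-allocated DMA
 * buffers and record how many 64-bit counters the firmware actually
 * returned.  When the pri2cos mapping is valid, per-priority byte and
 * packet counters are then copied out of the extended stats blocks.
 */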
1980 int
1981 bnxt_hwrm_port_qstats_ext(struct bnxt_softc *softc)
1982 {
1983 	struct hwrm_port_qstats_ext_input req = {0};
1984 	struct hwrm_port_qstats_ext_output *resp;
1985 	int rc = 0, i;
1986 	uint32_t tx_stat_size;
1987 
1988 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS_EXT);
1989 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1990 
1991 	tx_stat_size = sizeof(struct tx_port_stats_ext);
1992 	req.port_id = htole16(softc->pf.port_id);
1993 	req.tx_stat_size = htole16(tx_stat_size);
1994 	req.rx_stat_size = htole16(sizeof(struct rx_port_stats_ext));
1995 	req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats_ext.idi_paddr);
1996 	req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats_ext.idi_paddr);
1997 
1998 	rc = hwrm_send_message(softc, &req, sizeof(req));
1999 
2000 	if (!rc) {
2001 		softc->fw_rx_stats_ext_size =
2002 			le16toh(resp->rx_stat_size) / 8;
2003 		if (BNXT_FW_MAJ(softc) < 220 && !BNXT_CHIP_P7(softc) &&
2004 		    softc->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
2005 			softc->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
2006 
2007 		softc->fw_tx_stats_ext_size = tx_stat_size ?
2008 			le16toh(resp->tx_stat_size) / 8 : 0;
2009 	} else {
2010 		softc->fw_rx_stats_ext_size = 0;
2011 		softc->fw_tx_stats_ext_size = 0;
2012 	}
2013 
2014 	if (softc->fw_tx_stats_ext_size <=
2015 	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
2016 		softc->pri2cos_valid = false;
2017 		return rc;
2018 	}
2019 
2020 	rc = bnxt_hwrm_pri2cos_idx(softc, HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX);
2021 	if (rc)
2022 		return rc;
2023 
2024 	if (softc->is_asym_q) {
2025 		rc = bnxt_hwrm_pri2cos_idx(softc, HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX);
2026 		if (rc)
2027 			return rc;
2028 	} else {
2029 		memcpy(softc->rx_pri2cos_idx, softc->tx_pri2cos_idx, sizeof(softc->rx_pri2cos_idx));
2030 	}
2031 
2032 	u64 *rx_port_stats_ext = (u64 *)softc->hw_rx_port_stats_ext.idi_vaddr;
2033 	u64 *tx_port_stats_ext = (u64 *)softc->hw_tx_port_stats_ext.idi_vaddr;
2034 
2035 	if (softc->pri2cos_valid) {
2036 		for (i = 0; i < 8; i++) {
2037 			long n = bnxt_rx_bytes_pri_arr_base_off[i] +
2038 				 softc->rx_pri2cos_idx[i];
2039 
2040 			softc->rx_bytes_pri[i] = *(rx_port_stats_ext + n);
2041 		}
2042 		for (i = 0; i < 8; i++) {
2043 			long n = bnxt_rx_pkts_pri_arr_base_off[i] +
2044 				 softc->rx_pri2cos_idx[i];
2045 
2046 			softc->rx_packets_pri[i] = *(rx_port_stats_ext + n);
2047 		}
2048 		for (i = 0; i < 8; i++) {
2049 			long n = bnxt_tx_bytes_pri_arr_base_off[i] +
2050 				 softc->tx_pri2cos_idx[i];
2051 
2052 			softc->tx_bytes_pri[i] = *(tx_port_stats_ext + n);
2053 		}
2054 		for (i = 0; i < 8; i++) {
2055 			long n = bnxt_tx_pkts_pri_arr_base_off[i] +
2056 				 softc->tx_pri2cos_idx[i];
2057 
2058 			softc->tx_packets_pri[i] = *(tx_port_stats_ext + n);
2059 		}
2060 	}
2061 
2062 	return rc;
2063 }
2064 
2065 int
2066 bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc,
2067     struct bnxt_vnic_info *vnic)
2068 {
2069 	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
2070 	uint32_t mask = vnic->rx_mask;
2071 
2072 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK);
2073 
2074 	req.vnic_id = htole32(vnic->id);
2075 	req.mask = htole32(mask);
2076 	req.mc_tbl_addr = htole64(vnic->mc_list.idi_paddr);
2077 	req.num_mc_entries = htole32(vnic->mc_list_count);
2078 	return hwrm_send_message(softc, &req, sizeof(req));
2079 }
2080 
2081 int
2082 bnxt_hwrm_l2_filter_free(struct bnxt_softc *softc, uint64_t filter_id)
2083 {
2084 	struct hwrm_cfa_l2_filter_free_input	req = {0};
2085 	int rc = 0;
2086 
2087 	if (filter_id == -1)
2088 		return rc;
2089 
2090 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_FREE);
2091 
2092 	req.l2_filter_id = htole64(filter_id);
2093 
2094 	BNXT_HWRM_LOCK(softc);
2095 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2096 	if (rc)
2097 		goto fail;
2098 
2099 fail:
2100 	BNXT_HWRM_UNLOCK(softc);
2101 	return (rc);
2102 }
2103 
2104 int
2105 bnxt_hwrm_free_filter(struct bnxt_softc *softc)
2106 {
2107 	struct bnxt_vnic_info *vnic = &softc->vnic_info;
2108 	struct bnxt_vlan_tag *tag;
2109 	int rc = 0;
2110 
2111 	rc = bnxt_hwrm_l2_filter_free(softc, softc->vnic_info.filter_id);
2112 	if (rc)
2113 		goto end;
2114 
2115 	SLIST_FOREACH(tag, &vnic->vlan_tags, next) {
2116 		rc = bnxt_hwrm_l2_filter_free(softc, tag->filter_id);
2117 		if (rc)
2118 			goto end;
2119 		tag->filter_id = -1;
2120 	}
2121 
2122 end:
2123 	return rc;
2124 }
2125 
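/*
 * Allocate an RX L2 filter that matches the interface MAC address,
 * and optionally a single VLAN tag when vlan_tag is not 0xffff,
 * directing matching traffic to the given VNIC.  The 64-bit filter id
 * returned by firmware is stored through filter_id.
 */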
2126 int
2127 bnxt_hwrm_l2_filter_alloc(struct bnxt_softc *softc, uint16_t vlan_tag,
2128 		uint64_t *filter_id)
2129 {
2130 	struct hwrm_cfa_l2_filter_alloc_input	req = {0};
2131 	struct hwrm_cfa_l2_filter_alloc_output	*resp;
2132 	struct bnxt_vnic_info *vnic = &softc->vnic_info;
2133 	uint32_t enables = 0;
2134 	int rc = 0;
2135 
2136 	if (*filter_id != -1) {
2137 		device_printf(softc->dev, "Attempt to re-allocate l2 ctx "
2138 		    "filter (fid: 0x%jx)\n", (uintmax_t)*filter_id);
2139 		return EDOOFUS;
2140 	}
2141 
2142 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
2143 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC);
2144 
2145 	req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX);
2146 	enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
2147 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
2148 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
2149 
2150 	if (vlan_tag != 0xffff) {
2151 		enables |=
2152 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
2153 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK |
2154 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_NUM_VLANS;
2155 		req.l2_ivlan_mask = 0xffff;
2156 		req.l2_ivlan = vlan_tag;
2157 		req.num_vlans = 1;
2158 	}
2159 
2160 	req.enables = htole32(enables);
2161 	req.dst_id = htole16(vnic->id);
2162 	memcpy(req.l2_addr, if_getlladdr(iflib_get_ifp(softc->ctx)),
2163 	    ETHER_ADDR_LEN);
2164 	memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask));
2165 
2166 	BNXT_HWRM_LOCK(softc);
2167 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2168 	if (rc)
2169 		goto fail;
2170 
2171 	*filter_id = le64toh(resp->l2_filter_id);
2172 fail:
2173 	BNXT_HWRM_UNLOCK(softc);
2174 	return (rc);
2175 }
2176 
2177 int
2178 bnxt_hwrm_set_filter(struct bnxt_softc *softc)
2179 {
2180 	struct bnxt_vnic_info *vnic = &softc->vnic_info;
2181 	struct bnxt_vlan_tag *tag;
2182 	int rc = 0;
2183 
2184 	rc = bnxt_hwrm_l2_filter_alloc(softc, 0xffff, &vnic->filter_id);
2185 	if (rc)
2186 		goto end;
2187 
2188 	SLIST_FOREACH(tag, &vnic->vlan_tags, next) {
2189 		rc = bnxt_hwrm_l2_filter_alloc(softc, tag->tag,
2190 				&tag->filter_id);
2191 		if (rc)
2192 			goto end;
2193 	}
2194 
2195 end:
2196 	return rc;
2197 }
2198 
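/*
 * Program the RSS hash type, indirection (ring group) table and hash
 * key for the VNIC's RSS context.  P5 and newer chips additionally
 * take the VNIC id and ring table pair index in the same request.
 */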
2199 int
2200 bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
2201     uint32_t hash_type)
2202 {
2203 	struct hwrm_vnic_rss_cfg_input	req = {0};
2204 
2205 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);
2206 
2207 	if (BNXT_CHIP_P7(softc))
2208 		req.flags |= HWRM_VNIC_RSS_CFG_INPUT_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
2209 
2210 	req.hash_type = htole32(hash_type);
2211 	req.ring_grp_tbl_addr = htole64(vnic->rss_grp_tbl.idi_paddr);
2212 	req.hash_key_tbl_addr = htole64(vnic->rss_hash_key_tbl.idi_paddr);
2213 	req.rss_ctx_idx = htole16(vnic->rss_id);
2214 	req.hash_mode_flags = HWRM_FUNC_SPD_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
2215 	if (BNXT_CHIP_P5_PLUS(softc)) {
2216 		req.vnic_id = htole16(vnic->id);
2217 		req.ring_table_pair_index = 0x0;
2218 	}
2219 
2220 	return hwrm_send_message(softc, &req, sizeof(req));
2221 }
2222 
2223 int
2224 bnxt_hwrm_reserve_pf_rings(struct bnxt_softc *softc)
2225 {
2226 	struct hwrm_func_cfg_input req = {0};
2227 
2228 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
2229 
2230 	req.fid = htole16(0xffff);
2231 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS);
2232 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS);
2233 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS);
2234 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS);
2235 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS);
2236 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX);
2237 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS);
2238 	req.num_msix = htole16(BNXT_MAX_NUM_QUEUES);
2239 	req.num_rsscos_ctxs = htole16(0x8);
2240 	req.num_cmpl_rings = htole16(BNXT_MAX_NUM_QUEUES * 2);
2241 	req.num_tx_rings = htole16(BNXT_MAX_NUM_QUEUES);
2242 	req.num_rx_rings = htole16(BNXT_MAX_NUM_QUEUES);
2243 	req.num_vnics = htole16(BNXT_MAX_NUM_QUEUES);
2244 	req.num_stat_ctxs = htole16(BNXT_MAX_NUM_QUEUES * 2);
2245 
2246 	return hwrm_send_message(softc, &req, sizeof(req));
2247 }
2248 
2249 int
2250 bnxt_cfg_async_cr(struct bnxt_softc *softc)
2251 {
2252 	int rc = 0;
2253 	struct hwrm_func_cfg_input req = {0};
2254 
2255 	if (!BNXT_PF(softc))
2256 		return 0;
2257 
2258 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
2259 
2260 	req.fid = htole16(0xffff);
2261 	req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2262 	if (BNXT_CHIP_P5_PLUS(softc))
2263 		req.async_event_cr = htole16(softc->nq_rings[0].ring.phys_id);
2264 	else
2265 		req.async_event_cr = htole16(softc->def_cp_ring.ring.phys_id);
2266 
2267 	rc = hwrm_send_message(softc, &req, sizeof(req));
2268 
2269 	return rc;
2270 }
2271 
2272 void
2273 bnxt_validate_hw_lro_settings(struct bnxt_softc *softc)
2274 {
2275 	softc->hw_lro.enable = min(softc->hw_lro.enable, 1);
2276 
2277 	softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1);
2278 
2279 	softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs,
2280 		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX);
2281 
2282 	softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs,
2283 		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
2284 
2285 	softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len, BNXT_MAX_MTU);
2286 }
2287 
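/*
 * Configure hardware LRO (TPA) for the default VNIC using the
 * validated hw_lro settings: GRO or RSC window update mode, plus the
 * aggregation segment/count/length limits.  Does nothing if the VNIC
 * is not allocated or the device does not support TPA.
 */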
2288 int
2289 bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc)
2290 {
2291 	struct hwrm_vnic_tpa_cfg_input req = {0};
2292 	uint32_t flags;
2293 
2294 	if (softc->vnic_info.id == (uint16_t) HWRM_NA_SIGNATURE) {
2295 		return 0;
2296 	}
2297 
2298 	if (!(softc->flags & BNXT_FLAG_TPA))
2299 		return 0;
2300 
2301 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG);
2302 
2303 	if (softc->hw_lro.enable) {
2304 		flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2305 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2306 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2307 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ;
2308 
2309 		if (softc->hw_lro.is_mode_gro)
2310 			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
2311 		else
2312 			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE;
2313 
2314 		req.flags = htole32(flags);
2315 
2316 		req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2317 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2318 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2319 
2320 		req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs);
2321 		req.max_aggs = htole16(softc->hw_lro.max_aggs);
2322 		req.min_agg_len = htole32(softc->hw_lro.min_agg_len);
2323 	}
2324 
2325 	req.vnic_id = htole16(softc->vnic_info.id);
2326 
2327 	return hwrm_send_message(softc, &req, sizeof(req));
2328 }
2329 
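/*
 * NVM directory lookup.  Searches by type/ordinal/ext, or by directory
 * index when use_index is set, and returns the entry's ordinal, index,
 * lengths and firmware version.  Like the other NVM commands below it
 * runs with the extended BNXT_NVM_TIMEO timeout.
 */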
2330 int
2331 bnxt_hwrm_nvm_find_dir_entry(struct bnxt_softc *softc, uint16_t type,
2332     uint16_t *ordinal, uint16_t ext, uint16_t *index, bool use_index,
2333     uint8_t search_opt, uint32_t *data_length, uint32_t *item_length,
2334     uint32_t *fw_ver)
2335 {
2336 	struct hwrm_nvm_find_dir_entry_input req = {0};
2337 	struct hwrm_nvm_find_dir_entry_output *resp =
2338 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2339 	int	rc = 0;
2340 	uint32_t old_timeo;
2341 
2342 	MPASS(ordinal);
2343 
2344 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_FIND_DIR_ENTRY);
2345 	if (use_index) {
2346 		req.enables = htole32(
2347 		    HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID);
2348 		req.dir_idx = htole16(*index);
2349 	}
2350 	req.dir_type = htole16(type);
2351 	req.dir_ordinal = htole16(*ordinal);
2352 	req.dir_ext = htole16(ext);
2353 	req.opt_ordinal = search_opt;
2354 
2355 	BNXT_HWRM_LOCK(softc);
2356 	old_timeo = softc->hwrm_cmd_timeo;
2357 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2358 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2359 	softc->hwrm_cmd_timeo = old_timeo;
2360 	if (rc)
2361 		goto exit;
2362 
2363 	if (item_length)
2364 		*item_length = le32toh(resp->dir_item_length);
2365 	if (data_length)
2366 		*data_length = le32toh(resp->dir_data_length);
2367 	if (fw_ver)
2368 		*fw_ver = le32toh(resp->fw_ver);
2369 	*ordinal = le16toh(resp->dir_ordinal);
2370 	if (index)
2371 		*index = le16toh(resp->dir_idx);
2372 
2373 exit:
2374 	BNXT_HWRM_UNLOCK(softc);
2375 	return (rc);
2376 }
2377 
2378 int
2379 bnxt_hwrm_nvm_read(struct bnxt_softc *softc, uint16_t index, uint32_t offset,
2380     uint32_t length, struct iflib_dma_info *data)
2381 {
2382 	struct hwrm_nvm_read_input req = {0};
2383 	int rc;
2384 	uint32_t old_timeo;
2385 
2386 	if (length > data->idi_size) {
2387 		rc = EINVAL;
2388 		goto exit;
2389 	}
2390 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_READ);
2391 	req.host_dest_addr = htole64(data->idi_paddr);
2392 	req.dir_idx = htole16(index);
2393 	req.offset = htole32(offset);
2394 	req.len = htole32(length);
2395 	BNXT_HWRM_LOCK(softc);
2396 	old_timeo = softc->hwrm_cmd_timeo;
2397 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2398 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2399 	softc->hwrm_cmd_timeo = old_timeo;
2400 	BNXT_HWRM_UNLOCK(softc);
2401 	if (rc)
2402 		goto exit;
2403 	bus_dmamap_sync(data->idi_tag, data->idi_map, BUS_DMASYNC_POSTREAD);
2404 
2405 	goto exit;
2406 
2407 exit:
2408 	return rc;
2409 }
2410 
2411 int
2412 bnxt_hwrm_nvm_modify(struct bnxt_softc *softc, uint16_t index, uint32_t offset,
2413     void *data, bool cpyin, uint32_t length)
2414 {
2415 	struct hwrm_nvm_modify_input req = {0};
2416 	struct iflib_dma_info dma_data;
2417 	int rc;
2418 	uint32_t old_timeo;
2419 
2420 	if (length == 0 || !data)
2421 		return EINVAL;
2422 	rc = iflib_dma_alloc(softc->ctx, length, &dma_data,
2423 	    BUS_DMA_NOWAIT);
2424 	if (rc)
2425 		return ENOMEM;
2426 	if (cpyin) {
2427 		rc = copyin(data, dma_data.idi_vaddr, length);
2428 		if (rc)
2429 			goto exit;
2430 	}
2431 	else
2432 		memcpy(dma_data.idi_vaddr, data, length);
2433 	bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map,
2434 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2435 
2436 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_MODIFY);
2437 	req.host_src_addr = htole64(dma_data.idi_paddr);
2438 	req.dir_idx = htole16(index);
2439 	req.offset = htole32(offset);
2440 	req.len = htole32(length);
2441 	BNXT_HWRM_LOCK(softc);
2442 	old_timeo = softc->hwrm_cmd_timeo;
2443 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2444 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2445 	softc->hwrm_cmd_timeo = old_timeo;
2446 	BNXT_HWRM_UNLOCK(softc);
2447 
2448 exit:
2449 	iflib_dma_free(&dma_data);
2450 	return rc;
2451 }
2452 
2453 int
2454 bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
2455     uint8_t *selfreset)
2456 {
2457 	struct hwrm_fw_reset_input req = {0};
2458 	struct hwrm_fw_reset_output *resp =
2459 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2460 	int rc;
2461 
2462 	MPASS(selfreset);
2463 
2464 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET);
2465 	req.embedded_proc_type = processor;
2466 	req.selfrst_status = *selfreset;
2467 
2468 	BNXT_HWRM_LOCK(softc);
2469 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2470 	if (rc)
2471 		goto exit;
2472 	*selfreset = resp->selfrst_status;
2473 
2474 exit:
2475 	BNXT_HWRM_UNLOCK(softc);
2476 	return rc;
2477 }
2478 
2479 int
2480 bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset)
2481 {
2482 	struct hwrm_fw_qstatus_input req = {0};
2483 	struct hwrm_fw_qstatus_output *resp =
2484 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2485 	int rc;
2486 
2487 	MPASS(selfreset);
2488 
2489 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS);
2490 	req.embedded_proc_type = type;
2491 
2492 	BNXT_HWRM_LOCK(softc);
2493 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2494 	if (rc)
2495 		goto exit;
2496 	*selfreset = resp->selfrst_status;
2497 
2498 exit:
2499 	BNXT_HWRM_UNLOCK(softc);
2500 	return rc;
2501 }
2502 
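/*
 * Write (or create) an NVM directory entry.  The payload is staged in
 * a DMA bounce buffer, copied with copyin() when it comes from user
 * space, and the original active image can be preserved via the
 * KEEP_ORIG_ACTIVE_IMG flag.  The resulting item length and directory
 * index are passed back when requested.
 */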
2503 int
2504 bnxt_hwrm_nvm_write(struct bnxt_softc *softc, void *data, bool cpyin,
2505     uint16_t type, uint16_t ordinal, uint16_t ext, uint16_t attr,
2506     uint16_t option, uint32_t data_length, bool keep, uint32_t *item_length,
2507     uint16_t *index)
2508 {
2509 	struct hwrm_nvm_write_input req = {0};
2510 	struct hwrm_nvm_write_output *resp =
2511 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2512 	struct iflib_dma_info dma_data;
2513 	int rc;
2514 	uint32_t old_timeo;
2515 
2516 	if (data_length) {
2517 		rc = iflib_dma_alloc(softc->ctx, data_length, &dma_data,
2518 		    BUS_DMA_NOWAIT);
2519 		if (rc)
2520 			return ENOMEM;
2521 		if (cpyin) {
2522 			rc = copyin(data, dma_data.idi_vaddr, data_length);
2523 			if (rc)
2524 				goto early_exit;
2525 		}
2526 		else
2527 			memcpy(dma_data.idi_vaddr, data, data_length);
2528 		bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map,
2529 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2530 	}
2531 	else
2532 		dma_data.idi_paddr = 0;
2533 
2534 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_WRITE);
2535 
2536 	req.host_src_addr = htole64(dma_data.idi_paddr);
2537 	req.dir_type = htole16(type);
2538 	req.dir_ordinal = htole16(ordinal);
2539 	req.dir_ext = htole16(ext);
2540 	req.dir_attr = htole16(attr);
2541 	req.dir_data_length = htole32(data_length);
2542 	req.option = htole16(option);
2543 	if (keep) {
2544 		req.flags =
2545 		    htole16(HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG);
2546 	}
2547 	if (item_length)
2548 		req.dir_item_length = htole32(*item_length);
2549 
2550 	BNXT_HWRM_LOCK(softc);
2551 	old_timeo = softc->hwrm_cmd_timeo;
2552 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2553 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2554 	softc->hwrm_cmd_timeo = old_timeo;
2555 	if (rc)
2556 		goto exit;
2557 	if (item_length)
2558 		*item_length = le32toh(resp->dir_item_length);
2559 	if (index)
2560 		*index = le16toh(resp->dir_idx);
2561 
2562 exit:
2563 	BNXT_HWRM_UNLOCK(softc);
2564 early_exit:
2565 	if (data_length)
2566 		iflib_dma_free(&dma_data);
2567 	return rc;
2568 }
2569 
2570 int
2571 bnxt_hwrm_nvm_erase_dir_entry(struct bnxt_softc *softc, uint16_t index)
2572 {
2573 	struct hwrm_nvm_erase_dir_entry_input req = {0};
2574 	uint32_t old_timeo;
2575 	int rc;
2576 
2577 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_ERASE_DIR_ENTRY);
2578 	req.dir_idx = htole16(index);
2579 	BNXT_HWRM_LOCK(softc);
2580 	old_timeo = softc->hwrm_cmd_timeo;
2581 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2582 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2583 	softc->hwrm_cmd_timeo = old_timeo;
2584 	BNXT_HWRM_UNLOCK(softc);
2585 	return rc;
2586 }
2587 
2588 int
2589 bnxt_hwrm_nvm_get_dir_info(struct bnxt_softc *softc, uint32_t *entries,
2590     uint32_t *entry_length)
2591 {
2592 	struct hwrm_nvm_get_dir_info_input req = {0};
2593 	struct hwrm_nvm_get_dir_info_output *resp =
2594 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2595 	int rc;
2596 	uint32_t old_timeo;
2597 
2598 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_INFO);
2599 
2600 	BNXT_HWRM_LOCK(softc);
2601 	old_timeo = softc->hwrm_cmd_timeo;
2602 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2603 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2604 	softc->hwrm_cmd_timeo = old_timeo;
2605 	if (rc)
2606 		goto exit;
2607 
2608 	if (entries)
2609 		*entries = le32toh(resp->entries);
2610 	if (entry_length)
2611 		*entry_length = le32toh(resp->entry_length);
2612 
2613 exit:
2614 	BNXT_HWRM_UNLOCK(softc);
2615 	return rc;
2616 }
2617 
2618 int
2619 bnxt_hwrm_nvm_get_dir_entries(struct bnxt_softc *softc, uint32_t *entries,
2620     uint32_t *entry_length, struct iflib_dma_info *dma_data)
2621 {
2622 	struct hwrm_nvm_get_dir_entries_input req = {0};
2623 	uint32_t ent;
2624 	uint32_t ent_len;
2625 	int rc;
2626 	uint32_t old_timeo;
2627 
2628 	if (!entries)
2629 		entries = &ent;
2630 	if (!entry_length)
2631 		entry_length = &ent_len;
2632 
2633 	rc = bnxt_hwrm_nvm_get_dir_info(softc, entries, entry_length);
2634 	if (rc)
2635 		goto exit;
2636 	if (*entries * *entry_length > dma_data->idi_size) {
2637 		rc = EINVAL;
2638 		goto exit;
2639 	}
2640 
2641 	/*
2642 	 * TODO: There's a race condition here that could blow up DMA memory...
2643 	 *	 we need to allocate the max size, not the currently in use
2644 	 *	 size.  The command should totally have a max size here.
2645 	 */
2646 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_ENTRIES);
2647 	req.host_dest_addr = htole64(dma_data->idi_paddr);
2648 	BNXT_HWRM_LOCK(softc);
2649 	old_timeo = softc->hwrm_cmd_timeo;
2650 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2651 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2652 	softc->hwrm_cmd_timeo = old_timeo;
2653 	BNXT_HWRM_UNLOCK(softc);
2654 	if (rc)
2655 		goto exit;
2656 	bus_dmamap_sync(dma_data->idi_tag, dma_data->idi_map,
2657 	    BUS_DMASYNC_POSTWRITE);
2658 
2659 exit:
2660 	return rc;
2661 }
2662 
2663 int
2664 bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
2665     uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
2666     uint32_t *reserved_size, uint32_t *available_size)
2667 {
2668 	struct hwrm_nvm_get_dev_info_input req = {0};
2669 	struct hwrm_nvm_get_dev_info_output *resp =
2670 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2671 	int rc;
2672 	uint32_t old_timeo;
2673 
2674 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO);
2675 
2676 	BNXT_HWRM_LOCK(softc);
2677 	old_timeo = softc->hwrm_cmd_timeo;
2678 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2679 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2680 	softc->hwrm_cmd_timeo = old_timeo;
2681 	if (rc)
2682 		goto exit;
2683 
2684 	if (mfg_id)
2685 		*mfg_id = le16toh(resp->manufacturer_id);
2686 	if (device_id)
2687 		*device_id = le16toh(resp->device_id);
2688 	if (sector_size)
2689 		*sector_size = le32toh(resp->sector_size);
2690 	if (nvram_size)
2691 		*nvram_size = le32toh(resp->nvram_size);
2692 	if (reserved_size)
2693 		*reserved_size = le32toh(resp->reserved_size);
2694 	if (available_size)
2695 		*available_size = le32toh(resp->available_size);
2696 
2697 exit:
2698 	BNXT_HWRM_UNLOCK(softc);
2699 	return rc;
2700 }
2701 
2702 int
2703 bnxt_hwrm_nvm_install_update(struct bnxt_softc *softc,
2704     uint32_t install_type, uint64_t *installed_items, uint8_t *result,
2705     uint8_t *problem_item, uint8_t *reset_required)
2706 {
2707 	struct hwrm_nvm_install_update_input req = {0};
2708 	struct hwrm_nvm_install_update_output *resp =
2709 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2710 	int rc;
2711 	uint32_t old_timeo;
2712 
2713 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_INSTALL_UPDATE);
2714 	req.install_type = htole32(install_type);
2715 
2716 	BNXT_HWRM_LOCK(softc);
2717 	old_timeo = softc->hwrm_cmd_timeo;
2718 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2719 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2720 	softc->hwrm_cmd_timeo = old_timeo;
2721 	if (rc)
2722 		goto exit;
2723 
2724 	if (installed_items)
2725 		*installed_items = le32toh(resp->installed_items);
2726 	if (result)
2727 		*result = resp->result;
2728 	if (problem_item)
2729 		*problem_item = resp->problem_item;
2730 	if (reset_required)
2731 		*reset_required = resp->reset_required;
2732 
2733 exit:
2734 	BNXT_HWRM_UNLOCK(softc);
2735 	return rc;
2736 }
2737 
2738 int
2739 bnxt_hwrm_nvm_verify_update(struct bnxt_softc *softc, uint16_t type,
2740     uint16_t ordinal, uint16_t ext)
2741 {
2742 	struct hwrm_nvm_verify_update_input req = {0};
2743 	uint32_t old_timeo;
2744 	int rc;
2745 
2746 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_VERIFY_UPDATE);
2747 
2748 	req.dir_type = htole16(type);
2749 	req.dir_ordinal = htole16(ordinal);
2750 	req.dir_ext = htole16(ext);
2751 
2752 	BNXT_HWRM_LOCK(softc);
2753 	old_timeo = softc->hwrm_cmd_timeo;
2754 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2755 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2756 	softc->hwrm_cmd_timeo = old_timeo;
2757 	BNXT_HWRM_UNLOCK(softc);
2758 	return rc;
2759 }
2760 
2761 int
2762 bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year, uint8_t *month,
2763     uint8_t *day, uint8_t *hour, uint8_t *minute, uint8_t *second,
2764     uint16_t *millisecond, uint16_t *zone)
2765 {
2766 	struct hwrm_fw_get_time_input req = {0};
2767 	struct hwrm_fw_get_time_output *resp =
2768 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2769 	int rc;
2770 
2771 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME);
2772 
2773 	BNXT_HWRM_LOCK(softc);
2774 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2775 	if (rc)
2776 		goto exit;
2777 
2778 	if (year)
2779 		*year = le16toh(resp->year);
2780 	if (month)
2781 		*month = resp->month;
2782 	if (day)
2783 		*day = resp->day;
2784 	if (hour)
2785 		*hour = resp->hour;
2786 	if (minute)
2787 		*minute = resp->minute;
2788 	if (second)
2789 		*second = resp->second;
2790 	if (millisecond)
2791 		*millisecond = le16toh(resp->millisecond);
2792 	if (zone)
2793 		*zone = le16toh(resp->zone);
2794 
2795 exit:
2796 	BNXT_HWRM_UNLOCK(softc);
2797 	return rc;
2798 }
2799 
2800 int
2801 bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year, uint8_t month,
2802     uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
2803     uint16_t millisecond, uint16_t zone)
2804 {
2805 	struct hwrm_fw_set_time_input req = {0};
2806 
2807 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME);
2808 
2809 	req.year = htole16(year);
2810 	req.month = month;
2811 	req.day = day;
2812 	req.hour = hour;
2813 	req.minute = minute;
2814 	req.second = second;
2815 	req.millisecond = htole16(millisecond);
2816 	req.zone = htole16(zone);
2817 	return hwrm_send_message(softc, &req, sizeof(req));
2818 }
2819 
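/*
 * Read SFP module EEPROM contents through HWRM_PORT_PHY_I2C_READ,
 * splitting the transfer into chunks of at most
 * BNXT_MAX_PHY_I2C_RESP_SIZE bytes.  The page offset and bank select
 * enables are only set in the request when they are actually needed.
 */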
2820 int bnxt_read_sfp_module_eeprom_info(struct bnxt_softc *softc, uint16_t i2c_addr,
2821     uint16_t page_number, uint8_t bank, bool bank_sel_en, uint16_t start_addr,
2822     uint16_t data_length, uint8_t *buf)
2823 {
2824 	struct hwrm_port_phy_i2c_read_output *output =
2825 			(void *)softc->hwrm_cmd_resp.idi_vaddr;
2826 	struct hwrm_port_phy_i2c_read_input req = {0};
2827 	int rc = 0, byte_offset = 0;
2828 
2829 	BNXT_HWRM_LOCK(softc);
2830 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_I2C_READ);
2831 
2832 	req.i2c_slave_addr = i2c_addr;
2833 	req.page_number = htole16(page_number);
2834 	req.port_id = htole16(softc->pf.port_id);
2835 	do {
2836 		uint16_t xfer_size;
2837 
2838 		xfer_size = min_t(uint16_t, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
2839 		data_length -= xfer_size;
2840 		req.page_offset = htole16(start_addr + byte_offset);
2841 		req.data_length = xfer_size;
2842 		req.bank_number = bank;
2843 		req.enables = htole32((start_addr + byte_offset ?
2844 				HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET : 0) |
2845 				(bank_sel_en ?
2846 				HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_BANK_NUMBER : 0));
2847 		rc = _hwrm_send_message(softc, &req, sizeof(req));
2848 		if (!rc)
2849 			memcpy(buf + byte_offset, output->data, xfer_size);
2850 		byte_offset += xfer_size;
2851 	} while (!rc && data_length > 0);
2852 
2853 	BNXT_HWRM_UNLOCK(softc);
2854 
2855 	return rc;
2856 }
2857 
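/*
 * Query the current PHY configuration and cache it in link_info:
 * link status and speed, autoneg vs. forced flow control, supported
 * and advertised speed masks (including PAM4 and speeds2), PHY
 * version/vendor strings and module status.
 */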
2858 int
2859 bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc)
2860 {
2861 	struct bnxt_link_info *link_info = &softc->link_info;
2862 	struct hwrm_port_phy_qcfg_input req = {0};
2863 	struct hwrm_port_phy_qcfg_output *resp =
2864 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2865 	int rc = 0;
2866 
2867 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);
2868 
2869 	BNXT_HWRM_LOCK(softc);
2870 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2871 	if (rc)
2872 		goto exit;
2873 
2874 	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
2875 	link_info->phy_link_status = resp->link;
2876 	link_info->duplex =  resp->duplex_cfg;
2877 	link_info->duplex = resp->duplex_cfg;
2878 
2879 	/*
2880 	 * When AUTO_PAUSE_AUTONEG_PAUSE bit is set to 1,
2881 	 * the advertisement of pause is enabled.
2882 	 * 1. When the auto_mode is not set to none and this flag is set to 1,
2883 	 *    then the auto_pause bits on this port are being advertised and
2884 	 *    autoneg pause results are being interpreted.
2885 	 * 2. When the auto_mode is not set to none and this flag is set to 0,
2886 	 *    the pause is forced as indicated in force_pause, and also
2887 	 *    advertised as auto_pause bits, but the autoneg results are not
2888 	 *    interpreted since the pause configuration is being forced.
2889 	 * 3. When the auto_mode is set to none and this flag is set to 1,
2890 	 *    auto_pause bits should be ignored and should be set to 0.
2891 	 */
2892 
2893 	link_info->flow_ctrl.autoneg = false;
2894 	link_info->flow_ctrl.tx = false;
2895 	link_info->flow_ctrl.rx = false;
2896 
2897 	if ((resp->auto_mode) &&
2898 	    (resp->auto_pause & BNXT_AUTO_PAUSE_AUTONEG_PAUSE)) {
2899 		link_info->flow_ctrl.autoneg = true;
2900 	}
2901 
2902 	if (link_info->flow_ctrl.autoneg) {
2903 		if (resp->auto_pause & BNXT_PAUSE_TX)
2904 			link_info->flow_ctrl.tx = true;
2905 		if (resp->auto_pause & BNXT_PAUSE_RX)
2906 			link_info->flow_ctrl.rx = true;
2907 	} else {
2908 		if (resp->force_pause & BNXT_PAUSE_TX)
2909 			link_info->flow_ctrl.tx = true;
2910 		if (resp->force_pause & BNXT_PAUSE_RX)
2911 			link_info->flow_ctrl.rx = true;
2912 	}
2913 
2914 	link_info->duplex_setting = resp->duplex_cfg;
2915 	if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
2916 		link_info->link_speed = le16toh(resp->link_speed);
2917 	else
2918 		link_info->link_speed = 0;
2919 	link_info->force_link_speed = le16toh(resp->force_link_speed);
2920 	link_info->auto_link_speeds = le16toh(resp->auto_link_speed);
2921 	link_info->support_speeds = le16toh(resp->support_speeds);
2922 	link_info->auto_link_speeds = le16toh(resp->auto_link_speed_mask);
2923 	link_info->preemphasis = le32toh(resp->preemphasis);
2924 	link_info->phy_ver[0] = resp->phy_maj;
2925 	link_info->phy_ver[1] = resp->phy_min;
2926 	link_info->phy_ver[2] = resp->phy_bld;
2927 	snprintf(softc->ver_info->phy_ver, sizeof(softc->ver_info->phy_ver),
2928 	    "%d.%d.%d", link_info->phy_ver[0], link_info->phy_ver[1],
2929 	    link_info->phy_ver[2]);
2930 	strlcpy(softc->ver_info->phy_vendor, resp->phy_vendor_name,
2931 	    BNXT_NAME_SIZE);
2932 	strlcpy(softc->ver_info->phy_partnumber, resp->phy_vendor_partnumber,
2933 	    BNXT_NAME_SIZE);
2934 	link_info->media_type = resp->media_type;
2935 	link_info->phy_type = resp->phy_type;
2936 	link_info->transceiver = resp->xcvr_pkg_type;
2937 	link_info->phy_addr = resp->eee_config_phy_addr &
2938 	    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK;
2939 	link_info->module_status = resp->module_status;
2940 	link_info->support_pam4_speeds = le16toh(resp->support_pam4_speeds);
2941 	link_info->auto_pam4_link_speeds = le16toh(resp->auto_pam4_link_speed_mask);
2942 	link_info->force_pam4_link_speed = le16toh(resp->force_pam4_link_speed);
2943 
2944 	if (softc->hwrm_spec_code >= 0x10504)
2945 		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
2946 
2947 	link_info->support_speeds2 = le16toh(resp->support_speeds2);
2948 	link_info->auto_link_speeds2 = le16toh(resp->auto_link_speeds2);
2949 	link_info->force_link_speeds2 = le16toh(resp->force_link_speeds2);
2950 
2951 exit:
2952 	BNXT_HWRM_UNLOCK(softc);
2953 	return rc;
2954 }
2955 
2956 static bool
2957 bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
2958 {
2959 	if (!resp->supported_speeds_auto_mode &&
2960 	    !resp->supported_speeds_force_mode &&
2961 	    !resp->supported_pam4_speeds_auto_mode &&
2962 	    !resp->supported_pam4_speeds_force_mode &&
2963 	    !resp->supported_speeds2_auto_mode &&
2964 	    !resp->supported_speeds2_force_mode)
2965 		return true;
2966 
2967 	return false;
2968 }
2969 
2970 int bnxt_hwrm_phy_qcaps(struct bnxt_softc *softc)
2971 {
2972 	struct bnxt_link_info *link_info = &softc->link_info;
2973 	struct hwrm_port_phy_qcaps_output *resp =
2974 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2975 	struct hwrm_port_phy_qcaps_input req = {};
2976 	int rc;
2977 
2978 	if (softc->hwrm_spec_code < 0x10201)
2979 		return 0;
2980 
2981 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCAPS);
2982 
2983 	BNXT_HWRM_LOCK(softc);
2984 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2985 	if (rc)
2986 		goto exit;
2987 
2988 	softc->phy_flags = resp->flags | (resp->flags2 << 8);
2989 	if (resp->flags & HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EEE_SUPPORTED) {
2990 
2991 		softc->lpi_tmr_lo = le32toh(resp->tx_lpi_timer_low) &
2992 					    HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_MASK;
2993 		softc->lpi_tmr_hi = le32toh(resp->valid_tx_lpi_timer_high) &
2994 					    HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_MASK;
2995 	}
2996 
2997 	if (softc->hwrm_spec_code >= 0x10a01) {
2998 		if (bnxt_phy_qcaps_no_speed(resp)) {
2999 			link_info->phy_state = BNXT_PHY_STATE_DISABLED;
3000 			device_printf(softc->dev, "Ethernet link disabled\n");
3001 		} else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
3002 			link_info->phy_state = BNXT_PHY_STATE_ENABLED;
3003 			device_printf(softc->dev, "Ethernet link enabled\n");
3004 			/* Phy re-enabled, reprobe the speeds */
3005 			link_info->support_auto_speeds = 0;
3006 			link_info->support_pam4_auto_speeds = 0;
3007 			link_info->support_auto_speeds2 = 0;
3008 		}
3009 	}
3010 	if (resp->supported_speeds_auto_mode)
3011 		link_info->support_auto_speeds =
3012 			le16toh(resp->supported_speeds_auto_mode);
3013 	if (resp->supported_speeds_force_mode)
3014 		link_info->support_force_speeds =
3015 			le16toh(resp->supported_speeds_force_mode);
3016 	if (resp->supported_pam4_speeds_auto_mode)
3017 		link_info->support_pam4_auto_speeds =
3018 			le16toh(resp->supported_pam4_speeds_auto_mode);
3019 	if (resp->supported_pam4_speeds_force_mode)
3020 		link_info->support_pam4_force_speeds =
3021 			le16toh(resp->supported_pam4_speeds_force_mode);
3022 
3023 	if (resp->supported_speeds2_auto_mode)
3024 		link_info->support_auto_speeds2 =
3025 			le16toh(resp->supported_speeds2_auto_mode);
3026 
3027 	if (resp->supported_speeds2_force_mode)
3028 		link_info->support_force_speeds2 =
3029 			le16toh(resp->supported_speeds2_force_mode);
3030 
3031 exit:
3032 	BNXT_HWRM_UNLOCK(softc);
3033 	return rc;
3034 }
3035 
3036 uint16_t
3037 bnxt_hwrm_get_wol_fltrs(struct bnxt_softc *softc, uint16_t handle)
3038 {
3039 	struct hwrm_wol_filter_qcfg_input req = {0};
3040 	struct hwrm_wol_filter_qcfg_output *resp =
3041 			(void *)softc->hwrm_cmd_resp.idi_vaddr;
3042 	uint16_t next_handle = 0;
3043 	int rc;
3044 
3045 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_QCFG);
3046 	req.port_id = htole16(softc->pf.port_id);
3047 	req.handle = htole16(handle);
3048 	rc = hwrm_send_message(softc, &req, sizeof(req));
3049 	if (!rc) {
3050 		next_handle = le16toh(resp->next_handle);
3051 		if (next_handle != 0) {
3052 			if (resp->wol_type ==
3053 				HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT) {
3054 				softc->wol = 1;
3055 				softc->wol_filter_id = resp->wol_filter_id;
3056 			}
3057 		}
3058 	}
3059 	return next_handle;
3060 }
3061 
3062 int
3063 bnxt_hwrm_alloc_wol_fltr(struct bnxt_softc *softc)
3064 {
3065 	struct hwrm_wol_filter_alloc_input req = {0};
3066 	struct hwrm_wol_filter_alloc_output *resp =
3067 		(void *)softc->hwrm_cmd_resp.idi_vaddr;
3068 	int rc;
3069 
3070 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_ALLOC);
3071 	req.port_id = htole16(softc->pf.port_id);
3072 	req.wol_type = HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT;
3073 	req.enables =
3074 		htole32(HWRM_WOL_FILTER_ALLOC_INPUT_ENABLES_MAC_ADDRESS);
3075 	memcpy(req.mac_address, softc->func.mac_addr, ETHER_ADDR_LEN);
3076 	rc = hwrm_send_message(softc, &req, sizeof(req));
3077 	if (!rc)
3078 		softc->wol_filter_id = resp->wol_filter_id;
3079 
3080 	return rc;
3081 }
3082 
3083 int
3084 bnxt_hwrm_free_wol_fltr(struct bnxt_softc *softc)
3085 {
3086 	struct hwrm_wol_filter_free_input req = {0};
3087 
3088 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_FREE);
3089 	req.port_id = htole16(softc->pf.port_id);
3090 	req.enables =
3091 		htole32(HWRM_WOL_FILTER_FREE_INPUT_ENABLES_WOL_FILTER_ID);
3092 	req.wol_filter_id = softc->wol_filter_id;
3093 	return hwrm_send_message(softc, &req, sizeof(req));
3094 }
3095 
3096 static void bnxt_hwrm_set_coal_params(struct bnxt_softc *softc, uint32_t max_frames,
3097         uint32_t buf_tmrs, uint16_t flags,
3098         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3099 {
3100 	req->flags = htole16(flags);
3101 	req->num_cmpl_dma_aggr = htole16((uint16_t)max_frames);
3102 	req->num_cmpl_dma_aggr_during_int = htole16(max_frames >> 16);
3103 	req->cmpl_aggr_dma_tmr = htole16((uint16_t)buf_tmrs);
3104 	req->cmpl_aggr_dma_tmr_during_int = htole16(buf_tmrs >> 16);
3105 	/* Minimum time between 2 interrupts set to buf_tmr x 2 */
3106 	req->int_lat_tmr_min = htole16((uint16_t)buf_tmrs * 2);
3107 	req->int_lat_tmr_max = htole16((uint16_t)buf_tmrs * 4);
3108 	req->num_cmpl_aggr_int = htole16((uint16_t)max_frames * 4);
3109 }
3110 
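/*
 * Translate the softc's RX/TX coalescing settings into HWRM aggregation
 * parameters and program them on the completion rings.
 */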
int
bnxt_hwrm_set_coal(struct bnxt_softc *softc)
{
	int i, rc = 0;
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
							   req_tx = {0}, *req;
	uint16_t max_buf, max_buf_irq;
	uint16_t buf_tmr, buf_tmr_irq;
	uint32_t flags;

	bnxt_hwrm_cmd_hdr_init(softc, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	bnxt_hwrm_cmd_hdr_init(softc, &req_tx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);

	/* Each rx completion (2 records) should be DMAed immediately.
	 * DMA 1/4 of the completion buffers at a time.
	 */
	max_buf = min_t(uint16_t, softc->rx_coal_frames / 4, 2);
	/* max_buf must not be zero */
	max_buf = clamp_t(uint16_t, max_buf, 1, 63);
	max_buf_irq = clamp_t(uint16_t, softc->rx_coal_frames_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs_irq);
	buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);

	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;

	/* RING_IDLE generates more IRQs for lower latency.  Enable it only
	 * if coal_usecs is less than 25 us.
	 */
	if (softc->rx_coal_usecs < 25)
		flags |= HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;

	bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
				  buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);

	/* max_buf must not be zero */
	max_buf = clamp_t(uint16_t, softc->tx_coal_frames, 1, 63);
	max_buf_irq = clamp_t(uint16_t, softc->tx_coal_frames_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs_irq);
	buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
	bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
				  buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);

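	/*
	 * Only the RX parameters are programmed on each completion ring
	 * below; req_tx is prepared above but not currently sent.
	 */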
	for (i = 0; i < softc->nrxqsets; i++) {
		req = &req_rx;
		req->ring_id = htole16(softc->grp_info[i].cp_ring_id);

		rc = hwrm_send_message(softc, req, sizeof(*req));
		if (rc)
			break;
	}
	return rc;
}

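/*
 * Debug helper: read the current producer and consumer indices of the given
 * firmware ring.  Both are set to 0xffffffff if the query fails.
 */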
void
bnxt_hwrm_ring_info_get(struct bnxt_softc *softc, uint8_t ring_type,
    uint32_t ring_id, uint32_t *prod, uint32_t *cons)
{
	hwrm_dbg_ring_info_get_input_t req = {0};
	hwrm_dbg_ring_info_get_output_t *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc = 0;

	*prod = *cons = 0xffffffff;
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_DBG_RING_INFO_GET);
	/* Request fields are little-endian on the wire. */
	req.ring_type = ring_type;
	req.fw_ring_id = htole32(ring_id);
	rc = hwrm_send_message(softc, &req, sizeof(req));
	if (!rc) {
		*prod = resp->producer_index;
		*cons = resp->consumer_index;
	}
}