xref: /freebsd/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c (revision c9965974a52b5dfad1737706b7f2623d999fb569)
1 /*-
2  * Broadcom NetXtreme-C/E network driver.
3  *
4  * Copyright (c) 2016 Broadcom, All Rights Reserved.
5  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include <sys/endian.h>
31 #include <linux/pci.h>
32 
33 #include "bnxt.h"
34 #include "bnxt_hwrm.h"
35 #include "hsi_struct_def.h"
36 
37 static int bnxt_hwrm_err_map(uint16_t err);
38 static inline int _is_valid_ether_addr(uint8_t *);
39 static inline void get_random_ether_addr(uint8_t *);
40 static void	bnxt_hwrm_set_link_common(struct bnxt_softc *softc,
41 		    struct hwrm_port_phy_cfg_input *req);
42 static void	bnxt_hwrm_set_pause_common(struct bnxt_softc *softc,
43 		    struct hwrm_port_phy_cfg_input *req);
44 static void	bnxt_hwrm_set_eee(struct bnxt_softc *softc,
45 		    struct hwrm_port_phy_cfg_input *req);
46 
47 /* NVRam stuff has a five minute timeout */
48 #define BNXT_NVM_TIMEO	(5 * 60 * 1000)
49 
50 #define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
51 	BNXT_RX_STATS_EXT_OFFSET(counter##_cos0)
52 
53 #define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
54 	 BNXT_TX_STATS_EXT_OFFSET(counter##_cos0)
55 
56 #define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
57 	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
58 	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
59 	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
60 	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
61 	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
62 	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
63 	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
64 	BNXT_RX_STATS_PRI_ENTRY(counter, 7)
65 
66 #define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
67 	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
68 	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
69 	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
70 	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
71 	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
72 	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
73 	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
74 	BNXT_TX_STATS_PRI_ENTRY(counter, 7)
75 
76 
77 long bnxt_rx_bytes_pri_arr_base_off[] = {BNXT_RX_STATS_PRI_ENTRIES(rx_bytes)};
78 long bnxt_rx_pkts_pri_arr_base_off[] = {BNXT_RX_STATS_PRI_ENTRIES(rx_packets)};
79 long bnxt_tx_bytes_pri_arr_base_off[] = {BNXT_TX_STATS_PRI_ENTRIES(tx_bytes)};
80 long bnxt_tx_pkts_pri_arr_base_off[] = {BNXT_TX_STATS_PRI_ENTRIES(tx_packets)};
81 
82 static int
83 bnxt_hwrm_err_map(uint16_t err)
84 {
85 	int rc;
86 
87 	switch (err) {
88 	case HWRM_ERR_CODE_SUCCESS:
89 		return 0;
90 	case HWRM_ERR_CODE_INVALID_PARAMS:
91 	case HWRM_ERR_CODE_INVALID_FLAGS:
92 	case HWRM_ERR_CODE_INVALID_ENABLES:
93 		return EINVAL;
94 	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
95 		return EACCES;
96 	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
97 		return ENOMEM;
98 	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
99 		return ENOSYS;
100 	case HWRM_ERR_CODE_FAIL:
101 		return EIO;
102 	case HWRM_ERR_CODE_HWRM_ERROR:
103 	case HWRM_ERR_CODE_UNKNOWN_ERR:
104 	default:
105 		return EDOOFUS;
106 	}
107 
108 	return rc;
109 }
110 
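/*
 * Allocate the page-sized DMA buffer that receives all HWRM command
 * responses; its bus address is placed in every request header.
 */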
111 int
112 bnxt_alloc_hwrm_dma_mem(struct bnxt_softc *softc)
113 {
114 	int rc;
115 
116 	rc = iflib_dma_alloc(softc->ctx, PAGE_SIZE, &softc->hwrm_cmd_resp,
117 	    BUS_DMA_NOWAIT);
118 	return rc;
119 }
120 
121 void
122 bnxt_free_hwrm_dma_mem(struct bnxt_softc *softc)
123 {
124 	if (softc->hwrm_cmd_resp.idi_vaddr)
125 		iflib_dma_free(&softc->hwrm_cmd_resp);
126 	softc->hwrm_cmd_resp.idi_vaddr = NULL;
127 	return;
128 }
129 
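/*
 * Initialize the common HWRM request header: command type, 0xffff
 * completion-ring and target IDs, and the DMA address of the shared
 * response buffer.
 */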
130 void
131 bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
132     uint16_t req_type)
133 {
134 	struct input *req = request;
135 
136 	req->req_type = htole16(req_type);
137 	req->cmpl_ring = 0xffff;
138 	req->target_id = 0xffff;
139 	req->resp_addr = htole64(softc->hwrm_cmd_resp.idi_paddr);
140 }
141 
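/*
 * Send one HWRM request with the HWRM lock held: optionally repackage it
 * in the short command format, copy it into the HWRM BAR, ring the
 * doorbell, poll for the response length and valid byte, and translate
 * any firmware error code into an errno.
 */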
142 int
143 _hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
144 {
145 	struct input *req = msg;
146 	struct hwrm_err_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
147 	uint32_t *data = msg;
148 	int i;
149 	uint8_t *valid;
150 	uint16_t err;
151 	uint16_t max_req_len = BNXT_HWRM_MAX_REQ_LEN;
152 	struct hwrm_short_input short_input = {0};
153 
154 	/* TODO: DMASYNC in here. */
155 	req->seq_id = htole16(softc->hwrm_cmd_seq++);
156 	memset(resp, 0, PAGE_SIZE);
157 
158 	if (BNXT_NO_FW_ACCESS(softc) &&
159 	    (req->req_type != HWRM_FUNC_RESET && req->req_type != HWRM_VER_GET))
160 		return -EINVAL;
161 
162 	if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
163 	    msg_len > BNXT_HWRM_MAX_REQ_LEN) {
164 		void *short_cmd_req = softc->hwrm_short_cmd_req_addr.idi_vaddr;
165 		uint16_t max_msg_len;
166 
167 		/* Bound the extended request length for the short cmd
168 		 * format by the maximum length the device reports it
169 		 * supports internally.
170 		 */
171 
172 		max_msg_len = softc->hwrm_max_ext_req_len;
173 
174 
175 		memcpy(short_cmd_req, req, msg_len);
176 		if (msg_len < max_msg_len)
177 			memset((uint8_t *)short_cmd_req + msg_len, 0,
178 			    max_msg_len - msg_len);
179 
180 		short_input.req_type = req->req_type;
181 		short_input.signature =
182 		    htole16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
183 		short_input.size = htole16(msg_len);
184 		short_input.req_addr =
185 		    htole64(softc->hwrm_short_cmd_req_addr.idi_paddr);
186 
187 		data = (uint32_t *)&short_input;
188 		msg_len = sizeof(short_input);
189 
190 		/* Sync memory write before updating doorbell */
191 		wmb();
192 
193 		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
194 	}
195 
196 	/* Write request msg to hwrm channel */
197 	for (i = 0; i < msg_len; i += 4) {
198 		bus_space_write_4(softc->hwrm_bar.tag,
199 				  softc->hwrm_bar.handle,
200 				  i, *data);
201 		data++;
202 	}
203 
204 	/* Clear to the end of the request buffer */
205 	for (i = msg_len; i < max_req_len; i += 4)
206 		bus_space_write_4(softc->hwrm_bar.tag, softc->hwrm_bar.handle,
207 		    i, 0);
208 
209 	/* Ring channel doorbell */
210 	bus_space_write_4(softc->hwrm_bar.tag,
211 			  softc->hwrm_bar.handle,
212 			  0x100, htole32(1));
213 
214 	/* Check if response len is updated */
215 	for (i = 0; i < softc->hwrm_cmd_timeo; i++) {
216 		if (resp->resp_len && resp->resp_len <= 4096)
217 			break;
218 		DELAY(1000);
219 	}
220 	if (i >= softc->hwrm_cmd_timeo) {
221 		device_printf(softc->dev,
222 		    "Timeout sending %s: (timeout: %u) seq: %d\n",
223 		    GET_HWRM_REQ_TYPE(req->req_type), softc->hwrm_cmd_timeo,
224 		    le16toh(req->seq_id));
225 		return ETIMEDOUT;
226 	}
227 	/* Last byte of resp contains the valid key */
228 	valid = (uint8_t *)resp + resp->resp_len - 1;
229 	for (i = 0; i < softc->hwrm_cmd_timeo; i++) {
230 		if (*valid == HWRM_RESP_VALID_KEY)
231 			break;
232 		DELAY(1000);
233 	}
234 	if (i >= softc->hwrm_cmd_timeo) {
235 		device_printf(softc->dev, "Timeout sending %s: "
236 		    "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n",
237 		    GET_HWRM_REQ_TYPE(req->req_type),
238 		    softc->hwrm_cmd_timeo, le16toh(req->req_type),
239 		    le16toh(req->seq_id), msg_len,
240 		    *valid);
241 		return ETIMEDOUT;
242 	}
243 
244 	err = le16toh(resp->error_code);
245 	if (err) {
246 		/* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */
247 		if (err != HWRM_ERR_CODE_FAIL) {
248 			device_printf(softc->dev,
249 			    "%s command returned %s error.\n",
250 			    GET_HWRM_REQ_TYPE(req->req_type),
251 			    GET_HWRM_ERROR_CODE(err));
252 		}
253 		return bnxt_hwrm_err_map(err);
254 	}
255 
256 	return 0;
257 }
258 
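/* Convenience wrapper that takes the HWRM lock around _hwrm_send_message(). */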
259 int
260 hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
261 {
262 	int rc;
263 
264 	BNXT_HWRM_LOCK(softc);
265 	rc = _hwrm_send_message(softc, msg, msg_len);
266 	BNXT_HWRM_UNLOCK(softc);
267 	return rc;
268 }
269 
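/*
 * Query the CoS queue configuration from firmware for the given path
 * (TX or RX) and record the usable queue IDs/profiles and TC limits in
 * the softc.
 */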
270 int
271 bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc, uint32_t path_dir)
272 {
273 	int rc = 0;
274 	struct hwrm_queue_qportcfg_input req = {0};
275 	struct hwrm_queue_qportcfg_output *resp =
276 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
277 	uint8_t max_tc, max_lltc, *max_q;
278 	uint8_t queue_profile, queue_id;
279 	struct bnxt_queue_info *q_info;
280 	uint8_t i, j, *qptr, *q_ids;
281 	bool no_rdma;
282 
283 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG);
284 
285 	BNXT_HWRM_LOCK(softc);
286 	rc = _hwrm_send_message(softc, &req, sizeof(req));
287 	if (rc)
288 		goto qportcfg_exit;
289 
290 	if (!resp->max_configurable_queues) {
291 		rc = -EINVAL;
292 		goto qportcfg_exit;
293 	}
294 
295 	if (resp->queue_cfg_info & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG) {
296 		softc->is_asym_q = true;
297 		/* bnxt_init_cosq_names(softc, path_dir); */
298 	} else {
299 		softc->is_asym_q = false;
300 		/* bnxt_free_stats_cosqnames_mem(softc); */
301 	}
302 
303 	max_tc = min_t(uint8_t, resp->max_configurable_queues, BNXT_MAX_QUEUE);
304 	max_lltc = resp->max_configurable_lossless_queues;
305 
306 	/*
307 	 * No RDMA support yet.
308 	 * no_rdma = !(softc->flags & BNXT_FLAG_ROCE_CAP);
309 	 */
310 	no_rdma = true;
311 	qptr = &resp->queue_id0;
312 
313 	if (path_dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
314 		q_info = softc->tx_q_info;
315 		q_ids = softc->tx_q_ids;
316 		max_q = &softc->tx_max_q;
317 	} else {
318 		q_info = softc->rx_q_info;
319 		q_ids = softc->rx_q_ids;
320 		max_q = &softc->rx_max_q;
321 	}
322 
323 	for (i = 0, j = 0; i < max_tc; i++) {
324 		queue_id = *qptr;
325 		qptr++;
326 
327 		queue_profile = *qptr;
328 		qptr++;
329 
330 		q_info[j].queue_id = queue_id;
331 		q_info[j].queue_profile = queue_profile;
332 		q_ids[i] = queue_id;
333 
334 		softc->tc_to_qidx[j] = j;
335 
336 		if (!BNXT_CNPQ(q_info[j].queue_profile) ||
337 		    (no_rdma && BNXT_PF(softc)))
338 			j++;
339 	}
340 	*max_q = max_tc;
341 	max_tc = max_t(uint8_t, j, 1);
342 	softc->max_tc = softc->max_tc ? min(softc->max_tc, max_tc) : max_tc;
343 	softc->max_lltc = softc->max_lltc ? min(softc->max_lltc, max_lltc) : max_lltc;
344 
345 	if (softc->max_lltc > softc->max_tc)
346 		softc->max_lltc = softc->max_tc;
347 
348 qportcfg_exit:
349 	BNXT_HWRM_UNLOCK(softc);
350 	return rc;
351 }
352 
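/*
 * Allocate the per-instance page-info arrays for every backing-store
 * context type that firmware reported as usable.
 */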
353 static int bnxt_alloc_all_ctx_pg_info(struct bnxt_softc *softc)
354 {
355 	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
356 	u16 type;
357 
358 	for (type = 0; type < BNXT_CTX_MAX; type++) {
359 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
360 		int n = 1;
361 
362 		if (!ctxm->max_entries || ctxm->pg_info)
363 			continue;
364 
365 		if (ctxm->instance_bmap)
366 			n = hweight32(ctxm->instance_bmap);
367 		ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
368 		if (!ctxm->pg_info)
369 			return -ENOMEM;
370 	}
371 	return 0;
372 }
373 
374 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
375 				      u8 init_val, u8 init_offset,
376 				      bool init_mask_set)
377 {
378 	ctxm->init_value = init_val;
379 	ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
380 	if (init_mask_set)
381 		ctxm->init_offset = init_offset * 4;
382 	else
383 		ctxm->init_value = 0;
384 }
385 
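/*
 * Query firmware for backing-store context capabilities (QP, SRQ, CQ,
 * VNIC, stats, TQM, MRAV, TIM) and cache the entry sizes, limits and
 * initializer information in softc->ctx_mem.
 */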
386 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt_softc *softc)
387 {
388 	struct hwrm_func_backing_store_qcaps_input req = {0};
389 	struct hwrm_func_backing_store_qcaps_output *resp =
390 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
391 	int rc;
392 
393 	if (softc->hwrm_spec_code < 0x10902 || softc->ctx_mem)
394 		return 0;
395 
396 	if (BNXT_VF(softc))
397 		return 0;
398 
399 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_QCAPS);
400 	BNXT_HWRM_LOCK(softc);
401 	rc = _hwrm_send_message(softc, &req, sizeof(req));
402 	if (!rc) {
403 		struct bnxt_ctx_mem_type *ctxm;
404 		struct bnxt_ctx_mem_info *ctx;
405 		u8 init_val, init_idx = 0;
406 		u16 init_mask;
407 
408 		ctx = softc->ctx_mem;
409 		if (!ctx) {
410 			ctx = malloc(sizeof(*ctx), M_DEVBUF, M_NOWAIT | M_ZERO);
411 			if (!ctx) {
412 				rc = -ENOMEM;
413 				goto ctx_err;
414 			}
415 			softc->ctx_mem = ctx;
416 		}
417 		init_val = resp->ctx_kind_initializer;
418 		init_mask = le16_to_cpu(resp->ctx_init_mask);
419 
420 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
421 		ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
422 		ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
423 		ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
424 		ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
425 		bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
426 					  (init_mask & (1 << init_idx++)) != 0);
427 
428 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
429 		ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
430 		ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
431 		ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
432 		bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
433 					  (init_mask & (1 << init_idx++)) != 0);
434 
435 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
436 		ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
437 		ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
438 		ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
439 		bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
440 					  (init_mask & (1 << init_idx++)) != 0);
441 
442 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
443 		ctxm->vnic_entries = le32_to_cpu(resp->vnic_max_vnic_entries);
444 		ctxm->max_entries = ctxm->vnic_entries +
445 			le16_to_cpu(resp->vnic_max_ring_table_entries);
446 		ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
447 		bnxt_init_ctx_initializer(ctxm, init_val,
448 					  resp->vnic_init_offset,
449 					  (init_mask & (1 << init_idx++)) != 0);
450 
451 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
452 		ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
453 		ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
454 		bnxt_init_ctx_initializer(ctxm, init_val,
455 					  resp->stat_init_offset,
456 					  (init_mask & (1 << init_idx++)) != 0);
457 
458 		ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
459 		ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
460 		ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
461 		ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
462 		ctxm->entry_multiple = resp->tqm_entries_multiple;
463 		if (!ctxm->entry_multiple)
464 			ctxm->entry_multiple = 1;
465 
466 		memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
467 
468 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
469 		ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
470 		ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
471 		ctxm->mrav_num_entries_units =
472 			le16_to_cpu(resp->mrav_num_entries_units);
473 		bnxt_init_ctx_initializer(ctxm, init_val,
474 					  resp->mrav_init_offset,
475 					  (init_mask & (1 << init_idx++)) != 0);
476 
477 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
478 		ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
479 		ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
480 
481 		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
482 		if (!ctx->tqm_fp_rings_count)
483 			ctx->tqm_fp_rings_count = softc->tx_max_q;
484 		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_LEGACY_RINGS)
485 			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_LEGACY_RINGS;
486 		if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
487 		    softc->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
488 			ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
489 			if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
490 				ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
491 		}
492 		ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
493 		memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
494 		ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
495 
496 		rc = bnxt_alloc_all_ctx_pg_info(softc);
497 	} else {
498 		rc = 0;
499 	}
500 ctx_err:
501 	BNXT_HWRM_UNLOCK(softc);
502 	return rc;
503 }
504 
505 #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES                 \
506         (HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |                \
507          HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ |               \
508          HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ |                \
509          HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC |              \
510          HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
511 
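/*
 * Encode the backing-store page size and indirection level (1- or 2-level
 * page table) and point the request at either the page directory or the
 * single data page.
 */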
512 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, uint8_t *pg_attr,
513 				  uint64_t *pg_dir)
514 {
515 	uint8_t pg_size = 0;
516 
517 	if (BNXT_PAGE_SHIFT == 13)
518 		pg_size = 1 << 4;
519 	else if (BNXT_PAGE_SIZE == 16)
520 		pg_size = 2 << 4;
521 
522 	*pg_attr = pg_size;
523 	if (rmem->depth >= 1) {
524 		if (rmem->depth == 2)
525 			*pg_attr |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2;
526 		else
527 			*pg_attr |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1;
528 		*pg_dir = htole64(rmem->pg_tbl.idi_paddr);
529 	} else {
530 		*pg_dir = htole64(rmem->pg_arr[0].idi_paddr);
531 	}
532 }
533 
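/*
 * Program the firmware backing-store configuration for every context type
 * selected in 'enables', including the per-ring TQM areas.
 */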
534 int bnxt_hwrm_func_backing_store_cfg(struct bnxt_softc *softc, uint32_t enables)
535 {
536 	struct hwrm_func_backing_store_cfg_input req = {0};
537 	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
538 	struct bnxt_ctx_pg_info *ctx_pg;
539 	struct bnxt_ctx_mem_type *ctxm;
540 	u32 req_len = sizeof(req);
541 	__le32 *num_entries;
542 	u32 ena, flags = 0;
543 	__le64 *pg_dir;
544 	u8 *pg_attr;
545 	int i;
546 
547 	if (!ctx)
548 		return 0;
549 
550 	if (req_len > softc->hwrm_max_ext_req_len)
551 		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
552 
553 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_CFG);
554 	req.enables = htole32(enables);
555 
556 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
557 		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
558 		ctx_pg = ctxm->pg_info;
559 		req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
560 		req.qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
561 		req.qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
562 		req.qp_entry_size = cpu_to_le16(ctxm->entry_size);
563 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
564 				&req.qpc_pg_size_qpc_lvl,
565 				&req.qpc_page_dir);
566 	}
567 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
568 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
569 		ctx_pg = ctxm->pg_info;
570 		req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
571 		req.srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
572 		req.srq_entry_size = cpu_to_le16(ctxm->entry_size);
573 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
574 				&req.srq_pg_size_srq_lvl,
575 				&req.srq_page_dir);
576 	}
577 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
578 		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
579 		ctx_pg = ctxm->pg_info;
580 		req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
581 		req.cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
582 		req.cq_entry_size = cpu_to_le16(ctxm->entry_size);
583 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
584 				      &req.cq_pg_size_cq_lvl,
585 				&req.cq_page_dir);
586 	}
587 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV) {
588 		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
589 		ctx_pg = ctxm->pg_info;
590 		req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
591 		if (ctxm->mrav_num_entries_units)
592 			flags |=
593 			HWRM_FUNC_BACKING_STORE_CFG_INPUT_FLAGS_MRAV_RESERVATION_SPLIT;
594 		req.mrav_entry_size = cpu_to_le16(ctxm->entry_size);
595 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
596 				&req.mrav_pg_size_mrav_lvl,
597 				&req.mrav_page_dir);
598 	}
599 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM) {
600 		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
601 		ctx_pg = ctxm->pg_info;
602 		req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
603 		req.tim_entry_size = cpu_to_le16(ctxm->entry_size);
604 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
605 				&req.tim_pg_size_tim_lvl,
606 				&req.tim_page_dir);
607 	}
608 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
609 		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
610 		ctx_pg = ctxm->pg_info;
611 		req.vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
612 		req.vnic_num_ring_table_entries =
613 			cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
614 		req.vnic_entry_size = cpu_to_le16(ctxm->entry_size);
615 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
616 				&req.vnic_pg_size_vnic_lvl,
617 				&req.vnic_page_dir);
618 	}
619 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
620 		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
621 		ctx_pg = ctxm->pg_info;
622 		req.stat_num_entries = cpu_to_le32(ctxm->max_entries);
623 		req.stat_entry_size = cpu_to_le16(ctxm->entry_size);
624 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
625 				&req.stat_pg_size_stat_lvl,
626 				&req.stat_page_dir);
627 	}
628 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
629 	for (i = 0, num_entries = &req.tqm_sp_num_entries,
630 			pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
631 			pg_dir = &req.tqm_sp_page_dir,
632 	     ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP,
633 	     ctx_pg = ctxm->pg_info;
634 	     i < BNXT_MAX_TQM_LEGACY_RINGS;
635 	     ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
636 	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
637 		if (!(enables & ena))
638 			continue;
639 
640 		req.tqm_entry_size = cpu_to_le16(ctxm->entry_size);
641 		*num_entries = cpu_to_le32(ctx_pg->entries);
642 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
643 	}
644 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
645 		pg_attr = &req.tqm_ring8_pg_size_tqm_ring_lvl;
646 		pg_dir = &req.tqm_ring8_page_dir;
647 		ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[8];
648 		req.tqm_ring8_num_entries = cpu_to_le32(ctx_pg->entries);
649 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
650 	}
651 	req.flags = cpu_to_le32(flags);
652 	return hwrm_send_message(softc, &req, sizeof(req));
653 }
654 
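/*
 * Query the min/max resource counts (rings, VNICs, stat contexts, ...) for
 * this function; when 'all' is false only the TX scheduler input limit is
 * recorded.
 */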
655 int bnxt_hwrm_func_resc_qcaps(struct bnxt_softc *softc, bool all)
656 {
657 	struct hwrm_func_resource_qcaps_output *resp =
658 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
659 	struct hwrm_func_resource_qcaps_input req = {0};
660 	struct bnxt_hw_resc *hw_resc = &softc->hw_resc;
661 	int rc;
662 
663 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESOURCE_QCAPS);
664 	req.fid = htole16(0xffff);
665 
666 	BNXT_HWRM_LOCK(softc);
667 	rc = _hwrm_send_message(softc, &req, sizeof(req));
668 	if (rc) {
669 		rc = -EIO;
670 		goto hwrm_func_resc_qcaps_exit;
671 	}
672 
673 	hw_resc->max_tx_sch_inputs = le16toh(resp->max_tx_scheduler_inputs);
674 	if (!all)
675 		goto hwrm_func_resc_qcaps_exit;
676 
677 	hw_resc->min_rsscos_ctxs = le16toh(resp->min_rsscos_ctx);
678 	hw_resc->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
679 	hw_resc->min_cp_rings = le16toh(resp->min_cmpl_rings);
680 	hw_resc->max_cp_rings = le16toh(resp->max_cmpl_rings);
681 	hw_resc->min_tx_rings = le16toh(resp->min_tx_rings);
682 	hw_resc->max_tx_rings = le16toh(resp->max_tx_rings);
683 	hw_resc->min_rx_rings = le16toh(resp->min_rx_rings);
684 	hw_resc->max_rx_rings = le16toh(resp->max_rx_rings);
685 	hw_resc->min_hw_ring_grps = le16toh(resp->min_hw_ring_grps);
686 	hw_resc->max_hw_ring_grps = le16toh(resp->max_hw_ring_grps);
687 	hw_resc->min_l2_ctxs = le16toh(resp->min_l2_ctxs);
688 	hw_resc->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
689 	hw_resc->min_vnics = le16toh(resp->min_vnics);
690 	hw_resc->max_vnics = le16toh(resp->max_vnics);
691 	hw_resc->min_stat_ctxs = le16toh(resp->min_stat_ctx);
692 	hw_resc->max_stat_ctxs = le16toh(resp->max_stat_ctx);
693 
694 	if (BNXT_CHIP_P5(softc)) {
695 		hw_resc->max_nqs = le16toh(resp->max_msix);
696 		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
697 	}
698 
699 hwrm_func_resc_qcaps_exit:
700 	BNXT_HWRM_UNLOCK(softc);
701 	return rc;
702 }
703 
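/*
 * V2 backing-store configuration: issue one HWRM request per instance of
 * the given context type, marking the final request with BS_CFG_ALL_DONE
 * when the caller indicates this is the last type.
 */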
704 int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt_softc *softc,
705 					struct bnxt_ctx_mem_type *ctxm,
706 					bool last)
707 {
708 	struct hwrm_func_backing_store_cfg_v2_input req = {0};
709 	u32 instance_bmap = ctxm->instance_bmap;
710 	int i, j, rc = 0, n = 1;
711 	__le32 *p;
712 
713 	if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
714 		return 0;
715 
716 	if (instance_bmap)
717 		n = hweight32(ctxm->instance_bmap);
718 	else
719 		instance_bmap = 1;
720 
721 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_CFG_V2);
722 
723 	req.type = cpu_to_le16(ctxm->type);
724 	req.entry_size = cpu_to_le16(ctxm->entry_size);
725 	for (i = 0, p = &req.split_entry_0; i < ctxm->split_entry_cnt; i++)
726 		p[i] = cpu_to_le32(ctxm->split[i]);
727 	for (i = 0, j = 0; j < n && !rc; i++) {
728 		struct bnxt_ctx_pg_info *ctx_pg;
729 
730 		if (!(instance_bmap & (1 << i)))
731 			continue;
732 		req.instance = cpu_to_le16(i);
733 		ctx_pg = &ctxm->pg_info[j++];
734 		if (!ctx_pg->entries)
735 			continue;
736 		req.num_entries = cpu_to_le32(ctx_pg->entries);
737 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
738 				      &req.page_size_pbl_level,
739 				      &req.page_dir);
740 		if (last && j == (n - 1))
741 			req.flags =
742 				cpu_to_le32(HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_FLAGS_BS_CFG_ALL_DONE);
743 		rc = hwrm_send_message(softc, &req, sizeof(req));
744 	}
745 	return rc;
746 }
747 
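/*
 * Send a caller-constructed HWRM request with a caller-supplied timeout
 * and copy the raw response back out to the caller's buffer.
 */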
748 int
749 bnxt_hwrm_passthrough(struct bnxt_softc *softc, void *req, uint32_t req_len,
750 		void *resp, uint32_t resp_len, uint32_t app_timeout)
751 {
752 	int rc = 0;
753 	void *output = (void *)softc->hwrm_cmd_resp.idi_vaddr;
754 	struct input *input = req;
755 	uint32_t old_timeo;
756 
757 	input->resp_addr = htole64(softc->hwrm_cmd_resp.idi_paddr);
758 	BNXT_HWRM_LOCK(softc);
759 	old_timeo = softc->hwrm_cmd_timeo;
760 	if (input->req_type == HWRM_NVM_INSTALL_UPDATE)
761 		softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
762 	else
763 		softc->hwrm_cmd_timeo = max(app_timeout, softc->hwrm_cmd_timeo);
764 	rc = _hwrm_send_message(softc, req, req_len);
765 	softc->hwrm_cmd_timeo = old_timeo;
766 	if (rc) {
767 		device_printf(softc->dev, "%s: %s command failed with rc: 0x%x\n",
768 			      __FUNCTION__, GET_HWRM_REQ_TYPE(input->req_type), rc);
769 		goto fail;
770 	}
771 
772 	memcpy(resp, output, resp_len);
773 fail:
774 	BNXT_HWRM_UNLOCK(softc);
775 	return rc;
776 }
777 
778 
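/*
 * HWRM_VER_GET: negotiate the HWRM interface version, record the firmware
 * and chip version strings, and latch the capability bits (short command
 * support, Kong channel, etc.) advertised in dev_caps_cfg.
 */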
779 int
780 bnxt_hwrm_ver_get(struct bnxt_softc *softc)
781 {
782 	struct hwrm_ver_get_input	req = {0};
783 	struct hwrm_ver_get_output	*resp =
784 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
785 	int				rc;
786 	const char nastr[] = "<not installed>";
787 	const char naver[] = "<N/A>";
788 	uint32_t dev_caps_cfg;
789 	uint16_t fw_maj, fw_min, fw_bld, fw_rsv, len;
790 
791 	softc->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
792 	softc->hwrm_cmd_timeo = 1000;
793 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET);
794 
795 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
796 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
797 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
798 
799 	BNXT_HWRM_LOCK(softc);
800 	rc = _hwrm_send_message(softc, &req, sizeof(req));
801 	if (rc)
802 		goto fail;
803 
804 	snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
805 	    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, resp->hwrm_intf_upd_8b);
806 	softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj_8b;
807 	softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min_8b;
808 	softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd_8b;
809 	strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
810 	    BNXT_VERSTR_SIZE);
811 	strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
812 	    BNXT_NAME_SIZE);
813 
814 	softc->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
815 	    resp->hwrm_intf_min_8b << 8 |
816 	    resp->hwrm_intf_upd_8b;
817 	if (resp->hwrm_intf_maj_8b < 1) {
818 		device_printf(softc->dev, "HWRM interface %d.%d.%d is older "
819 		    "than 1.0.0.\n", resp->hwrm_intf_maj_8b,
820 		    resp->hwrm_intf_min_8b, resp->hwrm_intf_upd_8b);
821 		device_printf(softc->dev, "Please update firmware with HWRM "
822 		    "interface 1.0.0 or newer.\n");
823 	}
824 	if (resp->mgmt_fw_major == 0 && resp->mgmt_fw_minor == 0 &&
825 	    resp->mgmt_fw_build == 0) {
826 		strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE);
827 		strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
828 	}
829 	else {
830 		snprintf(softc->ver_info->mgmt_fw_ver, FW_VER_STR_LEN,
831 		    "%d.%d.%d.%d", resp->mgmt_fw_major, resp->mgmt_fw_minor,
832 		    resp->mgmt_fw_build, resp->mgmt_fw_patch);
833 		strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
834 		    BNXT_NAME_SIZE);
835 	}
836 	if (resp->netctrl_fw_major == 0 && resp->netctrl_fw_minor == 0 &&
837 	    resp->netctrl_fw_build == 0) {
838 		strlcpy(softc->ver_info->netctrl_fw_ver, naver,
839 		    BNXT_VERSTR_SIZE);
840 		strlcpy(softc->ver_info->netctrl_fw_name, nastr,
841 		    BNXT_NAME_SIZE);
842 	}
843 	else {
844 		snprintf(softc->ver_info->netctrl_fw_ver, FW_VER_STR_LEN,
845 		    "%d.%d.%d.%d", resp->netctrl_fw_major, resp->netctrl_fw_minor,
846 		    resp->netctrl_fw_build, resp->netctrl_fw_patch);
847 		strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
848 		    BNXT_NAME_SIZE);
849 	}
850 	if (resp->roce_fw_major == 0 && resp->roce_fw_minor == 0 &&
851 	    resp->roce_fw_build == 0) {
852 		strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE);
853 		strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE);
854 	}
855 	else {
856 		snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
857 		    "%d.%d.%d.%d", resp->roce_fw_major, resp->roce_fw_minor,
858 		    resp->roce_fw_build, resp->roce_fw_patch);
859 		strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
860 		    BNXT_NAME_SIZE);
861 	}
862 
863 	fw_maj = le32toh(resp->hwrm_fw_major);
864 	if (softc->hwrm_spec_code > 0x10803 && fw_maj) {
865 		fw_min = le16toh(resp->hwrm_fw_minor);
866 		fw_bld = le16toh(resp->hwrm_fw_build);
867 		fw_rsv = le16toh(resp->hwrm_fw_patch);
868 		len = FW_VER_STR_LEN;
869 	} else {
870 		fw_maj = resp->hwrm_fw_maj_8b;
871 		fw_min = resp->hwrm_fw_min_8b;
872 		fw_bld = resp->hwrm_fw_bld_8b;
873 		fw_rsv = resp->hwrm_fw_rsvd_8b;
874 		len = BC_HWRM_STR_LEN;
875 	}
876 
877 	softc->ver_info->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
878 	snprintf(softc->ver_info->fw_ver_str, len, "%d.%d.%d.%d",
879 			fw_maj, fw_min, fw_bld, fw_rsv);
880 
881 	if (strlen(resp->active_pkg_name)) {
882 		int fw_ver_len = strlen(softc->ver_info->fw_ver_str);
883 
884 		snprintf(softc->ver_info->fw_ver_str + fw_ver_len,
885 				FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
886 				resp->active_pkg_name);
887 		softc->fw_cap |= BNXT_FW_CAP_PKG_VER;
888 	}
889 
890 	softc->ver_info->chip_num = le16toh(resp->chip_num);
891 	softc->ver_info->chip_rev = resp->chip_rev;
892 	softc->ver_info->chip_metal = resp->chip_metal;
893 	softc->ver_info->chip_bond_id = resp->chip_bond_id;
894 	softc->ver_info->chip_type = resp->chip_platform_type;
895 
896 	if (resp->hwrm_intf_maj_8b >= 1) {
897 		softc->hwrm_max_req_len = le16toh(resp->max_req_win_len);
898 		softc->hwrm_max_ext_req_len = le16toh(resp->max_ext_req_len);
899 	}
900 	softc->hwrm_cmd_timeo = le16toh(resp->def_req_timeout);
901 	if (!softc->hwrm_cmd_timeo)
902 		softc->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
903 
904 	dev_caps_cfg = le32toh(resp->dev_caps_cfg);
905 	if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
906 	    (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
907 		softc->flags |= BNXT_FLAG_SHORT_CMD;
908 
909 	if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
910 	    (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
911 		softc->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
912 
913 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
914 		softc->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
915 
916 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
917 		softc->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
918 
919 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
920 		softc->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
921 
922 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
923 		softc->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
924 
925 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_EEM_SUPPORTED)
926 		softc->fw_cap |= BNXT_FW_CAP_CFA_EEM;
927 
928 	if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED)
929 		softc->fw_cap |= BNXT_FW_CAP_TRUFLOW_EN;
930 
931 fail:
932 	BNXT_HWRM_UNLOCK(softc);
933 	return rc;
934 }
935 
936 static const u16 bnxt_async_events_arr[] = {
937 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
938 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
939 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
940 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
941 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
942 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
943 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
944 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
945 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
946 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
947 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE,
948 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
949 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
950 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
951 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
952 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
953 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
954 };
955 
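/*
 * Register the driver with firmware: advertise the OS type, driver version
 * and supported feature flags, and request forwarding of the async events
 * listed in bnxt_async_events_arr plus any caller-supplied bitmap.
 */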
956 int bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *bp, unsigned long *bmap, int bmap_size,
957 			    bool async_only)
958 {
959 	DECLARE_BITMAP(async_events_bmap, 256);
960 	u32 *events = (u32 *)async_events_bmap;
961 	struct hwrm_func_drv_rgtr_output *resp =
962 		(void *)bp->hwrm_cmd_resp.idi_vaddr;
963 	struct hwrm_func_drv_rgtr_input req = {0};
964 	u32 flags = 0;
965 	int rc;
966 	int i;
967 
968 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR);
969 	req.ver_maj = HWRM_VERSION_MAJOR;
970 	req.ver_min = HWRM_VERSION_MINOR;
971 	req.ver_upd = HWRM_VERSION_UPDATE;
972 
973 	req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE |
974 				   HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
975 				   HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
976 
977 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
978 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
979 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
980 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT |
981 			 HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
982 	if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
983 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_NPAR_1_2_SUPPORT;
984 	flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ASYM_QUEUE_CFG_SUPPORT;
985 	req.flags = htole32(flags);
986 	req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);
987 
988 	if (BNXT_PF(bp)) {
989 		req.enables |=
990 			htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
991 	}
992 
993 	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
994 		req.flags |= cpu_to_le32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FLOW_HANDLE_64BIT_MODE);
995 
996 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
997 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
998 		u16 event_id = bnxt_async_events_arr[i];
999 
1000 		if (event_id == HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
1001 		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
1002 			continue;
1003 		}
1004 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
1005 	}
1006 	if (bmap && bmap_size) {
1007 		for (i = 0; i < bmap_size; i++) {
1008 			if (test_bit(i, bmap))
1009 				__set_bit(i, async_events_bmap);
1010 		}
1011 	}
1012 	for (i = 0; i < 8; i++)
1013 		req.async_event_fwd[i] |= htole32(events[i]);
1014 
1015 	if (async_only)
1016 		req.enables =
1017 			htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
1018 
1019 	rc = hwrm_send_message(bp, &req, sizeof(req));
1020 
1021 	if (!rc) {
1022 		if (resp->flags &
1023 		    le32toh(HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED))
1024 			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
1025 	}
1026 
1027 
1028 	return rc;
1029 }
1030 
1031 int
1032 bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown)
1033 {
1034 	struct hwrm_func_drv_unrgtr_input req = {0};
1035 
1036 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR);
1037 	if (shutdown == true)
1038 		req.flags |=
1039 		    HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN;
1040 	return hwrm_send_message(softc, &req, sizeof(req));
1041 }
1042 
1043 static inline int
1044 _is_valid_ether_addr(uint8_t *addr)
1045 {
1046 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
1047 
1048 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
1049 		return (FALSE);
1050 
1051 	return (TRUE);
1052 }
1053 
1054 static inline void
1055 get_random_ether_addr(uint8_t *addr)
1056 {
1057 	uint8_t temp[ETHER_ADDR_LEN];
1058 
1059 	arc4rand(&temp, sizeof(temp), 0);
1060 	temp[0] &= 0xFE;
1061 	temp[0] |= 0x02;
1062 	bcopy(temp, addr, sizeof(temp));
1063 }
1064 
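/*
 * HWRM_FUNC_QCAPS: collect function capabilities and resource maxima,
 * translate the flags/flags_ext/flags_ext2 bits into softc->flags and
 * softc->fw_cap, and fall back to a random locally administered MAC
 * address if firmware reports an invalid one.
 */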
1065 int
1066 bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
1067 {
1068 	int rc = 0;
1069 	struct hwrm_func_qcaps_input req = {0};
1070 	struct hwrm_func_qcaps_output *resp =
1071 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
1072 	struct bnxt_func_info *func = &softc->func;
1073 	uint32_t flags, flags_ext, flags_ext2;
1074 
1075 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS);
1076 	req.fid = htole16(0xffff);
1077 
1078 	BNXT_HWRM_LOCK(softc);
1079 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1080 	if (rc)
1081 		goto fail;
1082 
1083 	flags = htole32(resp->flags);
1084 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED)
1085 		softc->flags |= BNXT_FLAG_WOL_CAP;
1086 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
1087 		softc->flags |= BNXT_FLAG_FW_CAP_EXT_STATS;
1088 
1089 	/* Enable RoCE only on Thor devices */
1090 	if (BNXT_CHIP_P5(softc)) {
1091 		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED)
1092 			softc->flags |= BNXT_FLAG_ROCEV1_CAP;
1093 		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED)
1094 			softc->flags |= BNXT_FLAG_ROCEV2_CAP;
1095 	}
1096 
1097 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
1098 		softc->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
1099 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED)
1100 		softc->fw_cap |= BNXT_FW_CAP_ADMIN_PF;
1101 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
1102 		softc->fw_cap |= BNXT_FW_CAP_HOT_RESET;
1103 	if (flags &  HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE)
1104 		softc->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
1105 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PCIE_STATS_SUPPORTED)
1106 		softc->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
1107 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
1108 		softc->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
1109 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
1110 		softc->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
1111 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED)
1112 		softc->fw_cap |= BNXT_FW_CAP_VF_VNIC_NOTIFY;
1113 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_CRASHDUMP_CMD_SUPPORTED)
1114 		softc->fw_cap |= BNXT_FW_CAP_CRASHDUMP;
1115 	if (!(flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
1116 		softc->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
1117 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
1118 		softc->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
1119 
1120 	flags_ext = htole32(resp->flags_ext);
1121 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
1122 		softc->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
1123 	if (BNXT_PF(softc) && (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_ECN_STATS_SUPPORTED))
1124 		softc->fw_cap |= BNXT_FW_CAP_ECN_STATS;
1125 
1126 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_PTP_PPS_SUPPORTED)
1127 		softc->fw_cap |= BNXT_FW_CAP_PTP_PPS;
1128 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_PTP_PTM_SUPPORTED)
1129 		softc->fw_cap |= BNXT_FW_CAP_PTP_PTM;
1130 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
1131 		softc->fw_cap |= BNXT_FW_CAP_PTP_RTC;
1132 	if (BNXT_PF(softc) && (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
1133 		softc->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
1134 	if (BNXT_PF(softc) && (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
1135 		softc->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
1136 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_NPAR_1_2_SUPPORTED)
1137 		softc->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
1138 	if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_BS_V2_SUPPORTED)
1139 		softc->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
1140 	if (BNXT_PF(softc) &&
1141 	    (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED))
1142 		softc->fw_cap |= BNXT_FW_CAP_VF_CFG_FOR_PF;
1143 
1144 	flags_ext2 = htole32(resp->flags_ext2);
1145 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
1146 		softc->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
1147 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED)
1148 		softc->fw_cap |= BNXT_FW_CAP_DBR_SUPPORTED;
1149 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ||
1150 	    flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_DBR_PACING_SUPPORTED)
1151 		softc->fw_cap |= BNXT_FW_CAP_DBR_PACING_SUPPORTED;
1152 
1153 	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_GENERIC_STATS_SUPPORTED)
1154 		softc->fw_cap |= BNXT_FW_CAP_GENERIC_STATS;
1155 	func->fw_fid = le16toh(resp->fid);
1156 	memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
1157 	func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
1158 	func->max_cp_rings = le16toh(resp->max_cmpl_rings);
1159 	func->max_tx_rings = le16toh(resp->max_tx_rings);
1160 	func->max_rx_rings = le16toh(resp->max_rx_rings);
1161 	func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps);
1162 	if (!func->max_hw_ring_grps)
1163 		func->max_hw_ring_grps = func->max_tx_rings;
1164 	func->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
1165 	func->max_vnics = le16toh(resp->max_vnics);
1166 	func->max_stat_ctxs = le16toh(resp->max_stat_ctx);
1167 	if (BNXT_PF(softc)) {
1168 		struct bnxt_pf_info *pf = &softc->pf;
1169 
1170 		pf->port_id = le16toh(resp->port_id);
1171 		pf->first_vf_id = le16toh(resp->first_vf_id);
1172 		pf->max_vfs = le16toh(resp->max_vfs);
1173 		pf->max_encap_records = le32toh(resp->max_encap_records);
1174 		pf->max_decap_records = le32toh(resp->max_decap_records);
1175 		pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows);
1176 		pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows);
1177 		pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows);
1178 		pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows);
1179 	}
1180 	if (!_is_valid_ether_addr(func->mac_addr)) {
1181 		device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n");
1182 		get_random_ether_addr(func->mac_addr);
1183 	}
1184 
1185 fail:
1186 	BNXT_HWRM_UNLOCK(softc);
1187 	return rc;
1188 }
1189 
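/*
 * HWRM_FUNC_QCFG: read the current function configuration (allocated
 * rings and VNICs, NPAR partition type, LLDP/DCBX agent state) and size
 * the doorbell BAR mapping.
 */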
1190 int
1191 bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
1192 {
1193 	struct hwrm_func_qcfg_input req = {0};
1194 	struct hwrm_func_qcfg_output *resp =
1195 		(void *)softc->hwrm_cmd_resp.idi_vaddr;
1196 	struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg;
1197 	uint32_t min_db_offset = 0;
1198 	uint16_t flags;
1199 	int rc;
1200 
1201 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
1202 	req.fid = htole16(0xffff);
1203 	BNXT_HWRM_LOCK(softc);
1204 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1205 	if (rc)
1206 		goto end;
1207 
1208 	fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
1209 	fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
1210 	fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
1211 	fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
1212 
1213 	switch (resp->port_partition_type) {
1214 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1215 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_2:
1216 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1217 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1218 		softc->port_partition_type = resp->port_partition_type;
1219 		break;
1220 	}
1221 
1222 	flags = le16toh(resp->flags);
1223 	if (flags & (HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED |
1224 		     HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED)) {
1225 		softc->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
1226 		if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED)
1227 			softc->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
1228 	}
1229 	if (BNXT_PF(softc) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
1230 		softc->flags |= BNXT_FLAG_MULTI_HOST;
1231 	if (BNXT_PF(softc) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_ROOT))
1232 		softc->flags |= BNXT_FLAG_MULTI_ROOT;
1233 	if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_SECURE_MODE_ENABLED)
1234 		softc->fw_cap |= BNXT_FW_CAP_SECURE_MODE;
1235 	if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_RING_MONITOR_ENABLED)
1236 		softc->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
1237 	if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_ENABLE_RDMA_SRIOV)
1238 		softc->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
1239 
1240 	if (softc->db_size)
1241 		goto end;
1242 
1243 	softc->legacy_db_size = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
1244 
1245 	if (BNXT_CHIP_P5(softc)) {
1246 		if (BNXT_PF(softc))
1247 			min_db_offset = DB_PF_OFFSET_P5;
1248 		else
1249 			min_db_offset = DB_VF_OFFSET_P5;
1250 	}
1251 
1252 	softc->db_size = roundup2(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
1253 			1024, PAGE_SIZE);
1254 	if (!softc->db_size || softc->db_size > pci_resource_len(softc->pdev, 2) ||
1255 			softc->db_size <= min_db_offset)
1256 		softc->db_size = pci_resource_len(softc->pdev, 2);
1257 
1258 end:
1259 	BNXT_HWRM_UNLOCK(softc);
1260 	return rc;
1261 }
1262 
1263 int
1264 bnxt_hwrm_func_reset(struct bnxt_softc *softc)
1265 {
1266 	struct hwrm_func_reset_input req = {0};
1267 
1268 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
1269 	req.enables = 0;
1270 
1271 	return hwrm_send_message(softc, &req, sizeof(req));
1272 }
1273 
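/*
 * Populate the speed/autoneg portion of a PORT_PHY_CFG request from the
 * cached link settings and flag the change to take effect immediately
 * (RESET_PHY).
 */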
1274 static void
1275 bnxt_hwrm_set_link_common(struct bnxt_softc *softc,
1276     struct hwrm_port_phy_cfg_input *req)
1277 {
1278 	struct bnxt_link_info *link_info = &softc->link_info;
1279 	uint8_t autoneg = softc->link_info.autoneg;
1280 	uint16_t fw_link_speed = softc->link_info.req_link_speed;
1281 
1282 	if (autoneg & BNXT_AUTONEG_SPEED) {
1283 		uint8_t phy_type = get_phy_type(softc);
1284 
1285 		if (phy_type == HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET ||
1286 		    phy_type == HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
1287 		    phy_type == HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE) {
1288 
1289 			req->auto_mode |= htole32(HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK);
1290 			if (link_info->advertising) {
1291 				req->enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK);
1292 				req->auto_link_speed_mask = htole16(link_info->advertising);
1293 			}
1294 		} else {
1295 			req->auto_mode |= HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1296 		}
1297 
1298 		req->enables |=
1299 		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE);
1300 		req->flags |=
1301 		    htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
1302 	} else {
1303 		req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
1304 
1305 		if (link_info->force_pam4_speed_set_by_user) {
1306 			req->force_pam4_link_speed = htole16(fw_link_speed);
1307 			req->enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAM4_LINK_SPEED);
1308 			link_info->force_pam4_speed_set_by_user = false;
1309 		} else {
1310 			req->force_link_speed = htole16(fw_link_speed);
1311 		}
1312 	}
1313 
1314 	/* tell chimp that the setting takes effect immediately */
1315 	req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
1316 }
1317 
1318 static void
1319 bnxt_hwrm_set_pause_common(struct bnxt_softc *softc,
1320     struct hwrm_port_phy_cfg_input *req)
1321 {
1322 	struct bnxt_link_info *link_info = &softc->link_info;
1323 
1324 	if (link_info->flow_ctrl.autoneg) {
1325 		req->auto_pause =
1326 		    HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE;
1327 		if (link_info->flow_ctrl.rx)
1328 			req->auto_pause |=
1329 			    HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1330 		if (link_info->flow_ctrl.tx)
1331 			req->auto_pause |=
1332 			    HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1333 		req->enables |=
1334 		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE);
1335 	} else {
1336 		if (link_info->flow_ctrl.rx)
1337 			req->force_pause |=
1338 			    HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1339 		if (link_info->flow_ctrl.tx)
1340 			req->force_pause |=
1341 			    HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1342 		req->enables |=
1343 			htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE);
1344 		req->auto_pause = req->force_pause;
1345 		req->enables |=
1346 		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE);
1347 	}
1348 }
1349 
1350 /* JFV this needs interface connection */
1351 static void
1352 bnxt_hwrm_set_eee(struct bnxt_softc *softc, struct hwrm_port_phy_cfg_input *req)
1353 {
1354 	/* struct ethtool_eee *eee = &softc->eee; */
1355 	bool	eee_enabled = false;
1356 
1357 	if (eee_enabled) {
1358 #if 0
1359 		uint16_t eee_speeds;
1360 		uint32_t flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE;
1361 
1362 		if (eee->tx_lpi_enabled)
1363 			flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI;
1364 
1365 		req->flags |= htole32(flags);
1366 		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
1367 		req->eee_link_speed_mask = htole16(eee_speeds);
1368 		req->tx_lpi_timer = htole32(eee->tx_lpi_timer);
1369 #endif
1370 	} else {
1371 		req->flags |=
1372 		    htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE);
1373 	}
1374 }
1375 
1376 int
1377 bnxt_hwrm_set_link_setting(struct bnxt_softc *softc, bool set_pause,
1378     bool set_eee, bool set_link)
1379 {
1380 	struct hwrm_port_phy_cfg_input req = {0};
1381 	int rc;
1382 
1383 	if (softc->flags & BNXT_FLAG_NPAR)
1384 		return ENOTSUP;
1385 
1386 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_CFG);
1387 
1388 	if (set_pause) {
1389 		bnxt_hwrm_set_pause_common(softc, &req);
1390 
1391 		if (softc->link_info.flow_ctrl.autoneg)
1392 			set_link = true;
1393 	}
1394 
1395 	if (set_link)
1396 		bnxt_hwrm_set_link_common(softc, &req);
1397 
1398 	if (set_eee)
1399 		bnxt_hwrm_set_eee(softc, &req);
1400 
1401 	BNXT_HWRM_LOCK(softc);
1402 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1403 
1404 	if (!rc) {
1405 		if (set_pause) {
1406 			/* Since changing the 'force pause' setting doesn't
1407 			 * trigger any link change event, the driver needs to
1408 			 * update the current pause result upon successful
1409 			 * return of the phy_cfg command. */
1410 			if (!softc->link_info.flow_ctrl.autoneg)
1411 				bnxt_report_link(softc);
1412 		}
1413 	}
1414 	BNXT_HWRM_UNLOCK(softc);
1415 	return rc;
1416 }
1417 
1418 int
1419 bnxt_hwrm_vnic_set_hds(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1420 {
1421 	struct hwrm_vnic_plcmodes_cfg_input req = {0};
1422 
1423 	if (!BNXT_CHIP_P5(softc))
1424 		return 0;
1425 
1426 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_PLCMODES_CFG);
1427 
1428 	req.flags = htole32(HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1429 	req.vnic_id = htole16(vnic->id);
1430 
1431 	return hwrm_send_message(softc, &req, sizeof(req));
1432 }
1433 
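/*
 * Configure a VNIC: default/BD-stall/VLAN-strip flags, the default ring
 * (P5) or ring group (older chips), RSS/CoS/LB rules and the MRU.
 */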
1434 int
1435 bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1436 {
1437 	struct hwrm_vnic_cfg_input req = {0};
1438 
1439 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG);
1440 
1441 	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
1442 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1443 	if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL)
1444 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1445 	if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP)
1446 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1447 	if (BNXT_CHIP_P5(softc)) {
1448 		req.default_rx_ring_id =
1449 			htole16(softc->rx_rings[0].phys_id);
1450 		req.default_cmpl_ring_id =
1451 			htole16(softc->rx_cp_rings[0].ring.phys_id);
1452 		req.enables |=
1453 			htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1454 			    HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID);
1455 		req.vnic_id = htole16(vnic->id);
1456 	} else {
1457 		req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
1458 				HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE);
1459 		req.vnic_id = htole16(vnic->id);
1460 		req.dflt_ring_grp = htole16(vnic->def_ring_grp);
1461 	}
1462 	req.rss_rule = htole16(vnic->rss_id);
1463 	req.cos_rule = htole16(vnic->cos_rule);
1464 	req.lb_rule = htole16(vnic->lb_rule);
1465 	req.enables |= htole32(HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
1466 	req.mru = htole16(vnic->mru);
1467 
1468 	return hwrm_send_message(softc, &req, sizeof(req));
1469 }
1470 
1471 int
1472 bnxt_hwrm_vnic_free(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1473 {
1474 	struct hwrm_vnic_free_input req = {0};
1475 	int rc = 0;
1476 
1477 	if (vnic->id == (uint16_t)HWRM_NA_SIGNATURE)
1478 		return rc;
1479 
1480 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_FREE);
1481 
1482 	req.vnic_id = htole32(vnic->id);
1483 
1484 	BNXT_HWRM_LOCK(softc);
1485 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1486 	if (rc)
1487 		goto fail;
1488 
1489 fail:
1490 	BNXT_HWRM_UNLOCK(softc);
1491 	return (rc);
1492 }
1493 
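/*
 * Allocate a VNIC in firmware (optionally as the default VNIC) and record
 * the returned VNIC ID.
 */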
1494 int
1495 bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1496 {
1497 	struct hwrm_vnic_alloc_input req = {0};
1498 	struct hwrm_vnic_alloc_output *resp =
1499 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
1500 	int rc;
1501 
1502 	if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE) {
1503 		device_printf(softc->dev,
1504 		    "Attempt to re-allocate vnic %04x\n", vnic->id);
1505 		return EDOOFUS;
1506 	}
1507 
1508 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC);
1509 
1510 	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
1511 		req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1512 
1513 	BNXT_HWRM_LOCK(softc);
1514 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1515 	if (rc)
1516 		goto fail;
1517 
1518 	vnic->id = le32toh(resp->vnic_id);
1519 
1520 fail:
1521 	BNXT_HWRM_UNLOCK(softc);
1522 	return (rc);
1523 }
1524 
1525 int
1526 bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *softc, uint16_t ctx_id)
1527 {
1528 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
1529 	int rc = 0;
1530 
1531 	if (ctx_id == (uint16_t)HWRM_NA_SIGNATURE)
1532 		return rc;
1533 
1534 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE);
1535 	req.rss_cos_lb_ctx_id = htole16(ctx_id);
1536 	BNXT_HWRM_LOCK(softc);
1537 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1538 	if (rc)
1539 		goto fail;
1540 
1541 fail:
1542 	BNXT_HWRM_UNLOCK(softc);
1543 	return rc;
1544 }
1545 
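/*
 * Allocate an RSS/CoS/LB context in firmware and return its ID through
 * *ctx_id.
 */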
1546 int
1547 bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id)
1548 {
1549 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
1550 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1551 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
1552 	int rc;
1553 
1554 	if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE) {
1555 		device_printf(softc->dev,
1556 		    "Attempt to re-allocate vnic ctx %04x\n", *ctx_id);
1557 		return EDOOFUS;
1558 	}
1559 
1560 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
1561 
1562 	BNXT_HWRM_LOCK(softc);
1563 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1564 	if (rc)
1565 		goto fail;
1566 
1567 	*ctx_id = le32toh(resp->rss_cos_lb_ctx_id);
1568 
1569 fail:
1570 	BNXT_HWRM_UNLOCK(softc);
1571 	return (rc);
1572 }
1573 
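/*
 * Allocate a ring group tying the completion, RX and aggregation rings to a
 * statistics context.  P5 chips do not use ring groups, so this is a no-op
 * there.
 */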
1574 int
1575 bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
1576 {
1577 	struct hwrm_ring_grp_alloc_input req = {0};
1578 	struct hwrm_ring_grp_alloc_output *resp;
1579 	int rc = 0;
1580 
1581 	if (grp->grp_id != (uint16_t)HWRM_NA_SIGNATURE) {
1582 		device_printf(softc->dev,
1583 		    "Attempt to re-allocate ring group %04x\n", grp->grp_id);
1584 		return EDOOFUS;
1585 	}
1586 
1587 	if (BNXT_CHIP_P5(softc))
1588 		return 0;
1589 
1590 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1591 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC);
1592 	req.cr = htole16(grp->cp_ring_id);
1593 	req.rr = htole16(grp->rx_ring_id);
1594 	req.ar = htole16(grp->ag_ring_id);
1595 	req.sc = htole16(grp->stats_ctx);
1596 
1597 	BNXT_HWRM_LOCK(softc);
1598 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1599 	if (rc)
1600 		goto fail;
1601 
1602 	grp->grp_id = le32toh(resp->ring_group_id);
1603 
1604 fail:
1605 	BNXT_HWRM_UNLOCK(softc);
1606 	return rc;
1607 }
1608 
1609 int
1610 bnxt_hwrm_ring_grp_free(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
1611 {
1612 	struct hwrm_ring_grp_free_input req = {0};
1613 	int rc = 0;
1614 
1615 	if (grp->grp_id == (uint16_t)HWRM_NA_SIGNATURE)
1616 		return 0;
1617 
1618 	if (BNXT_CHIP_P5(softc))
1619 		return 0;
1620 
1621 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_FREE);
1622 
1623 	req.ring_group_id = htole32(grp->grp_id);
1624 
1625 	BNXT_HWRM_LOCK(softc);
1626 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1627 	if (rc)
1628 		goto fail;
1629 
1630 fail:
1631 	BNXT_HWRM_UNLOCK(softc);
1632 	return rc;
1633 }
1634 
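/*
 * Free a firmware ring.  Both the local return code and the error_code in
 * the response are checked; a failure reported only in the response is
 * converted into an error return.
 */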
1635 int bnxt_hwrm_ring_free(struct bnxt_softc *softc, uint32_t ring_type,
1636 		struct bnxt_ring *ring, int cmpl_ring_id)
1637 {
1638 	struct hwrm_ring_free_input req = {0};
1639 	struct hwrm_ring_free_output *resp;
1640 	int rc = 0;
1641 	uint16_t error_code;
1642 
1643 	if (ring->phys_id == (uint16_t)HWRM_NA_SIGNATURE)
1644 		return 0;
1645 
1646 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1647 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_FREE);
1648 	req.cmpl_ring = htole16(cmpl_ring_id);
1649 	req.ring_type = ring_type;
1650 	req.ring_id = htole16(ring->phys_id);
1651 
1652 	BNXT_HWRM_LOCK(softc);
1653 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1654 	error_code = le16toh(resp->error_code);
1655 
1656 	if (rc || error_code) {
1657 		device_printf(softc->dev, "hwrm_ring_free type %d failed. "
1658 				"rc:%x err:%x\n", ring_type, rc, error_code);
1659 		if (!rc)
1660 			rc = -EIO;
1661 	}
1662 
1663 	BNXT_HWRM_UNLOCK(softc);
1664 	return rc;
1665 }
1666 
1667 /*
1668  * Ring allocation message to the firmware
1669  */
1670 int
1671 bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
1672                      struct bnxt_ring *ring)
1673 {
1674 	struct hwrm_ring_alloc_input req = {0};
1675 	struct hwrm_ring_alloc_output *resp;
1676 	uint16_t idx = ring->idx;
1677 	struct bnxt_cp_ring *cp_ring;
1678 	int rc;
1679 
1680 	if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE) {
1681 		device_printf(softc->dev,
1682 		    "Attempt to re-allocate ring %04x\n", ring->phys_id);
1683 		return EDOOFUS;
1684 	}
1685 
1686 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1687 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC);
1688 	req.enables = htole32(0);
1689 	req.fbo = htole32(0);
1690 	req.ring_type = type;
1691 	req.page_tbl_addr = htole64(ring->paddr);
1692 	req.logical_id = htole16(ring->id);
1693 	req.length = htole32(ring->ring_size);
1694 
1695 	switch (type) {
1696 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1697 		cp_ring = &softc->tx_cp_rings[idx];
1698 
1699 		req.cmpl_ring_id = htole16(cp_ring->ring.phys_id);
1700 		/* queue_id - what CoS queue the TX ring is associated with */
1701 		req.queue_id = htole16(softc->tx_q_info[0].queue_id);
1702 
1703 		req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
1704 		req.enables |= htole32(
1705 		    HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
1706 		break;
1707 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1708 		if (!BNXT_CHIP_P5(softc))
1709 			break;
1710 
1711 		cp_ring = &softc->rx_cp_rings[idx];
1712 
1713 		req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
1714 		req.rx_buf_size = htole16(softc->rx_buf_size);
1715 		req.enables |= htole32(
1716 			HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1717 			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
1718 		break;
1719 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1720 		if (!BNXT_CHIP_P5(softc)) {
1721 			req.ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
1722 			break;
1723 		}
1724 
1725 		cp_ring = &softc->rx_cp_rings[idx];
1726 
1727 		req.rx_ring_id = htole16(softc->rx_rings[idx].phys_id);
1728 		req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
1729 		req.rx_buf_size = htole16(softc->rx_buf_size);
1730 		req.enables |= htole32(
1731 			HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1732 			HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1733 			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
1734 		break;
1735 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1736 		if (!BNXT_CHIP_P5(softc)) {
1737 			req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1738 			break;
1739 		}
1740 
1741 		req.cq_handle = htole64(ring->id);
1742 		req.nq_ring_id = htole16(softc->nq_rings[idx].ring.phys_id);
1743 		req.enables |= htole32(
1744 			HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID);
1745 		break;
1746 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1747 		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1748 		break;
1749 	default:
1750 		device_printf(softc->dev,
1751 			      "hwrm alloc invalid ring type %d\n", type);
1752 		return -1;
1753 	}
1754 
1755 	BNXT_HWRM_LOCK(softc);
1756 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1757 	if (rc)
1758 		goto fail;
1759 
1760 	ring->phys_id = le16toh(resp->ring_id);
1761 
1762 fail:
1763 	BNXT_HWRM_UNLOCK(softc);
1764 	return rc;
1765 }
1766 
1767 int
1768 bnxt_hwrm_stat_ctx_free(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
1769 {
1770 	struct hwrm_stat_ctx_free_input req = {0};
1771 	int rc = 0;
1772 
1773 	if (cpr->stats_ctx_id == HWRM_NA_SIGNATURE)
1774 		return rc;
1775 
1776 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_FREE);
1777 
1778 	req.stat_ctx_id = htole16(cpr->stats_ctx_id);
1779 	BNXT_HWRM_LOCK(softc);
1780 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1781 	if (rc)
1782 		goto fail;
1783 
1784 fail:
1785 	BNXT_HWRM_UNLOCK(softc);
1786 
1787 	return rc;
1788 }
1789 
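/*
 * Allocate a statistics context that DMAs its counters to paddr once a
 * second and record the returned context ID in the completion ring.
 */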
1790 int
1791 bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
1792     uint64_t paddr)
1793 {
1794 	struct hwrm_stat_ctx_alloc_input req = {0};
1795 	struct hwrm_stat_ctx_alloc_output *resp;
1796 	int rc = 0;
1797 
1798 	if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE) {
1799 		device_printf(softc->dev,
1800 		    "Attempt to re-allocate stats ctx %08x\n",
1801 		    cpr->stats_ctx_id);
1802 		return EDOOFUS;
1803 	}
1804 
1805 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1806 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC);
1807 
1808 	req.update_period_ms = htole32(1000);
1809 	req.stats_dma_addr = htole64(paddr);
1810 	if (BNXT_CHIP_P5(softc))
1811 		req.stats_dma_length = htole16(sizeof(struct ctx_hw_stats_ext) - 8);
1812 	else
1813 		req.stats_dma_length = htole16(sizeof(struct ctx_hw_stats));
1814 
1815 	BNXT_HWRM_LOCK(softc);
1816 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1817 	if (rc)
1818 		goto fail;
1819 
1820 	cpr->stats_ctx_id = le32toh(resp->stat_ctx_id);
1821 
1822 fail:
1823 	BNXT_HWRM_UNLOCK(softc);
1824 
1825 	return rc;
1826 }
1827 
1828 int
1829 bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
1830 {
1831 	struct hwrm_port_qstats_input req = {0};
1832 	int rc = 0;
1833 
1834 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS);
1835 
1836 	req.port_id = htole16(softc->pf.port_id);
1837 	req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr);
1838 	req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr);
1839 
1840 	BNXT_HWRM_LOCK(softc);
1841 	rc = _hwrm_send_message(softc, &req, sizeof(req));
1842 	BNXT_HWRM_UNLOCK(softc);
1843 
1844 	return rc;
1845 }
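
/*
 * Query the priority-to-CoS queue mapping for the given direction (TX or RX)
 * and convert the per-port queue IDs into the queue indices stored in
 * tx_pri2cos_idx/rx_pri2cos_idx.
 */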
1846 static int bnxt_hwrm_pri2cos_idx(struct bnxt_softc *softc, uint32_t path_dir)
1847 {
1848 	struct hwrm_queue_pri2cos_qcfg_input req = {0};
1849 	struct hwrm_queue_pri2cos_qcfg_output *resp;
1850 	uint8_t *pri2cos_idx, *q_ids, max_q;
1851 	int rc, i, j;
1852 	uint8_t *pri2cos;
1853 
1854 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_PRI2COS_QCFG);
1855 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1856 
1857 	req.flags = htole32(HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN |
1858 			    path_dir);
1859 	rc = hwrm_send_message(softc, &req, sizeof(req));
1860 
1861 	if (rc)
1862 		return rc;
1863 
1864 	if (path_dir == HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX) {
1865 		pri2cos_idx = softc->tx_pri2cos_idx;
1866 		q_ids = softc->tx_q_ids;
1867 		max_q = softc->tx_max_q;
1868 	} else {
1869 		pri2cos_idx = softc->rx_pri2cos_idx;
1870 		q_ids = softc->rx_q_ids;
1871 		max_q = softc->rx_max_q;
1872 	}
1873 
1874 	pri2cos = &resp->pri0_cos_queue_id;
1875 
1876 	for (i = 0; i < BNXT_MAX_QUEUE; i++) {
1877 		uint8_t queue_id = pri2cos[i];
1878 		uint8_t queue_idx;
1879 
1880 		/* Per port queue IDs start from 0, 10, 20, etc */
1881 		queue_idx = queue_id % 10;
1882 		if (queue_idx > BNXT_MAX_QUEUE) {
1883 			softc->pri2cos_valid = false;
1884 			rc = -EINVAL;
1885 			return rc;
1886 		}
1887 
1888 		for (j = 0; j < max_q; j++) {
1889 			if (q_ids[j] == queue_id)
1890 				pri2cos_idx[i] = queue_idx;
1891 		}
1892 	}
1893 
1894 	softc->pri2cos_valid = true;
1895 
1896 	return rc;
1897 }
1898 
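/*
 * Fetch the extended port statistics and, when the firmware reports the
 * priority-to-CoS mapping, the per-priority byte and packet counters.
 */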
1899 int
1900 bnxt_hwrm_port_qstats_ext(struct bnxt_softc *softc)
1901 {
1902 	struct hwrm_port_qstats_ext_input req = {0};
1903 	struct hwrm_port_qstats_ext_output *resp;
1904 	int rc = 0, i;
1905 	uint32_t tx_stat_size;
1906 
1907 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS_EXT);
1908 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1909 
1910 	tx_stat_size = sizeof(struct tx_port_stats_ext);
1911 	req.port_id = htole16(softc->pf.port_id);
1912 	req.tx_stat_size = htole16(tx_stat_size);
1913 	req.rx_stat_size = htole16(sizeof(struct rx_port_stats_ext));
1914 	req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats_ext.idi_paddr);
1915 	req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats_ext.idi_paddr);
1916 
1917 	rc = hwrm_send_message(softc, &req, sizeof(req));
1918 
1919 	if (!rc) {
1920 		softc->fw_rx_stats_ext_size =
1921 			le16toh(resp->rx_stat_size) / 8;
1922 		if (BNXT_FW_MAJ(softc) < 220 &&
1923 		    softc->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
1924 			softc->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
1925 
1926 		softc->fw_tx_stats_ext_size = tx_stat_size ?
1927 			le16toh(resp->tx_stat_size) / 8 : 0;
1928 	} else {
1929 		softc->fw_rx_stats_ext_size = 0;
1930 		softc->fw_tx_stats_ext_size = 0;
1931 	}
1932 
1933 	if (softc->fw_tx_stats_ext_size <=
1934 	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
1935 		softc->pri2cos_valid = false;
1936 		return rc;
1937 	}
1938 
1939 	rc = bnxt_hwrm_pri2cos_idx(softc, HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX);
1940 	if (rc)
1941 		return rc;
1942 
1943 	if (softc->is_asym_q) {
1944 		rc = bnxt_hwrm_pri2cos_idx(softc, HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX);
1945 		if (rc)
1946 			return rc;
1947 	} else {
1948 		memcpy(softc->rx_pri2cos_idx, softc->tx_pri2cos_idx, sizeof(softc->rx_pri2cos_idx));
1949 	}
1950 
1951 	u64 *rx_port_stats_ext = (u64 *)softc->hw_rx_port_stats_ext.idi_vaddr;
1952 	u64 *tx_port_stats_ext = (u64 *)softc->hw_tx_port_stats_ext.idi_vaddr;
1953 
1954 	if (softc->pri2cos_valid) {
1955 		for (i = 0; i < 8; i++) {
1956 			long n = bnxt_rx_bytes_pri_arr_base_off[i] +
1957 				 softc->rx_pri2cos_idx[i];
1958 
1959 			softc->rx_bytes_pri[i] = *(rx_port_stats_ext + n);
1960 		}
1961 		for (i = 0; i < 8; i++) {
1962 			long n = bnxt_rx_pkts_pri_arr_base_off[i] +
1963 				 softc->rx_pri2cos_idx[i];
1964 
1965 			softc->rx_packets_pri[i] = *(rx_port_stats_ext + n);
1966 		}
1967 		for (i = 0; i < 8; i++) {
1968 			long n = bnxt_tx_bytes_pri_arr_base_off[i] +
1969 				 softc->tx_pri2cos_idx[i];
1970 
1971 			softc->tx_bytes_pri[i] = *(tx_port_stats_ext + n);
1972 		}
1973 		for (i = 0; i < 8; i++) {
1974 			long n = bnxt_tx_pkts_pri_arr_base_off[i] +
1975 				 softc->tx_pri2cos_idx[i];
1976 
1977 			softc->tx_packets_pri[i] = *(tx_port_stats_ext + n);
1978 		}
1979 	}
1980 
1981 	return rc;
1982 }
1983 
1984 int
1985 bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc,
1986     struct bnxt_vnic_info *vnic)
1987 {
1988 	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
1989 	uint32_t mask = vnic->rx_mask;
1990 
1991 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK);
1992 
1993 	req.vnic_id = htole32(vnic->id);
1994 	req.mask = htole32(mask);
1995 	req.mc_tbl_addr = htole64(vnic->mc_list.idi_paddr);
1996 	req.num_mc_entries = htole32(vnic->mc_list_count);
1997 	return hwrm_send_message(softc, &req, sizeof(req));
1998 }
1999 
2000 int
2001 bnxt_hwrm_l2_filter_free(struct bnxt_softc *softc, uint64_t filter_id)
2002 {
2003 	struct hwrm_cfa_l2_filter_free_input	req = {0};
2004 	int rc = 0;
2005 
2006 	if (filter_id == -1)
2007 		return rc;
2008 
2009 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_FREE);
2010 
2011 	req.l2_filter_id = htole64(filter_id);
2012 
2013 	BNXT_HWRM_LOCK(softc);
2014 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2015 	if (rc)
2016 		goto fail;
2017 
2018 fail:
2019 	BNXT_HWRM_UNLOCK(softc);
2020 	return (rc);
2021 }
2022 
2023 int
2024 bnxt_hwrm_free_filter(struct bnxt_softc *softc)
2025 {
2026 	struct bnxt_vnic_info *vnic = &softc->vnic_info;
2027 	struct bnxt_vlan_tag *tag;
2028 	int rc = 0;
2029 
2030 	rc = bnxt_hwrm_l2_filter_free(softc, softc->vnic_info.filter_id);
2031 	if (rc)
2032 		goto end;
2033 
2034 	SLIST_FOREACH(tag, &vnic->vlan_tags, next) {
2035 		rc = bnxt_hwrm_l2_filter_free(softc, tag->filter_id);
2036 		if (rc)
2037 			goto end;
2038 		tag->filter_id = -1;
2039 	}
2040 
2041 end:
2042 	return rc;
2043 }
2044 
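/*
 * Allocate an RX L2 filter on the default VNIC for the interface MAC
 * address, optionally matching a VLAN tag, and return the filter ID through
 * *filter_id.
 */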
2045 int
2046 bnxt_hwrm_l2_filter_alloc(struct bnxt_softc *softc, uint16_t vlan_tag,
2047 		uint64_t *filter_id)
2048 {
2049 	struct hwrm_cfa_l2_filter_alloc_input	req = {0};
2050 	struct hwrm_cfa_l2_filter_alloc_output	*resp;
2051 	struct bnxt_vnic_info *vnic = &softc->vnic_info;
2052 	uint32_t enables = 0;
2053 	int rc = 0;
2054 
2055 	if (*filter_id != -1) {
2056 		device_printf(softc->dev, "Attempt to re-allocate l2 ctx "
2057 		    "filter (fid: 0x%jx)\n", (uintmax_t)*filter_id);
2058 		return EDOOFUS;
2059 	}
2060 
2061 	resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
2062 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC);
2063 
2064 	req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX);
2065 	enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
2066 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
2067 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
2068 
2069 	if (vlan_tag != 0xffff) {
2070 		enables |=
2071 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
2072 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK |
2073 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_NUM_VLANS;
2074 		req.l2_ivlan_mask = 0xffff;
2075 		req.l2_ivlan = vlan_tag;
2076 		req.num_vlans = 1;
2077 	}
2078 
2079 	req.enables = htole32(enables);
2080 	req.dst_id = htole16(vnic->id);
2081 	memcpy(req.l2_addr, if_getlladdr(iflib_get_ifp(softc->ctx)),
2082 	    ETHER_ADDR_LEN);
2083 	memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask));
2084 
2085 	BNXT_HWRM_LOCK(softc);
2086 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2087 	if (rc)
2088 		goto fail;
2089 
2090 	*filter_id = le64toh(resp->l2_filter_id);
2091 fail:
2092 	BNXT_HWRM_UNLOCK(softc);
2093 	return (rc);
2094 }
2095 
2096 int
2097 bnxt_hwrm_set_filter(struct bnxt_softc *softc)
2098 {
2099 	struct bnxt_vnic_info *vnic = &softc->vnic_info;
2100 	struct bnxt_vlan_tag *tag;
2101 	int rc = 0;
2102 
2103 	rc = bnxt_hwrm_l2_filter_alloc(softc, 0xffff, &vnic->filter_id);
2104 	if (rc)
2105 		goto end;
2106 
2107 	SLIST_FOREACH(tag, &vnic->vlan_tags, next) {
2108 		rc = bnxt_hwrm_l2_filter_alloc(softc, tag->tag,
2109 				&tag->filter_id);
2110 		if (rc)
2111 			goto end;
2112 	}
2113 
2114 end:
2115 	return rc;
2116 }
2117 
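/*
 * Program the RSS hash type, ring group table and hash key for a VNIC's
 * RSS context.
 */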
2118 int
2119 bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
2120     uint32_t hash_type)
2121 {
2122 	struct hwrm_vnic_rss_cfg_input	req = {0};
2123 
2124 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);
2125 
2126 	req.hash_type = htole32(hash_type);
2127 	req.ring_grp_tbl_addr = htole64(vnic->rss_grp_tbl.idi_paddr);
2128 	req.hash_key_tbl_addr = htole64(vnic->rss_hash_key_tbl.idi_paddr);
2129 	req.rss_ctx_idx = htole16(vnic->rss_id);
2130 	req.hash_mode_flags = HWRM_FUNC_SPD_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
2131 	if (BNXT_CHIP_P5(softc)) {
2132 		req.vnic_id = htole16(vnic->id);
2133 		req.ring_table_pair_index = 0x0;
2134 	}
2135 
2136 	return hwrm_send_message(softc, &req, sizeof(req));
2137 }
2138 
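/*
 * Reserve PF resources (completion/TX/RX rings, VNICs, MSI-X vectors,
 * RSS contexts and statistics contexts) sized for BNXT_MAX_NUM_QUEUES.
 */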
2139 int
2140 bnxt_hwrm_reserve_pf_rings(struct bnxt_softc *softc)
2141 {
2142 	struct hwrm_func_cfg_input req = {0};
2143 
2144 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
2145 
2146 	req.fid = htole16(0xffff);
2147 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS);
2148 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS);
2149 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS);
2150 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS);
2151 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS);
2152 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX);
2153 	req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS);
2154 	req.num_msix = htole16(BNXT_MAX_NUM_QUEUES);
2155 	req.num_rsscos_ctxs = htole16(0x8);
2156 	req.num_cmpl_rings = htole16(BNXT_MAX_NUM_QUEUES * 2);
2157 	req.num_tx_rings = htole16(BNXT_MAX_NUM_QUEUES);
2158 	req.num_rx_rings = htole16(BNXT_MAX_NUM_QUEUES);
2159 	req.num_vnics = htole16(BNXT_MAX_NUM_QUEUES);
2160 	req.num_stat_ctxs = htole16(BNXT_MAX_NUM_QUEUES * 2);
2161 
2162 	return hwrm_send_message(softc, &req, sizeof(req));
2163 }
2164 
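/*
 * Tell the firmware which completion ring should receive asynchronous
 * events: the first NQ on P5 chips, otherwise the default completion ring.
 * Only done on the PF.
 */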
2165 int
2166 bnxt_cfg_async_cr(struct bnxt_softc *softc)
2167 {
2168 	int rc = 0;
2169 	struct hwrm_func_cfg_input req = {0};
2170 
2171 	if (!BNXT_PF(softc))
2172 		return 0;
2173 
2174 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
2175 
2176 	req.fid = htole16(0xffff);
2177 	req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2178 	if (BNXT_CHIP_P5(softc))
2179 		req.async_event_cr = htole16(softc->nq_rings[0].ring.phys_id);
2180 	else
2181 		req.async_event_cr = htole16(softc->def_cp_ring.ring.phys_id);
2182 
2183 	rc = hwrm_send_message(softc, &req, sizeof(req));
2184 
2185 	return rc;
2186 }
2187 
2188 void
2189 bnxt_validate_hw_lro_settings(struct bnxt_softc *softc)
2190 {
2191 	softc->hw_lro.enable = min(softc->hw_lro.enable, 1);
2192 
2193 	softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1);
2194 
2195 	softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs,
2196 		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX);
2197 
2198 	softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs,
2199 		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
2200 
2201 	softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len, BNXT_MAX_MTU);
2202 }
2203 
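/*
 * Configure TPA (hardware LRO) on the default VNIC according to the
 * validated hw_lro settings; TPA is left disabled when hw_lro.enable is
 * clear.
 */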
2204 int
2205 bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc)
2206 {
2207 	struct hwrm_vnic_tpa_cfg_input req = {0};
2208 	uint32_t flags;
2209 
2210 	if (softc->vnic_info.id == (uint16_t) HWRM_NA_SIGNATURE) {
2211 		return 0;
2212 	}
2213 
2214 	if (!(softc->flags & BNXT_FLAG_TPA))
2215 		return 0;
2216 
2217 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG);
2218 
2219 	if (softc->hw_lro.enable) {
2220 		flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2221 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2222 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2223 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ;
2224 
2225 		if (softc->hw_lro.is_mode_gro)
2226 			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
2227 		else
2228 			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE;
2229 
2230 		req.flags = htole32(flags);
2231 
2232 		req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2233 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2234 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2235 
2236 		req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs);
2237 		req.max_aggs = htole16(softc->hw_lro.max_aggs);
2238 		req.min_agg_len = htole32(softc->hw_lro.min_agg_len);
2239 	}
2240 
2241 	req.vnic_id = htole16(softc->vnic_info.id);
2242 
2243 	return hwrm_send_message(softc, &req, sizeof(req));
2244 }
2245 
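/*
 * Look up an NVRAM directory entry by type/ordinal/extension (or by index
 * when use_index is set) and return its data length, item length and
 * firmware version.
 */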
2246 int
2247 bnxt_hwrm_nvm_find_dir_entry(struct bnxt_softc *softc, uint16_t type,
2248     uint16_t *ordinal, uint16_t ext, uint16_t *index, bool use_index,
2249     uint8_t search_opt, uint32_t *data_length, uint32_t *item_length,
2250     uint32_t *fw_ver)
2251 {
2252 	struct hwrm_nvm_find_dir_entry_input req = {0};
2253 	struct hwrm_nvm_find_dir_entry_output *resp =
2254 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2255 	int	rc = 0;
2256 	uint32_t old_timeo;
2257 
2258 	MPASS(ordinal);
2259 
2260 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_FIND_DIR_ENTRY);
2261 	if (use_index) {
2262 		req.enables = htole32(
2263 		    HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID);
2264 		req.dir_idx = htole16(*index);
2265 	}
2266 	req.dir_type = htole16(type);
2267 	req.dir_ordinal = htole16(*ordinal);
2268 	req.dir_ext = htole16(ext);
2269 	req.opt_ordinal = search_opt;
2270 
2271 	BNXT_HWRM_LOCK(softc);
2272 	old_timeo = softc->hwrm_cmd_timeo;
2273 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2274 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2275 	softc->hwrm_cmd_timeo = old_timeo;
2276 	if (rc)
2277 		goto exit;
2278 
2279 	if (item_length)
2280 		*item_length = le32toh(resp->dir_item_length);
2281 	if (data_length)
2282 		*data_length = le32toh(resp->dir_data_length);
2283 	if (fw_ver)
2284 		*fw_ver = le32toh(resp->fw_ver);
2285 	*ordinal = le16toh(resp->dir_ordinal);
2286 	if (index)
2287 		*index = le16toh(resp->dir_idx);
2288 
2289 exit:
2290 	BNXT_HWRM_UNLOCK(softc);
2291 	return (rc);
2292 }
2293 
2294 int
2295 bnxt_hwrm_nvm_read(struct bnxt_softc *softc, uint16_t index, uint32_t offset,
2296     uint32_t length, struct iflib_dma_info *data)
2297 {
2298 	struct hwrm_nvm_read_input req = {0};
2299 	int rc;
2300 	uint32_t old_timeo;
2301 
2302 	if (length > data->idi_size) {
2303 		rc = EINVAL;
2304 		goto exit;
2305 	}
2306 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_READ);
2307 	req.host_dest_addr = htole64(data->idi_paddr);
2308 	req.dir_idx = htole16(index);
2309 	req.offset = htole32(offset);
2310 	req.len = htole32(length);
2311 	BNXT_HWRM_LOCK(softc);
2312 	old_timeo = softc->hwrm_cmd_timeo;
2313 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2314 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2315 	softc->hwrm_cmd_timeo = old_timeo;
2316 	BNXT_HWRM_UNLOCK(softc);
2317 	if (rc)
2318 		goto exit;
2319 	bus_dmamap_sync(data->idi_tag, data->idi_map, BUS_DMASYNC_POSTREAD);
2320 
2321 	goto exit;
2322 
2323 exit:
2324 	return rc;
2325 }
2326 
2327 int
2328 bnxt_hwrm_nvm_modify(struct bnxt_softc *softc, uint16_t index, uint32_t offset,
2329     void *data, bool cpyin, uint32_t length)
2330 {
2331 	struct hwrm_nvm_modify_input req = {0};
2332 	struct iflib_dma_info dma_data;
2333 	int rc;
2334 	uint32_t old_timeo;
2335 
2336 	if (length == 0 || !data)
2337 		return EINVAL;
2338 	rc = iflib_dma_alloc(softc->ctx, length, &dma_data,
2339 	    BUS_DMA_NOWAIT);
2340 	if (rc)
2341 		return ENOMEM;
2342 	if (cpyin) {
2343 		rc = copyin(data, dma_data.idi_vaddr, length);
2344 		if (rc)
2345 			goto exit;
2346 	}
2347 	else
2348 		memcpy(dma_data.idi_vaddr, data, length);
2349 	bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map,
2350 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2351 
2352 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_MODIFY);
2353 	req.host_src_addr = htole64(dma_data.idi_paddr);
2354 	req.dir_idx = htole16(index);
2355 	req.offset = htole32(offset);
2356 	req.len = htole32(length);
2357 	BNXT_HWRM_LOCK(softc);
2358 	old_timeo = softc->hwrm_cmd_timeo;
2359 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2360 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2361 	softc->hwrm_cmd_timeo = old_timeo;
2362 	BNXT_HWRM_UNLOCK(softc);
2363 
2364 exit:
2365 	iflib_dma_free(&dma_data);
2366 	return rc;
2367 }
2368 
2369 int
2370 bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
2371     uint8_t *selfreset)
2372 {
2373 	struct hwrm_fw_reset_input req = {0};
2374 	struct hwrm_fw_reset_output *resp =
2375 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2376 	int rc;
2377 
2378 	MPASS(selfreset);
2379 
2380 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET);
2381 	req.embedded_proc_type = processor;
2382 	req.selfrst_status = *selfreset;
2383 
2384 	BNXT_HWRM_LOCK(softc);
2385 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2386 	if (rc)
2387 		goto exit;
2388 	*selfreset = resp->selfrst_status;
2389 
2390 exit:
2391 	BNXT_HWRM_UNLOCK(softc);
2392 	return rc;
2393 }
2394 
2395 int
2396 bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset)
2397 {
2398 	struct hwrm_fw_qstatus_input req = {0};
2399 	struct hwrm_fw_qstatus_output *resp =
2400 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2401 	int rc;
2402 
2403 	MPASS(selfreset);
2404 
2405 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS);
2406 	req.embedded_proc_type = type;
2407 
2408 	BNXT_HWRM_LOCK(softc);
2409 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2410 	if (rc)
2411 		goto exit;
2412 	*selfreset = resp->selfrst_status;
2413 
2414 exit:
2415 	BNXT_HWRM_UNLOCK(softc);
2416 	return rc;
2417 }
2418 
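/*
 * Write an NVRAM directory entry.  The payload is staged in a DMA buffer
 * (copied from user space when cpyin is set) before the request is sent
 * with the extended NVRAM timeout.
 */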
2419 int
2420 bnxt_hwrm_nvm_write(struct bnxt_softc *softc, void *data, bool cpyin,
2421     uint16_t type, uint16_t ordinal, uint16_t ext, uint16_t attr,
2422     uint16_t option, uint32_t data_length, bool keep, uint32_t *item_length,
2423     uint16_t *index)
2424 {
2425 	struct hwrm_nvm_write_input req = {0};
2426 	struct hwrm_nvm_write_output *resp =
2427 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2428 	struct iflib_dma_info dma_data;
2429 	int rc;
2430 	uint32_t old_timeo;
2431 
2432 	if (data_length) {
2433 		rc = iflib_dma_alloc(softc->ctx, data_length, &dma_data,
2434 		    BUS_DMA_NOWAIT);
2435 		if (rc)
2436 			return ENOMEM;
2437 		if (cpyin) {
2438 			rc = copyin(data, dma_data.idi_vaddr, data_length);
2439 			if (rc)
2440 				goto early_exit;
2441 		}
2442 		else
2443 			memcpy(dma_data.idi_vaddr, data, data_length);
2444 		bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map,
2445 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2446 	}
2447 	else
2448 		dma_data.idi_paddr = 0;
2449 
2450 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_WRITE);
2451 
2452 	req.host_src_addr = htole64(dma_data.idi_paddr);
2453 	req.dir_type = htole16(type);
2454 	req.dir_ordinal = htole16(ordinal);
2455 	req.dir_ext = htole16(ext);
2456 	req.dir_attr = htole16(attr);
2457 	req.dir_data_length = htole32(data_length);
2458 	req.option = htole16(option);
2459 	if (keep) {
2460 		req.flags =
2461 		    htole16(HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG);
2462 	}
2463 	if (item_length)
2464 		req.dir_item_length = htole32(*item_length);
2465 
2466 	BNXT_HWRM_LOCK(softc);
2467 	old_timeo = softc->hwrm_cmd_timeo;
2468 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2469 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2470 	softc->hwrm_cmd_timeo = old_timeo;
2471 	if (rc)
2472 		goto exit;
2473 	if (item_length)
2474 		*item_length = le32toh(resp->dir_item_length);
2475 	if (index)
2476 		*index = le16toh(resp->dir_idx);
2477 
2478 exit:
2479 	BNXT_HWRM_UNLOCK(softc);
2480 early_exit:
2481 	if (data_length)
2482 		iflib_dma_free(&dma_data);
2483 	return rc;
2484 }
2485 
2486 int
2487 bnxt_hwrm_nvm_erase_dir_entry(struct bnxt_softc *softc, uint16_t index)
2488 {
2489 	struct hwrm_nvm_erase_dir_entry_input req = {0};
2490 	uint32_t old_timeo;
2491 	int rc;
2492 
2493 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_ERASE_DIR_ENTRY);
2494 	req.dir_idx = htole16(index);
2495 	BNXT_HWRM_LOCK(softc);
2496 	old_timeo = softc->hwrm_cmd_timeo;
2497 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2498 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2499 	softc->hwrm_cmd_timeo = old_timeo;
2500 	BNXT_HWRM_UNLOCK(softc);
2501 	return rc;
2502 }
2503 
2504 int
2505 bnxt_hwrm_nvm_get_dir_info(struct bnxt_softc *softc, uint32_t *entries,
2506     uint32_t *entry_length)
2507 {
2508 	struct hwrm_nvm_get_dir_info_input req = {0};
2509 	struct hwrm_nvm_get_dir_info_output *resp =
2510 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2511 	int rc;
2512 	uint32_t old_timeo;
2513 
2514 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_INFO);
2515 
2516 	BNXT_HWRM_LOCK(softc);
2517 	old_timeo = softc->hwrm_cmd_timeo;
2518 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2519 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2520 	softc->hwrm_cmd_timeo = old_timeo;
2521 	if (rc)
2522 		goto exit;
2523 
2524 	if (entries)
2525 		*entries = le32toh(resp->entries);
2526 	if (entry_length)
2527 		*entry_length = le32toh(resp->entry_length);
2528 
2529 exit:
2530 	BNXT_HWRM_UNLOCK(softc);
2531 	return rc;
2532 }
2533 
2534 int
2535 bnxt_hwrm_nvm_get_dir_entries(struct bnxt_softc *softc, uint32_t *entries,
2536     uint32_t *entry_length, struct iflib_dma_info *dma_data)
2537 {
2538 	struct hwrm_nvm_get_dir_entries_input req = {0};
2539 	uint32_t ent;
2540 	uint32_t ent_len;
2541 	int rc;
2542 	uint32_t old_timeo;
2543 
2544 	if (!entries)
2545 		entries = &ent;
2546 	if (!entry_length)
2547 		entry_length = &ent_len;
2548 
2549 	rc = bnxt_hwrm_nvm_get_dir_info(softc, entries, entry_length);
2550 	if (rc)
2551 		goto exit;
2552 	if (*entries * *entry_length > dma_data->idi_size) {
2553 		rc = EINVAL;
2554 		goto exit;
2555 	}
2556 
2557 	/*
2558 	 * TODO: There's a race condition here that could blow up DMA memory...
2559 	 *	 we need to allocate the max size, not the currently in use
2560 	 *	 size.  The command should totally have a max size here.
2561 	 */
2562 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_ENTRIES);
2563 	req.host_dest_addr = htole64(dma_data->idi_paddr);
2564 	BNXT_HWRM_LOCK(softc);
2565 	old_timeo = softc->hwrm_cmd_timeo;
2566 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2567 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2568 	softc->hwrm_cmd_timeo = old_timeo;
2569 	BNXT_HWRM_UNLOCK(softc);
2570 	if (rc)
2571 		goto exit;
2572 	bus_dmamap_sync(dma_data->idi_tag, dma_data->idi_map,
2573 	    BUS_DMASYNC_POSTWRITE);
2574 
2575 exit:
2576 	return rc;
2577 }
2578 
2579 int
2580 bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
2581     uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
2582     uint32_t *reserved_size, uint32_t *available_size)
2583 {
2584 	struct hwrm_nvm_get_dev_info_input req = {0};
2585 	struct hwrm_nvm_get_dev_info_output *resp =
2586 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2587 	int rc;
2588 	uint32_t old_timeo;
2589 
2590 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO);
2591 
2592 	BNXT_HWRM_LOCK(softc);
2593 	old_timeo = softc->hwrm_cmd_timeo;
2594 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2595 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2596 	softc->hwrm_cmd_timeo = old_timeo;
2597 	if (rc)
2598 		goto exit;
2599 
2600 	if (mfg_id)
2601 		*mfg_id = le16toh(resp->manufacturer_id);
2602 	if (device_id)
2603 		*device_id = le16toh(resp->device_id);
2604 	if (sector_size)
2605 		*sector_size = le32toh(resp->sector_size);
2606 	if (nvram_size)
2607 		*nvram_size = le32toh(resp->nvram_size);
2608 	if (reserved_size)
2609 		*reserved_size = le32toh(resp->reserved_size);
2610 	if (available_size)
2611 		*available_size = le32toh(resp->available_size);
2612 
2613 exit:
2614 	BNXT_HWRM_UNLOCK(softc);
2615 	return rc;
2616 }
2617 
2618 int
2619 bnxt_hwrm_nvm_install_update(struct bnxt_softc *softc,
2620     uint32_t install_type, uint64_t *installed_items, uint8_t *result,
2621     uint8_t *problem_item, uint8_t *reset_required)
2622 {
2623 	struct hwrm_nvm_install_update_input req = {0};
2624 	struct hwrm_nvm_install_update_output *resp =
2625 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2626 	int rc;
2627 	uint32_t old_timeo;
2628 
2629 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_INSTALL_UPDATE);
2630 	req.install_type = htole32(install_type);
2631 
2632 	BNXT_HWRM_LOCK(softc);
2633 	old_timeo = softc->hwrm_cmd_timeo;
2634 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2635 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2636 	softc->hwrm_cmd_timeo = old_timeo;
2637 	if (rc)
2638 		goto exit;
2639 
2640 	if (installed_items)
2641 		*installed_items = le32toh(resp->installed_items);
2642 	if (result)
2643 		*result = resp->result;
2644 	if (problem_item)
2645 		*problem_item = resp->problem_item;
2646 	if (reset_required)
2647 		*reset_required = resp->reset_required;
2648 
2649 exit:
2650 	BNXT_HWRM_UNLOCK(softc);
2651 	return rc;
2652 }
2653 
2654 int
2655 bnxt_hwrm_nvm_verify_update(struct bnxt_softc *softc, uint16_t type,
2656     uint16_t ordinal, uint16_t ext)
2657 {
2658 	struct hwrm_nvm_verify_update_input req = {0};
2659 	uint32_t old_timeo;
2660 	int rc;
2661 
2662 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_VERIFY_UPDATE);
2663 
2664 	req.dir_type = htole16(type);
2665 	req.dir_ordinal = htole16(ordinal);
2666 	req.dir_ext = htole16(ext);
2667 
2668 	BNXT_HWRM_LOCK(softc);
2669 	old_timeo = softc->hwrm_cmd_timeo;
2670 	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2671 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2672 	softc->hwrm_cmd_timeo = old_timeo;
2673 	BNXT_HWRM_UNLOCK(softc);
2674 	return rc;
2675 }
2676 
2677 int
2678 bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year, uint8_t *month,
2679     uint8_t *day, uint8_t *hour, uint8_t *minute, uint8_t *second,
2680     uint16_t *millisecond, uint16_t *zone)
2681 {
2682 	struct hwrm_fw_get_time_input req = {0};
2683 	struct hwrm_fw_get_time_output *resp =
2684 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2685 	int rc;
2686 
2687 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME);
2688 
2689 	BNXT_HWRM_LOCK(softc);
2690 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2691 	if (rc)
2692 		goto exit;
2693 
2694 	if (year)
2695 		*year = le16toh(resp->year);
2696 	if (month)
2697 		*month = resp->month;
2698 	if (day)
2699 		*day = resp->day;
2700 	if (hour)
2701 		*hour = resp->hour;
2702 	if (minute)
2703 		*minute = resp->minute;
2704 	if (second)
2705 		*second = resp->second;
2706 	if (millisecond)
2707 		*millisecond = le16toh(resp->millisecond);
2708 	if (zone)
2709 		*zone = le16toh(resp->zone);
2710 
2711 exit:
2712 	BNXT_HWRM_UNLOCK(softc);
2713 	return rc;
2714 }
2715 
2716 int
2717 bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year, uint8_t month,
2718     uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
2719     uint16_t millisecond, uint16_t zone)
2720 {
2721 	struct hwrm_fw_set_time_input req = {0};
2722 
2723 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME);
2724 
2725 	req.year = htole16(year);
2726 	req.month = month;
2727 	req.day = day;
2728 	req.hour = hour;
2729 	req.minute = minute;
2730 	req.second = second;
2731 	req.millisecond = htole16(millisecond);
2732 	req.zone = htole16(zone);
2733 	return hwrm_send_message(softc, &req, sizeof(req));
2734 }
2735 
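/*
 * Read SFP module EEPROM contents over I2C, transferring at most
 * BNXT_MAX_PHY_I2C_RESP_SIZE bytes per HWRM_PORT_PHY_I2C_READ request and
 * optionally selecting a bank.
 */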
2736 int bnxt_read_sfp_module_eeprom_info(struct bnxt_softc *softc, uint16_t i2c_addr,
2737     uint16_t page_number, uint8_t bank, bool bank_sel_en, uint16_t start_addr,
2738     uint16_t data_length, uint8_t *buf)
2739 {
2740 	struct hwrm_port_phy_i2c_read_output *output =
2741 			(void *)softc->hwrm_cmd_resp.idi_vaddr;
2742 	struct hwrm_port_phy_i2c_read_input req = {0};
2743 	int rc = 0, byte_offset = 0;
2744 
2745 	BNXT_HWRM_LOCK(softc);
2746 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_I2C_READ);
2747 
2748 	req.i2c_slave_addr = i2c_addr;
2749 	req.page_number = htole16(page_number);
2750 	req.port_id = htole16(softc->pf.port_id);
2751 	do {
2752 		uint16_t xfer_size;
2753 
2754 		xfer_size = min_t(uint16_t, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
2755 		data_length -= xfer_size;
2756 		req.page_offset = htole16(start_addr + byte_offset);
2757 		req.data_length = xfer_size;
2758 		req.bank_number = bank;
2759 		req.enables = htole32((start_addr + byte_offset ?
2760 				HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET : 0) |
2761 				(bank_sel_en ?
2762 				HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_BANK_NUMBER : 0));
2763 		rc = hwrm_send_message(softc, &req, sizeof(req));
2764 		if (!rc)
2765 			memcpy(buf + byte_offset, output->data, xfer_size);
2766 		byte_offset += xfer_size;
2767 	} while (!rc && data_length > 0);
2768 
2769 	BNXT_HWRM_UNLOCK(softc);
2770 
2771 	return rc;
2772 }
2773 
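/*
 * Query the PHY configuration and refresh the cached link state, flow
 * control, speed and transceiver information in softc->link_info.
 */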
2774 int
2775 bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc)
2776 {
2777 	struct bnxt_link_info *link_info = &softc->link_info;
2778 	struct hwrm_port_phy_qcfg_input req = {0};
2779 	struct hwrm_port_phy_qcfg_output *resp =
2780 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2781 	int rc = 0;
2782 
2783 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);
2784 
2785 	BNXT_HWRM_LOCK(softc);
2786 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2787 	if (rc)
2788 		goto exit;
2789 
2790 	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
2791 	link_info->phy_link_status = resp->link;
2792 	link_info->duplex = resp->duplex_cfg;
2793 	link_info->auto_mode = resp->auto_mode;
2794 
2795 	/*
2796 	 * When AUTO_PAUSE_AUTONEG_PAUSE bit is set to 1,
2797 	 * the advertisement of pause is enabled.
2798 	 * 1. When the auto_mode is not set to none and this flag is set to 1,
2799 	 *    then the auto_pause bits on this port are being advertised and
2800 	 *    autoneg pause results are being interpreted.
2801 	 * 2. When the auto_mode is not set to none and this flag is set to 0,
2802 	 *    the pause is forced as indicated in force_pause, and also
2803 	 *    advertised as auto_pause bits, but the autoneg results are not
2804 	 *    interpreted since the pause configuration is being forced.
2805 	 * 3. When the auto_mode is set to none and this flag is set to 1,
2806 	 *    auto_pause bits should be ignored and should be set to 0.
2807 	 */
2808 
2809 	link_info->flow_ctrl.autoneg = false;
2810 	link_info->flow_ctrl.tx = false;
2811 	link_info->flow_ctrl.rx = false;
2812 
2813 	if ((resp->auto_mode) &&
2814 	    (resp->auto_pause & BNXT_AUTO_PAUSE_AUTONEG_PAUSE)) {
2815 		link_info->flow_ctrl.autoneg = true;
2816 	}
2817 
2818 	if (link_info->flow_ctrl.autoneg) {
2819 		if (resp->auto_pause & BNXT_PAUSE_TX)
2820 			link_info->flow_ctrl.tx = true;
2821 		if (resp->auto_pause & BNXT_PAUSE_RX)
2822 			link_info->flow_ctrl.rx = true;
2823 	} else {
2824 		if (resp->force_pause & BNXT_PAUSE_TX)
2825 			link_info->flow_ctrl.tx = true;
2826 		if (resp->force_pause & BNXT_PAUSE_RX)
2827 			link_info->flow_ctrl.rx = true;
2828 	}
2829 
2830 	link_info->duplex_setting = resp->duplex_cfg;
2831 	if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
2832 		link_info->link_speed = le16toh(resp->link_speed);
2833 	else
2834 		link_info->link_speed = 0;
2835 	link_info->force_link_speed = le16toh(resp->force_link_speed);
2836 	link_info->auto_link_speeds = le16toh(resp->auto_link_speed);
2837 	link_info->support_speeds = le16toh(resp->support_speeds);
2838 	link_info->auto_link_speeds = le16toh(resp->auto_link_speed_mask);
2839 	link_info->preemphasis = le32toh(resp->preemphasis);
2840 	link_info->phy_ver[0] = resp->phy_maj;
2841 	link_info->phy_ver[1] = resp->phy_min;
2842 	link_info->phy_ver[2] = resp->phy_bld;
2843 	snprintf(softc->ver_info->phy_ver, sizeof(softc->ver_info->phy_ver),
2844 	    "%d.%d.%d", link_info->phy_ver[0], link_info->phy_ver[1],
2845 	    link_info->phy_ver[2]);
2846 	strlcpy(softc->ver_info->phy_vendor, resp->phy_vendor_name,
2847 	    BNXT_NAME_SIZE);
2848 	strlcpy(softc->ver_info->phy_partnumber, resp->phy_vendor_partnumber,
2849 	    BNXT_NAME_SIZE);
2850 	link_info->media_type = resp->media_type;
2851 	link_info->phy_type = resp->phy_type;
2852 	link_info->transceiver = resp->xcvr_pkg_type;
2853 	link_info->phy_addr = resp->eee_config_phy_addr &
2854 	    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK;
2855 	link_info->module_status = resp->module_status;
2856 	link_info->support_pam4_speeds = le16toh(resp->support_pam4_speeds);
2857 	link_info->auto_pam4_link_speeds = le16toh(resp->auto_pam4_link_speed_mask);
2858 	link_info->force_pam4_link_speed = le16toh(resp->force_pam4_link_speed);
2859 
2860 	if (softc->hwrm_spec_code >= 0x10504)
2861 		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
2862 
2863 exit:
2864 	BNXT_HWRM_UNLOCK(softc);
2865 	return rc;
2866 }
2867 
2868 static bool
2869 bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
2870 {
2871 	if (!resp->supported_speeds_auto_mode &&
2872 	    !resp->supported_speeds_force_mode &&
2873 	    !resp->supported_pam4_speeds_auto_mode &&
2874 	    !resp->supported_pam4_speeds_force_mode)
2875 		return true;
2876 
2877 	return false;
2878 }
2879 
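/*
 * Query PHY capabilities: EEE timers when supported, the supported NRZ and
 * PAM4 speeds, and whether the link is currently disabled (no supported
 * speeds reported).
 */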
2880 int bnxt_hwrm_phy_qcaps(struct bnxt_softc *softc)
2881 {
2882 	struct bnxt_link_info *link_info = &softc->link_info;
2883 	struct hwrm_port_phy_qcaps_output *resp =
2884 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
2885 	struct hwrm_port_phy_qcaps_input req = {};
2886 	int rc;
2887 
2888 	if (softc->hwrm_spec_code < 0x10201)
2889 		return 0;
2890 
2891 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCAPS);
2892 
2893 	BNXT_HWRM_LOCK(softc);
2894 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2895 	if (rc)
2896 		goto exit;
2897 
2898 	softc->phy_flags = resp->flags | (resp->flags2 << 8);
2899 	if (resp->flags & HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EEE_SUPPORTED) {
2900 
2901 		softc->lpi_tmr_lo = le32toh(resp->tx_lpi_timer_low) &
2902 					    HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_MASK;
2903 		softc->lpi_tmr_hi = le32toh(resp->valid_tx_lpi_timer_high) &
2904 					    HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_MASK;
2905 	}
2906 
2907 	if (softc->hwrm_spec_code >= 0x10a01) {
2908 		if (bnxt_phy_qcaps_no_speed(resp)) {
2909 			link_info->phy_state = BNXT_PHY_STATE_DISABLED;
2910 			device_printf(softc->dev, "Ethernet link disabled\n");
2911 		} else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
2912 			link_info->phy_state = BNXT_PHY_STATE_ENABLED;
2913 			device_printf(softc->dev, "Ethernet link enabled\n");
2914 			/* Phy re-enabled, reprobe the speeds */
2915 			link_info->support_auto_speeds = 0;
2916 			link_info->support_pam4_auto_speeds = 0;
2917 		}
2918 	}
2919 	if (resp->supported_speeds_auto_mode)
2920 		link_info->support_auto_speeds =
2921 			le16toh(resp->supported_speeds_auto_mode);
2922 	if (resp->supported_speeds_force_mode)
2923 		link_info->support_force_speeds =
2924 			le16toh(resp->supported_speeds_force_mode);
2925 	if (resp->supported_pam4_speeds_auto_mode)
2926 		link_info->support_pam4_auto_speeds =
2927 			le16toh(resp->supported_pam4_speeds_auto_mode);
2928 	if (resp->supported_pam4_speeds_force_mode)
2929 		link_info->support_pam4_force_speeds =
2930 			le16toh(resp->supported_pam4_speeds_force_mode);
2931 
2932 exit:
2933 	BNXT_HWRM_UNLOCK(softc);
2934 	return rc;
2935 }
2936 
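/*
 * Query the WOL filter at the given handle; if a magic-packet filter is
 * found, record its ID and mark WOL enabled.  Returns the next handle.
 */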
2937 uint16_t
2938 bnxt_hwrm_get_wol_fltrs(struct bnxt_softc *softc, uint16_t handle)
2939 {
2940 	struct hwrm_wol_filter_qcfg_input req = {0};
2941 	struct hwrm_wol_filter_qcfg_output *resp =
2942 			(void *)softc->hwrm_cmd_resp.idi_vaddr;
2943 	uint16_t next_handle = 0;
2944 	int rc;
2945 
2946 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_QCFG);
2947 	req.port_id = htole16(softc->pf.port_id);
2948 	req.handle = htole16(handle);
2949 	rc = hwrm_send_message(softc, &req, sizeof(req));
2950 	if (!rc) {
2951 		next_handle = le16toh(resp->next_handle);
2952 		if (next_handle != 0) {
2953 			if (resp->wol_type ==
2954 				HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT) {
2955 				softc->wol = 1;
2956 				softc->wol_filter_id = resp->wol_filter_id;
2957 			}
2958 		}
2959 	}
2960 	return next_handle;
2961 }
2962 
2963 int
2964 bnxt_hwrm_alloc_wol_fltr(struct bnxt_softc *softc)
2965 {
2966 	struct hwrm_wol_filter_alloc_input req = {0};
2967 	struct hwrm_wol_filter_alloc_output *resp =
2968 		(void *)softc->hwrm_cmd_resp.idi_vaddr;
2969 	int rc;
2970 
2971 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_ALLOC);
2972 	req.port_id = htole16(softc->pf.port_id);
2973 	req.wol_type = HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT;
2974 	req.enables =
2975 		htole32(HWRM_WOL_FILTER_ALLOC_INPUT_ENABLES_MAC_ADDRESS);
2976 	memcpy(req.mac_address, softc->func.mac_addr, ETHER_ADDR_LEN);
2977 	rc = hwrm_send_message(softc, &req, sizeof(req));
2978 	if (!rc)
2979 		softc->wol_filter_id = resp->wol_filter_id;
2980 
2981 	return rc;
2982 }
2983 
2984 int
2985 bnxt_hwrm_free_wol_fltr(struct bnxt_softc *softc)
2986 {
2987 	struct hwrm_wol_filter_free_input req = {0};
2988 
2989 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_FREE);
2990 	req.port_id = htole16(softc->pf.port_id);
2991 	req.enables =
2992 		htole32(HWRM_WOL_FILTER_FREE_INPUT_ENABLES_WOL_FILTER_ID);
2993 	req.wol_filter_id = softc->wol_filter_id;
2994 	return hwrm_send_message(softc, &req, sizeof(req));
2995 }
2996 
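/*
 * Fill in a completion ring coalescing request from packed values: the low
 * 16 bits of max_frames/buf_tmrs apply during normal operation, the high
 * 16 bits during interrupt processing.
 */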
2997 static void bnxt_hwrm_set_coal_params(struct bnxt_softc *softc, uint32_t max_frames,
2998         uint32_t buf_tmrs, uint16_t flags,
2999         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3000 {
3001 	req->flags = htole16(flags);
3002 	req->num_cmpl_dma_aggr = htole16((uint16_t)max_frames);
3003 	req->num_cmpl_dma_aggr_during_int = htole16(max_frames >> 16);
3004 	req->cmpl_aggr_dma_tmr = htole16((uint16_t)buf_tmrs);
3005 	req->cmpl_aggr_dma_tmr_during_int = htole16(buf_tmrs >> 16);
3006 	/* Minimum time between 2 interrupts set to buf_tmr x 2 */
3007 	req->int_lat_tmr_min = htole16((uint16_t)buf_tmrs * 2);
3008 	req->int_lat_tmr_max = htole16((uint16_t)buf_tmrs * 4);
3009 	req->num_cmpl_aggr_int = htole16((uint16_t)max_frames * 4);
3010 }
3011 
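/*
 * Build RX and TX interrupt coalescing parameters from the *_coal_*
 * tunables; the loop below currently programs the RX completion ring of
 * each queue with the RX parameters.
 */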
3012 int bnxt_hwrm_set_coal(struct bnxt_softc *softc)
3013 {
3014 	int i, rc = 0;
3015 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
3016 							   req_tx = {0}, *req;
3017 	uint16_t max_buf, max_buf_irq;
3018 	uint16_t buf_tmr, buf_tmr_irq;
3019 	uint32_t flags;
3020 
3021 	bnxt_hwrm_cmd_hdr_init(softc, &req_rx,
3022 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
3023 	bnxt_hwrm_cmd_hdr_init(softc, &req_tx,
3024 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
3025 
3026 	/* Each rx completion (2 records) should be DMAed immediately.
3027 	 * DMA 1/4 of the completion buffers at a time.
3028 	 */
3029 	max_buf = min_t(uint16_t, softc->rx_coal_frames / 4, 2);
3030 	/* max_buf must not be zero */
3031 	max_buf = clamp_t(uint16_t, max_buf, 1, 63);
3032 	max_buf_irq = clamp_t(uint16_t, softc->rx_coal_frames_irq, 1, 63);
3033 	buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs);
3034 	/* buf timer set to 1/4 of interrupt timer */
3035 	buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
3036 	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs_irq);
3037 	buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
3038 
3039 	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
3040 
3041 	/* RING_IDLE generates more IRQs for lower latency.  Enable it only
3042 	 * if coal_usecs is less than 25 us.
3043 	 */
3044 	if (softc->rx_coal_usecs < 25)
3045 		flags |= HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3046 
3047 	bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
3048 				  buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
3049 
3050 	/* max_buf must not be zero */
3051 	max_buf = clamp_t(uint16_t, softc->tx_coal_frames, 1, 63);
3052 	max_buf_irq = clamp_t(uint16_t, softc->tx_coal_frames_irq, 1, 63);
3053 	buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs);
3054 	/* buf timer set to 1/4 of interrupt timer */
3055 	buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
3056 	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs_irq);
3057 	buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
3058 	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
3059 	bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
3060 				  buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
3061 
3062 	for (i = 0; i < softc->nrxqsets; i++) {
3063 
3064 		req = &req_rx;
3065 		req->ring_id = htole16(softc->grp_info[i].cp_ring_id);
3066 
3067 		rc = hwrm_send_message(softc, req, sizeof(*req));
3068 		if (rc)
3069 			break;
3070 	}
3071 	return rc;
3072 }
3073 
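/*
 * Debug helper: query the firmware for the producer and consumer indices of
 * the given ring.  Both are set to 0xffffffff if the query fails.
 */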
3074 void bnxt_hwrm_ring_info_get(struct bnxt_softc *softc, uint8_t ring_type,
3075 			     uint32_t ring_id, uint32_t *prod, uint32_t *cons)
3076 {
3077 	hwrm_dbg_ring_info_get_input_t req = {0};
3078 	hwrm_dbg_ring_info_get_output_t *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
3079 	int rc = 0;
3080 
3081 	*prod = *cons = 0xffffffff;
3082 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_DBG_RING_INFO_GET);
3083 	req.ring_type = ring_type;
3084 	req.fw_ring_id = htole32(ring_id);
3085 	rc = hwrm_send_message(softc, &req, sizeof(req));
3086 	if (!rc) {
3087 		*prod = resp->producer_index;
3088 		*cons = resp->consumer_index;
3089 	}
3090 
3091 	return;
3092 }
3093