xref: /freebsd/sys/dev/bnxt/bnxt_en/if_bnxt.c (revision e9ac41698b2f322d55ccf9da50a3596edb2c1800)
1 /*-
2  * Broadcom NetXtreme-C/E network driver.
3  *
4  * Copyright (c) 2016 Broadcom, All Rights Reserved.
5  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/socket.h>
31 #include <sys/kernel.h>
32 #include <sys/bus.h>
33 #include <sys/module.h>
34 #include <sys/rman.h>
35 #include <sys/endian.h>
36 #include <sys/sockio.h>
37 #include <sys/priv.h>
38 
39 #include <machine/bus.h>
40 #include <machine/resource.h>
41 
42 #include <dev/pci/pcireg.h>
43 
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_var.h>
48 #include <net/ethernet.h>
49 #include <net/iflib.h>
50 
51 #include <linux/pci.h>
52 #include <linux/kmod.h>
53 #include <linux/module.h>
54 #include <linux/delay.h>
55 #include <linux/idr.h>
56 #include <linux/netdevice.h>
57 #include <linux/etherdevice.h>
58 #include <linux/rcupdate.h>
59 #include "opt_inet.h"
60 #include "opt_inet6.h"
61 #include "opt_rss.h"
62 
63 #include "ifdi_if.h"
64 
65 #include "bnxt.h"
66 #include "bnxt_hwrm.h"
67 #include "bnxt_ioctl.h"
68 #include "bnxt_sysctl.h"
69 #include "hsi_struct_def.h"
70 #include "bnxt_mgmt.h"
71 #include "bnxt_ulp.h"
72 #include "bnxt_auxbus_compat.h"
73 
74 /*
75  * PCI Device ID Table
76  */
77 
/*
 * Every PCI device ID this driver claims; handed to iflib via
 * .isc_vendor_info and IFLIB_PNP_INFO below.  The strings are the
 * human-readable device descriptions.  Keep PVID_END as the final
 * entry -- it is the array terminator.
 */
static const pci_vendor_info_t bnxt_vendor_info_array[] =
{
    PVID(BROADCOM_VENDOR_ID, BCM57301,
	"Broadcom BCM57301 NetXtreme-C 10Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57302,
	"Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57304,
	"Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57311,
	"Broadcom BCM57311 NetXtreme-C 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57312,
	"Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57314,
	"Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57402,
	"Broadcom BCM57402 NetXtreme-E 10Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57402_NPAR,
	"Broadcom BCM57402 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57404,
	"Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57404_NPAR,
	"Broadcom BCM57404 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57406,
	"Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57406_NPAR,
	"Broadcom BCM57406 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57407,
	"Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57407_NPAR,
	"Broadcom BCM57407 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57407_SFP,
	"Broadcom BCM57407 NetXtreme-E 25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57412,
	"Broadcom BCM57412 NetXtreme-E 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR1,
	"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR2,
	"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57414,
	"Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR1,
	"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR2,
	"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416,
	"Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR1,
	"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR2,
	"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_SFP,
	"Broadcom BCM57416 NetXtreme-E 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57417,
	"Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR1,
	"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR2,
	"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_SFP,
	"Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57454,
	"Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM58700,
	"Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57508,
	"Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57504,
	"Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57504_NPAR,
	"Broadcom BCM57504 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57502,
	"Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF1,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF2,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF3,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF1,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF2,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF3,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    /* required last entry */

    PVID_END
};
166 
167 /*
168  * Function prototypes
169  */
170 
171 SLIST_HEAD(softc_list, bnxt_softc_list) pf_list;
172 int bnxt_num_pfs = 0;
173 
174 void
175 process_nq(struct bnxt_softc *softc, uint16_t nqid);
176 static void *bnxt_register(device_t dev);
177 
178 /* Soft queue setup and teardown */
179 static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
180     uint64_t *paddrs, int ntxqs, int ntxqsets);
181 static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
182     uint64_t *paddrs, int nrxqs, int nrxqsets);
183 static void bnxt_queues_free(if_ctx_t ctx);
184 
185 /* Device setup and teardown */
186 static int bnxt_attach_pre(if_ctx_t ctx);
187 static int bnxt_attach_post(if_ctx_t ctx);
188 static int bnxt_detach(if_ctx_t ctx);
189 
190 /* Device configuration */
191 static void bnxt_init(if_ctx_t ctx);
192 static void bnxt_stop(if_ctx_t ctx);
193 static void bnxt_multi_set(if_ctx_t ctx);
194 static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu);
195 static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
196 static int bnxt_media_change(if_ctx_t ctx);
197 static int bnxt_promisc_set(if_ctx_t ctx, int flags);
198 static uint64_t	bnxt_get_counter(if_ctx_t, ift_counter);
199 static void bnxt_update_admin_status(if_ctx_t ctx);
200 static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid);
201 
202 /* Interrupt enable / disable */
203 static void bnxt_intr_enable(if_ctx_t ctx);
204 static int bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
205 static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
206 static void bnxt_disable_intr(if_ctx_t ctx);
207 static int bnxt_msix_intr_assign(if_ctx_t ctx, int msix);
208 
209 /* vlan support */
210 static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag);
211 static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag);
212 
213 /* ioctl */
214 static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
215 
216 static int bnxt_shutdown(if_ctx_t ctx);
217 static int bnxt_suspend(if_ctx_t ctx);
218 static int bnxt_resume(if_ctx_t ctx);
219 
220 /* Internal support functions */
221 static int bnxt_probe_phy(struct bnxt_softc *softc);
222 static void bnxt_add_media_types(struct bnxt_softc *softc);
223 static int bnxt_pci_mapping(struct bnxt_softc *softc);
224 static void bnxt_pci_mapping_free(struct bnxt_softc *softc);
225 static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state);
226 static int bnxt_handle_def_cp(void *arg);
227 static int bnxt_handle_isr(void *arg);
228 static void bnxt_clear_ids(struct bnxt_softc *softc);
229 static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr);
230 static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr);
231 static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr);
232 static void bnxt_def_cp_task(void *context);
233 static void bnxt_handle_async_event(struct bnxt_softc *softc,
234     struct cmpl_base *cmpl);
235 static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link);
236 static void bnxt_get_wol_settings(struct bnxt_softc *softc);
237 static int bnxt_wol_config(if_ctx_t ctx);
238 static bool bnxt_if_needs_restart(if_ctx_t, enum iflib_restart_event);
239 static int bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c);
240 static void bnxt_get_port_module_status(struct bnxt_softc *softc);
241 static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc);
242 static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc);
243 static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay);
244 void bnxt_queue_sp_work(struct bnxt_softc *bp);
245 
246 void bnxt_fw_reset(struct bnxt_softc *bp);
247 /*
248  * Device Interface Declaration
249  */
250 
/*
 * Newbus device method table.  Only device_register is driver-specific
 * (it hands iflib our shared context); probe/attach/detach/shutdown/
 * suspend/resume are delegated to the generic iflib implementations.
 */
static device_method_t bnxt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, bnxt_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};
262 
/* Newbus driver definition tying the "bnxt" name to the method table. */
static driver_t bnxt_driver = {
	"bnxt", bnxt_methods, sizeof(struct bnxt_softc),
};
266 
267 DRIVER_MODULE(bnxt, pci, bnxt_driver, 0, 0);
268 
269 MODULE_LICENSE("Dual BSD/GPL");
270 MODULE_DEPEND(if_bnxt, pci, 1, 1, 1);
271 MODULE_DEPEND(if_bnxt, ether, 1, 1, 1);
272 MODULE_DEPEND(if_bnxt, iflib, 1, 1, 1);
273 MODULE_DEPEND(if_bnxt, linuxkpi, 1, 1, 1);
274 MODULE_VERSION(if_bnxt, 1);
275 
276 IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array);
277 
278 void writel_fbsd(struct bnxt_softc *bp, u32, u8, u32);
279 u32 readl_fbsd(struct bnxt_softc *bp, u32, u8);
280 
281 u32 readl_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx)
282 {
283 
284 	if (!bar_idx)
285 		return bus_space_read_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle, reg_off);
286 	else
287 		return bus_space_read_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle, reg_off);
288 }
289 
290 void writel_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx, u32 val)
291 {
292 
293 	if (!bar_idx)
294 		bus_space_write_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle, reg_off, htole32(val));
295 	else
296 		bus_space_write_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle, reg_off, htole32(val));
297 }
298 
299 static DEFINE_IDA(bnxt_aux_dev_ids);
300 
/*
 * iflib device-interface (ifdi) method table: maps each iflib callback
 * to the corresponding bnxt implementation in this file.
 */
static device_method_t bnxt_iflib_methods[] = {
	/* Queue memory setup/teardown */
	DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, bnxt_queues_free),

	DEVMETHOD(ifdi_attach_pre, bnxt_attach_pre),
	DEVMETHOD(ifdi_attach_post, bnxt_attach_post),
	DEVMETHOD(ifdi_detach, bnxt_detach),

	/* Runtime (re)configuration */
	DEVMETHOD(ifdi_init, bnxt_init),
	DEVMETHOD(ifdi_stop, bnxt_stop),
	DEVMETHOD(ifdi_multi_set, bnxt_multi_set),
	DEVMETHOD(ifdi_mtu_set, bnxt_mtu_set),
	DEVMETHOD(ifdi_media_status, bnxt_media_status),
	DEVMETHOD(ifdi_media_change, bnxt_media_change),
	DEVMETHOD(ifdi_promisc_set, bnxt_promisc_set),
	DEVMETHOD(ifdi_get_counter, bnxt_get_counter),
	DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status),
	DEVMETHOD(ifdi_timer, bnxt_if_timer),

	/* Interrupt management */
	DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr),
	DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign),

	DEVMETHOD(ifdi_vlan_register, bnxt_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, bnxt_vlan_unregister),

	DEVMETHOD(ifdi_priv_ioctl, bnxt_priv_ioctl),

	DEVMETHOD(ifdi_suspend, bnxt_suspend),
	DEVMETHOD(ifdi_shutdown, bnxt_shutdown),
	DEVMETHOD(ifdi_resume, bnxt_resume),
	DEVMETHOD(ifdi_i2c_req, bnxt_i2c_req),

	DEVMETHOD(ifdi_needs_restart, bnxt_if_needs_restart),

	DEVMETHOD_END
};
341 
/* Driver handle given to iflib via the shared context (.isc_driver). */
static driver_t bnxt_iflib_driver = {
	"bnxt", bnxt_iflib_methods, sizeof(struct bnxt_softc)
};
345 
346 /*
347  * iflib shared context
348  */
349 
350 #define BNXT_DRIVER_VERSION	"230.0.133.0"
351 const char bnxt_driver_version[] = BNXT_DRIVER_VERSION;
352 extern struct if_txrx bnxt_txrx;
static struct if_shared_ctx bnxt_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_driver = &bnxt_iflib_driver,
	.isc_nfl = 2,				// Number of Free Lists
	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),

	// Only use a single segment to avoid page size constraints
	.isc_rx_nsegments = 1,
	// 3 rings per TX queue set (completion, TX, NQ) and per RX queue
	// set (completion, RX, AG) -- see bnxt_tx_queues_alloc() and
	// bnxt_rx_queues_alloc() for how vaddrs/paddrs are indexed.
	.isc_ntxqs = 3,
	.isc_nrxqs = 3,
	// Descriptor counts per ring, indexed in the same order as above.
	.isc_nrxd_min = {16, 16, 16},
	.isc_nrxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 8,
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd),
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd)},
	.isc_nrxd_max = {BNXT_MAX_RXD, BNXT_MAX_RXD, BNXT_MAX_RXD},
	.isc_ntxd_min = {16, 16, 16},
	.isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2,
	    PAGE_SIZE / sizeof(struct tx_bd_short),
	    /* NQ depth 4096 */
	    PAGE_SIZE / sizeof(struct cmpl_base) * 16},
	.isc_ntxd_max = {BNXT_MAX_TXD, BNXT_MAX_TXD, BNXT_MAX_TXD},

	.isc_admin_intrcnt = BNXT_ROCE_IRQ_COUNT,
	.isc_vendor_info = bnxt_vendor_info_array,
	.isc_driver_version = bnxt_driver_version,
};
386 
387 #define PCI_SUBSYSTEM_ID	0x2e
388 static struct workqueue_struct *bnxt_pf_wq;
389 
390 extern void bnxt_destroy_irq(struct bnxt_softc *softc);
391 
392 /*
393  * Device Methods
394  */
395 
396 static void *
397 bnxt_register(device_t dev)
398 {
399 	return (&bnxt_sctx_init);
400 }
401 
402 static void
403 bnxt_nq_alloc(struct bnxt_softc *softc, int nqsets)
404 {
405 
406 	if (softc->nq_rings)
407 		return;
408 
409 	softc->nq_rings = malloc(sizeof(struct bnxt_cp_ring) * nqsets,
410 	    M_DEVBUF, M_NOWAIT | M_ZERO);
411 }
412 
413 static void
414 bnxt_nq_free(struct bnxt_softc *softc)
415 {
416 
417 	if (softc->nq_rings)
418 		free(softc->nq_rings, M_DEVBUF);
419 	softc->nq_rings = NULL;
420 }
421 
422 /*
423  * Device Dependent Configuration Functions
424 */
425 
426 /* Soft queue setup and teardown */
427 static int
428 bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
429     uint64_t *paddrs, int ntxqs, int ntxqsets)
430 {
431 	struct bnxt_softc *softc;
432 	int i;
433 	int rc;
434 
435 	softc = iflib_get_softc(ctx);
436 
437 	if (BNXT_CHIP_P5(softc)) {
438 		bnxt_nq_alloc(softc, ntxqsets);
439 		if (!softc->nq_rings) {
440 			device_printf(iflib_get_dev(ctx),
441 					"unable to allocate NQ rings\n");
442 			rc = ENOMEM;
443 			goto nq_alloc_fail;
444 		}
445 	}
446 
447 	softc->tx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * ntxqsets,
448 	    M_DEVBUF, M_NOWAIT | M_ZERO);
449 	if (!softc->tx_cp_rings) {
450 		device_printf(iflib_get_dev(ctx),
451 		    "unable to allocate TX completion rings\n");
452 		rc = ENOMEM;
453 		goto cp_alloc_fail;
454 	}
455 	softc->tx_rings = malloc(sizeof(struct bnxt_ring) * ntxqsets,
456 	    M_DEVBUF, M_NOWAIT | M_ZERO);
457 	if (!softc->tx_rings) {
458 		device_printf(iflib_get_dev(ctx),
459 		    "unable to allocate TX rings\n");
460 		rc = ENOMEM;
461 		goto ring_alloc_fail;
462 	}
463 
464 	for (i=0; i < ntxqsets; i++) {
465 		rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
466 				&softc->tx_stats[i], 0);
467 		if (rc)
468 			goto dma_alloc_fail;
469 		bus_dmamap_sync(softc->tx_stats[i].idi_tag, softc->tx_stats[i].idi_map,
470 				BUS_DMASYNC_PREREAD);
471 	}
472 
473 	for (i = 0; i < ntxqsets; i++) {
474 		/* Set up the completion ring */
475 		softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
476 		softc->tx_cp_rings[i].ring.phys_id =
477 		    (uint16_t)HWRM_NA_SIGNATURE;
478 		softc->tx_cp_rings[i].ring.softc = softc;
479 		softc->tx_cp_rings[i].ring.idx = i;
480 		softc->tx_cp_rings[i].ring.id =
481 		    (softc->scctx->isc_nrxqsets * 2) + 1 + i;
482 		softc->tx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
483 			DB_PF_OFFSET_P5: softc->tx_cp_rings[i].ring.id * 0x80;
484 		softc->tx_cp_rings[i].ring.ring_size =
485 		    softc->scctx->isc_ntxd[0];
486 		softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs];
487 		softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs];
488 
489 		/* Set up the TX ring */
490 		softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
491 		softc->tx_rings[i].softc = softc;
492 		softc->tx_rings[i].idx = i;
493 		softc->tx_rings[i].id =
494 		    (softc->scctx->isc_nrxqsets * 2) + 1 + i;
495 		softc->tx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
496 			DB_PF_OFFSET_P5 : softc->tx_rings[i].id * 0x80;
497 		softc->tx_rings[i].ring_size = softc->scctx->isc_ntxd[1];
498 		softc->tx_rings[i].vaddr = vaddrs[i * ntxqs + 1];
499 		softc->tx_rings[i].paddr = paddrs[i * ntxqs + 1];
500 
501 		bnxt_create_tx_sysctls(softc, i);
502 
503 		if (BNXT_CHIP_P5(softc)) {
504 			/* Set up the Notification ring (NQ) */
505 			softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
506 			softc->nq_rings[i].ring.phys_id =
507 				(uint16_t)HWRM_NA_SIGNATURE;
508 			softc->nq_rings[i].ring.softc = softc;
509 			softc->nq_rings[i].ring.idx = i;
510 			softc->nq_rings[i].ring.id = i;
511 			softc->nq_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
512 				DB_PF_OFFSET_P5 : softc->nq_rings[i].ring.id * 0x80;
513 			softc->nq_rings[i].ring.ring_size = softc->scctx->isc_ntxd[2];
514 			softc->nq_rings[i].ring.vaddr = vaddrs[i * ntxqs + 2];
515 			softc->nq_rings[i].ring.paddr = paddrs[i * ntxqs + 2];
516 		}
517 	}
518 
519 	softc->ntxqsets = ntxqsets;
520 	return rc;
521 
522 dma_alloc_fail:
523 	for (i = i - 1; i >= 0; i--)
524 		iflib_dma_free(&softc->tx_stats[i]);
525 	free(softc->tx_rings, M_DEVBUF);
526 ring_alloc_fail:
527 	free(softc->tx_cp_rings, M_DEVBUF);
528 cp_alloc_fail:
529 	bnxt_nq_free(softc);
530 nq_alloc_fail:
531 	return rc;
532 }
533 
534 static void
535 bnxt_queues_free(if_ctx_t ctx)
536 {
537 	struct bnxt_softc *softc = iflib_get_softc(ctx);
538 	int i;
539 
540 	// Free TX queues
541 	for (i=0; i<softc->ntxqsets; i++)
542 		iflib_dma_free(&softc->tx_stats[i]);
543 	free(softc->tx_rings, M_DEVBUF);
544 	softc->tx_rings = NULL;
545 	free(softc->tx_cp_rings, M_DEVBUF);
546 	softc->tx_cp_rings = NULL;
547 	softc->ntxqsets = 0;
548 
549 	// Free RX queues
550 	for (i=0; i<softc->nrxqsets; i++)
551 		iflib_dma_free(&softc->rx_stats[i]);
552 	iflib_dma_free(&softc->hw_tx_port_stats);
553 	iflib_dma_free(&softc->hw_rx_port_stats);
554 	iflib_dma_free(&softc->hw_tx_port_stats_ext);
555 	iflib_dma_free(&softc->hw_rx_port_stats_ext);
556 	free(softc->grp_info, M_DEVBUF);
557 	free(softc->ag_rings, M_DEVBUF);
558 	free(softc->rx_rings, M_DEVBUF);
559 	free(softc->rx_cp_rings, M_DEVBUF);
560 	bnxt_nq_free(softc);
561 }
562 
563 static int
564 bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
565     uint64_t *paddrs, int nrxqs, int nrxqsets)
566 {
567 	struct bnxt_softc *softc;
568 	int i;
569 	int rc;
570 
571 	softc = iflib_get_softc(ctx);
572 
573 	softc->rx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * nrxqsets,
574 	    M_DEVBUF, M_NOWAIT | M_ZERO);
575 	if (!softc->rx_cp_rings) {
576 		device_printf(iflib_get_dev(ctx),
577 		    "unable to allocate RX completion rings\n");
578 		rc = ENOMEM;
579 		goto cp_alloc_fail;
580 	}
581 	softc->rx_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
582 	    M_DEVBUF, M_NOWAIT | M_ZERO);
583 	if (!softc->rx_rings) {
584 		device_printf(iflib_get_dev(ctx),
585 		    "unable to allocate RX rings\n");
586 		rc = ENOMEM;
587 		goto ring_alloc_fail;
588 	}
589 	softc->ag_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
590 	    M_DEVBUF, M_NOWAIT | M_ZERO);
591 	if (!softc->ag_rings) {
592 		device_printf(iflib_get_dev(ctx),
593 		    "unable to allocate aggregation rings\n");
594 		rc = ENOMEM;
595 		goto ag_alloc_fail;
596 	}
597 	softc->grp_info = malloc(sizeof(struct bnxt_grp_info) * nrxqsets,
598 	    M_DEVBUF, M_NOWAIT | M_ZERO);
599 	if (!softc->grp_info) {
600 		device_printf(iflib_get_dev(ctx),
601 		    "unable to allocate ring groups\n");
602 		rc = ENOMEM;
603 		goto grp_alloc_fail;
604 	}
605 
606 	for (i=0; i < nrxqsets; i++) {
607 		rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
608 				&softc->rx_stats[i], 0);
609 		if (rc)
610 			goto hw_stats_alloc_fail;
611 		bus_dmamap_sync(softc->rx_stats[i].idi_tag, softc->rx_stats[i].idi_map,
612 				BUS_DMASYNC_PREREAD);
613 	}
614 
615 /*
616  * Additional 512 bytes for future expansion.
617  * To prevent corruption when loaded with newer firmwares with added counters.
618  * This can be deleted when there will be no further additions of counters.
619  */
620 #define BNXT_PORT_STAT_PADDING  512
621 
622 	rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats) + BNXT_PORT_STAT_PADDING,
623 	    &softc->hw_rx_port_stats, 0);
624 	if (rc)
625 		goto hw_port_rx_stats_alloc_fail;
626 
627 	bus_dmamap_sync(softc->hw_rx_port_stats.idi_tag,
628             softc->hw_rx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
629 
630 
631 	rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats) + BNXT_PORT_STAT_PADDING,
632 	    &softc->hw_tx_port_stats, 0);
633 	if (rc)
634 		goto hw_port_tx_stats_alloc_fail;
635 
636 	bus_dmamap_sync(softc->hw_tx_port_stats.idi_tag,
637             softc->hw_tx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
638 
639 	softc->rx_port_stats = (void *) softc->hw_rx_port_stats.idi_vaddr;
640 	softc->tx_port_stats = (void *) softc->hw_tx_port_stats.idi_vaddr;
641 
642 
643 	rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats_ext),
644 		&softc->hw_rx_port_stats_ext, 0);
645 	if (rc)
646 		goto hw_port_rx_stats_ext_alloc_fail;
647 
648 	bus_dmamap_sync(softc->hw_rx_port_stats_ext.idi_tag,
649 	    softc->hw_rx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
650 
651 	rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats_ext),
652 		&softc->hw_tx_port_stats_ext, 0);
653 	if (rc)
654 		goto hw_port_tx_stats_ext_alloc_fail;
655 
656 	bus_dmamap_sync(softc->hw_tx_port_stats_ext.idi_tag,
657 	    softc->hw_tx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
658 
659 	softc->rx_port_stats_ext = (void *) softc->hw_rx_port_stats_ext.idi_vaddr;
660 	softc->tx_port_stats_ext = (void *) softc->hw_tx_port_stats_ext.idi_vaddr;
661 
662 	for (i = 0; i < nrxqsets; i++) {
663 		/* Allocation the completion ring */
664 		softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
665 		softc->rx_cp_rings[i].ring.phys_id =
666 		    (uint16_t)HWRM_NA_SIGNATURE;
667 		softc->rx_cp_rings[i].ring.softc = softc;
668 		softc->rx_cp_rings[i].ring.idx = i;
669 		softc->rx_cp_rings[i].ring.id = i + 1;
670 		softc->rx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
671 			DB_PF_OFFSET_P5 : softc->rx_cp_rings[i].ring.id * 0x80;
672 		/*
673 		 * If this ring overflows, RX stops working.
674 		 */
675 		softc->rx_cp_rings[i].ring.ring_size =
676 		    softc->scctx->isc_nrxd[0];
677 		softc->rx_cp_rings[i].ring.vaddr = vaddrs[i * nrxqs];
678 		softc->rx_cp_rings[i].ring.paddr = paddrs[i * nrxqs];
679 
680 		/* Allocate the RX ring */
681 		softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
682 		softc->rx_rings[i].softc = softc;
683 		softc->rx_rings[i].idx = i;
684 		softc->rx_rings[i].id = i + 1;
685 		softc->rx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
686 			DB_PF_OFFSET_P5 : softc->rx_rings[i].id * 0x80;
687 		softc->rx_rings[i].ring_size = softc->scctx->isc_nrxd[1];
688 		softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1];
689 		softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1];
690 
691 		/* Allocate the TPA start buffer */
692 		softc->rx_rings[i].tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) *
693 	    		(RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT),
694 	    		M_DEVBUF, M_NOWAIT | M_ZERO);
695 		if (softc->rx_rings[i].tpa_start == NULL) {
696 			rc = -ENOMEM;
697 			device_printf(softc->dev,
698 					"Unable to allocate space for TPA\n");
699 			goto tpa_alloc_fail;
700 		}
701 		/* Allocate the AG ring */
702 		softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
703 		softc->ag_rings[i].softc = softc;
704 		softc->ag_rings[i].idx = i;
705 		softc->ag_rings[i].id = nrxqsets + i + 1;
706 		softc->ag_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
707 			DB_PF_OFFSET_P5 : softc->ag_rings[i].id * 0x80;
708 		softc->ag_rings[i].ring_size = softc->scctx->isc_nrxd[2];
709 		softc->ag_rings[i].vaddr = vaddrs[i * nrxqs + 2];
710 		softc->ag_rings[i].paddr = paddrs[i * nrxqs + 2];
711 
712 		/* Allocate the ring group */
713 		softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
714 		softc->grp_info[i].stats_ctx =
715 		    softc->rx_cp_rings[i].stats_ctx_id;
716 		softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
717 		softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
718 		softc->grp_info[i].cp_ring_id =
719 		    softc->rx_cp_rings[i].ring.phys_id;
720 
721 		bnxt_create_rx_sysctls(softc, i);
722 	}
723 
724 	/*
725 	 * When SR-IOV is enabled, avoid each VF sending PORT_QSTATS
726          * HWRM every sec with which firmware timeouts can happen
727          */
728 	if (BNXT_PF(softc))
729 		bnxt_create_port_stats_sysctls(softc);
730 
731 	/* And finally, the VNIC */
732 	softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
733 	softc->vnic_info.filter_id = -1;
734 	softc->vnic_info.def_ring_grp = (uint16_t)HWRM_NA_SIGNATURE;
735 	softc->vnic_info.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
736 	softc->vnic_info.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
737 	softc->vnic_info.rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
738 		HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
739 	softc->vnic_info.mc_list_count = 0;
740 	softc->vnic_info.flags = BNXT_VNIC_FLAG_DEFAULT;
741 	rc = iflib_dma_alloc(ctx, BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN,
742 	    &softc->vnic_info.mc_list, 0);
743 	if (rc)
744 		goto mc_list_alloc_fail;
745 
746 	/* The VNIC RSS Hash Key */
747 	rc = iflib_dma_alloc(ctx, HW_HASH_KEY_SIZE,
748 	    &softc->vnic_info.rss_hash_key_tbl, 0);
749 	if (rc)
750 		goto rss_hash_alloc_fail;
751 	bus_dmamap_sync(softc->vnic_info.rss_hash_key_tbl.idi_tag,
752 	    softc->vnic_info.rss_hash_key_tbl.idi_map,
753 	    BUS_DMASYNC_PREWRITE);
754 	memcpy(softc->vnic_info.rss_hash_key_tbl.idi_vaddr,
755 	    softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE);
756 
757 	/* Allocate the RSS tables */
758 	rc = iflib_dma_alloc(ctx, HW_HASH_INDEX_SIZE * sizeof(uint16_t),
759 	    &softc->vnic_info.rss_grp_tbl, 0);
760 	if (rc)
761 		goto rss_grp_alloc_fail;
762 	bus_dmamap_sync(softc->vnic_info.rss_grp_tbl.idi_tag,
763 	    softc->vnic_info.rss_grp_tbl.idi_map,
764 	    BUS_DMASYNC_PREWRITE);
765 	memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
766 	    softc->vnic_info.rss_grp_tbl.idi_size);
767 
768 	softc->nrxqsets = nrxqsets;
769 	return rc;
770 
771 rss_grp_alloc_fail:
772 	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
773 rss_hash_alloc_fail:
774 	iflib_dma_free(&softc->vnic_info.mc_list);
775 mc_list_alloc_fail:
776 	for (i = i - 1; i >= 0; i--) {
777 		if (softc->rx_rings[i].tpa_start)
778 			free(softc->rx_rings[i].tpa_start, M_DEVBUF);
779 	}
780 tpa_alloc_fail:
781 	iflib_dma_free(&softc->hw_tx_port_stats_ext);
782 hw_port_tx_stats_ext_alloc_fail:
783 	iflib_dma_free(&softc->hw_rx_port_stats_ext);
784 hw_port_rx_stats_ext_alloc_fail:
785 	iflib_dma_free(&softc->hw_tx_port_stats);
786 hw_port_tx_stats_alloc_fail:
787 	iflib_dma_free(&softc->hw_rx_port_stats);
788 hw_port_rx_stats_alloc_fail:
789 	for (i=0; i < nrxqsets; i++) {
790 		if (softc->rx_stats[i].idi_vaddr)
791 			iflib_dma_free(&softc->rx_stats[i]);
792 	}
793 hw_stats_alloc_fail:
794 	free(softc->grp_info, M_DEVBUF);
795 grp_alloc_fail:
796 	free(softc->ag_rings, M_DEVBUF);
797 ag_alloc_fail:
798 	free(softc->rx_rings, M_DEVBUF);
799 ring_alloc_fail:
800 	free(softc->rx_cp_rings, M_DEVBUF);
801 cp_alloc_fail:
802 	return rc;
803 }
804 
805 static void bnxt_free_hwrm_short_cmd_req(struct bnxt_softc *softc)
806 {
807 	if (softc->hwrm_short_cmd_req_addr.idi_vaddr)
808 		iflib_dma_free(&softc->hwrm_short_cmd_req_addr);
809 	softc->hwrm_short_cmd_req_addr.idi_vaddr = NULL;
810 }
811 
812 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc)
813 {
814 	int rc;
815 
816 	rc = iflib_dma_alloc(softc->ctx, softc->hwrm_max_req_len,
817 	    &softc->hwrm_short_cmd_req_addr, BUS_DMA_NOWAIT);
818 
819 	return rc;
820 }
821 
822 static void bnxt_free_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
823 {
824 	int i;
825 
826 	for (i = 0; i < rmem->nr_pages; i++) {
827 		if (!rmem->pg_arr[i].idi_vaddr)
828 			continue;
829 
830 		iflib_dma_free(&rmem->pg_arr[i]);
831 		rmem->pg_arr[i].idi_vaddr = NULL;
832 	}
833 	if (rmem->pg_tbl.idi_vaddr) {
834 		iflib_dma_free(&rmem->pg_tbl);
835 		rmem->pg_tbl.idi_vaddr = NULL;
836 
837 	}
838 	if (rmem->vmem_size && *rmem->vmem) {
839 		free(*rmem->vmem, M_DEVBUF);
840 		*rmem->vmem = NULL;
841 	}
842 }
843 
844 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
845 {
846 	u8 init_val = ctxm->init_value;
847 	u16 offset = ctxm->init_offset;
848 	u8 *p2 = p;
849 	int i;
850 
851 	if (!init_val)
852 		return;
853 	if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
854 		memset(p, init_val, len);
855 		return;
856 	}
857 	for (i = 0; i < len; i += ctxm->entry_size)
858 		*(p2 + i + offset) = init_val;
859 }
860 
/*
 * Allocate the DMA memory backing a ring described by @rmem: the data
 * pages, an optional page (indirection) table when multiple pages or
 * extra depth are needed, and an optional host-side vmem area.
 *
 * Returns 0 on success or -ENOMEM on any allocation failure.  Pieces
 * allocated before a failure are NOT freed here; the caller is expected
 * to clean up (e.g. via bnxt_free_ring()).
 */
static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
{
	uint64_t valid_bit = 0;
	int i;
	int rc;

	/* These layouts require a valid bit in each page-table entry. */
	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;

	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl.idi_vaddr) {
		size_t pg_tbl_size = rmem->nr_pages * 8;	/* 8 bytes per PTE */

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;

		rc = iflib_dma_alloc(softc->ctx, pg_tbl_size, &rmem->pg_tbl, 0);
		if (rc)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		uint64_t extra_bits = valid_bit;
		uint64_t *ptr;

		rc = iflib_dma_alloc(softc->ctx, rmem->page_size, &rmem->pg_arr[i], 0);
		if (rc)
			return -ENOMEM;

		/* Fill the new page with the FW-required init pattern. */
		if (rmem->ctx_mem)
			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i].idi_vaddr,
					rmem->page_size);

		if (rmem->nr_pages > 1 || rmem->depth > 0) {
			/* Ring-style tables mark the final two entries. */
			if (i == rmem->nr_pages - 2 &&
					(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
					(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;

			/* Publish the page's bus address (little-endian). */
			ptr = (void *) rmem->pg_tbl.idi_vaddr;
			ptr[i]  = htole64(rmem->pg_arr[i].idi_paddr | extra_bits);
		}
	}

	if (rmem->vmem_size) {
		*rmem->vmem = malloc(rmem->vmem_size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
	return 0;
}
913 
914 
915 #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES		\
916 	(HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |		\
917 	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ |	\
918 	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ |		\
919 	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC |	\
920 	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
921 
922 static int bnxt_alloc_ctx_mem_blk(struct bnxt_softc *softc,
923 				  struct bnxt_ctx_pg_info *ctx_pg)
924 {
925 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
926 
927 	rmem->page_size = BNXT_PAGE_SIZE;
928 	rmem->pg_arr = ctx_pg->ctx_arr;
929 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
930 	if (rmem->depth >= 1)
931 		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
932 
933 	return bnxt_alloc_ring(softc, rmem);
934 }
935 
/*
 * Allocate the page tables for one backing-store context region of
 * @mem_size bytes.  Small regions use a single-level table; regions
 * needing more than MAX_CTX_PAGES pages (or when @depth > 1) use a
 * two-level layout: a top-level block whose pages become the page
 * tables of per-chunk second-level blocks.
 *
 * Returns 0 on success or a negative errno; on failure the caller is
 * expected to unwind via bnxt_free_ctx_pg_tbls().
 */
static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
	int rc;

	if (!mem_size)
		return -EINVAL;

	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
		ctx_pg->nr_pages = 0;
		return -EINVAL;
	}
	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
		int nr_tbls, i;

		rmem->depth = 2;
		/* sizeof(ctx_pg) is the size of a pointer here, matching
		 * the element type of the ctx_pg_tbl pointer array. */
		ctx_pg->ctx_pg_tbl = kzalloc(MAX_CTX_PAGES * sizeof(ctx_pg),
					      GFP_KERNEL);
		if (!ctx_pg->ctx_pg_tbl)
			return -ENOMEM;
		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
		rmem->nr_pages = nr_tbls;
		/* Top-level block: its pages become second-level tables. */
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
		if (rc)
			return rc;
		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
			if (!pg_tbl)
				return -ENOMEM;
			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
			rmem = &pg_tbl->ring_mem;
			/* Use the i-th top-level page as this chunk's
			 * page table. */
			memcpy(&rmem->pg_tbl, &ctx_pg->ctx_arr[i], sizeof(struct iflib_dma_info));
			rmem->depth = 1;
			rmem->nr_pages = MAX_CTX_PAGES;
			rmem->ctx_mem = ctxm;
			/* The final chunk may be partially filled. */
			if (i == (nr_tbls - 1)) {
				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;

				if (rem)
					rmem->nr_pages = rem;
			}
			rc = bnxt_alloc_ctx_mem_blk(softc, pg_tbl);
			if (rc)
				break;
		}
	} else {
		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
		if (rmem->nr_pages > 1 || depth)
			rmem->depth = 1;
		rmem->ctx_mem = ctxm;
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
	}
	return rc;
}
995 
996 static void bnxt_free_ctx_pg_tbls(struct bnxt_softc *softc,
997 				  struct bnxt_ctx_pg_info *ctx_pg)
998 {
999 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
1000 
1001 	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
1002 	    ctx_pg->ctx_pg_tbl) {
1003 		int i, nr_tbls = rmem->nr_pages;
1004 
1005 		for (i = 0; i < nr_tbls; i++) {
1006 			struct bnxt_ctx_pg_info *pg_tbl;
1007 			struct bnxt_ring_mem_info *rmem2;
1008 
1009 			pg_tbl = ctx_pg->ctx_pg_tbl[i];
1010 			if (!pg_tbl)
1011 				continue;
1012 			rmem2 = &pg_tbl->ring_mem;
1013 			bnxt_free_ring(softc, rmem2);
1014 			ctx_pg->ctx_arr[i].idi_vaddr = NULL;
1015 			free(pg_tbl , M_DEVBUF);
1016 			ctx_pg->ctx_pg_tbl[i] = NULL;
1017 		}
1018 		kfree(ctx_pg->ctx_pg_tbl);
1019 		ctx_pg->ctx_pg_tbl = NULL;
1020 	}
1021 	bnxt_free_ring(softc, rmem);
1022 	ctx_pg->nr_pages = 0;
1023 }
1024 
1025 static int bnxt_setup_ctxm_pg_tbls(struct bnxt_softc *softc,
1026 				   struct bnxt_ctx_mem_type *ctxm, u32 entries,
1027 				   u8 pg_lvl)
1028 {
1029 	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
1030 	int i, rc = 0, n = 1;
1031 	u32 mem_size;
1032 
1033 	if (!ctxm->entry_size || !ctx_pg)
1034 		return -EINVAL;
1035 	if (ctxm->instance_bmap)
1036 		n = hweight32(ctxm->instance_bmap);
1037 	if (ctxm->entry_multiple)
1038 		entries = roundup(entries, ctxm->entry_multiple);
1039 	entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
1040 	mem_size = entries * ctxm->entry_size;
1041 	for (i = 0; i < n && !rc; i++) {
1042 		ctx_pg[i].entries = entries;
1043 		rc = bnxt_alloc_ctx_pg_tbls(softc, &ctx_pg[i], mem_size, pg_lvl,
1044 					    ctxm->init_value ? ctxm : NULL);
1045 	}
1046 	return rc;
1047 }
1048 
1049 static void bnxt_free_ctx_mem(struct bnxt_softc *softc)
1050 {
1051 	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
1052 	u16 type;
1053 
1054 	if (!ctx)
1055 		return;
1056 
1057 	for (type = 0; type < BNXT_CTX_MAX; type++) {
1058 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
1059 		struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
1060 		int i, n = 1;
1061 
1062 		if (!ctx_pg)
1063 			continue;
1064 		if (ctxm->instance_bmap)
1065 			n = hweight32(ctxm->instance_bmap);
1066 		for (i = 0; i < n; i++)
1067 			bnxt_free_ctx_pg_tbls(softc, &ctx_pg[i]);
1068 
1069 		kfree(ctx_pg);
1070 		ctxm->pg_info = NULL;
1071 	}
1072 
1073 	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
1074 	kfree(ctx);
1075 	softc->ctx_mem = NULL;
1076 }
1077 
1078 static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
1079 {
1080 	struct bnxt_ctx_pg_info *ctx_pg;
1081 	struct bnxt_ctx_mem_type *ctxm;
1082 	struct bnxt_ctx_mem_info *ctx;
1083 	u32 l2_qps, qp1_qps, max_qps;
1084 	u32 ena, entries_sp, entries;
1085 	u32 srqs, max_srqs, min;
1086 	u32 num_mr, num_ah;
1087 	u32 extra_srqs = 0;
1088 	u32 extra_qps = 0;
1089 	u8 pg_lvl = 1;
1090 	int i, rc;
1091 
1092 	if (!BNXT_CHIP_P5(softc))
1093 		return 0;
1094 
1095 	rc = bnxt_hwrm_func_backing_store_qcaps(softc);
1096 	if (rc) {
1097 		device_printf(softc->dev, "Failed querying context mem capability, rc = %d.\n",
1098 			   rc);
1099 		return rc;
1100 	}
1101 	ctx = softc->ctx_mem;
1102 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
1103 		return 0;
1104 
1105 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
1106 	l2_qps = ctxm->qp_l2_entries;
1107 	qp1_qps = ctxm->qp_qp1_entries;
1108 	max_qps = ctxm->max_entries;
1109 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
1110 	srqs = ctxm->srq_l2_entries;
1111 	max_srqs = ctxm->max_entries;
1112 	if (softc->flags & BNXT_FLAG_ROCE_CAP) {
1113 		pg_lvl = 2;
1114 		extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
1115 		extra_srqs = min_t(u32, 8192, max_srqs - srqs);
1116 	}
1117 
1118 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
1119 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps,
1120 				     pg_lvl);
1121 	if (rc)
1122 		return rc;
1123 
1124 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
1125 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, srqs + extra_srqs, pg_lvl);
1126 	if (rc)
1127 		return rc;
1128 
1129 	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
1130 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->cq_l2_entries +
1131 				     extra_qps * 2, pg_lvl);
1132 	if (rc)
1133 		return rc;
1134 
1135 	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
1136 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
1137 	if (rc)
1138 		return rc;
1139 
1140 	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
1141 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
1142 	if (rc)
1143 		return rc;
1144 
1145 	ena = 0;
1146 	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
1147 		goto skip_rdma;
1148 
1149 	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
1150 	ctx_pg = ctxm->pg_info;
1151 	/* 128K extra is needed to accomodate static AH context
1152 	 * allocation by f/w.
1153 	 */
1154 	num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
1155 	num_ah = min_t(u32, num_mr, 1024 * 128);
1156 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, num_mr + num_ah, 2);
1157 	if (rc)
1158 		return rc;
1159 	ctx_pg->entries = num_mr + num_ah;
1160 	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV;
1161 	if (ctxm->mrav_num_entries_units)
1162 		ctx_pg->entries =
1163 			((num_mr / ctxm->mrav_num_entries_units) << 16) |
1164 			 (num_ah / ctxm->mrav_num_entries_units);
1165 
1166 	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
1167 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps, 1);
1168 	if (rc)
1169 		return rc;
1170 	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM;
1171 
1172 skip_rdma:
1173 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
1174 	min = ctxm->min_entries;
1175 	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
1176 		     2 * (extra_qps + qp1_qps) + min;
1177 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries_sp, 2);
1178 		if (rc)
1179 			return rc;
1180 
1181 	ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
1182 	entries = l2_qps + 2 * (extra_qps + qp1_qps);
1183 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries, 2);
1184 	if (rc)
1185 		return rc;
1186 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
1187 		if (i < BNXT_MAX_TQM_LEGACY_RINGS)
1188 			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
1189 		else
1190 			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8;
1191 	}
1192 	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
1193 
1194 	rc = bnxt_hwrm_func_backing_store_cfg(softc, ena);
1195 	if (rc) {
1196 		device_printf(softc->dev, "Failed configuring context mem, rc = %d.\n",
1197 			   rc);
1198 		return rc;
1199 	}
1200 	ctx->flags |= BNXT_CTX_FLAG_INITED;
1201 
1202 	return 0;
1203 }
1204 
1205 /*
1206  * If we update the index, a write barrier is needed after the write to ensure
1207  * the completion ring has space before the RX/TX ring does.  Since we can't
1208  * make the RX and AG doorbells covered by the same barrier without remapping
 * MSI-X vectors, we create the barrier over the entire doorbell bar.
1210  * TODO: Remap the MSI-X vectors to allow a barrier to only cover the doorbells
1211  *       for a single ring group.
1212  *
1213  * A barrier of just the size of the write is used to ensure the ordering
1214  * remains correct and no writes are lost.
1215  */
1216 
/*
 * Ring an RX doorbell (legacy 32-bit doorbell format).  The preceding
 * write barrier orders earlier descriptor writes ahead of the doorbell
 * write -- see the block comment above.
 */
static void bnxt_cuw_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
			htole32(RX_DOORBELL_KEY_RX | idx));
}
1227 
/*
 * Ring a TX doorbell (legacy 32-bit doorbell format).  The preceding
 * write barrier orders earlier descriptor writes ahead of the doorbell
 * write -- see the block comment above.
 */
static void bnxt_cuw_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
			htole32(TX_DOORBELL_KEY_TX | idx));
}
1238 
/*
 * Ring a completion-queue doorbell (legacy format): reports the last
 * consumed index and arms or masks the interrupt.  cpr->cons ==
 * UINT32_MAX means nothing has been consumed yet, so no valid index
 * is reported.  The trailing barrier covers the whole doorbell BAR
 * (see the block comment above).
 */
static void bnxt_cuw_db_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 4,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole32(CMPL_DOORBELL_KEY_CMPL |
				((cpr->cons == UINT32_MAX) ? 0 :
				 (cpr->cons | CMPL_DOORBELL_IDX_VALID)) |
				((enable_irq) ? 0 : CMPL_DOORBELL_MASK)));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1254 
/*
 * Ring a Thor (P5) RX ring doorbell: one 64-bit write carrying the
 * producer index and the ring's physical ID (SRQ doorbell type).
 */
static void bnxt_thor_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	/* Order prior descriptor writes before the doorbell. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
			htole64((DBR_PATH_L2 | DBR_TYPE_SRQ | idx) |
				((uint64_t)ring->phys_id << DBR_XID_SFT)));
}
1266 
/*
 * Ring a Thor (P5) TX ring doorbell: one 64-bit write carrying the
 * producer index and the ring's physical ID (SQ doorbell type).
 */
static void bnxt_thor_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	/* Order prior descriptor writes before the doorbell. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
			htole64((DBR_PATH_L2 | DBR_TYPE_SQ | idx) |
				((uint64_t)ring->phys_id << DBR_XID_SFT)));
}
1278 
/*
 * Ring a Thor (P5) RX completion-queue doorbell.  The reported index
 * is the entry after the last one consumed (0 when nothing has been
 * consumed yet, signalled by cons == UINT32_MAX).  Arms the CQ when
 * interrupts are requested; trailing barrier covers the whole BAR.
 */
static void bnxt_thor_db_rx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	if (cons == UINT32_MAX)
		cons = 0;
	else
		cons = RING_NEXT(&cpr->ring, cons);

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1304 
/*
 * Ring a Thor (P5) TX completion-queue doorbell, reporting cpr->cons
 * and arming the CQ when interrupts are requested.
 *
 * NOTE(review): unlike the RX variant, cons is used unmodified here
 * (no UINT32_MAX handling, no RING_NEXT) -- confirm that is intended.
 */
static void bnxt_thor_db_tx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1325 
/*
 * Ring a Thor (P5) notification-queue doorbell, reporting cpr->cons
 * and arming the NQ when interrupts are requested.  Trailing barrier
 * covers the whole doorbell BAR (see the block comment above).
 */
static void bnxt_thor_db_nq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_NQ_ARM: DBC_DBC_TYPE_NQ);

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1346 
1347 struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn, char *dev_name)
1348 {
1349 	struct bnxt_softc_list *sc = NULL;
1350 
1351 	SLIST_FOREACH(sc, &pf_list, next) {
1352 		/* get the softc reference based on device name */
1353 		if (dev_name && !strncmp(dev_name, if_name(iflib_get_ifp(sc->softc->ctx)), BNXT_MAX_STR)) {
1354 			return sc->softc;
1355 		}
1356 		/* get the softc reference based on domain,bus,device,function */
1357 		if (!dev_name &&
1358 		    (domain == sc->softc->domain) &&
1359 		    (bus == sc->softc->bus) &&
1360 		    (dev_fn == sc->softc->dev_fn)) {
1361 			return sc->softc;
1362 
1363 		}
1364 	}
1365 
1366 	return NULL;
1367 }
1368 
1369 
1370 static void bnxt_verify_asym_queues(struct bnxt_softc *softc)
1371 {
1372 	uint8_t i, lltc = 0;
1373 
1374 	if (!softc->max_lltc)
1375 		return;
1376 
1377 	/* Verify that lossless TX and RX queues are in the same index */
1378 	for (i = 0; i < softc->max_tc; i++) {
1379 		if (BNXT_LLQ(softc->tx_q_info[i].queue_profile) &&
1380 		    BNXT_LLQ(softc->rx_q_info[i].queue_profile))
1381 			lltc++;
1382 	}
1383 	softc->max_lltc = min(softc->max_lltc, lltc);
1384 }
1385 
1386 static int bnxt_hwrm_poll(struct bnxt_softc *bp)
1387 {
1388 	struct hwrm_ver_get_output	*resp =
1389 	    (void *)bp->hwrm_cmd_resp.idi_vaddr;
1390 	struct hwrm_ver_get_input req = {0};
1391 	int rc;
1392 
1393 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET);
1394 
1395 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
1396 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
1397 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
1398 
1399 	rc = _hwrm_send_message(bp, &req, sizeof(req));
1400 	if (rc)
1401 		return rc;
1402 
1403 	if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
1404 		rc = -EAGAIN;
1405 
1406 	return rc;
1407 }
1408 
/* Take rtnl from the slow-path task without deadlocking bnxt_close(). */
static void bnxt_rtnl_lock_sp(struct bnxt_softc *bp)
{
	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
	 * set.  If the device is being closed, bnxt_close() may be holding
	 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
	 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
	 */
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_lock();
}
1419 
/* Re-mark the slow-path task active, then drop rtnl (counterpart of
 * bnxt_rtnl_lock_sp()). */
static void bnxt_rtnl_unlock_sp(struct bnxt_softc *bp)
{
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_unlock();
}
1425 
/*
 * Quiesce a fatally-errored device: mask interrupts and disable the
 * PCI device to stop any further DMA.
 */
static void bnxt_fw_fatal_close(struct bnxt_softc *softc)
{
	bnxt_disable_intr(softc->ctx);
	if (pci_is_enabled(softc->pdev))
		pci_disable_device(softc->pdev);
}
1432 
/*
 * Read one firmware health register, dispatching on where the register
 * lives: PCI config space, a pre-mapped GRC window, or BAR0/BAR1.
 * The RESET_INPROG register is additionally masked before returning.
 */
static u32 bnxt_fw_health_readl(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->regs[reg_idx];
	u32 reg_type, reg_off, val = 0;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_read_config_dword(bp->pdev, reg_off, &val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		/* GRC regs were pre-mapped into a BAR0 window by
		 * bnxt_map_fw_health_regs(); read through the window. */
		reg_off = fw_health->mapped_regs[reg_idx];
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		val = readl_fbsd(bp, reg_off, 0);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		val = readl_fbsd(bp, reg_off, 2);
		break;
	}
	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
		val &= fw_health->fw_reset_inprog_reg_mask;
	return val;
}
1459 
/*
 * Quiesce the NIC ahead of a firmware reset: stop the ULP, close the
 * interface, unregister from FW, release per-queue interrupts, disable
 * the PCI device, and free context memory.  Teardown order matters.
 */
static void bnxt_fw_reset_close(struct bnxt_softc *bp)
{
	int i;
	bnxt_ulp_stop(bp);
	/* When firmware is in fatal state, quiesce device and disable
	 * bus master to prevent any potential bad DMAs before freeing
	 * kernel memory.
	 */
	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
		u16 val = 0;

		/* An all-ones config read means the device has fallen
		 * off the bus; skip the minimum reset delay then. */
		val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
		if (val == 0xffff) {
			bp->fw_reset_min_dsecs = 0;
		}
		bnxt_fw_fatal_close(bp);
	}

	iflib_request_reset(bp->ctx);
	bnxt_stop(bp->ctx);
	bnxt_hwrm_func_drv_unrgtr(bp, false);

	/* Free per-queue interrupts: P5 chips attach them to the NQs,
	 * older chips to the RX completion rings. */
	for (i = bp->nrxqsets-1; i>=0; i--) {
		if (BNXT_CHIP_P5(bp))
			iflib_irq_free(bp->ctx, &bp->nq_rings[i].irq);
		else
			iflib_irq_free(bp->ctx, &bp->rx_cp_rings[i].irq);

	}
	if (pci_is_enabled(bp->pdev))
		pci_disable_device(bp->pdev);
	pci_disable_busmaster(bp->dev);
	bnxt_free_ctx_mem(bp);
}
1494 
1495 static bool is_bnxt_fw_ok(struct bnxt_softc *bp)
1496 {
1497 	struct bnxt_fw_health *fw_health = bp->fw_health;
1498 	bool no_heartbeat = false, has_reset = false;
1499 	u32 val;
1500 
1501 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
1502 	if (val == fw_health->last_fw_heartbeat)
1503 		no_heartbeat = true;
1504 
1505 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
1506 	if (val != fw_health->last_fw_reset_cnt)
1507 		has_reset = true;
1508 
1509 	if (!no_heartbeat && has_reset)
1510 		return true;
1511 
1512 	return false;
1513 }
1514 
/*
 * Initiate a firmware reset: close the device, pick the initial reset
 * state and polling interval, and schedule the reset task.  No-op if
 * the device is not open or a reset is already in progress.
 */
void bnxt_fw_reset(struct bnxt_softc *bp)
{
	bnxt_rtnl_lock_sp(bp);
	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		int tmo;
		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_fw_reset_close(bp);

		/* With recover-reload support, poll for FW going down;
		 * otherwise wait the FW-specified minimum time (in
		 * deciseconds) before re-enabling the device. */
		if ((bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)) {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
			tmo = HZ / 10;
		} else {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			tmo = bp->fw_reset_min_dsecs * HZ /10;
		}
		bnxt_queue_fw_reset_work(bp, tmo);
	}
	bnxt_rtnl_unlock_sp(bp);
}
1535 
1536 static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay)
1537 {
1538 	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1539 		return;
1540 
1541 	if (BNXT_PF(bp))
1542 		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1543 	else
1544 		schedule_delayed_work(&bp->fw_reset_task, delay);
1545 }
1546 
1547 void bnxt_queue_sp_work(struct bnxt_softc *bp)
1548 {
1549 	if (BNXT_PF(bp))
1550 		queue_work(bnxt_pf_wq, &bp->sp_task);
1551 	else
1552 		schedule_work(&bp->sp_task);
1553 }
1554 
/*
 * Execute one step of the FW-provided reset sequence: write the value
 * for @reg_idx to its register (PCI config space, GRC via a retargeted
 * window, or a BAR) and honor the per-step delay, if any.
 */
static void bnxt_fw_reset_writel(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
	u32 reg_type, reg_off, delay_msecs;

	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_write_config_dword(bp->pdev, reg_off, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		/* Retarget the GRC window at the register's base, then
		 * fall through to write through that window. */
		writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4, 0, reg_off & BNXT_GRC_BASE_MASK);
		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		writel_fbsd(bp, reg_off, 0, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		writel_fbsd(bp, reg_off, 2, val);
		break;
	}
	if (delay_msecs) {
		/* NOTE(review): the config read presumably flushes the
		 * posted write before the delay -- confirm. */
		pci_read_config_dword(bp->pdev, 0, &val);
		msleep(delay_msecs);
	}
}
1585 
/*
 * Reset the firmware: either execute the host-driven register write
 * sequence or send a graceful HWRM_FW_RESET to the co-processor, then
 * record when the reset started.
 */
static void bnxt_reset_all(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	int i, rc;

	/* With recover-reload support the FW resets itself; just
	 * timestamp the start. */
	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
		bp->fw_reset_timestamp = jiffies;
		return;
	}

	if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST) {
		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
			bnxt_fw_reset_writel(bp, i);
	} else if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) {
		struct hwrm_fw_reset_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET);
		req.target_id = htole16(HWRM_TARGET_ID_KONG);
		req.embedded_proc_type = HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
		req.selfrst_status = HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
		req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
		rc = hwrm_send_message(bp, &req, sizeof(req));

		/* NOTE(review): this also prints when rc == 0 -- confirm
		 * whether a successful send should be reported here. */
		if (rc != -ENODEV)
			device_printf(bp->dev, "Unable to reset FW rc=%d\n", rc);
	}
	bp->fw_reset_timestamp = jiffies;
}
1614 
1615 static int __bnxt_alloc_fw_health(struct bnxt_softc *bp)
1616 {
1617 	if (bp->fw_health)
1618 		return 0;
1619 
1620 	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
1621 	if (!bp->fw_health)
1622 		return -ENOMEM;
1623 
1624 	mutex_init(&bp->fw_health->lock);
1625 	return 0;
1626 }
1627 
1628 static int bnxt_alloc_fw_health(struct bnxt_softc *bp)
1629 {
1630 	int rc;
1631 
1632 	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
1633 	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
1634 		return 0;
1635 
1636 	rc = __bnxt_alloc_fw_health(bp);
1637 	if (rc) {
1638 		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
1639 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
1640 		return rc;
1641 	}
1642 
1643 	return 0;
1644 }
1645 
/*
 * Point the BAR0 GRC window used for health monitoring at the GRC
 * region containing @reg (only the base bits of the address are
 * programmed).
 */
static inline void __bnxt_map_fw_health_reg(struct bnxt_softc *bp, u32 reg)
{
	writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + BNXT_FW_HEALTH_WIN_MAP_OFF, 0, reg & BNXT_GRC_BASE_MASK);
}
1650 
1651 static int bnxt_map_fw_health_regs(struct bnxt_softc *bp)
1652 {
1653 	struct bnxt_fw_health *fw_health = bp->fw_health;
1654 	u32 reg_base = 0xffffffff;
1655 	int i;
1656 
1657 	bp->fw_health->status_reliable = false;
1658 	bp->fw_health->resets_reliable = false;
1659 	/* Only pre-map the monitoring GRC registers using window 3 */
1660 	for (i = 0; i < 4; i++) {
1661 		u32 reg = fw_health->regs[i];
1662 
1663 		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
1664 			continue;
1665 		if (reg_base == 0xffffffff)
1666 			reg_base = reg & BNXT_GRC_BASE_MASK;
1667 		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
1668 			return -ERANGE;
1669 		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
1670 	}
1671 	bp->fw_health->status_reliable = true;
1672 	bp->fw_health->resets_reliable = true;
1673 	if (reg_base == 0xffffffff)
1674 		return 0;
1675 
1676 	__bnxt_map_fw_health_reg(bp, reg_base);
1677 	return 0;
1678 }
1679 
1680 static void bnxt_inv_fw_health_reg(struct bnxt_softc *bp)
1681 {
1682 	struct bnxt_fw_health *fw_health = bp->fw_health;
1683 	u32 reg_type;
1684 
1685 	if (!fw_health)
1686 		return;
1687 
1688 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
1689 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
1690 		fw_health->status_reliable = false;
1691 
1692 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
1693 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
1694 		fw_health->resets_reliable = false;
1695 }
1696 
/*
 * Query the firmware's error-recovery configuration and cache it in
 * bp->fw_health: polling/wait intervals, health register locations,
 * and the host-driven reset register sequence.  On any failure the
 * ERROR_RECOVERY capability is cleared.
 */
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	struct hwrm_error_recovery_qcfg_output *resp =
	    (void *)bp->hwrm_cmd_resp.idi_vaddr;
	struct hwrm_error_recovery_qcfg_input req = {0};
	int rc, i;

	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG);
	rc = _hwrm_send_message(bp, &req, sizeof(req));

	if (rc)
		goto err_recovery_out;
	fw_health->flags = le32toh(resp->flags);
	/* CO_CPU-driven recovery requires the Kong mailbox channel. */
	if ((fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) &&
	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	fw_health->polling_dsecs = le32toh(resp->driver_polling_freq);
	fw_health->master_func_wait_dsecs =
		le32toh(resp->master_func_wait_period);
	fw_health->normal_func_wait_dsecs =
		le32toh(resp->normal_func_wait_period);
	fw_health->post_reset_wait_dsecs =
		le32toh(resp->master_func_wait_period_after_reset);
	fw_health->post_reset_max_wait_dsecs =
		le32toh(resp->max_bailout_time_after_reset);
	fw_health->regs[BNXT_FW_HEALTH_REG] =
		le32toh(resp->fw_health_status_reg);
	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
		le32toh(resp->fw_heartbeat_reg);
	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
		le32toh(resp->fw_reset_cnt_reg);
	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
		le32toh(resp->reset_inprogress_reg);
	fw_health->fw_reset_inprog_reg_mask =
		le32toh(resp->reset_inprogress_reg_mask);
	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
	/* Guard the fixed-size fw_reset_seq_* arrays. */
	if (fw_health->fw_reset_seq_cnt >= 16) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
		fw_health->fw_reset_seq_regs[i] =
			le32toh(resp->reset_reg[i]);
		fw_health->fw_reset_seq_vals[i] =
			le32toh(resp->reset_reg_val[i]);
		fw_health->fw_reset_seq_delay_msec[i] =
			le32toh(resp->delay_after_reset[i]);
	}
err_recovery_out:
	if (!rc)
		rc = bnxt_map_fw_health_regs(bp);
	if (rc)
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
	return rc;
}
1758 
/*
 * Register the driver with firmware, first probing whether error
 * recovery can be supported (health memory + recovery config).
 * Failure to set up error recovery is non-fatal; failure to register
 * returns -ENODEV.
 */
static int bnxt_drv_rgtr(struct bnxt_softc *bp)
{
	int rc;

	/* determine whether we can support error recovery before
	 * registering with FW
	 */
	if (bnxt_alloc_fw_health(bp)) {
		device_printf(bp->dev, "no memory for firmware error recovery\n");
	} else {
		rc = bnxt_hwrm_error_recovery_qcfg(bp);
		if (rc)
			device_printf(bp->dev, "hwrm query error recovery failure rc: %d\n",
				    rc);
	}
	/* TODO(review): revisit the registration parameters. */
	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
	if (rc)
		return -ENODEV;
	return 0;
}
1779 
/*
 * Return true when the current FW reset has run longer than the
 * maximum wait (fw_reset_max_dsecs, in deciseconds) since
 * fw_reset_timestamp.
 */
static bool bnxt_fw_reset_timeout(struct bnxt_softc *bp)
{
	return time_after(jiffies, bp->fw_reset_timestamp +
			  (bp->fw_reset_max_dsecs * HZ / 10));
}
1785 
/*
 * (Re)initialize the device after attach or a firmware reset: fetch
 * NVM info (PF only), query queue configuration and capabilities,
 * register the driver with FW, allocate context memory, and bring the
 * interface up.  Returns 0 on success or a (negative) error code.
 */
static int bnxt_open(struct bnxt_softc *bp)
{
	int rc = 0;
	if (BNXT_PF(bp))
		/* NOTE(review): this rc is overwritten just below
		 * without being checked -- confirm that an NVM info
		 * failure is meant to be ignored here. */
		rc = bnxt_hwrm_nvm_get_dev_info(bp, &bp->nvm_info->mfg_id,
			&bp->nvm_info->device_id, &bp->nvm_info->sector_size,
			&bp->nvm_info->size, &bp->nvm_info->reserved_size,
			&bp->nvm_info->available_size);

	/* Get the queue config */
	rc = bnxt_hwrm_queue_qportcfg(bp, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
	if (rc) {
		device_printf(bp->dev, "reinit: hwrm qportcfg (tx) failed\n");
		return rc;
	}
	if (bp->is_asym_q) {
		rc = bnxt_hwrm_queue_qportcfg(bp,
					      HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
		if (rc) {
			device_printf(bp->dev, "re-init: hwrm qportcfg (rx)  failed\n");
			return rc;
		}
		bnxt_verify_asym_queues(bp);
	} else {
		/* Symmetric config: RX mirrors the TX queue setup. */
		bp->rx_max_q = bp->tx_max_q;
		memcpy(bp->rx_q_info, bp->tx_q_info, sizeof(bp->rx_q_info));
		memcpy(bp->rx_q_ids, bp->tx_q_ids, sizeof(bp->rx_q_ids));
	}
	/* Get the HW capabilities */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	/* Register the driver with the FW */
	rc = bnxt_drv_rgtr(bp);
	if (rc)
		return rc;
	if (bp->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(bp);
		if (rc) {
			device_printf(bp->dev, "attach: alloc_ctx_mem failed\n");
			return rc;
		}
		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
	}

	if (BNXT_CHIP_P5(bp))
		bnxt_hwrm_reserve_pf_rings(bp);
	/* Get the current configuration of this function */
	rc = bnxt_hwrm_func_qcfg(bp);
	if (rc) {
		device_printf(bp->dev, "re-init: hwrm func qcfg failed\n");
		return rc;
	}

	bnxt_msix_intr_assign(bp->ctx, 0);
	bnxt_init(bp->ctx);
	bnxt_intr_enable(bp->ctx);

	/* Restart the ULP when this open completes a FW reset that was
	 * detected (rather than one we are still driving). */
	if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
		if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
			bnxt_ulp_start(bp, 0);
		}
	}

	device_printf(bp->dev, "Network interface is UP and operational\n");

	return rc;
}
1857 static void bnxt_fw_reset_abort(struct bnxt_softc *bp, int rc)
1858 {
1859 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
1860 	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
1861 		bnxt_ulp_start(bp, rc);
1862 	}
1863 	bp->fw_reset_state = 0;
1864 }
1865 
/*
 * Deferred-work state machine driving a firmware reset from start to
 * finish.  Each state either completes its step and falls through to
 * the next, or re-queues itself (bnxt_queue_fw_reset_work) to poll
 * again later.  Must only run while BNXT_STATE_IN_FW_RESET is set.
 */
static void bnxt_fw_reset_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, fw_reset_task.work);
	int rc = 0;

	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		device_printf(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
		return;
	}

	switch (bp->fw_reset_state) {
	/* Wait for the firmware to report it has shut down. */
	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
		u32 val;

		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
		    !bnxt_fw_reset_timeout(bp)) {
			/* Not down yet and not timed out: poll again in 200ms. */
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}

		if (!bp->fw_health->primary) {
			/* Non-primary functions just wait for the primary to
			 * perform the reset, then proceed to re-enable.
			 */
			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;

			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
			return;
		}
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	}
		fallthrough;
	/* Primary function: trigger the actual chip/firmware reset. */
	case BNXT_FW_RESET_STATE_RESET_FW:
		bnxt_reset_all(bp);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
		return;
	/* Re-enable PCI access to the device after the reset. */
	case BNXT_FW_RESET_STATE_ENABLE_DEV:
		bnxt_inv_fw_health_reg(bp);
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
		    !bp->fw_reset_min_dsecs) {
			u16 val;

			/* Config space reads all-ones until the device comes
			 * back; poll every millisecond up to the timeout.
			 */
			val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
			if (val == 0xffff) {
				if (bnxt_fw_reset_timeout(bp)) {
					device_printf(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
					rc = -ETIMEDOUT;
					goto fw_reset_abort;
				}
				bnxt_queue_fw_reset_work(bp, HZ / 1000);
				return;
			}
		}
		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
		if (!pci_is_enabled(bp->pdev)) {
			if (pci_enable_device(bp->pdev)) {
				device_printf(bp->dev, "Cannot re-enable PCI device\n");
				rc = -ENODEV;
				goto fw_reset_abort;
			}
		}
		pci_set_master(bp->pdev);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
		fallthrough;
	/* Poll HWRM with a short timeout until firmware answers. */
	case BNXT_FW_RESET_STATE_POLL_FW:
		bp->hwrm_cmd_timeo = SHORT_HWRM_CMD_TIMEOUT;
		rc = bnxt_hwrm_poll(bp);
		if (rc) {
			if (bnxt_fw_reset_timeout(bp)) {
				device_printf(bp->dev, "Firmware reset aborted\n");
				goto fw_reset_abort_status;
			}
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}
		bp->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
		fallthrough;
	/* Firmware is back: re-open the device and restart the ULP. */
	case BNXT_FW_RESET_STATE_OPENING:
		rc = bnxt_open(bp);
		if (rc) {
			device_printf(bp->dev, "bnxt_open() failed during FW reset\n");
			bnxt_fw_reset_abort(bp, rc);
			rtnl_unlock();
			return;
		}

		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
		    bp->fw_health->enabled) {
			/* Re-baseline the reset counter for future health checks. */
			bp->fw_health->last_fw_reset_cnt =
				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
		}
		bp->fw_reset_state = 0;
		/* Ensure fw_reset_state is visible before clearing the bit. */
		smp_mb__before_atomic();
		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_ulp_start(bp, 0);
		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
		set_bit(BNXT_STATE_OPEN, &bp->state);
		rtnl_unlock();
	}
	return;

fw_reset_abort_status:
	/* Log the firmware health status if we have a way to read it. */
	if (bp->fw_health->status_reliable ||
	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);

		device_printf(bp->dev, "fw_health_status 0x%x\n", sts);
	}
fw_reset_abort:
	rtnl_lock();
	bnxt_fw_reset_abort(bp, rc);
	rtnl_unlock();
}
1981 
/*
 * Initiate an immediate firmware reset (fatal-error path).  Closes the
 * device, selects the starting state for bnxt_fw_reset_task() based on
 * whether this function is the recovery primary, and schedules the
 * reset work after the firmware-advertised wait period.
 */
static void bnxt_force_fw_reset(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 wait_dsecs;

	/* Nothing to do unless the device is open and no reset is running. */
	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
		return;
	bnxt_fw_reset_close(bp);
	wait_dsecs = fw_health->master_func_wait_dsecs;
	if (fw_health->primary) {
		/* The primary performs the reset itself; if the recovery
		 * co-processor handles it, no extra wait is needed.
		 */
		if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
			wait_dsecs = 0;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	} else {
		/* Non-primary: wait out the primary's reset window, then
		 * go straight to re-enabling the device.
		 */
		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
		wait_dsecs = fw_health->normal_func_wait_dsecs;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
	}

	/* dsecs = deci-seconds (firmware units). */
	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
}
2006 
/*
 * Handle a detected fatal firmware condition: mark the fatal state and
 * force a firmware reset under the rtnl-style serialization lock.
 */
static void bnxt_fw_exception(struct bnxt_softc *bp)
{
	device_printf(bp->dev, "Detected firmware fatal condition, initiating reset\n");
	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
	bnxt_rtnl_lock_sp(bp);
	bnxt_force_fw_reset(bp);
	bnxt_rtnl_unlock_sp(bp);
}
2015 
2016 static void __bnxt_fw_recover(struct bnxt_softc *bp)
2017 {
2018 	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
2019 	    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
2020 		bnxt_fw_reset(bp);
2021 	else
2022 		bnxt_fw_exception(bp);
2023 }
2024 
2025 static void bnxt_devlink_health_fw_report(struct bnxt_softc *bp)
2026 {
2027 	struct bnxt_fw_health *fw_health = bp->fw_health;
2028 
2029 	if (!fw_health)
2030 		return;
2031 
2032 	if (!fw_health->fw_reporter) {
2033 		__bnxt_fw_recover(bp);
2034 		return;
2035 	}
2036 }
2037 
/*
 * Slow-path work handler: services queued firmware reset / exception
 * events.  The IN_SP_TASK bit with paired memory barriers lets other
 * contexts (e.g. close paths) detect and wait out a running instance.
 */
static void bnxt_sp_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, sp_task);

	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	/* Publish IN_SP_TASK before reading the OPEN state. */
	smp_mb__after_atomic();
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
		return;
	}

	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
		/* Fatal/non-fatal conditions go through the health-report
		 * path (which may recover); otherwise do a plain reset.
		 */
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
		    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
			bnxt_devlink_health_fw_report(bp);
		else
			bnxt_fw_reset(bp);
	}

	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
		if (!is_bnxt_fw_ok(bp))
			bnxt_devlink_health_fw_report(bp);
	}
	/* Complete all work before dropping IN_SP_TASK. */
	smp_mb__before_atomic();
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
2064 
2065 /* Device setup and teardown */
2066 static int
2067 bnxt_attach_pre(if_ctx_t ctx)
2068 {
2069 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2070 	if_softc_ctx_t scctx;
2071 	int rc = 0;
2072 
2073 	softc->ctx = ctx;
2074 	softc->dev = iflib_get_dev(ctx);
2075 	softc->media = iflib_get_media(ctx);
2076 	softc->scctx = iflib_get_softc_ctx(ctx);
2077 	softc->sctx = iflib_get_sctx(ctx);
2078 	scctx = softc->scctx;
2079 
2080 	/* TODO: Better way of detecting NPAR/VF is needed */
2081 	switch (pci_get_device(softc->dev)) {
2082 	case BCM57402_NPAR:
2083 	case BCM57404_NPAR:
2084 	case BCM57406_NPAR:
2085 	case BCM57407_NPAR:
2086 	case BCM57412_NPAR1:
2087 	case BCM57412_NPAR2:
2088 	case BCM57414_NPAR1:
2089 	case BCM57414_NPAR2:
2090 	case BCM57416_NPAR1:
2091 	case BCM57416_NPAR2:
2092 	case BCM57504_NPAR:
2093 		softc->flags |= BNXT_FLAG_NPAR;
2094 		break;
2095 	case NETXTREME_C_VF1:
2096 	case NETXTREME_C_VF2:
2097 	case NETXTREME_C_VF3:
2098 	case NETXTREME_E_VF1:
2099 	case NETXTREME_E_VF2:
2100 	case NETXTREME_E_VF3:
2101 		softc->flags |= BNXT_FLAG_VF;
2102 		break;
2103 	}
2104 
2105 	softc->domain = pci_get_domain(softc->dev);
2106 	softc->bus = pci_get_bus(softc->dev);
2107 	softc->slot = pci_get_slot(softc->dev);
2108 	softc->function = pci_get_function(softc->dev);
2109 	softc->dev_fn = PCI_DEVFN(softc->slot, softc->function);
2110 
2111 	if (bnxt_num_pfs == 0)
2112 		  SLIST_INIT(&pf_list);
2113 	bnxt_num_pfs++;
2114 	softc->list.softc = softc;
2115 	SLIST_INSERT_HEAD(&pf_list, &softc->list, next);
2116 
2117 	pci_enable_busmaster(softc->dev);
2118 
2119 	if (bnxt_pci_mapping(softc)) {
2120 		device_printf(softc->dev, "PCI mapping failed\n");
2121 		rc = ENXIO;
2122 		goto pci_map_fail;
2123 	}
2124 
2125 	softc->pdev = kzalloc(sizeof(*softc->pdev), GFP_KERNEL);
2126 	if (!softc->pdev) {
2127 		device_printf(softc->dev, "pdev alloc failed\n");
2128 		rc = -ENOMEM;
2129 		goto free_pci_map;
2130 	}
2131 
2132 	rc = linux_pci_attach_device(softc->dev, NULL, NULL, softc->pdev);
2133 	if (rc) {
2134 		device_printf(softc->dev, "Failed to attach Linux PCI device 0x%x\n", rc);
2135 		goto pci_attach_fail;
2136 	}
2137 
2138 	/* HWRM setup/init */
2139 	BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev));
2140 	rc = bnxt_alloc_hwrm_dma_mem(softc);
2141 	if (rc)
2142 		goto dma_fail;
2143 
2144 	/* Get firmware version and compare with driver */
2145 	softc->ver_info = malloc(sizeof(struct bnxt_ver_info),
2146 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2147 	if (softc->ver_info == NULL) {
2148 		rc = ENOMEM;
2149 		device_printf(softc->dev,
2150 		    "Unable to allocate space for version info\n");
2151 		goto ver_alloc_fail;
2152 	}
2153 	/* Default minimum required HWRM version */
2154 	softc->ver_info->hwrm_min_major = HWRM_VERSION_MAJOR;
2155 	softc->ver_info->hwrm_min_minor = HWRM_VERSION_MINOR;
2156 	softc->ver_info->hwrm_min_update = HWRM_VERSION_UPDATE;
2157 
2158 	rc = bnxt_hwrm_ver_get(softc);
2159 	if (rc) {
2160 		device_printf(softc->dev, "attach: hwrm ver get failed\n");
2161 		goto ver_fail;
2162 	}
2163 
2164 	/* Now perform a function reset */
2165 	rc = bnxt_hwrm_func_reset(softc);
2166 
2167 	if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
2168 	    softc->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
2169 		rc = bnxt_alloc_hwrm_short_cmd_req(softc);
2170 		if (rc)
2171 			goto hwrm_short_cmd_alloc_fail;
2172 	}
2173 
2174 	if ((softc->ver_info->chip_num == BCM57508) ||
2175 	    (softc->ver_info->chip_num == BCM57504) ||
2176 	    (softc->ver_info->chip_num == BCM57504_NPAR) ||
2177 	    (softc->ver_info->chip_num == BCM57502))
2178 		softc->flags |= BNXT_FLAG_CHIP_P5;
2179 
2180 	softc->flags |= BNXT_FLAG_TPA;
2181 
2182 	if (BNXT_CHIP_P5(softc) && (!softc->ver_info->chip_rev) &&
2183 			(!softc->ver_info->chip_metal))
2184 		softc->flags &= ~BNXT_FLAG_TPA;
2185 
2186 	if (BNXT_CHIP_P5(softc))
2187 		softc->flags &= ~BNXT_FLAG_TPA;
2188 
2189 	/* Get NVRAM info */
2190 	if (BNXT_PF(softc)) {
2191 		if (!bnxt_pf_wq) {
2192 			bnxt_pf_wq =
2193 				create_singlethread_workqueue("bnxt_pf_wq");
2194 			if (!bnxt_pf_wq) {
2195 				device_printf(softc->dev, "Unable to create workqueue.\n");
2196 				rc = -ENOMEM;
2197 				goto nvm_alloc_fail;
2198 			}
2199 		}
2200 
2201 		softc->nvm_info = malloc(sizeof(struct bnxt_nvram_info),
2202 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2203 		if (softc->nvm_info == NULL) {
2204 			rc = ENOMEM;
2205 			device_printf(softc->dev,
2206 			    "Unable to allocate space for NVRAM info\n");
2207 			goto nvm_alloc_fail;
2208 		}
2209 
2210 		rc = bnxt_hwrm_nvm_get_dev_info(softc, &softc->nvm_info->mfg_id,
2211 		    &softc->nvm_info->device_id, &softc->nvm_info->sector_size,
2212 		    &softc->nvm_info->size, &softc->nvm_info->reserved_size,
2213 		    &softc->nvm_info->available_size);
2214 	}
2215 
2216 	if (BNXT_CHIP_P5(softc)) {
2217 		softc->db_ops.bnxt_db_tx = bnxt_thor_db_tx;
2218 		softc->db_ops.bnxt_db_rx = bnxt_thor_db_rx;
2219 		softc->db_ops.bnxt_db_rx_cq = bnxt_thor_db_rx_cq;
2220 		softc->db_ops.bnxt_db_tx_cq = bnxt_thor_db_tx_cq;
2221 		softc->db_ops.bnxt_db_nq = bnxt_thor_db_nq;
2222 	} else {
2223 		softc->db_ops.bnxt_db_tx = bnxt_cuw_db_tx;
2224 		softc->db_ops.bnxt_db_rx = bnxt_cuw_db_rx;
2225 		softc->db_ops.bnxt_db_rx_cq = bnxt_cuw_db_cq;
2226 		softc->db_ops.bnxt_db_tx_cq = bnxt_cuw_db_cq;
2227 	}
2228 
2229 
2230 	/* Get the queue config */
2231 	rc = bnxt_hwrm_queue_qportcfg(softc, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
2232 	if (rc) {
2233 		device_printf(softc->dev, "attach: hwrm qportcfg (tx) failed\n");
2234 		goto failed;
2235 	}
2236 	if (softc->is_asym_q) {
2237 		rc = bnxt_hwrm_queue_qportcfg(softc,
2238 					      HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
2239 		if (rc) {
2240 			device_printf(softc->dev, "attach: hwrm qportcfg (rx)  failed\n");
2241 			return rc;
2242 		}
2243 		bnxt_verify_asym_queues(softc);
2244 	} else {
2245 		softc->rx_max_q = softc->tx_max_q;
2246 		memcpy(softc->rx_q_info, softc->tx_q_info, sizeof(softc->rx_q_info));
2247 		memcpy(softc->rx_q_ids, softc->tx_q_ids, sizeof(softc->rx_q_ids));
2248 	}
2249 
2250 	/* Get the HW capabilities */
2251 	rc = bnxt_hwrm_func_qcaps(softc);
2252 	if (rc)
2253 		goto failed;
2254 
2255 	/*
2256 	 * Register the driver with the FW
2257 	 * Register the async events with the FW
2258 	 */
2259 	rc = bnxt_drv_rgtr(softc);
2260 	if (rc)
2261 		goto failed;
2262 
2263 	if (softc->hwrm_spec_code >= 0x10803) {
2264 		rc = bnxt_alloc_ctx_mem(softc);
2265 		if (rc) {
2266 			device_printf(softc->dev, "attach: alloc_ctx_mem failed\n");
2267 			return rc;
2268 		}
2269 		rc = bnxt_hwrm_func_resc_qcaps(softc, true);
2270 		if (!rc)
2271 			softc->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
2272 	}
2273 
2274 	/* Get the current configuration of this function */
2275 	rc = bnxt_hwrm_func_qcfg(softc);
2276 	if (rc) {
2277 		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
2278 		goto failed;
2279 	}
2280 
2281 	iflib_set_mac(ctx, softc->func.mac_addr);
2282 
2283 	scctx->isc_txrx = &bnxt_txrx;
2284 	scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP |
2285 	    CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO);
2286 	scctx->isc_capabilities = scctx->isc_capenable =
2287 	    /* These are translated to hwassit bits */
2288 	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
2289 	    /* These are checked by iflib */
2290 	    IFCAP_LRO | IFCAP_VLAN_HWFILTER |
2291 	    /* These are part of the iflib mask */
2292 	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
2293 	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
2294 	    /* These likely get lost... */
2295 	    IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;
2296 
2297 	if (bnxt_wol_supported(softc))
2298 		scctx->isc_capabilities |= IFCAP_WOL_MAGIC;
2299 	bnxt_get_wol_settings(softc);
2300 	if (softc->wol)
2301 		scctx->isc_capenable |= IFCAP_WOL_MAGIC;
2302 
2303 	/* Get the queue config */
2304 	bnxt_get_wol_settings(softc);
2305 	if (BNXT_CHIP_P5(softc))
2306 		bnxt_hwrm_reserve_pf_rings(softc);
2307 	rc = bnxt_hwrm_func_qcfg(softc);
2308 	if (rc) {
2309 		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
2310 		goto failed;
2311 	}
2312 
2313 	bnxt_clear_ids(softc);
2314 	if (rc)
2315 		goto failed;
2316 
2317 	/* Now set up iflib sc */
2318 	scctx->isc_tx_nsegments = 31,
2319 	scctx->isc_tx_tso_segments_max = 31;
2320 	scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE;
2321 	scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE;
2322 	scctx->isc_vectors = softc->func.max_cp_rings;
2323 	scctx->isc_min_frame_size = BNXT_MIN_FRAME_SIZE;
2324 	scctx->isc_txrx = &bnxt_txrx;
2325 
2326 	if (scctx->isc_nrxd[0] <
2327 	    ((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2]))
2328 		device_printf(softc->dev,
2329 		    "WARNING: nrxd0 (%d) should be at least 4 * nrxd1 (%d) + nrxd2 (%d).  Driver may be unstable\n",
2330 		    scctx->isc_nrxd[0], scctx->isc_nrxd[1], scctx->isc_nrxd[2]);
2331 	if (scctx->isc_ntxd[0] < scctx->isc_ntxd[1] * 2)
2332 		device_printf(softc->dev,
2333 		    "WARNING: ntxd0 (%d) should be at least 2 * ntxd1 (%d).  Driver may be unstable\n",
2334 		    scctx->isc_ntxd[0], scctx->isc_ntxd[1]);
2335 	scctx->isc_txqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_ntxd[0];
2336 	scctx->isc_txqsizes[1] = sizeof(struct tx_bd_short) *
2337 	    scctx->isc_ntxd[1];
2338 	scctx->isc_txqsizes[2] = sizeof(struct cmpl_base) * scctx->isc_ntxd[2];
2339 	scctx->isc_rxqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_nrxd[0];
2340 	scctx->isc_rxqsizes[1] = sizeof(struct rx_prod_pkt_bd) *
2341 	    scctx->isc_nrxd[1];
2342 	scctx->isc_rxqsizes[2] = sizeof(struct rx_prod_pkt_bd) *
2343 	    scctx->isc_nrxd[2];
2344 
2345 	scctx->isc_nrxqsets_max = min(pci_msix_count(softc->dev)-1,
2346 	    softc->fn_qcfg.alloc_completion_rings - 1);
2347 	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
2348 	    softc->fn_qcfg.alloc_rx_rings);
2349 	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
2350 	    softc->fn_qcfg.alloc_vnics);
2351 	scctx->isc_ntxqsets_max = min(softc->fn_qcfg.alloc_tx_rings,
2352 	    softc->fn_qcfg.alloc_completion_rings - scctx->isc_nrxqsets_max - 1);
2353 
2354 	scctx->isc_rss_table_size = HW_HASH_INDEX_SIZE;
2355 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
2356 
2357 	/* iflib will map and release this bar */
2358 	scctx->isc_msix_bar = pci_msix_table_bar(softc->dev);
2359 
2360         /*
2361          * Default settings for HW LRO (TPA):
2362          *  Disable HW LRO by default
2363          *  Can be enabled after taking care of 'packet forwarding'
2364          */
2365 	if (softc->flags & BNXT_FLAG_TPA) {
2366 		softc->hw_lro.enable = 0;
2367 		softc->hw_lro.is_mode_gro = 0;
2368 		softc->hw_lro.max_agg_segs = 5; /* 2^5 = 32 segs */
2369 		softc->hw_lro.max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX;
2370 		softc->hw_lro.min_agg_len = 512;
2371 	}
2372 
2373 	/* Allocate the default completion ring */
2374 	softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
2375 	softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
2376 	softc->def_cp_ring.ring.softc = softc;
2377 	softc->def_cp_ring.ring.id = 0;
2378 	softc->def_cp_ring.ring.doorbell = (BNXT_CHIP_P5(softc)) ?
2379 		DB_PF_OFFSET_P5 : softc->def_cp_ring.ring.id * 0x80;
2380 	softc->def_cp_ring.ring.ring_size = PAGE_SIZE /
2381 	    sizeof(struct cmpl_base);
2382 	rc = iflib_dma_alloc(ctx,
2383 	    sizeof(struct cmpl_base) * softc->def_cp_ring.ring.ring_size,
2384 	    &softc->def_cp_ring_mem, 0);
2385 	softc->def_cp_ring.ring.vaddr = softc->def_cp_ring_mem.idi_vaddr;
2386 	softc->def_cp_ring.ring.paddr = softc->def_cp_ring_mem.idi_paddr;
2387 	iflib_config_gtask_init(ctx, &softc->def_cp_task, bnxt_def_cp_task,
2388 	    "dflt_cp");
2389 
2390 	rc = bnxt_init_sysctl_ctx(softc);
2391 	if (rc)
2392 		goto init_sysctl_failed;
2393 	if (BNXT_PF(softc)) {
2394 		rc = bnxt_create_nvram_sysctls(softc->nvm_info);
2395 		if (rc)
2396 			goto failed;
2397 	}
2398 
2399 	arc4rand(softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE, 0);
2400 	softc->vnic_info.rss_hash_type =
2401 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
2402 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
2403 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
2404 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
2405 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 |
2406 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
2407 	rc = bnxt_create_config_sysctls_pre(softc);
2408 	if (rc)
2409 		goto failed;
2410 
2411 	rc = bnxt_create_hw_lro_sysctls(softc);
2412 	if (rc)
2413 		goto failed;
2414 
2415 	rc = bnxt_create_pause_fc_sysctls(softc);
2416 	if (rc)
2417 		goto failed;
2418 
2419 	rc = bnxt_create_dcb_sysctls(softc);
2420 	if (rc)
2421 		goto failed;
2422 
2423 	set_bit(BNXT_STATE_OPEN, &softc->state);
2424 	INIT_WORK(&softc->sp_task, bnxt_sp_task);
2425 	INIT_DELAYED_WORK(&softc->fw_reset_task, bnxt_fw_reset_task);
2426 
2427 	/* Initialize the vlan list */
2428 	SLIST_INIT(&softc->vnic_info.vlan_tags);
2429 	softc->vnic_info.vlan_tag_list.idi_vaddr = NULL;
2430 	softc->state_bv = bit_alloc(BNXT_STATE_MAX, M_DEVBUF,
2431 			M_WAITOK|M_ZERO);
2432 
2433 	return (rc);
2434 
2435 failed:
2436 	bnxt_free_sysctl_ctx(softc);
2437 init_sysctl_failed:
2438 	bnxt_hwrm_func_drv_unrgtr(softc, false);
2439 	if (BNXT_PF(softc))
2440 		free(softc->nvm_info, M_DEVBUF);
2441 nvm_alloc_fail:
2442 	bnxt_free_hwrm_short_cmd_req(softc);
2443 hwrm_short_cmd_alloc_fail:
2444 ver_fail:
2445 	free(softc->ver_info, M_DEVBUF);
2446 ver_alloc_fail:
2447 	bnxt_free_hwrm_dma_mem(softc);
2448 dma_fail:
2449 	BNXT_HWRM_LOCK_DESTROY(softc);
2450 	if (softc->pdev)
2451 		linux_pci_detach_device(softc->pdev);
2452 pci_attach_fail:
2453 	kfree(softc->pdev);
2454 	softc->pdev = NULL;
2455 free_pci_map:
2456 	bnxt_pci_mapping_free(softc);
2457 pci_map_fail:
2458 	pci_disable_busmaster(softc->dev);
2459 	return (rc);
2460 }
2461 
2462 static int
2463 bnxt_attach_post(if_ctx_t ctx)
2464 {
2465 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2466 	if_t ifp = iflib_get_ifp(ctx);
2467 	int rc;
2468 
2469 	softc->ifp = ifp;
2470 	bnxt_create_config_sysctls_post(softc);
2471 
2472 	/* Update link state etc... */
2473 	rc = bnxt_probe_phy(softc);
2474 	if (rc)
2475 		goto failed;
2476 
2477 	/* Needs to be done after probing the phy */
2478 	bnxt_create_ver_sysctls(softc);
2479 	ifmedia_removeall(softc->media);
2480 	bnxt_add_media_types(softc);
2481 	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
2482 
2483 	softc->scctx->isc_max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN +
2484 	    ETHER_CRC_LEN;
2485 
2486 	softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
2487 	bnxt_dcb_init(softc);
2488 	bnxt_rdma_aux_device_init(softc);
2489 
2490 failed:
2491 	return rc;
2492 }
2493 
/*
 * iflib detach: tear down everything bnxt_attach_pre/_post created, in
 * roughly reverse order — aux device, deferred work, DCB, the PF list
 * entry, rings/IRQs, DMA memory, HWRM state and finally the PCI
 * mapping.  Always returns 0.
 */
static int
bnxt_detach(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_vlan_tag *tag;
	struct bnxt_vlan_tag *tmp;
	int i;

	bnxt_rdma_aux_device_uninit(softc);
	/* Drain deferred work before freeing anything it might touch. */
	cancel_delayed_work_sync(&softc->fw_reset_task);
	cancel_work_sync(&softc->sp_task);
	bnxt_dcb_free(softc);
	SLIST_REMOVE(&pf_list, &softc->list, bnxt_softc_list, next);
	bnxt_num_pfs--;
	bnxt_wol_config(ctx);
	bnxt_do_disable_intr(&softc->def_cp_ring);
	bnxt_free_sysctl_ctx(softc);
	bnxt_hwrm_func_reset(softc);
	bnxt_free_ctx_mem(softc);
	bnxt_clear_ids(softc);
	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
	iflib_config_gtask_deinit(&softc->def_cp_task);
	/* We need to free() these here... */
	for (i = softc->nrxqsets-1; i>=0; i--) {
		/* P5 chips take interrupts on NQs, older chips on RX CQs. */
		if (BNXT_CHIP_P5(softc))
			iflib_irq_free(ctx, &softc->nq_rings[i].irq);
		else
			iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);

	}
	iflib_dma_free(&softc->vnic_info.mc_list);
	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
	iflib_dma_free(&softc->vnic_info.rss_grp_tbl);
	if (softc->vnic_info.vlan_tag_list.idi_vaddr)
		iflib_dma_free(&softc->vnic_info.vlan_tag_list);
	SLIST_FOREACH_SAFE(tag, &softc->vnic_info.vlan_tags, next, tmp)
		free(tag, M_DEVBUF);
	iflib_dma_free(&softc->def_cp_ring_mem);
	for (i = 0; i < softc->nrxqsets; i++)
		free(softc->rx_rings[i].tpa_start, M_DEVBUF);
	free(softc->ver_info, M_DEVBUF);
	/* nvm_info is only allocated on PFs (see bnxt_attach_pre). */
	if (BNXT_PF(softc))
		free(softc->nvm_info, M_DEVBUF);

	bnxt_hwrm_func_drv_unrgtr(softc, false);
	bnxt_free_hwrm_dma_mem(softc);
	bnxt_free_hwrm_short_cmd_req(softc);
	BNXT_HWRM_LOCK_DESTROY(softc);

	/* Last PF out destroys the shared workqueue. */
	if (!bnxt_num_pfs && bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);

	if (softc->pdev)
		linux_pci_detach_device(softc->pdev);
	free(softc->state_bv, M_DEVBUF);
	pci_disable_busmaster(softc->dev);
	bnxt_pci_mapping_free(softc);

	return 0;
}
2554 
/*
 * Free all HWRM-allocated resources for this function: default
 * completion ring, per-TXQ rings/stat contexts, filters, VNIC + RSS
 * context, and per-RXQ ring groups / AGG / RX / CQ / NQ rings.  The
 * ordering matters to the firmware; any failure aborts the sequence.
 */
static void
bnxt_hwrm_resource_free(struct bnxt_softc *softc)
{
	int i, rc = 0;

	rc = bnxt_hwrm_ring_free(softc,
			HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
			&softc->def_cp_ring.ring,
			(uint16_t)HWRM_NA_SIGNATURE);
	if (rc)
		goto fail;

	for (i = 0; i < softc->ntxqsets; i++) {
		/* TX ring first (references its completion ring)... */
		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				&softc->tx_rings[i],
				softc->tx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		/* ...then the completion ring and its stats context. */
		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->tx_cp_rings[i].ring,
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->tx_cp_rings[i]);
		if (rc)
			goto fail;
	}
	rc = bnxt_hwrm_free_filter(softc);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_free(softc, &softc->vnic_info);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_ctx_free(softc, softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	for (i = 0; i < softc->nrxqsets; i++) {
		rc = bnxt_hwrm_ring_grp_free(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
				&softc->ag_rings[i],
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
				&softc->rx_rings[i],
				softc->rx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->rx_cp_rings[i].ring,
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		/* Notification queues exist only on P5-generation chips. */
		if (BNXT_CHIP_P5(softc)) {
			rc = bnxt_hwrm_ring_free(softc,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
					&softc->nq_rings[i].ring,
					(uint16_t)HWRM_NA_SIGNATURE);
			if (rc)
				goto fail;
		}

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->rx_cp_rings[i]);
		if (rc)
			goto fail;
	}

fail:
	return;
}
2641 
2642 
2643 static void
2644 bnxt_func_reset(struct bnxt_softc *softc)
2645 {
2646 
2647 	if (!BNXT_CHIP_P5(softc)) {
2648 		bnxt_hwrm_func_reset(softc);
2649 		return;
2650 	}
2651 
2652 	bnxt_hwrm_resource_free(softc);
2653 	return;
2654 }
2655 
2656 static void
2657 bnxt_rss_grp_tbl_init(struct bnxt_softc *softc)
2658 {
2659 	uint16_t *rgt = (uint16_t *) softc->vnic_info.rss_grp_tbl.idi_vaddr;
2660 	int i, j;
2661 
2662 	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
2663 		if (BNXT_CHIP_P5(softc)) {
2664 			rgt[i++] = htole16(softc->rx_rings[j].phys_id);
2665 			rgt[i] = htole16(softc->rx_cp_rings[j].ring.phys_id);
2666 		} else {
2667 			rgt[i] = htole16(softc->grp_info[j].grp_id);
2668 		}
2669 		if (++j == softc->nrxqsets)
2670 			j = 0;
2671 	}
2672 }
2673 
2674 static void bnxt_get_port_module_status(struct bnxt_softc *softc)
2675 {
2676 	struct bnxt_link_info *link_info = &softc->link_info;
2677 	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
2678 	uint8_t module_status;
2679 
2680 	if (bnxt_update_link(softc, false))
2681 		return;
2682 
2683 	module_status = link_info->module_status;
2684 	switch (module_status) {
2685 	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX:
2686 	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN:
2687 	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG:
2688 		device_printf(softc->dev, "Unqualified SFP+ module detected on port %d\n",
2689 			    softc->pf.port_id);
2690 		if (softc->hwrm_spec_code >= 0x10201) {
2691 			device_printf(softc->dev, "Module part number %s\n",
2692 				    resp->phy_vendor_partnumber);
2693 		}
2694 		if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX)
2695 			device_printf(softc->dev, "TX is disabled\n");
2696 		if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN)
2697 			device_printf(softc->dev, "SFP+ module is shutdown\n");
2698 	}
2699 }
2700 
/*
 * Free the RoCE auxiliary device descriptor and NULL the pointer so
 * later checks (IS_ERR_OR_NULL) see it as absent.
 */
static void bnxt_aux_dev_free(struct bnxt_softc *softc)
{
	kfree(softc->aux_dev);
	softc->aux_dev = NULL;
}
2706 
2707 static struct bnxt_aux_dev *bnxt_aux_dev_init(struct bnxt_softc *softc)
2708 {
2709 	struct bnxt_aux_dev *bnxt_adev;
2710 
2711 	msleep(1000 * 2);
2712 	bnxt_adev = kzalloc(sizeof(*bnxt_adev), GFP_KERNEL);
2713 	if (!bnxt_adev)
2714 		return ERR_PTR(-ENOMEM);
2715 
2716 	return bnxt_adev;
2717 }
2718 
2719 static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc)
2720 {
2721 	struct bnxt_aux_dev *bnxt_adev = softc->aux_dev;
2722 
2723 	/* Skip if no auxiliary device init was done. */
2724 	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
2725 		return;
2726 
2727 	if (IS_ERR_OR_NULL(bnxt_adev))
2728 		return;
2729 
2730 	bnxt_rdma_aux_device_del(softc);
2731 
2732 	if (bnxt_adev->id >= 0)
2733 		ida_free(&bnxt_aux_dev_ids, bnxt_adev->id);
2734 
2735 	bnxt_aux_dev_free(softc);
2736 }
2737 
2738 static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc)
2739 {
2740 	int rc;
2741 
2742 	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
2743 		return;
2744 
2745 	softc->aux_dev = bnxt_aux_dev_init(softc);
2746 	if (IS_ERR_OR_NULL(softc->aux_dev)) {
2747 		device_printf(softc->dev, "Failed to init auxiliary device for ROCE\n");
2748 		goto skip_aux_init;
2749 	}
2750 
2751 	softc->aux_dev->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
2752 	if (softc->aux_dev->id < 0) {
2753 		device_printf(softc->dev, "ida alloc failed for ROCE auxiliary device\n");
2754 		bnxt_aux_dev_free(softc);
2755 		goto skip_aux_init;
2756 	}
2757 
2758 	msleep(1000 * 2);
2759 	/* If aux bus init fails, continue with netdev init. */
2760 	rc = bnxt_rdma_aux_device_add(softc);
2761 	if (rc) {
2762 		device_printf(softc->dev, "Failed to add auxiliary device for ROCE\n");
2763 		msleep(1000 * 2);
2764 		ida_free(&bnxt_aux_dev_ids, softc->aux_dev->id);
2765 	}
2766 	device_printf(softc->dev, "%s:%d Added auxiliary device (id %d) for ROCE \n",
2767 		      __func__, __LINE__, softc->aux_dev->id);
2768 skip_aux_init:
2769 	return;
2770 }
2771 
/*
 * Device configuration
 *
 * iflib init handler.  Brings the port up by allocating all firmware-side
 * resources in order: (non-P5 only) function reset and default completion
 * ring, then per-RX-queue stats context / NQ (P5) / completion ring /
 * RX ring / aggregation ring / ring group, then the async notification
 * ring, VNIC + RSS configuration and filters, and finally the per-TX-queue
 * stats context / completion ring / TX ring.  On any HWRM failure the
 * function is reset and all cached ids are invalidated.
 */
static void
bnxt_init(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifmediareq ifmr;
	int i;
	int rc;

	if (!BNXT_CHIP_P5(softc)) {
		rc = bnxt_hwrm_func_reset(softc);
		if (rc)
			return;
	} else if (softc->is_dev_init) {
		/* P5: re-init path tears down the previous state first. */
		bnxt_stop(ctx);
	}

	softc->is_dev_init = true;
	bnxt_clear_ids(softc);

	/* P5 chips use NQs instead of a default completion ring. */
	if (BNXT_CHIP_P5(softc))
		goto skip_def_cp_ring;
	/* Allocate the default completion ring */
	softc->def_cp_ring.cons = UINT32_MAX;
	softc->def_cp_ring.v_bit = 1;
	bnxt_mark_cpr_invalid(&softc->def_cp_ring);
	rc = bnxt_hwrm_ring_alloc(softc,
			HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
			&softc->def_cp_ring.ring);
	if (rc)
		goto fail;
skip_def_cp_ring:
	for (i = 0; i < softc->nrxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->rx_cp_rings[i],
		    softc->rx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5(softc)) {
			/* Allocate the NQ */
			softc->nq_rings[i].cons = 0;
			softc->nq_rings[i].v_bit = 1;
			softc->nq_rings[i].last_idx = UINT32_MAX;
			bnxt_mark_cpr_invalid(&softc->nq_rings[i]);
			rc = bnxt_hwrm_ring_alloc(softc,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
					&softc->nq_rings[i].ring);
			if (rc)
				goto fail;

			/* Arm the NQ doorbell immediately after allocation. */
			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
		}
		/* Allocate the completion ring */
		softc->rx_cp_rings[i].cons = UINT32_MAX;
		softc->rx_cp_rings[i].v_bit = 1;
		softc->rx_cp_rings[i].last_idx = UINT32_MAX;
		bnxt_mark_cpr_invalid(&softc->rx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->rx_cp_rings[i].ring);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5(softc))
			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);

		/* Allocate the RX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->rx_rings[i]);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->rx_rings[i], 0);

		/* Allocate the AG ring */
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
				&softc->ag_rings[i]);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->ag_rings[i], 0);

		/* Allocate the ring group */
		softc->grp_info[i].stats_ctx =
		    softc->rx_cp_rings[i].stats_ctx_id;
		softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
		softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
		softc->grp_info[i].cp_ring_id =
		    softc->rx_cp_rings[i].ring.phys_id;
		rc = bnxt_hwrm_ring_grp_alloc(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;
	}

	/* And now set the default CP / NQ ring for the async */
	rc = bnxt_cfg_async_cr(softc);
	if (rc)
		goto fail;

	/* Allocate the VNIC RSS context */
	rc = bnxt_hwrm_vnic_ctx_alloc(softc, &softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	/* Allocate the vnic */
	softc->vnic_info.def_ring_grp = softc->grp_info[0].grp_id;
	softc->vnic_info.mru = softc->scctx->isc_max_frame_size;
	rc = bnxt_hwrm_vnic_alloc(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_set_hds(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_set_filter(softc);
	if (rc)
		goto fail;

	bnxt_rss_grp_tbl_init(softc);

	rc = bnxt_hwrm_rss_cfg(softc, &softc->vnic_info,
	    softc->vnic_info.rss_hash_type);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_tpa_cfg(softc);
	if (rc)
		goto fail;

	for (i = 0; i < softc->ntxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->tx_cp_rings[i],
		    softc->tx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		/* Allocate the completion ring */
		softc->tx_cp_rings[i].cons = UINT32_MAX;
		softc->tx_cp_rings[i].v_bit = 1;
		bnxt_mark_cpr_invalid(&softc->tx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->tx_cp_rings[i].ring);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5(softc))
			softc->db_ops.bnxt_db_tx_cq(&softc->tx_cp_rings[i], 1);

		/* Allocate the TX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				&softc->tx_rings[i]);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_tx(&softc->tx_rings[i], 0);
	}

	/* Everything allocated: arm interrupts and refresh link/rx-mask. */
	bnxt_do_enable_intr(&softc->def_cp_ring);
	bnxt_get_port_module_status(softc);
	bnxt_media_status(softc->ctx, &ifmr);
	bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
	return;

fail:
	/* Reset the function so firmware releases everything allocated. */
	bnxt_func_reset(softc);
	bnxt_clear_ids(softc);
	return;
}
2943 
2944 static void
2945 bnxt_stop(if_ctx_t ctx)
2946 {
2947 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2948 
2949 	softc->is_dev_init = false;
2950 	bnxt_do_disable_intr(&softc->def_cp_ring);
2951 	bnxt_func_reset(softc);
2952 	bnxt_clear_ids(softc);
2953 	return;
2954 }
2955 
2956 static u_int
2957 bnxt_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2958 {
2959 	uint8_t *mta = arg;
2960 
2961 	if (cnt == BNXT_MAX_MC_ADDRS)
2962 		return (1);
2963 
2964 	bcopy(LLADDR(sdl), &mta[cnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
2965 
2966 	return (1);
2967 }
2968 
2969 static void
2970 bnxt_multi_set(if_ctx_t ctx)
2971 {
2972 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2973 	if_t ifp = iflib_get_ifp(ctx);
2974 	uint8_t *mta;
2975 	int mcnt;
2976 
2977 	mta = softc->vnic_info.mc_list.idi_vaddr;
2978 	bzero(mta, softc->vnic_info.mc_list.idi_size);
2979 	mcnt = if_foreach_llmaddr(ifp, bnxt_copy_maddr, mta);
2980 
2981 	if (mcnt > BNXT_MAX_MC_ADDRS) {
2982 		softc->vnic_info.rx_mask |=
2983 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
2984 		bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
2985 	} else {
2986 		softc->vnic_info.rx_mask &=
2987 		    ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
2988 		bus_dmamap_sync(softc->vnic_info.mc_list.idi_tag,
2989 		    softc->vnic_info.mc_list.idi_map, BUS_DMASYNC_PREWRITE);
2990 		softc->vnic_info.mc_list_count = mcnt;
2991 		softc->vnic_info.rx_mask |=
2992 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
2993 		if (bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info))
2994 			device_printf(softc->dev,
2995 			    "set_multi: rx_mask set failed\n");
2996 	}
2997 }
2998 
2999 static int
3000 bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu)
3001 {
3002 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3003 
3004 	if (mtu > BNXT_MAX_MTU)
3005 		return EINVAL;
3006 
3007 	softc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3008 	softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
3009 	return 0;
3010 }
3011 
/*
 * iflib media-status handler: refresh link state from firmware and
 * translate it into ifmediareq status/active bits (link, duplex,
 * active media word, and pause flags).
 */
static void
bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_link_info *link_info = &softc->link_info;
	struct ifmedia_entry *next;
	uint64_t target_baudrate = bnxt_get_baudrate(link_info);
	int active_media = IFM_UNKNOWN;

	/* Query firmware for the current link state before reporting. */
	bnxt_update_link(softc, true);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (link_info->link_up)
		ifmr->ifm_status |= IFM_ACTIVE;
	else
		ifmr->ifm_status &= ~IFM_ACTIVE;

	if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	/*
	 * Go through the list of supported media which got prepared
	 * as part of bnxt_add_media_types() using api ifmedia_add().
	 * The first entry whose baudrate matches the current link
	 * speed is reported as the active media word.
	 */
	LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
		if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
			active_media = next->ifm_media;
			break;
		}
	}
	ifmr->ifm_active |= active_media;

	if (link_info->flow_ctrl.rx)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (link_info->flow_ctrl.tx)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	bnxt_report_link(softc);
	return;
}
3056 
/*
 * iflib media-change handler: translate the requested ifmedia subtype
 * into either a forced link speed (autoneg cleared) or an advertised
 * speed mask (autoneg set), then push the new settings to firmware and
 * refresh the reported media status.
 */
static int
bnxt_media_change(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);
	struct ifmediareq ifmr;
	int rc;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	/*
	 * NOTE(review): req_signal_mode is initialized to PAM4 for every
	 * media type; the NRZ cases below never reset it to an NRZ value
	 * (only the PAM4 cases re-assign it, to the same constant) --
	 * confirm this is the intended behavior.
	 */
	softc->link_info.req_signal_mode =
			HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_100_T:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB;
		break;
	case IFM_1000_KX:
	case IFM_1000_SGMII:
	case IFM_1000_CX:
	case IFM_1000_SX:
	case IFM_1000_LX:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB;
		break;
	case IFM_2500_KX:
	case IFM_2500_T:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB;
		break;
	case IFM_10G_CR1:
	case IFM_10G_KR:
	case IFM_10G_LR:
	case IFM_10G_SR:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case IFM_20G_KR2:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB;
		break;
	case IFM_25G_CR:
	case IFM_25G_KR:
	case IFM_25G_SR:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;
		break;
	case IFM_40G_CR4:
	case IFM_40G_KR4:
	case IFM_40G_LR4:
	case IFM_40G_SR4:
	case IFM_40G_XLAUI:
	case IFM_40G_XLAUI_AC:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case IFM_50G_CR2:
	case IFM_50G_KR2:
	case IFM_50G_SR2:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	/* 50G single-lane media use PAM4 signaling. */
	case IFM_50G_CP:
	case IFM_50G_LR:
	case IFM_50G_SR:
	case IFM_50G_KR_PAM4:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB;
		softc->link_info.req_signal_mode =
			HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;
		softc->link_info.force_pam4_speed_set_by_user = true;
		break;
	case IFM_100G_CR4:
	case IFM_100G_KR4:
	case IFM_100G_LR4:
	case IFM_100G_SR4:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
		break;
	case IFM_100G_CP2:
	case IFM_100G_SR2:
	case IFM_100G_KR_PAM4:
	case IFM_100G_KR2_PAM4:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB;
		softc->link_info.req_signal_mode =
			HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;
		softc->link_info.force_pam4_speed_set_by_user = true;
		break;
	case IFM_200G_SR4:
	case IFM_200G_FR4:
	case IFM_200G_LR4:
	case IFM_200G_DR4:
	case IFM_200G_CR4_PAM4:
	case IFM_200G_KR4_PAM4:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
		softc->link_info.force_pam4_speed_set_by_user = true;
		softc->link_info.req_signal_mode =
			HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;
		break;
	/* BASE-T media autonegotiate with a single-speed advertisement. */
	case IFM_1000_T:
		softc->link_info.advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
		softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
		break;
	case IFM_10G_T:
		softc->link_info.advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
		softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
		break;
	default:
		device_printf(softc->dev,
		    "Unsupported media type!  Using auto\n");
		/* Fall-through */
	case IFM_AUTO:
		// Auto
		softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
		break;
	}
	rc = bnxt_hwrm_set_link_setting(softc, true, true, true);
	bnxt_media_status(softc->ctx, &ifmr);
	return rc;
}
3193 
3194 static int
3195 bnxt_promisc_set(if_ctx_t ctx, int flags)
3196 {
3197 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3198 	if_t ifp = iflib_get_ifp(ctx);
3199 	int rc;
3200 
3201 	if (if_getflags(ifp) & IFF_ALLMULTI ||
3202 	    if_llmaddr_count(ifp) > BNXT_MAX_MC_ADDRS)
3203 		softc->vnic_info.rx_mask |=
3204 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3205 	else
3206 		softc->vnic_info.rx_mask &=
3207 		    ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3208 
3209 	if (if_getflags(ifp) & IFF_PROMISC)
3210 		softc->vnic_info.rx_mask |=
3211 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS |
3212 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
3213 	else
3214 		softc->vnic_info.rx_mask &=
3215 		    ~(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS);
3216 
3217 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
3218 
3219 	return rc;
3220 }
3221 
3222 static uint64_t
3223 bnxt_get_counter(if_ctx_t ctx, ift_counter cnt)
3224 {
3225 	if_t ifp = iflib_get_ifp(ctx);
3226 
3227 	if (cnt < IFCOUNTERS)
3228 		return if_get_counter_default(ifp, cnt);
3229 
3230 	return 0;
3231 }
3232 
3233 static void
3234 bnxt_update_admin_status(if_ctx_t ctx)
3235 {
3236 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3237 
3238 	/*
3239 	 * When SR-IOV is enabled, avoid each VF sending this HWRM
3240 	 * request every sec with which firmware timeouts can happen
3241 	 */
3242 	if (!BNXT_PF(softc))
3243 		return;
3244 
3245 	bnxt_hwrm_port_qstats(softc);
3246 
3247 	if (BNXT_CHIP_P5(softc) &&
3248 	    (softc->flags & BNXT_FLAG_FW_CAP_EXT_STATS))
3249 		bnxt_hwrm_port_qstats_ext(softc);
3250 
3251 	if (BNXT_CHIP_P5(softc)) {
3252 		struct ifmediareq ifmr;
3253 
3254 		if (bit_test(softc->state_bv, BNXT_STATE_LINK_CHANGE)) {
3255 			bit_clear(softc->state_bv, BNXT_STATE_LINK_CHANGE);
3256 			bnxt_media_status(softc->ctx, &ifmr);
3257 		}
3258 	}
3259 
3260 	return;
3261 }
3262 
3263 static void
3264 bnxt_if_timer(if_ctx_t ctx, uint16_t qid)
3265 {
3266 
3267 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3268 	uint64_t ticks_now = ticks;
3269 
3270         /* Schedule bnxt_update_admin_status() once per sec */
3271 	if (ticks_now - softc->admin_ticks >= hz) {
3272 		softc->admin_ticks = ticks_now;
3273 		iflib_admin_intr_deferred(ctx);
3274 	}
3275 
3276 	return;
3277 }
3278 
3279 static void inline
3280 bnxt_do_enable_intr(struct bnxt_cp_ring *cpr)
3281 {
3282 	struct bnxt_softc *softc = cpr->ring.softc;
3283 
3284 	if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
3285 		return;
3286 
3287 	if (BNXT_CHIP_P5(softc))
3288 		softc->db_ops.bnxt_db_nq(cpr, 1);
3289 	else
3290 		softc->db_ops.bnxt_db_rx_cq(cpr, 1);
3291 }
3292 
3293 static void inline
3294 bnxt_do_disable_intr(struct bnxt_cp_ring *cpr)
3295 {
3296 	struct bnxt_softc *softc = cpr->ring.softc;
3297 
3298 	if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
3299 		return;
3300 
3301 	if (BNXT_CHIP_P5(softc))
3302 		softc->db_ops.bnxt_db_nq(cpr, 0);
3303 	else
3304 		softc->db_ops.bnxt_db_rx_cq(cpr, 0);
3305 }
3306 
3307 /* Enable all interrupts */
3308 static void
3309 bnxt_intr_enable(if_ctx_t ctx)
3310 {
3311 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3312 	int i;
3313 
3314 	bnxt_do_enable_intr(&softc->def_cp_ring);
3315 	for (i = 0; i < softc->nrxqsets; i++)
3316 		if (BNXT_CHIP_P5(softc))
3317 			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
3318 		else
3319 			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);
3320 
3321 	return;
3322 }
3323 
3324 /* Enable interrupt for a single queue */
3325 static int
3326 bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
3327 {
3328 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3329 
3330 	if (BNXT_CHIP_P5(softc))
3331 		softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
3332 	else
3333 		softc->db_ops.bnxt_db_rx_cq(&softc->tx_cp_rings[qid], 1);
3334 
3335 	return 0;
3336 }
3337 
3338 static void
3339 bnxt_process_cmd_cmpl(struct bnxt_softc *softc, hwrm_cmpl_t *cmd_cmpl)
3340 {
3341 	device_printf(softc->dev, "cmd sequence number %d\n",
3342 			cmd_cmpl->sequence_id);
3343 	return;
3344 }
3345 
3346 static void
3347 bnxt_process_async_msg(struct bnxt_cp_ring *cpr, tx_cmpl_t *cmpl)
3348 {
3349 	struct bnxt_softc *softc = cpr->ring.softc;
3350 	uint16_t type = cmpl->flags_type & TX_CMPL_TYPE_MASK;
3351 
3352 	switch (type) {
3353 	case HWRM_CMPL_TYPE_HWRM_DONE:
3354 		bnxt_process_cmd_cmpl(softc, (hwrm_cmpl_t *)cmpl);
3355 		break;
3356 	case HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT:
3357 		bnxt_handle_async_event(softc, (cmpl_base_t *) cmpl);
3358 		break;
3359 	default:
3360 		device_printf(softc->dev, "%s:%d Unhandled async message %x\n",
3361 				__FUNCTION__, __LINE__, type);
3362 		break;
3363 	}
3364 }
3365 
/*
 * Drain the notification queue (NQ) for queue nqid.
 *
 * Walks entries from the cached consumer index, using the ring's
 * phase bit (v_bit) to detect entries newly written by hardware.
 * CQ-notification entries are not processed here; every other entry
 * type is forwarded to bnxt_process_async_msg().  The cached consumer
 * index and phase are written back only if at least one entry was
 * consumed.
 */
void
process_nq(struct bnxt_softc *softc, uint16_t nqid)
{
	struct bnxt_cp_ring *cpr = &softc->nq_rings[nqid];
	nq_cn_t *cmp = (nq_cn_t *) cpr->ring.vaddr;
	bool v_bit = cpr->v_bit;
	uint32_t cons = cpr->cons;
	uint16_t nq_type, nqe_cnt = 0;

	while (1) {
		/* Stop at the first entry hardware has not written yet. */
		if (!NQ_VALID(&cmp[cons], v_bit))
			goto done;

		nq_type = NQ_CN_TYPE_MASK & cmp[cons].type;

		if (nq_type != NQ_CN_TYPE_CQ_NOTIFICATION)
			 bnxt_process_async_msg(cpr, (tx_cmpl_t *)&cmp[cons]);

		/* Advance the consumer, flipping v_bit on wrap. */
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		nqe_cnt++;
	}
done:
	if (nqe_cnt) {
		cpr->cons = cons;
		cpr->v_bit = v_bit;
	}
}
3393 
3394 static int
3395 bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
3396 {
3397 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3398 
3399 	if (BNXT_CHIP_P5(softc)) {
3400 		process_nq(softc, qid);
3401 		softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
3402 	}
3403 	softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[qid], 1);
3404         return 0;
3405 }
3406 
3407 /* Disable all interrupts */
3408 static void
3409 bnxt_disable_intr(if_ctx_t ctx)
3410 {
3411 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3412 	int i;
3413 
3414 	/*
3415 	 * NOTE: These TX interrupts should never get enabled, so don't
3416 	 * update the index
3417 	 */
3418 	for (i = 0; i < softc->nrxqsets; i++)
3419 		if (BNXT_CHIP_P5(softc))
3420 			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 0);
3421 		else
3422 			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 0);
3423 
3424 
3425 	return;
3426 }
3427 
3428 static int
3429 bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
3430 {
3431 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3432 	struct bnxt_cp_ring *ring;
3433 	struct if_irq *irq;
3434 	uint16_t id;
3435 	int rc;
3436 	int i;
3437 	char irq_name[16];
3438 
3439 	if (BNXT_CHIP_P5(softc))
3440 		goto skip_default_cp;
3441 
3442 	rc = iflib_irq_alloc_generic(ctx, &softc->def_cp_ring.irq,
3443 	    softc->def_cp_ring.ring.id + 1, IFLIB_INTR_ADMIN,
3444 	    bnxt_handle_def_cp, softc, 0, "def_cp");
3445 	if (rc) {
3446 		device_printf(iflib_get_dev(ctx),
3447 		    "Failed to register default completion ring handler\n");
3448 		return rc;
3449 	}
3450 
3451 skip_default_cp:
3452 	for (i=0; i<softc->scctx->isc_nrxqsets; i++) {
3453 		if (BNXT_CHIP_P5(softc)) {
3454 			irq = &softc->nq_rings[i].irq;
3455 			id = softc->nq_rings[i].ring.id;
3456 			ring = &softc->nq_rings[i];
3457 		} else {
3458 			irq = &softc->rx_cp_rings[i].irq;
3459 			id = softc->rx_cp_rings[i].ring.id ;
3460 			ring = &softc->rx_cp_rings[i];
3461 		}
3462 		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
3463 		rc = iflib_irq_alloc_generic(ctx, irq, id + 1, IFLIB_INTR_RX,
3464 				bnxt_handle_isr, ring, i, irq_name);
3465 		if (rc) {
3466 			device_printf(iflib_get_dev(ctx),
3467 			    "Failed to register RX completion ring handler\n");
3468 			i--;
3469 			goto fail;
3470 		}
3471 	}
3472 
3473 	for (i=0; i<softc->scctx->isc_ntxqsets; i++)
3474 		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, "tx_cp");
3475 
3476 	return rc;
3477 
3478 fail:
3479 	for (; i>=0; i--)
3480 		iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
3481 	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
3482 	return rc;
3483 }
3484 
3485 /*
3486  * We're explicitly allowing duplicates here.  They will need to be
3487  * removed as many times as they are added.
3488  */
3489 static void
3490 bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag)
3491 {
3492 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3493 	struct bnxt_vlan_tag *new_tag;
3494 
3495 	new_tag = malloc(sizeof(struct bnxt_vlan_tag), M_DEVBUF, M_NOWAIT);
3496 	if (new_tag == NULL)
3497 		return;
3498 	new_tag->tag = vtag;
3499 	new_tag->filter_id = -1;
3500 	SLIST_INSERT_HEAD(&softc->vnic_info.vlan_tags, new_tag, next);
3501 };
3502 
3503 static void
3504 bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
3505 {
3506 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3507 	struct bnxt_vlan_tag *vlan_tag;
3508 
3509 	SLIST_FOREACH(vlan_tag, &softc->vnic_info.vlan_tags, next) {
3510 		if (vlan_tag->tag == vtag) {
3511 			SLIST_REMOVE(&softc->vnic_info.vlan_tags, vlan_tag,
3512 			    bnxt_vlan_tag, next);
3513 			free(vlan_tag, M_DEVBUF);
3514 			break;
3515 		}
3516 	}
3517 }
3518 
3519 static int
3520 bnxt_wol_config(if_ctx_t ctx)
3521 {
3522 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3523 	if_t ifp = iflib_get_ifp(ctx);
3524 
3525 	if (!softc)
3526 		return -EBUSY;
3527 
3528 	if (!bnxt_wol_supported(softc))
3529 		return -ENOTSUP;
3530 
3531 	if (if_getcapenable(ifp) & IFCAP_WOL_MAGIC) {
3532 		if (!softc->wol) {
3533 			if (bnxt_hwrm_alloc_wol_fltr(softc))
3534 				return -EBUSY;
3535 			softc->wol = 1;
3536 		}
3537 	} else {
3538 		if (softc->wol) {
3539 			if (bnxt_hwrm_free_wol_fltr(softc))
3540 				return -EBUSY;
3541 			softc->wol = 0;
3542 		}
3543 	}
3544 
3545 	return 0;
3546 }
3547 
3548 static bool
3549 bnxt_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
3550 {
3551 	switch (event) {
3552 	case IFLIB_RESTART_VLAN_CONFIG:
3553 	default:
3554 		return (false);
3555 	}
3556 }
3557 
3558 static int
3559 bnxt_shutdown(if_ctx_t ctx)
3560 {
3561 	bnxt_wol_config(ctx);
3562 	return 0;
3563 }
3564 
3565 static int
3566 bnxt_suspend(if_ctx_t ctx)
3567 {
3568 	bnxt_wol_config(ctx);
3569 	return 0;
3570 }
3571 
3572 static int
3573 bnxt_resume(if_ctx_t ctx)
3574 {
3575 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3576 
3577 	bnxt_get_wol_settings(softc);
3578 	return 0;
3579 }
3580 
/*
 * Private ioctl handler (SIOCGPRIVATE_0) for NVM and firmware
 * management operations.
 *
 * The caller's request buffer (described by ifr) is copied into a local
 * bnxt_ioctl_data and dispatched on iod->hdr.type.  Each sub-command
 * follows the same copyout convention: on sub-command failure only the
 * rc field of the user's header is updated; on success the whole
 * request structure is copied back.  All commands require PRIV_DRIVER.
 */
static int
bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifreq *ifr = (struct ifreq *)data;
	struct bnxt_ioctl_header *ioh;
	size_t iol;
	int rc = ENOTSUP;
	struct bnxt_ioctl_data iod_storage, *iod = &iod_storage;

	switch (command) {
	case SIOCGPRIVATE_0:
		if ((rc = priv_check(curthread, PRIV_DRIVER)) != 0)
			goto exit;

		/* Bound the user-supplied length before copying in. */
		ioh = ifr_buffer_get_buffer(ifr);
		iol = ifr_buffer_get_length(ifr);
		if (iol > sizeof(iod_storage))
			return (EINVAL);

		if ((rc = copyin(ioh, iod, iol)) != 0)
			goto exit;

		switch (iod->hdr.type) {
		case BNXT_HWRM_NVM_FIND_DIR_ENTRY:
		{
			struct bnxt_ioctl_hwrm_nvm_find_dir_entry *find =
			    &iod->find;

			rc = bnxt_hwrm_nvm_find_dir_entry(softc, find->type,
			    &find->ordinal, find->ext, &find->index,
			    find->use_index, find->search_opt,
			    &find->data_length, &find->item_length,
			    &find->fw_ver);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_READ:
		{
			struct bnxt_ioctl_hwrm_nvm_read *rd = &iod->read;
			struct iflib_dma_info dma_data;
			size_t offset;
			size_t remain;
			size_t csize;

			/*
			 * Some HWRM versions can't read more than 0x8000 bytes
			 */
			rc = iflib_dma_alloc(softc->ctx,
			    min(rd->length, 0x8000), &dma_data, BUS_DMA_NOWAIT);
			if (rc)
				break;
			/* Read in 0x8000-byte chunks, reusing one DMA buffer. */
			for (remain = rd->length, offset = 0;
			    remain && offset < rd->length; offset += 0x8000) {
				csize = min(remain, 0x8000);
				rc = bnxt_hwrm_nvm_read(softc, rd->index,
				    rd->offset + offset, csize, &dma_data);
				if (rc) {
					iod->hdr.rc = rc;
					rc = copyout(&iod->hdr.rc, &ioh->rc,
					    sizeof(ioh->rc));
					break;
				} else {
					rc = copyout(dma_data.idi_vaddr,
					    rd->data + offset, csize);
					iod->hdr.rc = rc;
				}
				remain -= csize;
			}
			if (rc == 0)
				rc = copyout(iod, ioh, iol);

			iflib_dma_free(&dma_data);
			goto exit;
		}
		case BNXT_HWRM_FW_RESET:
		{
			struct bnxt_ioctl_hwrm_fw_reset *rst =
			    &iod->reset;

			rc = bnxt_hwrm_fw_reset(softc, rst->processor,
			    &rst->selfreset);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_FW_QSTATUS:
		{
			struct bnxt_ioctl_hwrm_fw_qstatus *qstat =
			    &iod->status;

			rc = bnxt_hwrm_fw_qstatus(softc, qstat->processor,
			    &qstat->selfreset);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_WRITE:
		{
			struct bnxt_ioctl_hwrm_nvm_write *wr =
			    &iod->write;

			rc = bnxt_hwrm_nvm_write(softc, wr->data, true,
			    wr->type, wr->ordinal, wr->ext, wr->attr,
			    wr->option, wr->data_length, wr->keep,
			    &wr->item_length, &wr->index);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			}
			else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_ERASE_DIR_ENTRY:
		{
			struct bnxt_ioctl_hwrm_nvm_erase_dir_entry *erase =
			    &iod->erase;

			rc = bnxt_hwrm_nvm_erase_dir_entry(softc, erase->index);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_GET_DIR_INFO:
		{
			struct bnxt_ioctl_hwrm_nvm_get_dir_info *info =
			    &iod->dir_info;

			rc = bnxt_hwrm_nvm_get_dir_info(softc, &info->entries,
			    &info->entry_length);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_GET_DIR_ENTRIES:
		{
			struct bnxt_ioctl_hwrm_nvm_get_dir_entries *get =
			    &iod->dir_entries;
			struct iflib_dma_info dma_data;

			rc = iflib_dma_alloc(softc->ctx, get->max_size,
			    &dma_data, BUS_DMA_NOWAIT);
			if (rc)
				break;
			rc = bnxt_hwrm_nvm_get_dir_entries(softc, &get->entries,
			    &get->entry_length, &dma_data);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				/* Copy the directory out, then the header. */
				rc = copyout(dma_data.idi_vaddr, get->data,
				    get->entry_length * get->entries);
				iod->hdr.rc = rc;
				if (rc == 0)
					rc = copyout(iod, ioh, iol);
			}
			iflib_dma_free(&dma_data);

			goto exit;
		}
		case BNXT_HWRM_NVM_VERIFY_UPDATE:
		{
			struct bnxt_ioctl_hwrm_nvm_verify_update *vrfy =
			    &iod->verify;

			rc = bnxt_hwrm_nvm_verify_update(softc, vrfy->type,
			    vrfy->ordinal, vrfy->ext);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_INSTALL_UPDATE:
		{
			struct bnxt_ioctl_hwrm_nvm_install_update *inst =
			    &iod->install;

			rc = bnxt_hwrm_nvm_install_update(softc,
			    inst->install_type, &inst->installed_items,
			    &inst->result, &inst->problem_item,
			    &inst->reset_required);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_MODIFY:
		{
			struct bnxt_ioctl_hwrm_nvm_modify *mod = &iod->modify;

			rc = bnxt_hwrm_nvm_modify(softc, mod->index,
			    mod->offset, mod->data, true, mod->length);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_FW_GET_TIME:
		{
			struct bnxt_ioctl_hwrm_fw_get_time *gtm =
			    &iod->get_time;

			rc = bnxt_hwrm_fw_get_time(softc, &gtm->year,
			    &gtm->month, &gtm->day, &gtm->hour, &gtm->minute,
			    &gtm->second, &gtm->millisecond, &gtm->zone);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_FW_SET_TIME:
		{
			struct bnxt_ioctl_hwrm_fw_set_time *stm =
			    &iod->set_time;

			rc = bnxt_hwrm_fw_set_time(softc, stm->year,
			    stm->month, stm->day, stm->hour, stm->minute,
			    stm->second, stm->millisecond, stm->zone);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		}
		break;
	}

exit:
	return rc;
}
3883 
3884 static int
3885 bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c)
3886 {
3887 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3888 	uint8_t *data = i2c->data;
3889 	int rc;
3890 
3891 	/* No point in going further if phy status indicates
3892 	 * module is not inserted or if it is powered down or
3893 	 * if it is of type 10GBase-T
3894 	 */
3895 	if (softc->link_info.module_status >
3896 		HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG)
3897 		return -EOPNOTSUPP;
3898 
3899 	/* This feature is not supported in older firmware versions */
3900 	if (!BNXT_CHIP_P5(softc) ||
3901 	    (softc->hwrm_spec_code < 0x10202))
3902 		return -EOPNOTSUPP;
3903 
3904 
3905 	rc = bnxt_read_sfp_module_eeprom_info(softc, I2C_DEV_ADDR_A0, 0, 0, 0,
3906 		i2c->offset, i2c->len, data);
3907 
3908 	return rc;
3909 }
3910 
3911 /*
3912  * Support functions
3913  */
3914 static int
3915 bnxt_probe_phy(struct bnxt_softc *softc)
3916 {
3917 	struct bnxt_link_info *link_info = &softc->link_info;
3918 	int rc = 0;
3919 
3920 	softc->phy_flags = 0;
3921 	rc = bnxt_hwrm_phy_qcaps(softc);
3922 	if (rc) {
3923 		device_printf(softc->dev,
3924 			      "Probe phy can't get phy capabilities (rc: %x)\n", rc);
3925 		return rc;
3926 	}
3927 
3928 	rc = bnxt_update_link(softc, false);
3929 	if (rc) {
3930 		device_printf(softc->dev,
3931 		    "Probe phy can't update link (rc: %x)\n", rc);
3932 		return (rc);
3933 	}
3934 
3935 	bnxt_get_port_module_status(softc);
3936 
3937 	/*initialize the ethool setting copy with NVM settings */
3938 	if (link_info->auto_mode != HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
3939 		link_info->autoneg |= BNXT_AUTONEG_SPEED;
3940 
3941 	link_info->req_duplex = link_info->duplex_setting;
3942 
3943 	/* NRZ link speed */
3944 	if (link_info->autoneg & BNXT_AUTONEG_SPEED)
3945 		link_info->req_link_speed = link_info->auto_link_speeds;
3946 	else
3947 		link_info->req_link_speed = link_info->force_link_speed;
3948 
3949 	/* PAM4 link speed */
3950 	if (link_info->auto_pam4_link_speeds)
3951 		link_info->req_link_speed = link_info->auto_pam4_link_speeds;
3952 	if (link_info->force_pam4_link_speed)
3953 		link_info->req_link_speed = link_info->force_pam4_link_speed;
3954 
3955 	return (rc);
3956 }
3957 
3958 static void
3959 add_media(struct bnxt_softc *softc, uint8_t media_type, uint16_t supported,
3960 	  uint16_t supported_pam4)
3961 {
3962 	switch (media_type) {
3963 		case BNXT_MEDIA_CR:
3964 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_CP);
3965 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_100G, IFM_100G_CP2);
3966 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_CR4_PAM4);
3967 			BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_CR4);
3968 			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_CR2);
3969 			BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_CR4);
3970 			BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_CR);
3971 			BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_CR1);
3972 			BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_CX);
3973 			break;
3974 
3975 		case BNXT_MEDIA_LR:
3976 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_LR);
3977 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_LR4);
3978 			BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_LR4);
3979 			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_LR2);
3980 			BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_LR4);
3981 			BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_LR);
3982 			BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_LR);
3983 			BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_LX);
3984 			break;
3985 
3986 		case BNXT_MEDIA_SR:
3987 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_SR);
3988 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_100G, IFM_100G_SR2);
3989 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_SR4);
3990 			BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_SR4);
3991 			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_SR2);
3992 			BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_SR4);
3993 			BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_SR);
3994 			BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_SR);
3995 			BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SX);
3996 			break;
3997 
3998 		case BNXT_MEDIA_KR:
3999 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_KR_PAM4);
4000 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_100G, IFM_100G_KR2_PAM4);
4001 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_KR4_PAM4);
4002 			BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_KR4);
4003 			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_KR2);
4004 			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_KR4);
4005 			BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_KR4);
4006 			BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_KR);
4007 			BNXT_IFMEDIA_ADD(supported, SPEEDS_20GB, IFM_20G_KR2);
4008 			BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
4009 			BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
4010 			break;
4011 
4012 		default:
4013 			break;
4014 
4015 	}
4016 	return;
4017 
4018 }
4019 
/*
 * Rebuild the port's ifmedia list.  IFM_AUTO is always added; beyond that
 * the firmware PHY type selects either a media class handed to add_media()
 * (CR/LR/SR/KR) or a fixed set of entries added directly below.
 */
static void
bnxt_add_media_types(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	uint16_t supported = 0, supported_pam4 = 0;
	uint8_t phy_type = get_phy_type(softc), media_type;

	/* NRZ and PAM4 speed masks reported by firmware. */
	supported = link_info->support_speeds;
	supported_pam4 = link_info->support_pam4_speeds;

	/* Auto is always supported */
	ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);

	/* NPAR functions advertise autoneg only. */
	if (softc->flags & BNXT_FLAG_NPAR)
		return;

	/* Collapse the firmware PHY type into one of the media classes. */
	switch (phy_type) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASECR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
		media_type = BNXT_MEDIA_CR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASELR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
		media_type = BNXT_MEDIA_LR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
		media_type = BNXT_MEDIA_SR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
		media_type = BNXT_MEDIA_KR;
		break;

	/* The remaining PHY types add their entries directly and return. */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_ACC);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_AOC);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_XLAUI);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_XLAUI_AC);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GBHD, IFM_1000_CX);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_T);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_T);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_T);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_100MB, IFM_100_T);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10MB, IFM_10_T);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_KX);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SGMII);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
		/* Only Autoneg is supported for TYPE_UNKNOWN */
		return;

        default:
		/* Only Autoneg is supported for new phy type values */
		device_printf(softc->dev, "phy type %d not supported by driver\n", phy_type);
		return;
	}

	/* add_media is invoked twice, once with a firmware speed mask of 0 and a valid
	 * value for both NRZ and PAM4 sig mode. This ensures accurate display of all
	 * supported medias and currently configured media in the "ifconfig -m" output
	 */

	if (link_info->sig_mode == BNXT_SIG_MODE_PAM4) {
		add_media(softc, media_type, supported, 0);
		add_media(softc, media_type, 0, supported_pam4);
	} else {
		add_media(softc, media_type, 0, supported_pam4);
		add_media(softc, media_type, supported, 0);
	}

	return;
}
4133 
4134 static int
4135 bnxt_map_bar(struct bnxt_softc *softc, struct bnxt_bar_info *bar, int bar_num, bool shareable)
4136 {
4137 	uint32_t	flag;
4138 
4139 	if (bar->res != NULL) {
4140 		device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
4141 		return EDOOFUS;
4142 	}
4143 
4144 	bar->rid = PCIR_BAR(bar_num);
4145 	flag = RF_ACTIVE;
4146 	if (shareable)
4147 		flag |= RF_SHAREABLE;
4148 
4149 	if ((bar->res =
4150 		bus_alloc_resource_any(softc->dev,
4151 			   SYS_RES_MEMORY,
4152 			   &bar->rid,
4153 			   flag)) == NULL) {
4154 		device_printf(softc->dev,
4155 		    "PCI BAR%d mapping failure\n", bar_num);
4156 		return (ENXIO);
4157 	}
4158 	bar->tag = rman_get_bustag(bar->res);
4159 	bar->handle = rman_get_bushandle(bar->res);
4160 	bar->size = rman_get_size(bar->res);
4161 
4162 	return 0;
4163 }
4164 
4165 static int
4166 bnxt_pci_mapping(struct bnxt_softc *softc)
4167 {
4168 	int rc;
4169 
4170 	rc = bnxt_map_bar(softc, &softc->hwrm_bar, 0, true);
4171 	if (rc)
4172 		return rc;
4173 
4174 	rc = bnxt_map_bar(softc, &softc->doorbell_bar, 2, false);
4175 
4176 	return rc;
4177 }
4178 
4179 static void
4180 bnxt_pci_mapping_free(struct bnxt_softc *softc)
4181 {
4182 	if (softc->hwrm_bar.res != NULL)
4183 		bus_release_resource(softc->dev, SYS_RES_MEMORY,
4184 		    softc->hwrm_bar.rid, softc->hwrm_bar.res);
4185 	softc->hwrm_bar.res = NULL;
4186 
4187 	if (softc->doorbell_bar.res != NULL)
4188 		bus_release_resource(softc->dev, SYS_RES_MEMORY,
4189 		    softc->doorbell_bar.rid, softc->doorbell_bar.res);
4190 	softc->doorbell_bar.res = NULL;
4191 }
4192 
4193 static int
4194 bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state)
4195 {
4196 	struct bnxt_link_info *link_info = &softc->link_info;
4197 	uint8_t link_up = link_info->link_up;
4198 	int rc = 0;
4199 
4200 	rc = bnxt_hwrm_port_phy_qcfg(softc);
4201 	if (rc)
4202 		goto exit;
4203 
4204 	/* TODO: need to add more logic to report VF link */
4205 	if (chng_link_state) {
4206 		if (link_info->phy_link_status ==
4207 		    HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
4208 			link_info->link_up = 1;
4209 		else
4210 			link_info->link_up = 0;
4211 		if (link_up != link_info->link_up)
4212 			bnxt_report_link(softc);
4213 	} else {
4214 		/* always link down if not require to update link state */
4215 		link_info->link_up = 0;
4216 	}
4217 
4218 exit:
4219 	return rc;
4220 }
4221 
4222 #define ETHTOOL_SPEED_1000		1000
4223 #define ETHTOOL_SPEED_10000		10000
4224 #define ETHTOOL_SPEED_20000		20000
4225 #define ETHTOOL_SPEED_25000		25000
4226 #define ETHTOOL_SPEED_40000		40000
4227 #define ETHTOOL_SPEED_50000		50000
4228 #define ETHTOOL_SPEED_100000		100000
4229 #define ETHTOOL_SPEED_200000		200000
4230 #define ETHTOOL_SPEED_UNKNOWN		-1
4231 
4232 static u32
4233 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
4234 {
4235 	switch (fw_link_speed) {
4236 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
4237 		return ETHTOOL_SPEED_1000;
4238 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
4239 		return ETHTOOL_SPEED_10000;
4240 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
4241 		return ETHTOOL_SPEED_20000;
4242 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
4243 		return ETHTOOL_SPEED_25000;
4244 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
4245 		return ETHTOOL_SPEED_40000;
4246 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
4247 		return ETHTOOL_SPEED_50000;
4248 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
4249 		return ETHTOOL_SPEED_100000;
4250 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
4251 		return ETHTOOL_SPEED_200000;
4252 	default:
4253 		return ETHTOOL_SPEED_UNKNOWN;
4254 	}
4255 }
4256 
/*
 * Log link transitions, push the new state to iflib, and rebuild the
 * ifmedia list.  Suppresses the report when nothing observable (link
 * up/down, duplex, phy type, flow control) changed since the last call.
 */
void
bnxt_report_link(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	const char *duplex = NULL, *flow_ctrl = NULL;
	const char *signal_mode = "";

	/* Mirror the current speed into the ulp/RoCE edev, if attached. */
	if(softc->edev)
		softc->edev->espeed =
		    bnxt_fw_to_ethtool_speed(link_info->link_speed);

	/* No state change: report only if something visible changed. */
	if (link_info->link_up == link_info->last_link_up) {
		if (!link_info->link_up)
			return;
		if ((link_info->duplex == link_info->last_duplex) &&
		    (link_info->phy_type == link_info->last_phy_type) &&
                    (!(BNXT_IS_FLOW_CTRL_CHANGED(link_info))))
			return;
	}

	if (link_info->link_up) {
		if (link_info->duplex ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
			duplex = "full duplex";
		else
			duplex = "half duplex";
		/* NOTE(review): '&' on the tx/rx flags — behaves like '&&'
		 * only if both are strictly 0/1 booleans; confirm. */
		if (link_info->flow_ctrl.tx & link_info->flow_ctrl.rx)
			flow_ctrl = "FC - receive & transmit";
		else if (link_info->flow_ctrl.tx)
			flow_ctrl = "FC - transmit";
		else if (link_info->flow_ctrl.rx)
			flow_ctrl = "FC - receive";
		else
			flow_ctrl = "FC - none";

		/* Decode the NRZ/PAM4 signalling mode when firmware says
		 * it is known, and remember it for media selection. */
		if (softc->link_info.phy_qcfg_resp.option_flags &
		    HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
			uint8_t sig_mode = softc->link_info.active_fec_sig_mode &
				      HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_MASK;
			switch (sig_mode) {
			case BNXT_SIG_MODE_NRZ:
				signal_mode = "(NRZ) ";
				break;
			case BNXT_SIG_MODE_PAM4:
				signal_mode = "(PAM4) ";
				break;
			default:
				break;
			}
		link_info->sig_mode = sig_mode;
		}

		/* NOTE(review): baudrate is hard-coded to 100G here while the
		 * down path uses bnxt_get_baudrate(); looks intentional but
		 * worth confirming. */
		iflib_link_state_change(softc->ctx, LINK_STATE_UP,
		    IF_Gbps(100));
		device_printf(softc->dev, "Link is UP %s %s, %s - %d Mbps \n", duplex, signal_mode,
		    flow_ctrl, (link_info->link_speed * 100));
	} else {
		iflib_link_state_change(softc->ctx, LINK_STATE_DOWN,
		    bnxt_get_baudrate(&softc->link_info));
		device_printf(softc->dev, "Link is Down\n");
	}

	/* Snapshot current state so the next call can detect changes. */
	link_info->last_link_up = link_info->link_up;
	link_info->last_duplex = link_info->duplex;
	link_info->last_phy_type = link_info->phy_type;
	link_info->last_flow_ctrl.tx = link_info->flow_ctrl.tx;
	link_info->last_flow_ctrl.rx = link_info->flow_ctrl.rx;
	link_info->last_flow_ctrl.autoneg = link_info->flow_ctrl.autoneg;
	/* update media types */
	ifmedia_removeall(softc->media);
	bnxt_add_media_types(softc);
	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
}
4330 
4331 static int
4332 bnxt_handle_isr(void *arg)
4333 {
4334 	struct bnxt_cp_ring *cpr = arg;
4335 	struct bnxt_softc *softc = cpr->ring.softc;
4336 
4337 	cpr->int_count++;
4338 	/* Disable further interrupts for this queue */
4339 	if (!BNXT_CHIP_P5(softc))
4340 		softc->db_ops.bnxt_db_rx_cq(cpr, 0);
4341 
4342 	return FILTER_SCHEDULE_THREAD;
4343 }
4344 
4345 static int
4346 bnxt_handle_def_cp(void *arg)
4347 {
4348 	struct bnxt_softc *softc = arg;
4349 
4350 	softc->db_ops.bnxt_db_rx_cq(&softc->def_cp_ring, 0);
4351 	GROUPTASK_ENQUEUE(&softc->def_cp_task);
4352 	return FILTER_HANDLED;
4353 }
4354 
4355 static void
4356 bnxt_clear_ids(struct bnxt_softc *softc)
4357 {
4358 	int i;
4359 
4360 	softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
4361 	softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4362 	softc->def_nq_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
4363 	softc->def_nq_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4364 	for (i = 0; i < softc->ntxqsets; i++) {
4365 		softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4366 		softc->tx_cp_rings[i].ring.phys_id =
4367 		    (uint16_t)HWRM_NA_SIGNATURE;
4368 		softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4369 
4370 		if (!softc->nq_rings)
4371 			continue;
4372 		softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4373 		softc->nq_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4374 	}
4375 	for (i = 0; i < softc->nrxqsets; i++) {
4376 		softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4377 		softc->rx_cp_rings[i].ring.phys_id =
4378 		    (uint16_t)HWRM_NA_SIGNATURE;
4379 		softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4380 		softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4381 		softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
4382 	}
4383 	softc->vnic_info.filter_id = -1;
4384 	softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
4385 	softc->vnic_info.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
4386 	memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
4387 	    softc->vnic_info.rss_grp_tbl.idi_size);
4388 }
4389 
4390 static void
4391 bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
4392 {
4393 	struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
4394 	int i;
4395 
4396 	for (i = 0; i < cpr->ring.ring_size; i++)
4397 		cmp[i].info3_v = !cpr->v_bit;
4398 }
4399 
4400 static void bnxt_event_error_report(struct bnxt_softc *softc, u32 data1, u32 data2)
4401 {
4402 	u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
4403 
4404 	switch (err_type) {
4405 	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
4406 		device_printf(softc->dev,
4407 			      "1PPS: Received invalid signal on pin%u from the external source. Please fix the signal and reconfigure the pin\n",
4408 			      BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
4409 		break;
4410 	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
4411 		device_printf(softc->dev,
4412 			      "Pause Storm detected!\n");
4413 		break;
4414 	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
4415 		device_printf(softc->dev,
4416 			      "One or more MMIO doorbells dropped by the device! epoch: 0x%x\n",
4417 			      BNXT_EVENT_DBR_EPOCH(data1));
4418 		break;
4419 	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM: {
4420 		const char *nvm_err_str;
4421 
4422 		if (EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1))
4423 			nvm_err_str = "nvm write error";
4424 		else if (EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1))
4425 			nvm_err_str = "nvm erase error";
4426 		else
4427 			nvm_err_str = "unrecognized nvm error";
4428 
4429 		device_printf(softc->dev,
4430 			      "%s reported at address 0x%x\n", nvm_err_str,
4431 			      (u32)EVENT_DATA2_NVM_ERR_ADDR(data2));
4432 		break;
4433 	}
4434 	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
4435 		char *threshold_type;
4436 		char *dir_str;
4437 
4438 		switch (EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)) {
4439 		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
4440 			threshold_type = "warning";
4441 			break;
4442 		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
4443 			threshold_type = "critical";
4444 			break;
4445 		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
4446 			threshold_type = "fatal";
4447 			break;
4448 		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
4449 			threshold_type = "shutdown";
4450 			break;
4451 		default:
4452 			device_printf(softc->dev,
4453 				      "Unknown Thermal threshold type event\n");
4454 			return;
4455 		}
4456 		if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1))
4457 			dir_str = "above";
4458 		else
4459 			dir_str = "below";
4460 		device_printf(softc->dev,
4461 			      "Chip temperature has gone %s the %s thermal threshold!\n",
4462 			      dir_str, threshold_type);
4463 		device_printf(softc->dev,
4464 			      "Temperature (In Celsius), Current: %u, threshold: %u\n",
4465 			      BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
4466 			      BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
4467 		break;
4468 	}
4469 	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
4470 		device_printf(softc->dev,
4471 			      "Speed change is not supported with dual rate transceivers on this board\n");
4472 		break;
4473 
4474 	default:
4475 	device_printf(softc->dev,
4476 		      "FW reported unknown error type: %u, data1: 0x%x data2: 0x%x\n",
4477 		      err_type, data1, data2);
4478 		break;
4479 	}
4480 }
4481 
/*
 * Dispatch one async-event completion from the default ring.  Cases that
 * 'break' fall through to bnxt_queue_sp_work(); cases that jump to
 * async_event_process_exit skip it.  All cases end in
 * bnxt_ulp_async_events() so ULPs (e.g. RoCE) also see the event.
 */
static void
bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
{
	struct hwrm_async_event_cmpl *ae = (void *)cmpl;
	uint16_t async_id = le16toh(ae->event_id);
	struct ifmediareq ifmr;
	char *type_str;
	char *status_desc;
	struct bnxt_fw_health *fw_health;
	u32 data1 = le32toh(ae->event_data1);
	u32 data2 = le32toh(ae->event_data2);

	switch (async_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		/* P5: defer to the sp task via the state bit; older chips
		 * refresh the media status inline. */
		if (BNXT_CHIP_P5(softc))
			bit_set(softc->state_bv, BNXT_STATE_LINK_CHANGE);
		else
			bnxt_media_status(softc->ctx, &ifmr);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
		bnxt_event_error_report(softc, data1, data2);
		goto async_event_process_exit;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE:
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
		/* Firmware is about to reset; record the wait window and
		 * classify the reset (solicited/fatal/non-fatal). */
		type_str = "Solicited";

		if (!softc->fw_health)
			goto async_event_process_exit;

		softc->fw_reset_timestamp = jiffies;
		/* timestamps are in deciseconds; 0 means use defaults. */
		softc->fw_reset_min_dsecs = ae->timestamp_lo;
		if (!softc->fw_reset_min_dsecs)
			softc->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
		softc->fw_reset_max_dsecs = le16toh(ae->timestamp_hi);
		if (!softc->fw_reset_max_dsecs)
			softc->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &softc->state);
		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
			type_str = "Fatal";
			softc->fw_health->fatalities++;
			set_bit(BNXT_STATE_FW_FATAL_COND, &softc->state);
		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
			type_str = "Non-fatal";
			softc->fw_health->survivals++;
			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &softc->state);
		}
		device_printf(softc->dev,
			   "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
			   type_str, data1, data2,
			   softc->fw_reset_min_dsecs * 100,
			   softc->fw_reset_max_dsecs * 100);
		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &softc->sp_event);
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
		/* Firmware advertised (or revoked) the error-recovery
		 * watchdog; (re)arm the health-check bookkeeping. */
		fw_health = softc->fw_health;
		status_desc = "healthy";
		u32 status;

		if (!fw_health)
			goto async_event_process_exit;

		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
			fw_health->enabled = false;
			device_printf(softc->dev, "Driver recovery watchdog is disabled\n");
			break;
		}
		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
		fw_health->tmr_multiplier =
			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
				     HZ * 10);
		fw_health->tmr_counter = fw_health->tmr_multiplier;
		if (!fw_health->enabled)
			fw_health->last_fw_heartbeat =
				bnxt_fw_health_readl(softc, BNXT_FW_HEARTBEAT_REG);
		fw_health->last_fw_reset_cnt =
			bnxt_fw_health_readl(softc, BNXT_FW_RESET_CNT_REG);
		status = bnxt_fw_health_readl(softc, BNXT_FW_HEALTH_REG);
		if (status != BNXT_FW_STATUS_HEALTHY)
			status_desc = "unhealthy";
		device_printf(softc->dev,
			   "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
			   fw_health->primary ? "primary" : "backup", status,
			   status_desc, fw_health->last_fw_reset_cnt);
		if (!fw_health->enabled) {
			/* Make sure tmr_counter is set and seen by
			 * bnxt_health_check() before setting enabled
			 */
			smp_mb();
			fw_health->enabled = true;
		}
		goto async_event_process_exit;
	}

	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
		device_printf(softc->dev,
		    "Unhandled async completion type %u\n", async_id);
		break;
	default:
		device_printf(softc->dev,
		    "Unknown async completion type %u\n", async_id);
		break;
	}
	bnxt_queue_sp_work(softc);

async_event_process_exit:
	bnxt_ulp_async_events(softc, ae);
}
4609 
/*
 * Grouptask that drains the default completion ring.  Walks entries by
 * consumer index + valid-bit polarity; async events are dispatched, all
 * other completion types are merely logged.  On exit the consumer index
 * is left at the last entry actually consumed and the ring doorbell is
 * re-armed.
 */
static void
bnxt_def_cp_task(void *context)
{
	if_ctx_t ctx = context;
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_cp_ring *cpr = &softc->def_cp_ring;

	/* Handle completions on the default completion ring */
	struct cmpl_base *cmpl;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	bool last_v_bit;
	uint32_t last_cons;
	uint16_t type;

	for (;;) {
		/* Remember the previous position: when the next entry is
		 * not valid yet, we rewind to it. */
		last_cons = cons;
		last_v_bit = v_bit;
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];

		if (!CMP_VALID(cmpl, v_bit))
			break;

		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_handle_async_event(softc, cmpl);
			break;
		/* None of these are expected on the default ring. */
		case CMPL_BASE_TYPE_TX_L2:
		case CMPL_BASE_TYPE_RX_L2:
		case CMPL_BASE_TYPE_RX_AGG:
		case CMPL_BASE_TYPE_RX_TPA_START:
		case CMPL_BASE_TYPE_RX_TPA_END:
		case CMPL_BASE_TYPE_STAT_EJECT:
		case CMPL_BASE_TYPE_HWRM_DONE:
		case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		case CMPL_BASE_TYPE_HWRM_FWD_RESP:
		case CMPL_BASE_TYPE_CQ_NOTIFICATION:
		case CMPL_BASE_TYPE_SRQ_EVENT:
		case CMPL_BASE_TYPE_DBQ_EVENT:
		case CMPL_BASE_TYPE_QP_EVENT:
		case CMPL_BASE_TYPE_FUNC_EVENT:
			device_printf(softc->dev,
			    "Unhandled completion type %u\n", type);
			break;
		default:
			device_printf(softc->dev,
			    "Unknown completion type %u\n", type);
			break;
		}
	}

	/* Commit position of the last consumed entry and re-arm the ring. */
	cpr->cons = last_cons;
	cpr->v_bit = last_v_bit;
	softc->db_ops.bnxt_db_rx_cq(cpr, 1);
}
4667 
4668 uint8_t
4669 get_phy_type(struct bnxt_softc *softc)
4670 {
4671 	struct bnxt_link_info *link_info = &softc->link_info;
4672 	uint8_t phy_type = link_info->phy_type;
4673 	uint16_t supported;
4674 
4675 	if (phy_type != HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN)
4676 		return phy_type;
4677 
4678 	/* Deduce the phy type from the media type and supported speeds */
4679 	supported = link_info->support_speeds;
4680 
4681 	if (link_info->media_type ==
4682 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP)
4683 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET;
4684 	if (link_info->media_type ==
4685 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC) {
4686 		if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
4687 			return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX;
4688 		if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
4689 			return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR;
4690 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR;
4691 	}
4692 	if (link_info->media_type ==
4693 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE)
4694 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR;
4695 
4696 	return phy_type;
4697 }
4698 
4699 bool
4700 bnxt_check_hwrm_version(struct bnxt_softc *softc)
4701 {
4702 	char buf[16];
4703 
4704 	sprintf(buf, "%hhu.%hhu.%hhu", softc->ver_info->hwrm_min_major,
4705 	    softc->ver_info->hwrm_min_minor, softc->ver_info->hwrm_min_update);
4706 	if (softc->ver_info->hwrm_min_major > softc->ver_info->hwrm_if_major) {
4707 		device_printf(softc->dev,
4708 		    "WARNING: HWRM version %s is too old (older than %s)\n",
4709 		    softc->ver_info->hwrm_if_ver, buf);
4710 		return false;
4711 	}
4712 	else if(softc->ver_info->hwrm_min_major ==
4713 	    softc->ver_info->hwrm_if_major) {
4714 		if (softc->ver_info->hwrm_min_minor >
4715 		    softc->ver_info->hwrm_if_minor) {
4716 			device_printf(softc->dev,
4717 			    "WARNING: HWRM version %s is too old (older than %s)\n",
4718 			    softc->ver_info->hwrm_if_ver, buf);
4719 			return false;
4720 		}
4721 		else if (softc->ver_info->hwrm_min_minor ==
4722 		    softc->ver_info->hwrm_if_minor) {
4723 			if (softc->ver_info->hwrm_min_update >
4724 			    softc->ver_info->hwrm_if_update) {
4725 				device_printf(softc->dev,
4726 				    "WARNING: HWRM version %s is too old (older than %s)\n",
4727 				    softc->ver_info->hwrm_if_ver, buf);
4728 				return false;
4729 			}
4730 		}
4731 	}
4732 	return true;
4733 }
4734 
4735 static uint64_t
4736 bnxt_get_baudrate(struct bnxt_link_info *link)
4737 {
4738 	switch (link->link_speed) {
4739 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
4740 		return IF_Mbps(100);
4741 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
4742 		return IF_Gbps(1);
4743 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
4744 		return IF_Gbps(2);
4745 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
4746 		return IF_Mbps(2500);
4747 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
4748 		return IF_Gbps(10);
4749 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
4750 		return IF_Gbps(20);
4751 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
4752 		return IF_Gbps(25);
4753 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
4754 		return IF_Gbps(40);
4755 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
4756 		return IF_Gbps(50);
4757 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
4758 		return IF_Gbps(100);
4759 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
4760 		return IF_Mbps(10);
4761 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
4762 		return IF_Gbps(200);
4763 	}
4764 	return IF_Gbps(100);
4765 }
4766 
4767 static void
4768 bnxt_get_wol_settings(struct bnxt_softc *softc)
4769 {
4770 	uint16_t wol_handle = 0;
4771 
4772 	if (!bnxt_wol_supported(softc))
4773 		return;
4774 
4775 	do {
4776 		wol_handle = bnxt_hwrm_get_wol_fltrs(softc, wol_handle);
4777 	} while (wol_handle && wol_handle != BNXT_NO_MORE_WOL_FILTERS);
4778 }
4779