xref: /freebsd/sys/dev/bnxt/bnxt_en/if_bnxt.c (revision 3ad01642fe9e241124553f2f18fd365ffea5d20b)
1 /*-
2  * Broadcom NetXtreme-C/E network driver.
3  *
4  * Copyright (c) 2016 Broadcom, All Rights Reserved.
5  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/socket.h>
31 #include <sys/kernel.h>
32 #include <sys/bus.h>
33 #include <sys/module.h>
34 #include <sys/rman.h>
35 #include <sys/endian.h>
36 #include <sys/sockio.h>
37 #include <sys/priv.h>
38 
39 #include <machine/bus.h>
40 #include <machine/resource.h>
41 
42 #include <dev/pci/pcireg.h>
43 
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_var.h>
48 #include <net/ethernet.h>
49 #include <net/iflib.h>
50 
51 #include <linux/pci.h>
52 #include <linux/kmod.h>
53 #include <linux/module.h>
54 #include <linux/delay.h>
55 #include <linux/idr.h>
56 #include <linux/netdevice.h>
57 #include <linux/etherdevice.h>
58 #include <linux/rcupdate.h>
59 #include "opt_inet.h"
60 #include "opt_inet6.h"
61 #include "opt_rss.h"
62 
63 #include "ifdi_if.h"
64 
65 #include "bnxt.h"
66 #include "bnxt_hwrm.h"
67 #include "bnxt_ioctl.h"
68 #include "bnxt_sysctl.h"
69 #include "hsi_struct_def.h"
70 #include "bnxt_mgmt.h"
71 #include "bnxt_ulp.h"
72 #include "bnxt_auxbus_compat.h"
73 
74 /*
75  * PCI Device ID Table
76  */
77 
/*
 * One PVID() entry per supported Broadcom PCI device ID, pairing the ID
 * with the probe string iflib reports.  The table is consumed via
 * .isc_vendor_info in the shared context and by IFLIB_PNP_INFO(), and
 * must be terminated with PVID_END.
 */
static const pci_vendor_info_t bnxt_vendor_info_array[] =
{
    PVID(BROADCOM_VENDOR_ID, BCM57301,
	"Broadcom BCM57301 NetXtreme-C 10Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57302,
	"Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57304,
	"Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57311,
	"Broadcom BCM57311 NetXtreme-C 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57312,
	"Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57314,
	"Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57402,
	"Broadcom BCM57402 NetXtreme-E 10Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57402_NPAR,
	"Broadcom BCM57402 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57404,
	"Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57404_NPAR,
	"Broadcom BCM57404 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57406,
	"Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57406_NPAR,
	"Broadcom BCM57406 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57407,
	"Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57407_NPAR,
	"Broadcom BCM57407 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57407_SFP,
	"Broadcom BCM57407 NetXtreme-E 25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57412,
	"Broadcom BCM57412 NetXtreme-E 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR1,
	"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR2,
	"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57414,
	"Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR1,
	"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR2,
	"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416,
	"Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR1,
	"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR2,
	"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_SFP,
	"Broadcom BCM57416 NetXtreme-E 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57417,
	"Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR1,
	"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR2,
	"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_SFP,
	"Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57454,
	"Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM58700,
	"Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57508,
	"Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57504,
	"Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57504_NPAR,
	"Broadcom BCM57504 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57502,
	"Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF1,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF2,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF3,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF1,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF2,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF3,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    /* required last entry */

    PVID_END
};
166 
167 /*
168  * Function prototypes
169  */
170 
171 SLIST_HEAD(softc_list, bnxt_softc_list) pf_list;
172 int bnxt_num_pfs = 0;
173 
174 void
175 process_nq(struct bnxt_softc *softc, uint16_t nqid);
176 static void *bnxt_register(device_t dev);
177 
178 /* Soft queue setup and teardown */
179 static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
180     uint64_t *paddrs, int ntxqs, int ntxqsets);
181 static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
182     uint64_t *paddrs, int nrxqs, int nrxqsets);
183 static void bnxt_queues_free(if_ctx_t ctx);
184 
185 /* Device setup and teardown */
186 static int bnxt_attach_pre(if_ctx_t ctx);
187 static int bnxt_attach_post(if_ctx_t ctx);
188 static int bnxt_detach(if_ctx_t ctx);
189 
190 /* Device configuration */
191 static void bnxt_init(if_ctx_t ctx);
192 static void bnxt_stop(if_ctx_t ctx);
193 static void bnxt_multi_set(if_ctx_t ctx);
194 static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu);
195 static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
196 static int bnxt_media_change(if_ctx_t ctx);
197 static int bnxt_promisc_set(if_ctx_t ctx, int flags);
198 static uint64_t	bnxt_get_counter(if_ctx_t, ift_counter);
199 static void bnxt_update_admin_status(if_ctx_t ctx);
200 static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid);
201 
202 /* Interrupt enable / disable */
203 static void bnxt_intr_enable(if_ctx_t ctx);
204 static int bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
205 static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
206 static void bnxt_disable_intr(if_ctx_t ctx);
207 static int bnxt_msix_intr_assign(if_ctx_t ctx, int msix);
208 
209 /* vlan support */
210 static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag);
211 static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag);
212 
213 /* ioctl */
214 static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
215 
216 static int bnxt_shutdown(if_ctx_t ctx);
217 static int bnxt_suspend(if_ctx_t ctx);
218 static int bnxt_resume(if_ctx_t ctx);
219 
220 /* Internal support functions */
221 static int bnxt_probe_phy(struct bnxt_softc *softc);
222 static void bnxt_add_media_types(struct bnxt_softc *softc);
223 static int bnxt_pci_mapping(struct bnxt_softc *softc);
224 static void bnxt_pci_mapping_free(struct bnxt_softc *softc);
225 static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state);
226 static int bnxt_handle_def_cp(void *arg);
227 static int bnxt_handle_isr(void *arg);
228 static void bnxt_clear_ids(struct bnxt_softc *softc);
229 static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr);
230 static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr);
231 static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr);
232 static void bnxt_def_cp_task(void *context, int pending);
233 static void bnxt_handle_async_event(struct bnxt_softc *softc,
234     struct cmpl_base *cmpl);
235 static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link);
236 static void bnxt_get_wol_settings(struct bnxt_softc *softc);
237 static int bnxt_wol_config(if_ctx_t ctx);
238 static bool bnxt_if_needs_restart(if_ctx_t, enum iflib_restart_event);
239 static int bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c);
240 static void bnxt_get_port_module_status(struct bnxt_softc *softc);
241 static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc);
242 static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc);
243 static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay);
244 void bnxt_queue_sp_work(struct bnxt_softc *bp);
245 
246 void bnxt_fw_reset(struct bnxt_softc *bp);
247 /*
248  * Device Interface Declaration
249  */
250 
/*
 * newbus device methods.  Everything except device_register is delegated
 * straight to the generic iflib implementations; bnxt_register supplies
 * the driver's shared-context template to iflib.
 */
static device_method_t bnxt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, bnxt_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};
262 
/* newbus driver glue: attach the method table above on the pci bus. */
static driver_t bnxt_driver = {
	"bnxt", bnxt_methods, sizeof(struct bnxt_softc),
};

DRIVER_MODULE(bnxt, pci, bnxt_driver, 0, 0);

/* Module metadata: license, load-order dependencies, and PNP info. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(if_bnxt, pci, 1, 1, 1);
MODULE_DEPEND(if_bnxt, ether, 1, 1, 1);
MODULE_DEPEND(if_bnxt, iflib, 1, 1, 1);
MODULE_DEPEND(if_bnxt, linuxkpi, 1, 1, 1);
MODULE_VERSION(if_bnxt, 1);

IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array);
277 
278 void writel_fbsd(struct bnxt_softc *bp, u32, u8, u32);
279 u32 readl_fbsd(struct bnxt_softc *bp, u32, u8);
280 
/*
 * Read a 32-bit register through bus_space.  bar_idx selects the BAR:
 * zero reads from the doorbell BAR, any non-zero value reads from the
 * HWRM BAR.
 */
u32 readl_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx)
{

	if (bar_idx != 0)
		return bus_space_read_4(bp->hwrm_bar.tag,
		    bp->hwrm_bar.handle, reg_off);

	return bus_space_read_4(bp->doorbell_bar.tag,
	    bp->doorbell_bar.handle, reg_off);
}
289 
/*
 * Write a 32-bit value, converted to little-endian, through bus_space.
 * bar_idx selects the BAR: zero writes to the doorbell BAR, any
 * non-zero value writes to the HWRM BAR.
 */
void writel_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx, u32 val)
{
	u32 le_val = htole32(val);

	if (bar_idx != 0)
		bus_space_write_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle,
		    reg_off, le_val);
	else
		bus_space_write_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle,
		    reg_off, le_val);
}
298 
299 static DEFINE_IDA(bnxt_aux_dev_ids);
300 
/*
 * iflib device-interface (IFDI) method table: maps each ifdi_* callback
 * onto this driver's implementation.
 */
static device_method_t bnxt_iflib_methods[] = {
	/* Queue software-state setup and teardown */
	DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, bnxt_queues_free),

	/* Device attach/detach */
	DEVMETHOD(ifdi_attach_pre, bnxt_attach_pre),
	DEVMETHOD(ifdi_attach_post, bnxt_attach_post),
	DEVMETHOD(ifdi_detach, bnxt_detach),

	/* Runtime configuration and status */
	DEVMETHOD(ifdi_init, bnxt_init),
	DEVMETHOD(ifdi_stop, bnxt_stop),
	DEVMETHOD(ifdi_multi_set, bnxt_multi_set),
	DEVMETHOD(ifdi_mtu_set, bnxt_mtu_set),
	DEVMETHOD(ifdi_media_status, bnxt_media_status),
	DEVMETHOD(ifdi_media_change, bnxt_media_change),
	DEVMETHOD(ifdi_promisc_set, bnxt_promisc_set),
	DEVMETHOD(ifdi_get_counter, bnxt_get_counter),
	DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status),
	DEVMETHOD(ifdi_timer, bnxt_if_timer),

	/* Interrupt management */
	DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr),
	DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign),

	/* VLAN filter updates */
	DEVMETHOD(ifdi_vlan_register, bnxt_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, bnxt_vlan_unregister),

	DEVMETHOD(ifdi_priv_ioctl, bnxt_priv_ioctl),

	/* Power management and misc */
	DEVMETHOD(ifdi_suspend, bnxt_suspend),
	DEVMETHOD(ifdi_shutdown, bnxt_shutdown),
	DEVMETHOD(ifdi_resume, bnxt_resume),
	DEVMETHOD(ifdi_i2c_req, bnxt_i2c_req),

	DEVMETHOD(ifdi_needs_restart, bnxt_if_needs_restart),

	DEVMETHOD_END
};
341 
static driver_t bnxt_iflib_driver = {
	"bnxt", bnxt_iflib_methods, sizeof(struct bnxt_softc)
};

/*
 * iflib shared context
 *
 * Template handed to iflib from bnxt_register().  Describes ring
 * counts, descriptor limits, and DMA constraints for this hardware.
 */

#define BNXT_DRIVER_VERSION	"230.0.133.0"
const char bnxt_driver_version[] = BNXT_DRIVER_VERSION;
extern struct if_txrx bnxt_txrx;
static struct if_shared_ctx bnxt_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_driver = &bnxt_iflib_driver,
	.isc_nfl = 2,				// Number of Free Lists
	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),

	// Only use a single segment to avoid page size constraints
	.isc_rx_nsegments = 1,
	// Three rings per queue set; the alloc callbacks use them as
	// [0] completion ring, [1] descriptor ring, [2] AG ring (RX) / NQ (TX)
	.isc_ntxqs = 3,
	.isc_nrxqs = 3,
	.isc_nrxd_min = {16, 16, 16},
	.isc_nrxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 8,
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd),
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd)},
	.isc_nrxd_max = {BNXT_MAX_RXD, BNXT_MAX_RXD, BNXT_MAX_RXD},
	.isc_ntxd_min = {16, 16, 16},
	.isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2,
	    PAGE_SIZE / sizeof(struct tx_bd_short),
	    /* NQ depth 4096 */
	    PAGE_SIZE / sizeof(struct cmpl_base) * 16},
	.isc_ntxd_max = {BNXT_MAX_TXD, BNXT_MAX_TXD, BNXT_MAX_TXD},

	.isc_admin_intrcnt = BNXT_ROCE_IRQ_COUNT,
	.isc_vendor_info = bnxt_vendor_info_array,
	.isc_driver_version = bnxt_driver_version,
};
386 
387 #define PCI_SUBSYSTEM_ID	0x2e
388 static struct workqueue_struct *bnxt_pf_wq;
389 
390 extern void bnxt_destroy_irq(struct bnxt_softc *softc);
391 
392 /*
393  * Device Methods
394  */
395 
/*
 * device_register method: hand iflib the shared-context template that
 * describes this driver's queue layout and DMA requirements.
 */
static void *
bnxt_register(device_t dev)
{
	return (&bnxt_sctx_init);
}
401 
402 static void
bnxt_nq_alloc(struct bnxt_softc * softc,int nqsets)403 bnxt_nq_alloc(struct bnxt_softc *softc, int nqsets)
404 {
405 
406 	if (softc->nq_rings)
407 		return;
408 
409 	softc->nq_rings = malloc(sizeof(struct bnxt_cp_ring) * nqsets,
410 	    M_DEVBUF, M_NOWAIT | M_ZERO);
411 }
412 
413 static void
bnxt_nq_free(struct bnxt_softc * softc)414 bnxt_nq_free(struct bnxt_softc *softc)
415 {
416 
417 	if (softc->nq_rings)
418 		free(softc->nq_rings, M_DEVBUF);
419 	softc->nq_rings = NULL;
420 }
421 
422 /*
423  * Device Dependent Configuration Functions
424 */
425 
426 /* Soft queue setup and teardown */
427 static int
bnxt_tx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int ntxqs,int ntxqsets)428 bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
429     uint64_t *paddrs, int ntxqs, int ntxqsets)
430 {
431 	struct bnxt_softc *softc;
432 	int i;
433 	int rc;
434 
435 	softc = iflib_get_softc(ctx);
436 
437 	if (BNXT_CHIP_P5(softc)) {
438 		bnxt_nq_alloc(softc, ntxqsets);
439 		if (!softc->nq_rings) {
440 			device_printf(iflib_get_dev(ctx),
441 					"unable to allocate NQ rings\n");
442 			rc = ENOMEM;
443 			goto nq_alloc_fail;
444 		}
445 	}
446 
447 	softc->tx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * ntxqsets,
448 	    M_DEVBUF, M_NOWAIT | M_ZERO);
449 	if (!softc->tx_cp_rings) {
450 		device_printf(iflib_get_dev(ctx),
451 		    "unable to allocate TX completion rings\n");
452 		rc = ENOMEM;
453 		goto cp_alloc_fail;
454 	}
455 	softc->tx_rings = malloc(sizeof(struct bnxt_ring) * ntxqsets,
456 	    M_DEVBUF, M_NOWAIT | M_ZERO);
457 	if (!softc->tx_rings) {
458 		device_printf(iflib_get_dev(ctx),
459 		    "unable to allocate TX rings\n");
460 		rc = ENOMEM;
461 		goto ring_alloc_fail;
462 	}
463 
464 	for (i=0; i < ntxqsets; i++) {
465 		rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
466 				&softc->tx_stats[i], 0);
467 		if (rc)
468 			goto dma_alloc_fail;
469 		bus_dmamap_sync(softc->tx_stats[i].idi_tag, softc->tx_stats[i].idi_map,
470 				BUS_DMASYNC_PREREAD);
471 	}
472 
473 	for (i = 0; i < ntxqsets; i++) {
474 		/* Set up the completion ring */
475 		softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
476 		softc->tx_cp_rings[i].ring.phys_id =
477 		    (uint16_t)HWRM_NA_SIGNATURE;
478 		softc->tx_cp_rings[i].ring.softc = softc;
479 		softc->tx_cp_rings[i].ring.idx = i;
480 		softc->tx_cp_rings[i].ring.id =
481 		    (softc->scctx->isc_nrxqsets * 2) + 1 + i;
482 		softc->tx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
483 			DB_PF_OFFSET_P5: softc->tx_cp_rings[i].ring.id * 0x80;
484 		softc->tx_cp_rings[i].ring.ring_size =
485 		    softc->scctx->isc_ntxd[0];
486 		softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs];
487 		softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs];
488 
489 		/* Set up the TX ring */
490 		softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
491 		softc->tx_rings[i].softc = softc;
492 		softc->tx_rings[i].idx = i;
493 		softc->tx_rings[i].id =
494 		    (softc->scctx->isc_nrxqsets * 2) + 1 + i;
495 		softc->tx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
496 			DB_PF_OFFSET_P5 : softc->tx_rings[i].id * 0x80;
497 		softc->tx_rings[i].ring_size = softc->scctx->isc_ntxd[1];
498 		softc->tx_rings[i].vaddr = vaddrs[i * ntxqs + 1];
499 		softc->tx_rings[i].paddr = paddrs[i * ntxqs + 1];
500 
501 		bnxt_create_tx_sysctls(softc, i);
502 
503 		if (BNXT_CHIP_P5(softc)) {
504 			/* Set up the Notification ring (NQ) */
505 			softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
506 			softc->nq_rings[i].ring.phys_id =
507 				(uint16_t)HWRM_NA_SIGNATURE;
508 			softc->nq_rings[i].ring.softc = softc;
509 			softc->nq_rings[i].ring.idx = i;
510 			softc->nq_rings[i].ring.id = i;
511 			softc->nq_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
512 				DB_PF_OFFSET_P5 : softc->nq_rings[i].ring.id * 0x80;
513 			softc->nq_rings[i].ring.ring_size = softc->scctx->isc_ntxd[2];
514 			softc->nq_rings[i].ring.vaddr = vaddrs[i * ntxqs + 2];
515 			softc->nq_rings[i].ring.paddr = paddrs[i * ntxqs + 2];
516 		}
517 	}
518 
519 	softc->ntxqsets = ntxqsets;
520 	return rc;
521 
522 dma_alloc_fail:
523 	for (i = i - 1; i >= 0; i--)
524 		iflib_dma_free(&softc->tx_stats[i]);
525 	free(softc->tx_rings, M_DEVBUF);
526 ring_alloc_fail:
527 	free(softc->tx_cp_rings, M_DEVBUF);
528 cp_alloc_fail:
529 	bnxt_nq_free(softc);
530 nq_alloc_fail:
531 	return rc;
532 }
533 
/*
 * iflib entry point: release all software queue state created by
 * bnxt_tx_queues_alloc() and bnxt_rx_queues_alloc(), plus the port
 * statistics DMA areas.
 *
 * NOTE(review): the per-RX-ring tpa_start buffers allocated in
 * bnxt_rx_queues_alloc() are not freed here — confirm they are
 * released elsewhere (e.g. on detach) or this is a leak.
 */
static void
bnxt_queues_free(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	int i;

	// Free TX queues
	for (i=0; i<softc->ntxqsets; i++)
		iflib_dma_free(&softc->tx_stats[i]);
	free(softc->tx_rings, M_DEVBUF);
	softc->tx_rings = NULL;
	free(softc->tx_cp_rings, M_DEVBUF);
	softc->tx_cp_rings = NULL;
	softc->ntxqsets = 0;

	// Free RX queues
	for (i=0; i<softc->nrxqsets; i++)
		iflib_dma_free(&softc->rx_stats[i]);
	iflib_dma_free(&softc->hw_tx_port_stats);
	iflib_dma_free(&softc->hw_rx_port_stats);
	iflib_dma_free(&softc->hw_tx_port_stats_ext);
	iflib_dma_free(&softc->hw_rx_port_stats_ext);
	free(softc->grp_info, M_DEVBUF);
	free(softc->ag_rings, M_DEVBUF);
	free(softc->rx_rings, M_DEVBUF);
	free(softc->rx_cp_rings, M_DEVBUF);
	bnxt_nq_free(softc);
}
562 
/*
 * iflib entry point: allocate software state for the RX queue sets.
 *
 * vaddrs/paddrs hold nrxqs ring addresses per queue set, used as
 * [0] RX completion ring, [1] RX ring, [2] aggregation (AG) ring.
 * Also allocates per-queue stats areas, the port-statistics DMA
 * buffers, and the default VNIC's multicast list, RSS hash key, and
 * RSS group table.  Returns 0 or an error, unwinding all partial
 * allocations through the goto chain at the bottom.
 *
 * NOTE(review): the TPA failure path sets rc = -ENOMEM while every
 * other path uses positive errnos — confirm callers treat both the
 * same.  Also, rc would be returned uninitialized if nrxqsets were 0
 * (never the case in practice with iflib).
 */
static int
bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct bnxt_softc *softc;
	int i;
	int rc;

	softc = iflib_get_softc(ctx);

	softc->rx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * nrxqsets,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!softc->rx_cp_rings) {
		device_printf(iflib_get_dev(ctx),
		    "unable to allocate RX completion rings\n");
		rc = ENOMEM;
		goto cp_alloc_fail;
	}
	softc->rx_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!softc->rx_rings) {
		device_printf(iflib_get_dev(ctx),
		    "unable to allocate RX rings\n");
		rc = ENOMEM;
		goto ring_alloc_fail;
	}
	softc->ag_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!softc->ag_rings) {
		device_printf(iflib_get_dev(ctx),
		    "unable to allocate aggregation rings\n");
		rc = ENOMEM;
		goto ag_alloc_fail;
	}
	softc->grp_info = malloc(sizeof(struct bnxt_grp_info) * nrxqsets,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!softc->grp_info) {
		device_printf(iflib_get_dev(ctx),
		    "unable to allocate ring groups\n");
		rc = ENOMEM;
		goto grp_alloc_fail;
	}

	/* Per-queue hardware stats contexts, DMA-visible to the NIC. */
	for (i=0; i < nrxqsets; i++) {
		rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
				&softc->rx_stats[i], 0);
		if (rc)
			goto hw_stats_alloc_fail;
		bus_dmamap_sync(softc->rx_stats[i].idi_tag, softc->rx_stats[i].idi_map,
				BUS_DMASYNC_PREREAD);
	}

/*
 * Additional 512 bytes for future expansion.
 * To prevent corruption when loaded with newer firmwares with added counters.
 * This can be deleted when there will be no further additions of counters.
 */
#define BNXT_PORT_STAT_PADDING  512

	rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats) + BNXT_PORT_STAT_PADDING,
	    &softc->hw_rx_port_stats, 0);
	if (rc)
		goto hw_port_rx_stats_alloc_fail;

	bus_dmamap_sync(softc->hw_rx_port_stats.idi_tag,
            softc->hw_rx_port_stats.idi_map, BUS_DMASYNC_PREREAD);


	rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats) + BNXT_PORT_STAT_PADDING,
	    &softc->hw_tx_port_stats, 0);
	if (rc)
		goto hw_port_tx_stats_alloc_fail;

	bus_dmamap_sync(softc->hw_tx_port_stats.idi_tag,
            softc->hw_tx_port_stats.idi_map, BUS_DMASYNC_PREREAD);

	softc->rx_port_stats = (void *) softc->hw_rx_port_stats.idi_vaddr;
	softc->tx_port_stats = (void *) softc->hw_tx_port_stats.idi_vaddr;

	/* Extended port statistics, reported by newer firmware. */
	rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats_ext),
		&softc->hw_rx_port_stats_ext, 0);
	if (rc)
		goto hw_port_rx_stats_ext_alloc_fail;

	bus_dmamap_sync(softc->hw_rx_port_stats_ext.idi_tag,
	    softc->hw_rx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);

	rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats_ext),
		&softc->hw_tx_port_stats_ext, 0);
	if (rc)
		goto hw_port_tx_stats_ext_alloc_fail;

	bus_dmamap_sync(softc->hw_tx_port_stats_ext.idi_tag,
	    softc->hw_tx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);

	softc->rx_port_stats_ext = (void *) softc->hw_rx_port_stats_ext.idi_vaddr;
	softc->tx_port_stats_ext = (void *) softc->hw_tx_port_stats_ext.idi_vaddr;

	for (i = 0; i < nrxqsets; i++) {
		/* Allocate the completion ring */
		softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
		softc->rx_cp_rings[i].ring.phys_id =
		    (uint16_t)HWRM_NA_SIGNATURE;
		softc->rx_cp_rings[i].ring.softc = softc;
		softc->rx_cp_rings[i].ring.idx = i;
		/* Logical ring ID 0 is the default CP ring; RX starts at 1. */
		softc->rx_cp_rings[i].ring.id = i + 1;
		/* P5 uses one fixed PF doorbell; legacy chips use 0x80/ring. */
		softc->rx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
			DB_PF_OFFSET_P5 : softc->rx_cp_rings[i].ring.id * 0x80;
		/*
		 * If this ring overflows, RX stops working.
		 */
		softc->rx_cp_rings[i].ring.ring_size =
		    softc->scctx->isc_nrxd[0];
		softc->rx_cp_rings[i].ring.vaddr = vaddrs[i * nrxqs];
		softc->rx_cp_rings[i].ring.paddr = paddrs[i * nrxqs];

		/* Allocate the RX ring */
		softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
		softc->rx_rings[i].softc = softc;
		softc->rx_rings[i].idx = i;
		softc->rx_rings[i].id = i + 1;
		softc->rx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
			DB_PF_OFFSET_P5 : softc->rx_rings[i].id * 0x80;
		softc->rx_rings[i].ring_size = softc->scctx->isc_nrxd[1];
		softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1];
		softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1];

		/* Allocate the TPA start buffer */
		softc->rx_rings[i].tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) *
	    		(RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT),
	    		M_DEVBUF, M_NOWAIT | M_ZERO);
		if (softc->rx_rings[i].tpa_start == NULL) {
			rc = -ENOMEM;
			device_printf(softc->dev,
					"Unable to allocate space for TPA\n");
			goto tpa_alloc_fail;
		}
		/* Allocate the AG ring */
		softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
		softc->ag_rings[i].softc = softc;
		softc->ag_rings[i].idx = i;
		/* AG ring IDs follow all the RX ring IDs. */
		softc->ag_rings[i].id = nrxqsets + i + 1;
		softc->ag_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
			DB_PF_OFFSET_P5 : softc->ag_rings[i].id * 0x80;
		softc->ag_rings[i].ring_size = softc->scctx->isc_nrxd[2];
		softc->ag_rings[i].vaddr = vaddrs[i * nrxqs + 2];
		softc->ag_rings[i].paddr = paddrs[i * nrxqs + 2];

		/* Allocate the ring group */
		softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
		softc->grp_info[i].stats_ctx =
		    softc->rx_cp_rings[i].stats_ctx_id;
		softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
		softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
		softc->grp_info[i].cp_ring_id =
		    softc->rx_cp_rings[i].ring.phys_id;

		bnxt_create_rx_sysctls(softc, i);
	}

	/*
	 * When SR-IOV is enabled, avoid each VF sending PORT_QSTATS
	 * HWRM every sec with which firmware timeouts can happen
	 */
	if (BNXT_PF(softc))
		bnxt_create_port_stats_sysctls(softc);

	/* And finally, the VNIC */
	softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
	softc->vnic_info.filter_id = -1;
	softc->vnic_info.def_ring_grp = (uint16_t)HWRM_NA_SIGNATURE;
	softc->vnic_info.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	softc->vnic_info.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	softc->vnic_info.rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
		HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
	softc->vnic_info.mc_list_count = 0;
	softc->vnic_info.flags = BNXT_VNIC_FLAG_DEFAULT;
	rc = iflib_dma_alloc(ctx, BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN,
	    &softc->vnic_info.mc_list, 0);
	if (rc)
		goto mc_list_alloc_fail;

	/* The VNIC RSS Hash Key */
	rc = iflib_dma_alloc(ctx, HW_HASH_KEY_SIZE,
	    &softc->vnic_info.rss_hash_key_tbl, 0);
	if (rc)
		goto rss_hash_alloc_fail;
	bus_dmamap_sync(softc->vnic_info.rss_hash_key_tbl.idi_tag,
	    softc->vnic_info.rss_hash_key_tbl.idi_map,
	    BUS_DMASYNC_PREWRITE);
	memcpy(softc->vnic_info.rss_hash_key_tbl.idi_vaddr,
	    softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE);

	/* Allocate the RSS tables */
	rc = iflib_dma_alloc(ctx, HW_HASH_INDEX_SIZE * sizeof(uint16_t),
	    &softc->vnic_info.rss_grp_tbl, 0);
	if (rc)
		goto rss_grp_alloc_fail;
	bus_dmamap_sync(softc->vnic_info.rss_grp_tbl.idi_tag,
	    softc->vnic_info.rss_grp_tbl.idi_map,
	    BUS_DMASYNC_PREWRITE);
	/* 0xff marks every RSS table slot as unpopulated. */
	memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
	    softc->vnic_info.rss_grp_tbl.idi_size);

	softc->nrxqsets = nrxqsets;
	return rc;

	/* Unwind chain: each label frees what was allocated before it. */
rss_grp_alloc_fail:
	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
rss_hash_alloc_fail:
	iflib_dma_free(&softc->vnic_info.mc_list);
mc_list_alloc_fail:
	for (i = i - 1; i >= 0; i--) {
		if (softc->rx_rings[i].tpa_start)
			free(softc->rx_rings[i].tpa_start, M_DEVBUF);
	}
tpa_alloc_fail:
	iflib_dma_free(&softc->hw_tx_port_stats_ext);
hw_port_tx_stats_ext_alloc_fail:
	iflib_dma_free(&softc->hw_rx_port_stats_ext);
hw_port_rx_stats_ext_alloc_fail:
	iflib_dma_free(&softc->hw_tx_port_stats);
hw_port_tx_stats_alloc_fail:
	iflib_dma_free(&softc->hw_rx_port_stats);
hw_port_rx_stats_alloc_fail:
	for (i=0; i < nrxqsets; i++) {
		if (softc->rx_stats[i].idi_vaddr)
			iflib_dma_free(&softc->rx_stats[i]);
	}
hw_stats_alloc_fail:
	free(softc->grp_info, M_DEVBUF);
grp_alloc_fail:
	free(softc->ag_rings, M_DEVBUF);
ag_alloc_fail:
	free(softc->rx_rings, M_DEVBUF);
ring_alloc_fail:
	free(softc->rx_cp_rings, M_DEVBUF);
cp_alloc_fail:
	return rc;
}
804 
bnxt_free_hwrm_short_cmd_req(struct bnxt_softc * softc)805 static void bnxt_free_hwrm_short_cmd_req(struct bnxt_softc *softc)
806 {
807 	if (softc->hwrm_short_cmd_req_addr.idi_vaddr)
808 		iflib_dma_free(&softc->hwrm_short_cmd_req_addr);
809 	softc->hwrm_short_cmd_req_addr.idi_vaddr = NULL;
810 }
811 
bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc * softc)812 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc)
813 {
814 	int rc;
815 
816 	rc = iflib_dma_alloc(softc->ctx, softc->hwrm_max_req_len,
817 	    &softc->hwrm_short_cmd_req_addr, BUS_DMA_NOWAIT);
818 
819 	return rc;
820 }
821 
bnxt_free_ring(struct bnxt_softc * softc,struct bnxt_ring_mem_info * rmem)822 static void bnxt_free_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
823 {
824 	int i;
825 
826 	for (i = 0; i < rmem->nr_pages; i++) {
827 		if (!rmem->pg_arr[i].idi_vaddr)
828 			continue;
829 
830 		iflib_dma_free(&rmem->pg_arr[i]);
831 		rmem->pg_arr[i].idi_vaddr = NULL;
832 	}
833 	if (rmem->pg_tbl.idi_vaddr) {
834 		iflib_dma_free(&rmem->pg_tbl);
835 		rmem->pg_tbl.idi_vaddr = NULL;
836 
837 	}
838 	if (rmem->vmem_size && *rmem->vmem) {
839 		free(*rmem->vmem, M_DEVBUF);
840 		*rmem->vmem = NULL;
841 	}
842 }
843 
bnxt_init_ctx_mem(struct bnxt_ctx_mem_type * ctxm,void * p,int len)844 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
845 {
846 	u8 init_val = ctxm->init_value;
847 	u16 offset = ctxm->init_offset;
848 	u8 *p2 = p;
849 	int i;
850 
851 	if (!init_val)
852 		return;
853 	if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
854 		memset(p, init_val, len);
855 		return;
856 	}
857 	for (i = 0; i < len; i += ctxm->entry_size)
858 		*(p2 + i + offset) = init_val;
859 }
860 
/*
 * Allocate the DMA backing pages for a ring and, when the ring spans
 * multiple pages (or uses an indirection level), the page-directory
 * table whose entries point at them.  Optionally allocates a host-side
 * shadow area (vmem).  On failure, pages allocated so far are left in
 * place; the caller unwinds with bnxt_free_ring().
 */
static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
{
	uint64_t valid_bit = 0;
	int i;
	int rc;

	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;

	/* A page directory is needed when the ring doesn't fit one page. */
	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl.idi_vaddr) {
		/* 8 bytes per page-table entry */
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;

		rc = iflib_dma_alloc(softc->ctx, pg_tbl_size, &rmem->pg_tbl, 0);
		if (rc)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		uint64_t extra_bits = valid_bit;
		uint64_t *ptr;

		rc = iflib_dma_alloc(softc->ctx, rmem->page_size, &rmem->pg_arr[i], 0);
		if (rc)
			return -ENOMEM;

		/* Fill the page with the FW-required init pattern, if any. */
		if (rmem->ctx_mem)
			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i].idi_vaddr,
					rmem->page_size);

		if (rmem->nr_pages > 1 || rmem->depth > 0) {
			/* Ring PTEs tag the last two entries for wrap-around. */
			if (i == rmem->nr_pages - 2 &&
					(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
					(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;

			ptr = (void *) rmem->pg_tbl.idi_vaddr;
			ptr[i]  = htole64(rmem->pg_arr[i].idi_paddr | extra_bits);
		}
	}

	if (rmem->vmem_size) {
		*rmem->vmem = malloc(rmem->vmem_size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
	return 0;
}
913 
914 
/*
 * Backing-store context types that are always enabled when configuring
 * firmware context memory: QP, SRQ, CQ, VNIC and STAT.  The TQM, MRAV
 * and TIM enables are OR'ed in separately as applicable.
 */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES		\
	(HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |		\
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ |	\
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ |		\
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC |	\
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
921 
bnxt_alloc_ctx_mem_blk(struct bnxt_softc * softc,struct bnxt_ctx_pg_info * ctx_pg)922 static int bnxt_alloc_ctx_mem_blk(struct bnxt_softc *softc,
923 				  struct bnxt_ctx_pg_info *ctx_pg)
924 {
925 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
926 
927 	rmem->page_size = BNXT_PAGE_SIZE;
928 	rmem->pg_arr = ctx_pg->ctx_arr;
929 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
930 	if (rmem->depth >= 1)
931 		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
932 
933 	return bnxt_alloc_ring(softc, rmem);
934 }
935 
/*
 * Allocate the page tables for one context-memory instance of @mem_size
 * bytes.  Regions that fit within MAX_CTX_PAGES (and depth <= 1) use a
 * single level of indirection; larger regions get a two-level layout
 * with one child bnxt_ctx_pg_info per second-level table.  A partial
 * allocation is left for the caller to unwind via
 * bnxt_free_ctx_pg_tbls().
 */
static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
	int rc;

	if (!mem_size)
		return -EINVAL;

	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
		ctx_pg->nr_pages = 0;
		return -EINVAL;
	}
	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
		int nr_tbls, i;

		rmem->depth = 2;
		/* Array of MAX_CTX_PAGES pointers; sizeof(ctx_pg) is the
		 * pointer size here, which is the intended element size.
		 */
		ctx_pg->ctx_pg_tbl = kzalloc(MAX_CTX_PAGES * sizeof(ctx_pg),
					      GFP_KERNEL);
		if (!ctx_pg->ctx_pg_tbl)
			return -ENOMEM;
		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
		rmem->nr_pages = nr_tbls;
		/* First level: one page per second-level table. */
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
		if (rc)
			return rc;
		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
			if (!pg_tbl)
				return -ENOMEM;
			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
			rmem = &pg_tbl->ring_mem;
			/* The parent's i-th page doubles as this child's
			 * page-directory table.
			 */
			memcpy(&rmem->pg_tbl, &ctx_pg->ctx_arr[i], sizeof(struct iflib_dma_info));
			rmem->depth = 1;
			rmem->nr_pages = MAX_CTX_PAGES;
			rmem->ctx_mem = ctxm;
			if (i == (nr_tbls - 1)) {
				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;

				if (rem)
					rmem->nr_pages = rem;
			}
			rc = bnxt_alloc_ctx_mem_blk(softc, pg_tbl);
			if (rc)
				break;
		}
	} else {
		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
		if (rmem->nr_pages > 1 || depth)
			rmem->depth = 1;
		rmem->ctx_mem = ctxm;
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
	}
	return rc;
}
995 
/*
 * Free one context-memory instance, handling both single-level and
 * two-level layouts.  For two-level layouts each child table is freed
 * first; the parent's page-array entries are cleared because those
 * pages were aliased as the children's page-directory tables and have
 * already been released via the child free.
 */
static void bnxt_free_ctx_pg_tbls(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
	    ctx_pg->ctx_pg_tbl) {
		int i, nr_tbls = rmem->nr_pages;

		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;
			struct bnxt_ring_mem_info *rmem2;

			pg_tbl = ctx_pg->ctx_pg_tbl[i];
			if (!pg_tbl)
				continue;
			rmem2 = &pg_tbl->ring_mem;
			bnxt_free_ring(softc, rmem2);
			ctx_pg->ctx_arr[i].idi_vaddr = NULL;
			free(pg_tbl , M_DEVBUF);
			ctx_pg->ctx_pg_tbl[i] = NULL;
		}
		kfree(ctx_pg->ctx_pg_tbl);
		ctx_pg->ctx_pg_tbl = NULL;
	}
	/* Free the top-level pages (or the whole ring for depth <= 1). */
	bnxt_free_ring(softc, rmem);
	ctx_pg->nr_pages = 0;
}
1024 
/*
 * Size and allocate the page tables for one context type.  @entries is
 * rounded up to the FW-required multiple and clamped to the type's
 * [min, max] range; when the type has multiple instances
 * (instance_bmap), each instance is allocated the same entry count.
 * Returns 0 or the first allocation error.
 */
static int bnxt_setup_ctxm_pg_tbls(struct bnxt_softc *softc,
				   struct bnxt_ctx_mem_type *ctxm, u32 entries,
				   u8 pg_lvl)
{
	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
	int i, rc = 0, n = 1;
	u32 mem_size;

	if (!ctxm->entry_size || !ctx_pg)
		return -EINVAL;
	if (ctxm->instance_bmap)
		n = hweight32(ctxm->instance_bmap);
	if (ctxm->entry_multiple)
		entries = roundup(entries, ctxm->entry_multiple);
	entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
	mem_size = entries * ctxm->entry_size;
	for (i = 0; i < n && !rc; i++) {
		ctx_pg[i].entries = entries;
		rc = bnxt_alloc_ctx_pg_tbls(softc, &ctx_pg[i], mem_size, pg_lvl,
					    ctxm->init_value ? ctxm : NULL);
	}
	return rc;
}
1048 
bnxt_free_ctx_mem(struct bnxt_softc * softc)1049 static void bnxt_free_ctx_mem(struct bnxt_softc *softc)
1050 {
1051 	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
1052 	u16 type;
1053 
1054 	if (!ctx)
1055 		return;
1056 
1057 	for (type = 0; type < BNXT_CTX_MAX; type++) {
1058 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
1059 		struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
1060 		int i, n = 1;
1061 
1062 		if (!ctx_pg)
1063 			continue;
1064 		if (ctxm->instance_bmap)
1065 			n = hweight32(ctxm->instance_bmap);
1066 		for (i = 0; i < n; i++)
1067 			bnxt_free_ctx_pg_tbls(softc, &ctx_pg[i]);
1068 
1069 		kfree(ctx_pg);
1070 		ctxm->pg_info = NULL;
1071 	}
1072 
1073 	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
1074 	kfree(ctx);
1075 	softc->ctx_mem = NULL;
1076 }
1077 
bnxt_alloc_ctx_mem(struct bnxt_softc * softc)1078 static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
1079 {
1080 	struct bnxt_ctx_pg_info *ctx_pg;
1081 	struct bnxt_ctx_mem_type *ctxm;
1082 	struct bnxt_ctx_mem_info *ctx;
1083 	u32 l2_qps, qp1_qps, max_qps;
1084 	u32 ena, entries_sp, entries;
1085 	u32 srqs, max_srqs, min;
1086 	u32 num_mr, num_ah;
1087 	u32 extra_srqs = 0;
1088 	u32 extra_qps = 0;
1089 	u8 pg_lvl = 1;
1090 	int i, rc;
1091 
1092 	if (!BNXT_CHIP_P5(softc))
1093 		return 0;
1094 
1095 	rc = bnxt_hwrm_func_backing_store_qcaps(softc);
1096 	if (rc) {
1097 		device_printf(softc->dev, "Failed querying context mem capability, rc = %d.\n",
1098 			   rc);
1099 		return rc;
1100 	}
1101 	ctx = softc->ctx_mem;
1102 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
1103 		return 0;
1104 
1105 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
1106 	l2_qps = ctxm->qp_l2_entries;
1107 	qp1_qps = ctxm->qp_qp1_entries;
1108 	max_qps = ctxm->max_entries;
1109 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
1110 	srqs = ctxm->srq_l2_entries;
1111 	max_srqs = ctxm->max_entries;
1112 	if (softc->flags & BNXT_FLAG_ROCE_CAP) {
1113 		pg_lvl = 2;
1114 		extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
1115 		extra_srqs = min_t(u32, 8192, max_srqs - srqs);
1116 	}
1117 
1118 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
1119 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps,
1120 				     pg_lvl);
1121 	if (rc)
1122 		return rc;
1123 
1124 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
1125 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, srqs + extra_srqs, pg_lvl);
1126 	if (rc)
1127 		return rc;
1128 
1129 	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
1130 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->cq_l2_entries +
1131 				     extra_qps * 2, pg_lvl);
1132 	if (rc)
1133 		return rc;
1134 
1135 	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
1136 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
1137 	if (rc)
1138 		return rc;
1139 
1140 	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
1141 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
1142 	if (rc)
1143 		return rc;
1144 
1145 	ena = 0;
1146 	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
1147 		goto skip_rdma;
1148 
1149 	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
1150 	ctx_pg = ctxm->pg_info;
1151 	/* 128K extra is needed to accomodate static AH context
1152 	 * allocation by f/w.
1153 	 */
1154 	num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
1155 	num_ah = min_t(u32, num_mr, 1024 * 128);
1156 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, num_mr + num_ah, 2);
1157 	if (rc)
1158 		return rc;
1159 	ctx_pg->entries = num_mr + num_ah;
1160 	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV;
1161 	if (ctxm->mrav_num_entries_units)
1162 		ctx_pg->entries =
1163 			((num_mr / ctxm->mrav_num_entries_units) << 16) |
1164 			 (num_ah / ctxm->mrav_num_entries_units);
1165 
1166 	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
1167 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps, 1);
1168 	if (rc)
1169 		return rc;
1170 	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM;
1171 
1172 skip_rdma:
1173 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
1174 	min = ctxm->min_entries;
1175 	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
1176 		     2 * (extra_qps + qp1_qps) + min;
1177 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries_sp, 2);
1178 		if (rc)
1179 			return rc;
1180 
1181 	ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
1182 	entries = l2_qps + 2 * (extra_qps + qp1_qps);
1183 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries, 2);
1184 	if (rc)
1185 		return rc;
1186 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
1187 		if (i < BNXT_MAX_TQM_LEGACY_RINGS)
1188 			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
1189 		else
1190 			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8;
1191 	}
1192 	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
1193 
1194 	rc = bnxt_hwrm_func_backing_store_cfg(softc, ena);
1195 	if (rc) {
1196 		device_printf(softc->dev, "Failed configuring context mem, rc = %d.\n",
1197 			   rc);
1198 		return rc;
1199 	}
1200 	ctx->flags |= BNXT_CTX_FLAG_INITED;
1201 
1202 	return 0;
1203 }
1204 
1205 /*
1206  * If we update the index, a write barrier is needed after the write to ensure
1207  * the completion ring has space before the RX/TX ring does.  Since we can't
1208  * make the RX and AG doorbells covered by the same barrier without remapping
1209  * MSI-X vectors, we create the barrier over the enture doorbell bar.
1210  * TODO: Remap the MSI-X vectors to allow a barrier to only cover the doorbells
1211  *       for a single ring group.
1212  *
1213  * A barrier of just the size of the write is used to ensure the ordering
1214  * remains correct and no writes are lost.
1215  */
1216 
/* Ring the RX doorbell (pre-Thor chips) with producer index @idx. */
static void bnxt_cuw_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = db_ptr;
	struct bnxt_bar_info *bar = &ring->softc->doorbell_bar;
	uint32_t db_val = htole32(RX_DOORBELL_KEY_RX | idx);

	/* Order prior descriptor writes ahead of the doorbell write. */
	bus_space_barrier(bar->tag, bar->handle, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(bar->tag, bar->handle, ring->doorbell, db_val);
}
1227 
/* Ring the TX doorbell (pre-Thor chips) with producer index @idx. */
static void bnxt_cuw_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = db_ptr;
	struct bnxt_bar_info *bar = &ring->softc->doorbell_bar;
	uint32_t db_val = htole32(TX_DOORBELL_KEY_TX | idx);

	/* Order prior descriptor writes ahead of the doorbell write. */
	bus_space_barrier(bar->tag, bar->handle, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(bar->tag, bar->handle, ring->doorbell, db_val);
}
1238 
/*
 * Ring the completion-ring doorbell (pre-Thor chips).  Reports the
 * current consumer index — unless it is still at its UINT32_MAX reset
 * value — and masks or unmasks the completion interrupt per
 * @enable_irq.  The trailing barrier spans the whole doorbell bar so
 * the CQ update is ordered before any subsequent RX/TX doorbell.
 */
static void bnxt_cuw_db_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 4,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole32(CMPL_DOORBELL_KEY_CMPL |
				((cpr->cons == UINT32_MAX) ? 0 :
				 (cpr->cons | CMPL_DOORBELL_IDX_VALID)) |
				((enable_irq) ? 0 : CMPL_DOORBELL_MASK)));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1254 
/* Ring the RX doorbell on Thor (P5) chips with producer index @idx. */
static void bnxt_thor_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = db_ptr;
	struct bnxt_bar_info *bar = &ring->softc->doorbell_bar;
	uint64_t db_val;

	db_val = (DBR_PATH_L2 | DBR_TYPE_SRQ | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT);

	/* Order prior descriptor writes ahead of the doorbell write. */
	bus_space_barrier(bar->tag, bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(bar->tag, bar->handle, ring->doorbell,
	    htole64(db_val));
}
1266 
/* Ring the TX doorbell on Thor (P5) chips with producer index @idx. */
static void bnxt_thor_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = db_ptr;
	struct bnxt_bar_info *bar = &ring->softc->doorbell_bar;
	uint64_t db_val;

	db_val = (DBR_PATH_L2 | DBR_TYPE_SQ | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT);

	/* Order prior descriptor writes ahead of the doorbell write. */
	bus_space_barrier(bar->tag, bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(bar->tag, bar->handle, ring->doorbell,
	    htole64(db_val));
}
1278 
/*
 * Ring the RX completion-queue doorbell on Thor (P5) chips.  Reports
 * the next consumer index (0 if the ring is still at its UINT32_MAX
 * reset state) and arms or disarms the CQ interrupt per @enable_irq.
 */
static void bnxt_thor_db_rx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	if (cons == UINT32_MAX)
		cons = 0;
	else
		cons = RING_NEXT(&cpr->ring, cons);

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	/* The 64-bit doorbell message is written as a single 8-byte store. */
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	/* Barrier over the whole bar orders this ahead of later doorbells. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1304 
/*
 * Ring the TX completion-queue doorbell on Thor (P5) chips.  Reports
 * the current consumer index and arms or disarms the CQ interrupt per
 * @enable_irq.
 *
 * NOTE(review): unlike bnxt_thor_db_rx_cq(), cons is used as-is here
 * (no UINT32_MAX check, no RING_NEXT advance) — confirm this asymmetry
 * is intentional.
 */
static void bnxt_thor_db_tx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1325 
/*
 * Ring the notification-queue (NQ) doorbell on Thor (P5) chips.
 * Reports the current consumer index and arms or disarms the NQ
 * interrupt per @enable_irq.
 */
static void bnxt_thor_db_nq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_NQ_ARM: DBC_DBC_TYPE_NQ);

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1346 
/*
 * Look up a registered bnxt softc either by interface name (when
 * @dev_name is non-NULL) or by PCI domain/bus/device-function.
 * Returns NULL if no match is found.
 */
struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn, char *dev_name)
{
	struct bnxt_softc_list *entry;

	SLIST_FOREACH(entry, &pf_list, next) {
		struct bnxt_softc *sc = entry->softc;

		if (dev_name != NULL) {
			/* Name-based lookup. */
			if (strncmp(dev_name,
			    if_name(iflib_get_ifp(sc->ctx)),
			    BNXT_MAX_STR) == 0)
				return (sc);
			continue;
		}

		/* Domain/bus/device-function lookup. */
		if (domain == sc->domain && bus == sc->bus &&
		    dev_fn == sc->dev_fn)
			return (sc);
	}

	return (NULL);
}
1368 
1369 
bnxt_verify_asym_queues(struct bnxt_softc * softc)1370 static void bnxt_verify_asym_queues(struct bnxt_softc *softc)
1371 {
1372 	uint8_t i, lltc = 0;
1373 
1374 	if (!softc->max_lltc)
1375 		return;
1376 
1377 	/* Verify that lossless TX and RX queues are in the same index */
1378 	for (i = 0; i < softc->max_tc; i++) {
1379 		if (BNXT_LLQ(softc->tx_q_info[i].queue_profile) &&
1380 		    BNXT_LLQ(softc->rx_q_info[i].queue_profile))
1381 			lltc++;
1382 	}
1383 	softc->max_lltc = min(softc->max_lltc, lltc);
1384 }
1385 
/*
 * Probe whether firmware is responsive by issuing a minimal
 * HWRM_VER_GET.  Returns 0 when FW answers and reports ready, -EAGAIN
 * when it answers but flags itself not ready, or the messaging error.
 */
static int bnxt_hwrm_poll(struct bnxt_softc *bp)
{
	struct hwrm_ver_get_output	*resp =
	    (void *)bp->hwrm_cmd_resp.idi_vaddr;
	struct hwrm_ver_get_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = _hwrm_send_message(bp, &req, sizeof(req));
	if (rc)
		return rc;

	if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
		rc = -EAGAIN;

	return rc;
}
1408 
/* Acquire rtnl from sp-task context without deadlocking against close. */
static void bnxt_rtnl_lock_sp(struct bnxt_softc *bp)
{
	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
	 * set.  If the device is being closed, bnxt_close() may be holding
	 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
	 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
	 */
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_lock();
}
1419 
/* Counterpart of bnxt_rtnl_lock_sp(): re-mark sp-task active, drop rtnl. */
static void bnxt_rtnl_unlock_sp(struct bnxt_softc *bp)
{
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_unlock();
}
1425 
/*
 * Quiesce the device after a fatal firmware error: stop interrupt
 * delivery and disable the PCI device to prevent further DMA.
 */
static void bnxt_fw_fatal_close(struct bnxt_softc *softc)
{
	bnxt_disable_intr(softc->ctx);
	if (pci_is_enabled(softc->pdev))
		pci_disable_device(softc->pdev);
}
1432 
/*
 * Read one of the firmware health registers.  The register descriptor
 * encodes both the access type (PCI config space, GRC window, BAR0,
 * BAR1) and the offset; GRC registers are read through the offset
 * pre-mapped by bnxt_map_fw_health_regs().  The reset-in-progress
 * register is additionally masked per the FW-provided mask.
 */
static u32 bnxt_fw_health_readl(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->regs[reg_idx];
	u32 reg_type, reg_off, val = 0;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_read_config_dword(bp->pdev, reg_off, &val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		/* Redirect to the pre-mapped window offset, then read BAR0. */
		reg_off = fw_health->mapped_regs[reg_idx];
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		val = readl_fbsd(bp, reg_off, 0);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		val = readl_fbsd(bp, reg_off, 2);
		break;
	}
	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
		val &= fw_health->fw_reset_inprog_reg_mask;
	return val;
}
1459 
/*
 * Shut the device down in preparation for a firmware reset: stop the
 * ULP, quiesce on fatal errors, stop iflib, unregister the driver from
 * FW, release per-queue interrupts, disable the PCI device, and free
 * the backing-store context memory.
 */
static void bnxt_fw_reset_close(struct bnxt_softc *bp)
{
	int i;
	bnxt_ulp_stop(bp);
	/* When firmware is in fatal state, quiesce device and disable
	 * bus master to prevent any potential bad DMAs before freeing
	 * kernel memory.
	 */
	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
		u16 val = 0;

		val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
		/* All-ones means config space is gone (e.g. surprise
		 * removal); skip the minimum-delay wait in that case.
		 */
		if (val == 0xffff) {
			bp->fw_reset_min_dsecs = 0;
		}
		bnxt_fw_fatal_close(bp);
	}

	iflib_request_reset(bp->ctx);
	bnxt_stop(bp->ctx);
	bnxt_hwrm_func_drv_unrgtr(bp, false);

	/* P5 chips take interrupts on NQs; older chips on RX CP rings. */
	for (i = bp->nrxqsets-1; i>=0; i--) {
		if (BNXT_CHIP_P5(bp))
			iflib_irq_free(bp->ctx, &bp->nq_rings[i].irq);
		else
			iflib_irq_free(bp->ctx, &bp->rx_cp_rings[i].irq);

	}
	if (pci_is_enabled(bp->pdev))
		pci_disable_device(bp->pdev);
	pci_disable_busmaster(bp->dev);
	bnxt_free_ctx_mem(bp);
}
1494 
is_bnxt_fw_ok(struct bnxt_softc * bp)1495 static bool is_bnxt_fw_ok(struct bnxt_softc *bp)
1496 {
1497 	struct bnxt_fw_health *fw_health = bp->fw_health;
1498 	bool no_heartbeat = false, has_reset = false;
1499 	u32 val;
1500 
1501 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
1502 	if (val == fw_health->last_fw_heartbeat)
1503 		no_heartbeat = true;
1504 
1505 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
1506 	if (val != fw_health->last_fw_reset_cnt)
1507 		has_reset = true;
1508 
1509 	if (!no_heartbeat && has_reset)
1510 		return true;
1511 
1512 	return false;
1513 }
1514 
/*
 * Begin the firmware reset sequence: close the device and schedule the
 * fw_reset_task state machine.  Runs under rtnl (via the sp-task
 * helpers) and is a no-op unless the device is open and a reset is not
 * already in progress.
 */
void bnxt_fw_reset(struct bnxt_softc *bp)
{
	bnxt_rtnl_lock_sp(bp);
	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		int tmo;
		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_fw_reset_close(bp);

		/* Pick the first polling state and delay (in jiffies)
		 * depending on whether FW supports recover-reload.
		 */
		if ((bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)) {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
			tmo = HZ / 10;
		} else {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			tmo = bp->fw_reset_min_dsecs * HZ /10;
		}
		bnxt_queue_fw_reset_work(bp, tmo);
	}
	bnxt_rtnl_unlock_sp(bp);
}
1535 
bnxt_queue_fw_reset_work(struct bnxt_softc * bp,unsigned long delay)1536 static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay)
1537 {
1538 	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1539 		return;
1540 
1541 	if (BNXT_PF(bp))
1542 		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1543 	else
1544 		schedule_delayed_work(&bp->fw_reset_task, delay);
1545 }
1546 
bnxt_queue_sp_work(struct bnxt_softc * bp)1547 void bnxt_queue_sp_work(struct bnxt_softc *bp)
1548 {
1549 	if (BNXT_PF(bp))
1550 		queue_work(bnxt_pf_wq, &bp->sp_task);
1551 	else
1552 		schedule_work(&bp->sp_task);
1553 }
1554 
/*
 * Perform step @reg_idx of the FW-provided reset sequence: write the
 * value to the register (config space, GRC via window, BAR0, or BAR1 —
 * encoded in the register descriptor) and honor the per-step delay.
 */
static void bnxt_fw_reset_writel(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
	u32 reg_type, reg_off, delay_msecs;

	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_write_config_dword(bp->pdev, reg_off, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		/* Point the GRC window at the target, then write via BAR0. */
		writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4, 0, reg_off & BNXT_GRC_BASE_MASK);
		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		writel_fbsd(bp, reg_off, 0, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		writel_fbsd(bp, reg_off, 2, val);
		break;
	}
	if (delay_msecs) {
		/* Config-space read — presumably flushes the posted write
		 * before the delay; TODO confirm.
		 */
		pci_read_config_dword(bp->pdev, 0, &val);
		msleep(delay_msecs);
	}
}
1585 
/*
 * Trigger a full firmware reset using whichever mechanism FW supports:
 * recover-reload needs no action here, host-driven resets replay the
 * FW-provided register write sequence, and co-CPU resets issue an
 * HWRM_FW_RESET to the Kong channel.  Records the reset timestamp.
 */
static void bnxt_reset_all(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	int i, rc;

	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
		bp->fw_reset_timestamp = jiffies;
		return;
	}

	if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST) {
		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
			bnxt_fw_reset_writel(bp, i);
	} else if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) {
		struct hwrm_fw_reset_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET);
		req.target_id = htole16(HWRM_TARGET_ID_KONG);
		req.embedded_proc_type = HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
		req.selfrst_status = HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
		req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
		rc = hwrm_send_message(bp, &req, sizeof(req));

		/* NOTE(review): this warns for any rc other than -ENODEV,
		 * including rc == 0 — confirm that is intentional.
		 */
		if (rc != -ENODEV)
			device_printf(bp->dev, "Unable to reset FW rc=%d\n", rc);
	}
	bp->fw_reset_timestamp = jiffies;
}
1614 
__bnxt_alloc_fw_health(struct bnxt_softc * bp)1615 static int __bnxt_alloc_fw_health(struct bnxt_softc *bp)
1616 {
1617 	if (bp->fw_health)
1618 		return 0;
1619 
1620 	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
1621 	if (!bp->fw_health)
1622 		return -ENOMEM;
1623 
1624 	mutex_init(&bp->fw_health->lock);
1625 	return 0;
1626 }
1627 
bnxt_alloc_fw_health(struct bnxt_softc * bp)1628 static int bnxt_alloc_fw_health(struct bnxt_softc *bp)
1629 {
1630 	int rc;
1631 
1632 	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
1633 	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
1634 		return 0;
1635 
1636 	rc = __bnxt_alloc_fw_health(bp);
1637 	if (rc) {
1638 		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
1639 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
1640 		return rc;
1641 	}
1642 
1643 	return 0;
1644 }
1645 
/* Point the FW-health GRC window at the 4K-aligned base of @reg. */
static inline void __bnxt_map_fw_health_reg(struct bnxt_softc *bp, u32 reg)
{
	writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + BNXT_FW_HEALTH_WIN_MAP_OFF, 0, reg & BNXT_GRC_BASE_MASK);
}
1650 
/*
 * Pre-map the GRC-type FW health registers through a single GRC window
 * so they can later be read without reprogramming the window.  All GRC
 * registers must share the same window base; -ERANGE otherwise.  Marks
 * status/reset counters reliable once mapping succeeds.
 */
static int bnxt_map_fw_health_regs(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg_base = 0xffffffff;
	int i;

	bp->fw_health->status_reliable = false;
	bp->fw_health->resets_reliable = false;
	/* Only pre-map the monitoring GRC registers using window 3 */
	for (i = 0; i < 4; i++) {
		u32 reg = fw_health->regs[i];

		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
			continue;
		if (reg_base == 0xffffffff)
			reg_base = reg & BNXT_GRC_BASE_MASK;
		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
			return -ERANGE;
		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
	}
	bp->fw_health->status_reliable = true;
	bp->fw_health->resets_reliable = true;
	/* No GRC registers at all: nothing to map. */
	if (reg_base == 0xffffffff)
		return 0;

	__bnxt_map_fw_health_reg(bp, reg_base);
	return 0;
}
1679 
bnxt_inv_fw_health_reg(struct bnxt_softc * bp)1680 static void bnxt_inv_fw_health_reg(struct bnxt_softc *bp)
1681 {
1682 	struct bnxt_fw_health *fw_health = bp->fw_health;
1683 	u32 reg_type;
1684 
1685 	if (!fw_health)
1686 		return;
1687 
1688 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
1689 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
1690 		fw_health->status_reliable = false;
1691 
1692 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
1693 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
1694 		fw_health->resets_reliable = false;
1695 }
1696 
/*
 * Query the firmware's error-recovery configuration and cache it in the
 * fw_health state: polling intervals, wait periods, the health/
 * heartbeat/reset-count register descriptors, and the host-driven reset
 * write sequence.  Also maps the GRC health registers.  On any failure
 * the ERROR_RECOVERY capability bit is cleared.
 */
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	struct hwrm_error_recovery_qcfg_output *resp =
	    (void *)bp->hwrm_cmd_resp.idi_vaddr;
	struct hwrm_error_recovery_qcfg_input req = {0};
	int rc, i;

	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG);
	rc = _hwrm_send_message(bp, &req, sizeof(req));

	if (rc)
		goto err_recovery_out;
	fw_health->flags = le32toh(resp->flags);
	/* Co-CPU recovery requires the Kong mailbox channel. */
	if ((fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) &&
	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	fw_health->polling_dsecs = le32toh(resp->driver_polling_freq);
	fw_health->master_func_wait_dsecs =
		le32toh(resp->master_func_wait_period);
	fw_health->normal_func_wait_dsecs =
		le32toh(resp->normal_func_wait_period);
	fw_health->post_reset_wait_dsecs =
		le32toh(resp->master_func_wait_period_after_reset);
	fw_health->post_reset_max_wait_dsecs =
		le32toh(resp->max_bailout_time_after_reset);
	fw_health->regs[BNXT_FW_HEALTH_REG] =
		le32toh(resp->fw_health_status_reg);
	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
		le32toh(resp->fw_heartbeat_reg);
	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
		le32toh(resp->fw_reset_cnt_reg);
	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
		le32toh(resp->reset_inprogress_reg);
	fw_health->fw_reset_inprog_reg_mask =
		le32toh(resp->reset_inprogress_reg_mask);
	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
	/* Guard against overflowing the 16-entry sequence arrays. */
	if (fw_health->fw_reset_seq_cnt >= 16) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
		fw_health->fw_reset_seq_regs[i] =
			le32toh(resp->reset_reg[i]);
		fw_health->fw_reset_seq_vals[i] =
			le32toh(resp->reset_reg_val[i]);
		fw_health->fw_reset_seq_delay_msec[i] =
			le32toh(resp->delay_after_reset[i]);
	}
err_recovery_out:
	if (!rc)
		rc = bnxt_map_fw_health_regs(bp);
	if (rc)
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
	return rc;
}
1758 
/*
 * Register this driver instance with the firmware.
 *
 * Before registering, probe whether firmware error recovery can be
 * supported: allocate the fw_health tracking structure and query the
 * recovery configuration.  Failures in that probe are non-fatal -- the
 * driver simply runs without error recovery (note bp->fw_health may
 * remain NULL afterwards).
 *
 * Returns 0 on success or -ENODEV if the registration itself fails.
 */
static int bnxt_drv_rgtr(struct bnxt_softc *bp)
{
	int rc;

	/* determine whether we can support error recovery before
	 * registering with FW
	 */
	if (bnxt_alloc_fw_health(bp)) {
		device_printf(bp->dev, "no memory for firmware error recovery\n");
	} else {
		rc = bnxt_hwrm_error_recovery_qcfg(bp);
		if (rc)
			device_printf(bp->dev, "hwrm query error recovery failure rc: %d\n",
				    rc);
	}
	/* TODO(review): revisit the registration parameters (events/flags). */
	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
	if (rc)
		return -ENODEV;
	return 0;
}
1779 
bnxt_fw_reset_timeout(struct bnxt_softc * bp)1780 static bool bnxt_fw_reset_timeout(struct bnxt_softc *bp)
1781 {
1782 	return time_after(jiffies, bp->fw_reset_timestamp +
1783 			  (bp->fw_reset_max_dsecs * HZ / 10));
1784 }
1785 
/*
 * Re-initialize the device, typically after a firmware reset.
 *
 * Re-reads NVRAM info (PF only), queue configuration, capabilities and
 * the function configuration from firmware, re-registers the driver,
 * re-allocates backing context memory when supported, then re-assigns
 * MSI-X vectors and brings the interface back up through iflib.
 *
 * Returns 0 on success or an error code from the failing HWRM step.
 */
static int bnxt_open(struct bnxt_softc *bp)
{
	int rc = 0;
	/*
	 * NOTE(review): this rc is overwritten by the qportcfg call just
	 * below, so a failure of the NVRAM query is effectively ignored.
	 */
	if (BNXT_PF(bp))
		rc = bnxt_hwrm_nvm_get_dev_info(bp, &bp->nvm_info->mfg_id,
			&bp->nvm_info->device_id, &bp->nvm_info->sector_size,
			&bp->nvm_info->size, &bp->nvm_info->reserved_size,
			&bp->nvm_info->available_size);

	/* Get the queue config */
	rc = bnxt_hwrm_queue_qportcfg(bp, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
	if (rc) {
		device_printf(bp->dev, "reinit: hwrm qportcfg (tx) failed\n");
		return rc;
	}
	if (bp->is_asym_q) {
		/* Asymmetric queues: RX has its own queue configuration. */
		rc = bnxt_hwrm_queue_qportcfg(bp,
					      HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
		if (rc) {
			device_printf(bp->dev, "re-init: hwrm qportcfg (rx)  failed\n");
			return rc;
		}
		bnxt_verify_asym_queues(bp);
	} else {
		/* Symmetric queues: RX mirrors the TX queue configuration. */
		bp->rx_max_q = bp->tx_max_q;
		memcpy(bp->rx_q_info, bp->tx_q_info, sizeof(bp->rx_q_info));
		memcpy(bp->rx_q_ids, bp->tx_q_ids, sizeof(bp->rx_q_ids));
	}
	/* Get the HW capabilities */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	/* Register the driver with the FW */
	rc = bnxt_drv_rgtr(bp);
	if (rc)
		return rc;
	if (bp->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(bp);
		if (rc) {
			device_printf(bp->dev, "attach: alloc_ctx_mem failed\n");
			return rc;
		}
		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
	}

	if (BNXT_CHIP_P5(bp))
		bnxt_hwrm_reserve_pf_rings(bp);
	/* Get the current configuration of this function */
	rc = bnxt_hwrm_func_qcfg(bp);
	if (rc) {
		device_printf(bp->dev, "re-init: hwrm func qcfg failed\n");
		return rc;
	}

	/* Re-attach interrupts and restart the interface via iflib. */
	bnxt_msix_intr_assign(bp->ctx, 0);
	bnxt_init(bp->ctx);
	bnxt_intr_enable(bp->ctx);

	/* If this open cleared a detected FW reset, restart the ULP (RoCE). */
	if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
		if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
			bnxt_ulp_start(bp, 0);
		}
	}

	device_printf(bp->dev, "Network interface is UP and operational\n");

	return rc;
}
bnxt_fw_reset_abort(struct bnxt_softc * bp,int rc)1857 static void bnxt_fw_reset_abort(struct bnxt_softc *bp, int rc)
1858 {
1859 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
1860 	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
1861 		bnxt_ulp_start(bp, rc);
1862 	}
1863 	bp->fw_reset_state = 0;
1864 }
1865 
/*
 * Delayed-work handler that drives the firmware reset state machine:
 *
 *   POLL_FW_DOWN -> RESET_FW -> ENABLE_DEV -> POLL_FW -> OPENING
 *
 * Each state either advances (falling through to the next case) or
 * re-queues itself via bnxt_queue_fw_reset_work() while waiting on the
 * firmware/device, aborting the whole reset on timeout or fatal error.
 */
static void bnxt_fw_reset_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, fw_reset_task.work);
	int rc = 0;

	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		device_printf(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
		return;
	}

	switch (bp->fw_reset_state) {
	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
		u32 val;

		/* Poll until firmware reports shutdown or we time out. */
		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
		    !bnxt_fw_reset_timeout(bp)) {
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}

		/* Non-primary functions just wait for the primary's reset. */
		if (!bp->fw_health->primary) {
			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;

			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
			return;
		}
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	}
		fallthrough;
	case BNXT_FW_RESET_STATE_RESET_FW:
		/* Primary function performs the actual firmware reset. */
		bnxt_reset_all(bp);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
		return;
	case BNXT_FW_RESET_STATE_ENABLE_DEV:
		bnxt_inv_fw_health_reg(bp);
		/*
		 * After a fatal reset with no minimum wait, poll PCI config
		 * space until the device reappears (all-ones reads mean the
		 * device is still gone from the bus).
		 */
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
		    !bp->fw_reset_min_dsecs) {
			u16 val;

			val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
			if (val == 0xffff) {
				if (bnxt_fw_reset_timeout(bp)) {
					device_printf(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
					rc = -ETIMEDOUT;
					goto fw_reset_abort;
				}
				bnxt_queue_fw_reset_work(bp, HZ / 1000);
				return;
			}
		}
		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
		if (!pci_is_enabled(bp->pdev)) {
			if (pci_enable_device(bp->pdev)) {
				device_printf(bp->dev, "Cannot re-enable PCI device\n");
				rc = -ENODEV;
				goto fw_reset_abort;
			}
		}
		pci_set_master(bp->pdev);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
		fallthrough;
	case BNXT_FW_RESET_STATE_POLL_FW:
		/* Use a short HWRM timeout while polling for FW readiness. */
		bp->hwrm_cmd_timeo = SHORT_HWRM_CMD_TIMEOUT;
		rc = bnxt_hwrm_poll(bp);
		if (rc) {
			if (bnxt_fw_reset_timeout(bp)) {
				device_printf(bp->dev, "Firmware reset aborted\n");
				goto fw_reset_abort_status;
			}
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}
		bp->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
		fallthrough;
	case BNXT_FW_RESET_STATE_OPENING:
		rc = bnxt_open(bp);
		if (rc) {
			device_printf(bp->dev, "bnxt_open() failed during FW reset\n");
			bnxt_fw_reset_abort(bp, rc);
			rtnl_unlock();
			return;
		}

		/* Snapshot the reset counter so later resets are detected. */
		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
		    bp->fw_health->enabled) {
			bp->fw_health->last_fw_reset_cnt =
				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
		}
		bp->fw_reset_state = 0;
		/* Publish fw_reset_state before clearing IN_FW_RESET. */
		smp_mb__before_atomic();
		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_ulp_start(bp, 0);
		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
		set_bit(BNXT_STATE_OPEN, &bp->state);
		rtnl_unlock();
	}
	return;

fw_reset_abort_status:
	/* Log the last health status when it can be trusted. */
	if (bp->fw_health->status_reliable ||
	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);

		device_printf(bp->dev, "fw_health_status 0x%x\n", sts);
	}
fw_reset_abort:
	rtnl_lock();
	bnxt_fw_reset_abort(bp, rc);
	rtnl_unlock();
}
1981 
bnxt_force_fw_reset(struct bnxt_softc * bp)1982 static void bnxt_force_fw_reset(struct bnxt_softc *bp)
1983 {
1984 	struct bnxt_fw_health *fw_health = bp->fw_health;
1985 	u32 wait_dsecs;
1986 
1987 	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
1988 	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
1989 		return;
1990 	bnxt_fw_reset_close(bp);
1991 	wait_dsecs = fw_health->master_func_wait_dsecs;
1992 	if (fw_health->primary) {
1993 		if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
1994 			wait_dsecs = 0;
1995 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
1996 	} else {
1997 		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
1998 		wait_dsecs = fw_health->normal_func_wait_dsecs;
1999 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
2000 	}
2001 
2002 	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
2003 	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
2004 	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
2005 }
2006 
/*
 * Handle a firmware fatal exception: record the fatal condition and
 * kick off a forced firmware reset under the rtnl serialization lock.
 */
static void bnxt_fw_exception(struct bnxt_softc *bp)
{
	device_printf(bp->dev, "Detected firmware fatal condition, initiating reset\n");
	/* Mark fatal before resetting so the reset path knows the cause. */
	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
	bnxt_rtnl_lock_sp(bp);
	bnxt_force_fw_reset(bp);
	bnxt_rtnl_unlock_sp(bp);
}
2015 
__bnxt_fw_recover(struct bnxt_softc * bp)2016 static void __bnxt_fw_recover(struct bnxt_softc *bp)
2017 {
2018 	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
2019 	    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
2020 		bnxt_fw_reset(bp);
2021 	else
2022 		bnxt_fw_exception(bp);
2023 }
2024 
bnxt_devlink_health_fw_report(struct bnxt_softc * bp)2025 static void bnxt_devlink_health_fw_report(struct bnxt_softc *bp)
2026 {
2027 	struct bnxt_fw_health *fw_health = bp->fw_health;
2028 
2029 	if (!fw_health)
2030 		return;
2031 
2032 	if (!fw_health->fw_reporter) {
2033 		__bnxt_fw_recover(bp);
2034 		return;
2035 	}
2036 }
2037 
/*
 * Slow-path work handler for asynchronous firmware events.
 *
 * Consumes firmware reset notifications and firmware exception events
 * posted in bp->sp_event, routing them either through the health
 * reporter path or directly into a firmware reset.  The
 * BNXT_STATE_IN_SP_TASK bit brackets execution so other contexts can
 * tell the slow path is running.
 */
static void bnxt_sp_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, sp_task);

	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	/* Order the IN_SP_TASK store before reading the OPEN bit. */
	smp_mb__after_atomic();
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
		return;
	}

	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
		/* Fatal/non-fatal conditions go via the health reporter. */
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
		    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
			bnxt_devlink_health_fw_report(bp);
		else
			bnxt_fw_reset(bp);
	}

	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
		/* Only escalate if the firmware really looks unhealthy. */
		if (!is_bnxt_fw_ok(bp))
			bnxt_devlink_health_fw_report(bp);
	}
	/* Ensure all work above is visible before dropping IN_SP_TASK. */
	smp_mb__before_atomic();
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
2064 
2065 /* Device setup and teardown */
2066 static int
bnxt_attach_pre(if_ctx_t ctx)2067 bnxt_attach_pre(if_ctx_t ctx)
2068 {
2069 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2070 	if_softc_ctx_t scctx;
2071 	int rc = 0;
2072 
2073 	softc->ctx = ctx;
2074 	softc->dev = iflib_get_dev(ctx);
2075 	softc->media = iflib_get_media(ctx);
2076 	softc->scctx = iflib_get_softc_ctx(ctx);
2077 	softc->sctx = iflib_get_sctx(ctx);
2078 	scctx = softc->scctx;
2079 
2080 	/* TODO: Better way of detecting NPAR/VF is needed */
2081 	switch (pci_get_device(softc->dev)) {
2082 	case BCM57402_NPAR:
2083 	case BCM57404_NPAR:
2084 	case BCM57406_NPAR:
2085 	case BCM57407_NPAR:
2086 	case BCM57412_NPAR1:
2087 	case BCM57412_NPAR2:
2088 	case BCM57414_NPAR1:
2089 	case BCM57414_NPAR2:
2090 	case BCM57416_NPAR1:
2091 	case BCM57416_NPAR2:
2092 	case BCM57504_NPAR:
2093 		softc->flags |= BNXT_FLAG_NPAR;
2094 		break;
2095 	case NETXTREME_C_VF1:
2096 	case NETXTREME_C_VF2:
2097 	case NETXTREME_C_VF3:
2098 	case NETXTREME_E_VF1:
2099 	case NETXTREME_E_VF2:
2100 	case NETXTREME_E_VF3:
2101 		softc->flags |= BNXT_FLAG_VF;
2102 		break;
2103 	}
2104 
2105 	softc->domain = pci_get_domain(softc->dev);
2106 	softc->bus = pci_get_bus(softc->dev);
2107 	softc->slot = pci_get_slot(softc->dev);
2108 	softc->function = pci_get_function(softc->dev);
2109 	softc->dev_fn = PCI_DEVFN(softc->slot, softc->function);
2110 
2111 	if (bnxt_num_pfs == 0)
2112 		  SLIST_INIT(&pf_list);
2113 	bnxt_num_pfs++;
2114 	softc->list.softc = softc;
2115 	SLIST_INSERT_HEAD(&pf_list, &softc->list, next);
2116 
2117 	pci_enable_busmaster(softc->dev);
2118 
2119 	if (bnxt_pci_mapping(softc)) {
2120 		device_printf(softc->dev, "PCI mapping failed\n");
2121 		rc = ENXIO;
2122 		goto pci_map_fail;
2123 	}
2124 
2125 	softc->pdev = kzalloc(sizeof(*softc->pdev), GFP_KERNEL);
2126 	if (!softc->pdev) {
2127 		device_printf(softc->dev, "pdev alloc failed\n");
2128 		rc = -ENOMEM;
2129 		goto free_pci_map;
2130 	}
2131 
2132 	rc = linux_pci_attach_device(softc->dev, NULL, NULL, softc->pdev);
2133 	if (rc) {
2134 		device_printf(softc->dev, "Failed to attach Linux PCI device 0x%x\n", rc);
2135 		goto pci_attach_fail;
2136 	}
2137 
2138 	/* HWRM setup/init */
2139 	BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev));
2140 	rc = bnxt_alloc_hwrm_dma_mem(softc);
2141 	if (rc)
2142 		goto dma_fail;
2143 
2144 	/* Get firmware version and compare with driver */
2145 	softc->ver_info = malloc(sizeof(struct bnxt_ver_info),
2146 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2147 	if (softc->ver_info == NULL) {
2148 		rc = ENOMEM;
2149 		device_printf(softc->dev,
2150 		    "Unable to allocate space for version info\n");
2151 		goto ver_alloc_fail;
2152 	}
2153 	/* Default minimum required HWRM version */
2154 	softc->ver_info->hwrm_min_major = HWRM_VERSION_MAJOR;
2155 	softc->ver_info->hwrm_min_minor = HWRM_VERSION_MINOR;
2156 	softc->ver_info->hwrm_min_update = HWRM_VERSION_UPDATE;
2157 
2158 	rc = bnxt_hwrm_ver_get(softc);
2159 	if (rc) {
2160 		device_printf(softc->dev, "attach: hwrm ver get failed\n");
2161 		goto ver_fail;
2162 	}
2163 
2164 	/* Now perform a function reset */
2165 	rc = bnxt_hwrm_func_reset(softc);
2166 
2167 	if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
2168 	    softc->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
2169 		rc = bnxt_alloc_hwrm_short_cmd_req(softc);
2170 		if (rc)
2171 			goto hwrm_short_cmd_alloc_fail;
2172 	}
2173 
2174 	if ((softc->ver_info->chip_num == BCM57508) ||
2175 	    (softc->ver_info->chip_num == BCM57504) ||
2176 	    (softc->ver_info->chip_num == BCM57504_NPAR) ||
2177 	    (softc->ver_info->chip_num == BCM57502))
2178 		softc->flags |= BNXT_FLAG_CHIP_P5;
2179 
2180 	softc->flags |= BNXT_FLAG_TPA;
2181 
2182 	if (BNXT_CHIP_P5(softc) && (!softc->ver_info->chip_rev) &&
2183 			(!softc->ver_info->chip_metal))
2184 		softc->flags &= ~BNXT_FLAG_TPA;
2185 
2186 	if (BNXT_CHIP_P5(softc))
2187 		softc->flags &= ~BNXT_FLAG_TPA;
2188 
2189 	/* Get NVRAM info */
2190 	if (BNXT_PF(softc)) {
2191 		if (!bnxt_pf_wq) {
2192 			bnxt_pf_wq =
2193 				create_singlethread_workqueue("bnxt_pf_wq");
2194 			if (!bnxt_pf_wq) {
2195 				device_printf(softc->dev, "Unable to create workqueue.\n");
2196 				rc = -ENOMEM;
2197 				goto nvm_alloc_fail;
2198 			}
2199 		}
2200 
2201 		softc->nvm_info = malloc(sizeof(struct bnxt_nvram_info),
2202 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2203 		if (softc->nvm_info == NULL) {
2204 			rc = ENOMEM;
2205 			device_printf(softc->dev,
2206 			    "Unable to allocate space for NVRAM info\n");
2207 			goto nvm_alloc_fail;
2208 		}
2209 
2210 		rc = bnxt_hwrm_nvm_get_dev_info(softc, &softc->nvm_info->mfg_id,
2211 		    &softc->nvm_info->device_id, &softc->nvm_info->sector_size,
2212 		    &softc->nvm_info->size, &softc->nvm_info->reserved_size,
2213 		    &softc->nvm_info->available_size);
2214 	}
2215 
2216 	if (BNXT_CHIP_P5(softc)) {
2217 		softc->db_ops.bnxt_db_tx = bnxt_thor_db_tx;
2218 		softc->db_ops.bnxt_db_rx = bnxt_thor_db_rx;
2219 		softc->db_ops.bnxt_db_rx_cq = bnxt_thor_db_rx_cq;
2220 		softc->db_ops.bnxt_db_tx_cq = bnxt_thor_db_tx_cq;
2221 		softc->db_ops.bnxt_db_nq = bnxt_thor_db_nq;
2222 	} else {
2223 		softc->db_ops.bnxt_db_tx = bnxt_cuw_db_tx;
2224 		softc->db_ops.bnxt_db_rx = bnxt_cuw_db_rx;
2225 		softc->db_ops.bnxt_db_rx_cq = bnxt_cuw_db_cq;
2226 		softc->db_ops.bnxt_db_tx_cq = bnxt_cuw_db_cq;
2227 	}
2228 
2229 
2230 	/* Get the queue config */
2231 	rc = bnxt_hwrm_queue_qportcfg(softc, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
2232 	if (rc) {
2233 		device_printf(softc->dev, "attach: hwrm qportcfg (tx) failed\n");
2234 		goto failed;
2235 	}
2236 	if (softc->is_asym_q) {
2237 		rc = bnxt_hwrm_queue_qportcfg(softc,
2238 					      HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
2239 		if (rc) {
2240 			device_printf(softc->dev, "attach: hwrm qportcfg (rx)  failed\n");
2241 			return rc;
2242 		}
2243 		bnxt_verify_asym_queues(softc);
2244 	} else {
2245 		softc->rx_max_q = softc->tx_max_q;
2246 		memcpy(softc->rx_q_info, softc->tx_q_info, sizeof(softc->rx_q_info));
2247 		memcpy(softc->rx_q_ids, softc->tx_q_ids, sizeof(softc->rx_q_ids));
2248 	}
2249 
2250 	/* Get the HW capabilities */
2251 	rc = bnxt_hwrm_func_qcaps(softc);
2252 	if (rc)
2253 		goto failed;
2254 
2255 	/*
2256 	 * Register the driver with the FW
2257 	 * Register the async events with the FW
2258 	 */
2259 	rc = bnxt_drv_rgtr(softc);
2260 	if (rc)
2261 		goto failed;
2262 
2263 	if (softc->hwrm_spec_code >= 0x10803) {
2264 		rc = bnxt_alloc_ctx_mem(softc);
2265 		if (rc) {
2266 			device_printf(softc->dev, "attach: alloc_ctx_mem failed\n");
2267 			return rc;
2268 		}
2269 		rc = bnxt_hwrm_func_resc_qcaps(softc, true);
2270 		if (!rc)
2271 			softc->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
2272 	}
2273 
2274 	/* Get the current configuration of this function */
2275 	rc = bnxt_hwrm_func_qcfg(softc);
2276 	if (rc) {
2277 		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
2278 		goto failed;
2279 	}
2280 
2281 	iflib_set_mac(ctx, softc->func.mac_addr);
2282 
2283 	scctx->isc_txrx = &bnxt_txrx;
2284 	scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP |
2285 	    CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO);
2286 	scctx->isc_capabilities = scctx->isc_capenable =
2287 	    /* These are translated to hwassit bits */
2288 	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
2289 	    /* These are checked by iflib */
2290 	    IFCAP_LRO | IFCAP_VLAN_HWFILTER |
2291 	    /* These are part of the iflib mask */
2292 	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
2293 	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
2294 	    /* These likely get lost... */
2295 	    IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;
2296 
2297 	if (bnxt_wol_supported(softc))
2298 		scctx->isc_capabilities |= IFCAP_WOL_MAGIC;
2299 	bnxt_get_wol_settings(softc);
2300 	if (softc->wol)
2301 		scctx->isc_capenable |= IFCAP_WOL_MAGIC;
2302 
2303 	/* Get the queue config */
2304 	bnxt_get_wol_settings(softc);
2305 	if (BNXT_CHIP_P5(softc))
2306 		bnxt_hwrm_reserve_pf_rings(softc);
2307 	rc = bnxt_hwrm_func_qcfg(softc);
2308 	if (rc) {
2309 		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
2310 		goto failed;
2311 	}
2312 
2313 	bnxt_clear_ids(softc);
2314 	if (rc)
2315 		goto failed;
2316 
2317 	/* Now set up iflib sc */
2318 	scctx->isc_tx_nsegments = 31,
2319 	scctx->isc_tx_tso_segments_max = 31;
2320 	scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE;
2321 	scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE;
2322 	scctx->isc_vectors = softc->func.max_cp_rings;
2323 	scctx->isc_min_frame_size = BNXT_MIN_FRAME_SIZE;
2324 	scctx->isc_txrx = &bnxt_txrx;
2325 
2326 	if (scctx->isc_nrxd[0] <
2327 	    ((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2]))
2328 		device_printf(softc->dev,
2329 		    "WARNING: nrxd0 (%d) should be at least 4 * nrxd1 (%d) + nrxd2 (%d).  Driver may be unstable\n",
2330 		    scctx->isc_nrxd[0], scctx->isc_nrxd[1], scctx->isc_nrxd[2]);
2331 	if (scctx->isc_ntxd[0] < scctx->isc_ntxd[1] * 2)
2332 		device_printf(softc->dev,
2333 		    "WARNING: ntxd0 (%d) should be at least 2 * ntxd1 (%d).  Driver may be unstable\n",
2334 		    scctx->isc_ntxd[0], scctx->isc_ntxd[1]);
2335 	scctx->isc_txqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_ntxd[0];
2336 	scctx->isc_txqsizes[1] = sizeof(struct tx_bd_short) *
2337 	    scctx->isc_ntxd[1];
2338 	scctx->isc_txqsizes[2] = sizeof(struct cmpl_base) * scctx->isc_ntxd[2];
2339 	scctx->isc_rxqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_nrxd[0];
2340 	scctx->isc_rxqsizes[1] = sizeof(struct rx_prod_pkt_bd) *
2341 	    scctx->isc_nrxd[1];
2342 	scctx->isc_rxqsizes[2] = sizeof(struct rx_prod_pkt_bd) *
2343 	    scctx->isc_nrxd[2];
2344 
2345 	scctx->isc_nrxqsets_max = min(pci_msix_count(softc->dev)-1,
2346 	    softc->fn_qcfg.alloc_completion_rings - 1);
2347 	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
2348 	    softc->fn_qcfg.alloc_rx_rings);
2349 	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
2350 	    softc->fn_qcfg.alloc_vnics);
2351 	scctx->isc_ntxqsets_max = min(softc->fn_qcfg.alloc_tx_rings,
2352 	    softc->fn_qcfg.alloc_completion_rings - scctx->isc_nrxqsets_max - 1);
2353 
2354 	scctx->isc_rss_table_size = HW_HASH_INDEX_SIZE;
2355 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
2356 
2357 	/* iflib will map and release this bar */
2358 	scctx->isc_msix_bar = pci_msix_table_bar(softc->dev);
2359 
2360         /*
2361          * Default settings for HW LRO (TPA):
2362          *  Disable HW LRO by default
2363          *  Can be enabled after taking care of 'packet forwarding'
2364          */
2365 	if (softc->flags & BNXT_FLAG_TPA) {
2366 		softc->hw_lro.enable = 0;
2367 		softc->hw_lro.is_mode_gro = 0;
2368 		softc->hw_lro.max_agg_segs = 5; /* 2^5 = 32 segs */
2369 		softc->hw_lro.max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX;
2370 		softc->hw_lro.min_agg_len = 512;
2371 	}
2372 
2373 	/* Allocate the default completion ring */
2374 	softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
2375 	softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
2376 	softc->def_cp_ring.ring.softc = softc;
2377 	softc->def_cp_ring.ring.id = 0;
2378 	softc->def_cp_ring.ring.doorbell = (BNXT_CHIP_P5(softc)) ?
2379 		DB_PF_OFFSET_P5 : softc->def_cp_ring.ring.id * 0x80;
2380 	softc->def_cp_ring.ring.ring_size = PAGE_SIZE /
2381 	    sizeof(struct cmpl_base);
2382 	rc = iflib_dma_alloc(ctx,
2383 	    sizeof(struct cmpl_base) * softc->def_cp_ring.ring.ring_size,
2384 	    &softc->def_cp_ring_mem, 0);
2385 	softc->def_cp_ring.ring.vaddr = softc->def_cp_ring_mem.idi_vaddr;
2386 	softc->def_cp_ring.ring.paddr = softc->def_cp_ring_mem.idi_paddr;
2387 	iflib_config_task_init(ctx, &softc->def_cp_task, bnxt_def_cp_task);
2388 
2389 	rc = bnxt_init_sysctl_ctx(softc);
2390 	if (rc)
2391 		goto init_sysctl_failed;
2392 	if (BNXT_PF(softc)) {
2393 		rc = bnxt_create_nvram_sysctls(softc->nvm_info);
2394 		if (rc)
2395 			goto failed;
2396 	}
2397 
2398 	arc4rand(softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE, 0);
2399 	softc->vnic_info.rss_hash_type =
2400 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
2401 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
2402 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
2403 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
2404 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 |
2405 	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
2406 	rc = bnxt_create_config_sysctls_pre(softc);
2407 	if (rc)
2408 		goto failed;
2409 
2410 	rc = bnxt_create_hw_lro_sysctls(softc);
2411 	if (rc)
2412 		goto failed;
2413 
2414 	rc = bnxt_create_pause_fc_sysctls(softc);
2415 	if (rc)
2416 		goto failed;
2417 
2418 	rc = bnxt_create_dcb_sysctls(softc);
2419 	if (rc)
2420 		goto failed;
2421 
2422 	set_bit(BNXT_STATE_OPEN, &softc->state);
2423 	INIT_WORK(&softc->sp_task, bnxt_sp_task);
2424 	INIT_DELAYED_WORK(&softc->fw_reset_task, bnxt_fw_reset_task);
2425 
2426 	/* Initialize the vlan list */
2427 	SLIST_INIT(&softc->vnic_info.vlan_tags);
2428 	softc->vnic_info.vlan_tag_list.idi_vaddr = NULL;
2429 	softc->state_bv = bit_alloc(BNXT_STATE_MAX, M_DEVBUF,
2430 			M_WAITOK|M_ZERO);
2431 
2432 	return (rc);
2433 
2434 failed:
2435 	bnxt_free_sysctl_ctx(softc);
2436 init_sysctl_failed:
2437 	bnxt_hwrm_func_drv_unrgtr(softc, false);
2438 	if (BNXT_PF(softc))
2439 		free(softc->nvm_info, M_DEVBUF);
2440 nvm_alloc_fail:
2441 	bnxt_free_hwrm_short_cmd_req(softc);
2442 hwrm_short_cmd_alloc_fail:
2443 ver_fail:
2444 	free(softc->ver_info, M_DEVBUF);
2445 ver_alloc_fail:
2446 	bnxt_free_hwrm_dma_mem(softc);
2447 dma_fail:
2448 	BNXT_HWRM_LOCK_DESTROY(softc);
2449 	if (softc->pdev)
2450 		linux_pci_detach_device(softc->pdev);
2451 pci_attach_fail:
2452 	kfree(softc->pdev);
2453 	softc->pdev = NULL;
2454 free_pci_map:
2455 	bnxt_pci_mapping_free(softc);
2456 pci_map_fail:
2457 	pci_disable_busmaster(softc->dev);
2458 	return (rc);
2459 }
2460 
/*
 * iflib post-attach handler: finish bringing the interface up after
 * iflib has created the ifnet.
 *
 * Probes the PHY, publishes version/media information, derives the RX
 * buffer size from the MTU, and initializes DCB and the RoCE auxiliary
 * device.
 *
 * Returns 0 on success or the PHY-probe error code.
 */
static int
bnxt_attach_post(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	int rc;

	softc->ifp = ifp;
	bnxt_create_config_sysctls_post(softc);

	/* Update link state etc... */
	rc = bnxt_probe_phy(softc);
	if (rc)
		goto failed;

	/* Needs to be done after probing the phy */
	bnxt_create_ver_sysctls(softc);
	ifmedia_removeall(softc->media);
	bnxt_add_media_types(softc);
	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);

	softc->scctx->isc_max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN +
	    ETHER_CRC_LEN;

	/* RX buffers are capped at one page regardless of MTU. */
	softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
	bnxt_dcb_init(softc);
	bnxt_rdma_aux_device_init(softc);

failed:
	return rc;
}
2492 
/*
 * iflib detach handler: tear down everything bnxt_attach_pre()/
 * bnxt_attach_post() set up, in roughly reverse order of allocation.
 * Always returns 0.
 */
static int
bnxt_detach(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_vlan_tag *tag;
	struct bnxt_vlan_tag *tmp;
	int i;

	/* Stop async work first so nothing races with the teardown below. */
	bnxt_rdma_aux_device_uninit(softc);
	cancel_delayed_work_sync(&softc->fw_reset_task);
	cancel_work_sync(&softc->sp_task);
	bnxt_dcb_free(softc);
	SLIST_REMOVE(&pf_list, &softc->list, bnxt_softc_list, next);
	bnxt_num_pfs--;
	bnxt_wol_config(ctx);
	bnxt_do_disable_intr(&softc->def_cp_ring);
	bnxt_free_sysctl_ctx(softc);
	bnxt_hwrm_func_reset(softc);
	bnxt_free_ctx_mem(softc);
	bnxt_clear_ids(softc);
	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
	/* We need to free() these here... */
	for (i = softc->nrxqsets-1; i>=0; i--) {
		/* P5 chips take interrupts on NQ rings, older on RX CP rings. */
		if (BNXT_CHIP_P5(softc))
			iflib_irq_free(ctx, &softc->nq_rings[i].irq);
		else
			iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);

	}
	iflib_dma_free(&softc->vnic_info.mc_list);
	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
	iflib_dma_free(&softc->vnic_info.rss_grp_tbl);
	if (softc->vnic_info.vlan_tag_list.idi_vaddr)
		iflib_dma_free(&softc->vnic_info.vlan_tag_list);
	SLIST_FOREACH_SAFE(tag, &softc->vnic_info.vlan_tags, next, tmp)
		free(tag, M_DEVBUF);
	iflib_dma_free(&softc->def_cp_ring_mem);
	for (i = 0; i < softc->nrxqsets; i++)
		free(softc->rx_rings[i].tpa_start, M_DEVBUF);
	free(softc->ver_info, M_DEVBUF);
	if (BNXT_PF(softc))
		free(softc->nvm_info, M_DEVBUF);

	bnxt_hwrm_func_drv_unrgtr(softc, false);
	bnxt_free_hwrm_dma_mem(softc);
	bnxt_free_hwrm_short_cmd_req(softc);
	BNXT_HWRM_LOCK_DESTROY(softc);

	/* Destroy the shared PF workqueue once the last PF is gone. */
	if (!bnxt_num_pfs && bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);

	if (softc->pdev)
		linux_pci_detach_device(softc->pdev);
	free(softc->state_bv, M_DEVBUF);
	pci_disable_busmaster(softc->dev);
	bnxt_pci_mapping_free(softc);

	return 0;
}
2552 
/*
 * Free all HWRM-allocated resources owned by this function: the
 * default completion ring, per-queue TX/RX/AGG/completion rings, stat
 * contexts, L2 filters, the VNIC and its RSS context, and (on P5) the
 * NQ rings.
 *
 * Used by bnxt_func_reset() on P5 chips in place of a HWRM function
 * reset.  Bails out on the first failure, since subsequent frees would
 * likely fail as well.
 */
static void
bnxt_hwrm_resource_free(struct bnxt_softc *softc)
{
	int i, rc = 0;

	rc = bnxt_hwrm_ring_free(softc,
			HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
			&softc->def_cp_ring.ring,
			(uint16_t)HWRM_NA_SIGNATURE);
	if (rc)
		goto fail;

	/* TX side: data ring, its completion ring, then its stat context. */
	for (i = 0; i < softc->ntxqsets; i++) {
		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				&softc->tx_rings[i],
				softc->tx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->tx_cp_rings[i].ring,
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->tx_cp_rings[i]);
		if (rc)
			goto fail;
	}
	/* Filters and the VNIC must go before their RX rings. */
	rc = bnxt_hwrm_free_filter(softc);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_free(softc, &softc->vnic_info);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_ctx_free(softc, softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	/* RX side: ring group, AGG ring, RX ring, completion ring, stats. */
	for (i = 0; i < softc->nrxqsets; i++) {
		rc = bnxt_hwrm_ring_grp_free(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
				&softc->ag_rings[i],
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
				&softc->rx_rings[i],
				softc->rx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->rx_cp_rings[i].ring,
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		/* Only P5 chips have per-queue notification (NQ) rings. */
		if (BNXT_CHIP_P5(softc)) {
			rc = bnxt_hwrm_ring_free(softc,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
					&softc->nq_rings[i].ring,
					(uint16_t)HWRM_NA_SIGNATURE);
			if (rc)
				goto fail;
		}

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->rx_cp_rings[i]);
		if (rc)
			goto fail;
	}

fail:
	return;
}
2639 
2640 
/*
 * Reset this function's state in firmware.  Pre-P5 chips support the
 * HWRM function-reset command directly; on P5 chips the equivalent is
 * to free every HWRM resource the function owns.
 */
static void
bnxt_func_reset(struct bnxt_softc *softc)
{

	if (BNXT_CHIP_P5(softc))
		bnxt_hwrm_resource_free(softc);
	else
		bnxt_hwrm_func_reset(softc);
}
2653 
/*
 * Populate the RSS indirection (group) table, spreading the
 * HW_HASH_INDEX_SIZE 16-bit slots round-robin over the RX queue sets.
 *
 * On P5 chips each logical entry occupies two consecutive slots (RX
 * ring id followed by its completion ring id) -- hence the extra i++
 * inside the loop; older chips store one ring-group id per slot.
 */
static void
bnxt_rss_grp_tbl_init(struct bnxt_softc *softc)
{
	uint16_t *rgt = (uint16_t *) softc->vnic_info.rss_grp_tbl.idi_vaddr;
	int i, j;

	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
		if (BNXT_CHIP_P5(softc)) {
			/* P5: (rx ring, rx cmpl ring) pair per entry. */
			rgt[i++] = htole16(softc->rx_rings[j].phys_id);
			rgt[i] = htole16(softc->rx_cp_rings[j].ring.phys_id);
		} else {
			rgt[i] = htole16(softc->grp_info[j].grp_id);
		}
		/* Round-robin across the RX queue sets. */
		if (++j == softc->nrxqsets)
			j = 0;
	}
}
2671 
/*
 * Refresh link state and warn about problematic SFP+ modules.
 *
 * The three statuses below (TX disabled, powered down, warning) all
 * indicate an unqualified module and share the initial warning; the
 * trailing checks add status-specific detail.
 */
static void bnxt_get_port_module_status(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
	uint8_t module_status;

	if (bnxt_update_link(softc, false))
		return;

	module_status = link_info->module_status;
	switch (module_status) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX:
	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN:
	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG:
		device_printf(softc->dev, "Unqualified SFP+ module detected on port %d\n",
			    softc->pf.port_id);
		/* The vendor part number is only reported by newer HWRM. */
		if (softc->hwrm_spec_code >= 0x10201) {
			device_printf(softc->dev, "Module part number %s\n",
				    resp->phy_vendor_partnumber);
		}
		if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX)
			device_printf(softc->dev, "TX is disabled\n");
		if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN)
			device_printf(softc->dev, "SFP+ module is shutdown\n");
	}
}
2698 
bnxt_aux_dev_free(struct bnxt_softc * softc)2699 static void bnxt_aux_dev_free(struct bnxt_softc *softc)
2700 {
2701 	kfree(softc->aux_dev);
2702 	softc->aux_dev = NULL;
2703 }
2704 
bnxt_aux_dev_init(struct bnxt_softc * softc)2705 static struct bnxt_aux_dev *bnxt_aux_dev_init(struct bnxt_softc *softc)
2706 {
2707 	struct bnxt_aux_dev *bnxt_adev;
2708 
2709 	msleep(1000 * 2);
2710 	bnxt_adev = kzalloc(sizeof(*bnxt_adev), GFP_KERNEL);
2711 	if (!bnxt_adev)
2712 		return ERR_PTR(-ENOMEM);
2713 
2714 	return bnxt_adev;
2715 }
2716 
bnxt_rdma_aux_device_uninit(struct bnxt_softc * softc)2717 static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc)
2718 {
2719 	struct bnxt_aux_dev *bnxt_adev = softc->aux_dev;
2720 
2721 	/* Skip if no auxiliary device init was done. */
2722 	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
2723 		return;
2724 
2725 	if (IS_ERR_OR_NULL(bnxt_adev))
2726 		return;
2727 
2728 	bnxt_rdma_aux_device_del(softc);
2729 
2730 	if (bnxt_adev->id >= 0)
2731 		ida_free(&bnxt_aux_dev_ids, bnxt_adev->id);
2732 
2733 	bnxt_aux_dev_free(softc);
2734 }
2735 
bnxt_rdma_aux_device_init(struct bnxt_softc * softc)2736 static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc)
2737 {
2738 	int rc;
2739 
2740 	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
2741 		return;
2742 
2743 	softc->aux_dev = bnxt_aux_dev_init(softc);
2744 	if (IS_ERR_OR_NULL(softc->aux_dev)) {
2745 		device_printf(softc->dev, "Failed to init auxiliary device for ROCE\n");
2746 		goto skip_aux_init;
2747 	}
2748 
2749 	softc->aux_dev->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
2750 	if (softc->aux_dev->id < 0) {
2751 		device_printf(softc->dev, "ida alloc failed for ROCE auxiliary device\n");
2752 		bnxt_aux_dev_free(softc);
2753 		goto skip_aux_init;
2754 	}
2755 
2756 	msleep(1000 * 2);
2757 	/* If aux bus init fails, continue with netdev init. */
2758 	rc = bnxt_rdma_aux_device_add(softc);
2759 	if (rc) {
2760 		device_printf(softc->dev, "Failed to add auxiliary device for ROCE\n");
2761 		msleep(1000 * 2);
2762 		ida_free(&bnxt_aux_dev_ids, softc->aux_dev->id);
2763 	}
2764 	device_printf(softc->dev, "%s:%d Added auxiliary device (id %d) for ROCE \n",
2765 		      __func__, __LINE__, softc->aux_dev->id);
2766 skip_aux_init:
2767 	return;
2768 }
2769 
2770 /* Device configuration */
/*
 * Bring the device fully up: reset/quiesce, then allocate stats contexts,
 * NQ/completion/RX/AG/TX rings, ring groups, the RSS context and VNIC, and
 * finally enable interrupts.  The HWRM allocation order below is
 * order-critical; on any failure everything is torn down via a function
 * reset and the cached HW ids are invalidated.
 */
static void
bnxt_init(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifmediareq ifmr;
	int i;
	int rc;

	/*
	 * Pre-P5: a firmware function reset gives us a clean slate.
	 * P5: no such reset; if already initialized, stop first so the
	 * resource-free path runs before re-allocating.
	 */
	if (!BNXT_CHIP_P5(softc)) {
		rc = bnxt_hwrm_func_reset(softc);
		if (rc)
			return;
	} else if (softc->is_dev_init) {
		bnxt_stop(ctx);
	}

	softc->is_dev_init = true;
	bnxt_clear_ids(softc);

	/* P5 uses per-queue NQs instead of a default completion ring. */
	if (BNXT_CHIP_P5(softc))
		goto skip_def_cp_ring;
	/* Allocate the default completion ring */
	softc->def_cp_ring.cons = UINT32_MAX;
	softc->def_cp_ring.v_bit = 1;
	bnxt_mark_cpr_invalid(&softc->def_cp_ring);
	rc = bnxt_hwrm_ring_alloc(softc,
			HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
			&softc->def_cp_ring.ring);
	if (rc)
		goto fail;
skip_def_cp_ring:
	for (i = 0; i < softc->nrxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->rx_cp_rings[i],
		    softc->rx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5(softc)) {
			/* Allocate the NQ */
			softc->nq_rings[i].cons = 0;
			softc->nq_rings[i].v_bit = 1;
			softc->nq_rings[i].last_idx = UINT32_MAX;
			bnxt_mark_cpr_invalid(&softc->nq_rings[i]);
			rc = bnxt_hwrm_ring_alloc(softc,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
					&softc->nq_rings[i].ring);
			if (rc)
				goto fail;

			/* Arm the NQ doorbell right away. */
			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
		}
		/* Allocate the completion ring */
		softc->rx_cp_rings[i].cons = UINT32_MAX;
		softc->rx_cp_rings[i].v_bit = 1;
		softc->rx_cp_rings[i].last_idx = UINT32_MAX;
		bnxt_mark_cpr_invalid(&softc->rx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->rx_cp_rings[i].ring);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5(softc))
			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);

		/* Allocate the RX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->rx_rings[i]);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->rx_rings[i], 0);

		/* Allocate the AG ring */
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
				&softc->ag_rings[i]);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->ag_rings[i], 0);

		/* Allocate the ring group */
		softc->grp_info[i].stats_ctx =
		    softc->rx_cp_rings[i].stats_ctx_id;
		softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
		softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
		softc->grp_info[i].cp_ring_id =
		    softc->rx_cp_rings[i].ring.phys_id;
		rc = bnxt_hwrm_ring_grp_alloc(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;
	}

	/* And now set the default CP / NQ ring for the async */
	rc = bnxt_cfg_async_cr(softc);
	if (rc)
		goto fail;

	/* Allocate the VNIC RSS context */
	rc = bnxt_hwrm_vnic_ctx_alloc(softc, &softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	/* Allocate the vnic */
	softc->vnic_info.def_ring_grp = softc->grp_info[0].grp_id;
	softc->vnic_info.mru = softc->scctx->isc_max_frame_size;
	rc = bnxt_hwrm_vnic_alloc(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_set_hds(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_set_filter(softc);
	if (rc)
		goto fail;

	/* Fill and program the RSS indirection table. */
	bnxt_rss_grp_tbl_init(softc);

	rc = bnxt_hwrm_rss_cfg(softc, &softc->vnic_info,
	    softc->vnic_info.rss_hash_type);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_tpa_cfg(softc);
	if (rc)
		goto fail;

	for (i = 0; i < softc->ntxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->tx_cp_rings[i],
		    softc->tx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		/* Allocate the completion ring */
		softc->tx_cp_rings[i].cons = UINT32_MAX;
		softc->tx_cp_rings[i].v_bit = 1;
		bnxt_mark_cpr_invalid(&softc->tx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->tx_cp_rings[i].ring);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5(softc))
			softc->db_ops.bnxt_db_tx_cq(&softc->tx_cp_rings[i], 1);

		/* Allocate the TX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				&softc->tx_rings[i]);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_tx(&softc->tx_rings[i], 0);
	}

	/* Everything allocated: enable interrupts and report link/media. */
	bnxt_do_enable_intr(&softc->def_cp_ring);
	bnxt_get_port_module_status(softc);
	bnxt_media_status(softc->ctx, &ifmr);
	bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
	return;

fail:
	/* Undo any partial allocation by resetting and forgetting HW ids. */
	bnxt_func_reset(softc);
	bnxt_clear_ids(softc);
	return;
}
2941 
2942 static void
bnxt_stop(if_ctx_t ctx)2943 bnxt_stop(if_ctx_t ctx)
2944 {
2945 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2946 
2947 	softc->is_dev_init = false;
2948 	bnxt_do_disable_intr(&softc->def_cp_ring);
2949 	bnxt_func_reset(softc);
2950 	bnxt_clear_ids(softc);
2951 	return;
2952 }
2953 
2954 static u_int
bnxt_copy_maddr(void * arg,struct sockaddr_dl * sdl,u_int cnt)2955 bnxt_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2956 {
2957 	uint8_t *mta = arg;
2958 
2959 	if (cnt == BNXT_MAX_MC_ADDRS)
2960 		return (1);
2961 
2962 	bcopy(LLADDR(sdl), &mta[cnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
2963 
2964 	return (1);
2965 }
2966 
2967 static void
bnxt_multi_set(if_ctx_t ctx)2968 bnxt_multi_set(if_ctx_t ctx)
2969 {
2970 	struct bnxt_softc *softc = iflib_get_softc(ctx);
2971 	if_t ifp = iflib_get_ifp(ctx);
2972 	uint8_t *mta;
2973 	int mcnt;
2974 
2975 	mta = softc->vnic_info.mc_list.idi_vaddr;
2976 	bzero(mta, softc->vnic_info.mc_list.idi_size);
2977 	mcnt = if_foreach_llmaddr(ifp, bnxt_copy_maddr, mta);
2978 
2979 	if (mcnt > BNXT_MAX_MC_ADDRS) {
2980 		softc->vnic_info.rx_mask |=
2981 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
2982 		bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
2983 	} else {
2984 		softc->vnic_info.rx_mask &=
2985 		    ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
2986 		bus_dmamap_sync(softc->vnic_info.mc_list.idi_tag,
2987 		    softc->vnic_info.mc_list.idi_map, BUS_DMASYNC_PREWRITE);
2988 		softc->vnic_info.mc_list_count = mcnt;
2989 		softc->vnic_info.rx_mask |=
2990 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
2991 		if (bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info))
2992 			device_printf(softc->dev,
2993 			    "set_multi: rx_mask set failed\n");
2994 	}
2995 }
2996 
2997 static int
bnxt_mtu_set(if_ctx_t ctx,uint32_t mtu)2998 bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu)
2999 {
3000 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3001 
3002 	if (mtu > BNXT_MAX_MTU)
3003 		return EINVAL;
3004 
3005 	softc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3006 	softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
3007 	return 0;
3008 }
3009 
/*
 * iflib media-status callback: refresh link state from firmware and fill
 * *ifmr with the active media, duplex, and pause flags.
 */
static void
bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_link_info *link_info = &softc->link_info;
	struct ifmedia_entry *next;
	uint64_t target_baudrate = bnxt_get_baudrate(link_info);
	int active_media = IFM_UNKNOWN;

	/* Re-query firmware so the reported state is current. */
	bnxt_update_link(softc, true);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (link_info->link_up)
		ifmr->ifm_status |= IFM_ACTIVE;
	else
		ifmr->ifm_status &= ~IFM_ACTIVE;

	if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

        /*
         * Go through the list of supported media which got prepared
         * as part of bnxt_add_media_types() using api ifmedia_add().
	 * The first entry whose baudrate matches the current link speed
	 * is reported as the active media.
         */
	LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
		if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
			active_media = next->ifm_media;
			break;
		}
	}
	ifmr->ifm_active |= active_media;

	/* Reflect the negotiated flow-control state. */
	if (link_info->flow_ctrl.rx)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (link_info->flow_ctrl.tx)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	bnxt_report_link(softc);
	return;
}
3054 
/*
 * iflib media-change callback: translate the requested ifmedia subtype into
 * the HWRM forced-speed (autoneg off) or advertised-speed (autoneg on)
 * settings, then push the new link configuration to firmware.
 */
static int
bnxt_media_change(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);
	struct ifmediareq ifmr;
	int rc;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	/*
	 * NOTE(review): req_signal_mode is preset to PAM4 here and only the
	 * PAM4-speed cases below re-assert it -- presumably the NRZ cases
	 * ignore this field unless a PAM4 speed is forced; confirm against
	 * bnxt_hwrm_set_link_setting().
	 */
	softc->link_info.req_signal_mode =
			HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;

	/* Forced-speed cases clear BNXT_AUTONEG_SPEED; autoneg cases set it. */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_100_T:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB;
		break;
	case IFM_1000_KX:
	case IFM_1000_SGMII:
	case IFM_1000_CX:
	case IFM_1000_SX:
	case IFM_1000_LX:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB;
		break;
	case IFM_2500_KX:
	case IFM_2500_T:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB;
		break;
	case IFM_10G_CR1:
	case IFM_10G_KR:
	case IFM_10G_LR:
	case IFM_10G_SR:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case IFM_20G_KR2:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB;
		break;
	case IFM_25G_CR:
	case IFM_25G_KR:
	case IFM_25G_SR:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;
		break;
	case IFM_40G_CR4:
	case IFM_40G_KR4:
	case IFM_40G_LR4:
	case IFM_40G_SR4:
	case IFM_40G_XLAUI:
	case IFM_40G_XLAUI_AC:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case IFM_50G_CR2:
	case IFM_50G_KR2:
	case IFM_50G_SR2:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	/* 50G single-lane media require PAM4 signalling. */
	case IFM_50G_CP:
	case IFM_50G_LR:
	case IFM_50G_SR:
	case IFM_50G_KR_PAM4:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB;
		softc->link_info.req_signal_mode =
			HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;
		softc->link_info.force_pam4_speed_set_by_user = true;
		break;
	case IFM_100G_CR4:
	case IFM_100G_KR4:
	case IFM_100G_LR4:
	case IFM_100G_SR4:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
		break;
	/* 100G two-lane media require PAM4 signalling. */
	case IFM_100G_CP2:
	case IFM_100G_SR2:
	case IFM_100G_KR_PAM4:
	case IFM_100G_KR2_PAM4:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB;
		softc->link_info.req_signal_mode =
			HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;
		softc->link_info.force_pam4_speed_set_by_user = true;
		break;
	case IFM_200G_SR4:
	case IFM_200G_FR4:
	case IFM_200G_LR4:
	case IFM_200G_DR4:
	case IFM_200G_CR4_PAM4:
	case IFM_200G_KR4_PAM4:
		softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
		softc->link_info.req_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
		softc->link_info.force_pam4_speed_set_by_user = true;
		softc->link_info.req_signal_mode =
			HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;
		break;
	/* Base-T media autonegotiate, advertising a single speed. */
	case IFM_1000_T:
		softc->link_info.advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
		softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
		break;
	case IFM_10G_T:
		softc->link_info.advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
		softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
		break;
	default:
		device_printf(softc->dev,
		    "Unsupported media type!  Using auto\n");
		/* Fall-through */
	case IFM_AUTO:
		// Auto
		softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
		break;
	}
	/* Push the new settings to firmware and refresh the media state. */
	rc = bnxt_hwrm_set_link_setting(softc, true, true, true);
	bnxt_media_status(softc->ctx, &ifmr);
	return rc;
}
3191 
3192 static int
bnxt_promisc_set(if_ctx_t ctx,int flags)3193 bnxt_promisc_set(if_ctx_t ctx, int flags)
3194 {
3195 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3196 	if_t ifp = iflib_get_ifp(ctx);
3197 	int rc;
3198 
3199 	if (if_getflags(ifp) & IFF_ALLMULTI ||
3200 	    if_llmaddr_count(ifp) > BNXT_MAX_MC_ADDRS)
3201 		softc->vnic_info.rx_mask |=
3202 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3203 	else
3204 		softc->vnic_info.rx_mask &=
3205 		    ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3206 
3207 	if (if_getflags(ifp) & IFF_PROMISC)
3208 		softc->vnic_info.rx_mask |=
3209 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS |
3210 		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
3211 	else
3212 		softc->vnic_info.rx_mask &=
3213 		    ~(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS);
3214 
3215 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
3216 
3217 	return rc;
3218 }
3219 
3220 static uint64_t
bnxt_get_counter(if_ctx_t ctx,ift_counter cnt)3221 bnxt_get_counter(if_ctx_t ctx, ift_counter cnt)
3222 {
3223 	if_t ifp = iflib_get_ifp(ctx);
3224 
3225 	if (cnt < IFCOUNTERS)
3226 		return if_get_counter_default(ifp, cnt);
3227 
3228 	return 0;
3229 }
3230 
3231 static void
bnxt_update_admin_status(if_ctx_t ctx)3232 bnxt_update_admin_status(if_ctx_t ctx)
3233 {
3234 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3235 
3236 	/*
3237 	 * When SR-IOV is enabled, avoid each VF sending this HWRM
3238 	 * request every sec with which firmware timeouts can happen
3239 	 */
3240 	if (!BNXT_PF(softc))
3241 		return;
3242 
3243 	bnxt_hwrm_port_qstats(softc);
3244 
3245 	if (BNXT_CHIP_P5(softc) &&
3246 	    (softc->flags & BNXT_FLAG_FW_CAP_EXT_STATS))
3247 		bnxt_hwrm_port_qstats_ext(softc);
3248 
3249 	if (BNXT_CHIP_P5(softc)) {
3250 		struct ifmediareq ifmr;
3251 
3252 		if (bit_test(softc->state_bv, BNXT_STATE_LINK_CHANGE)) {
3253 			bit_clear(softc->state_bv, BNXT_STATE_LINK_CHANGE);
3254 			bnxt_media_status(softc->ctx, &ifmr);
3255 		}
3256 	}
3257 
3258 	return;
3259 }
3260 
3261 static void
bnxt_if_timer(if_ctx_t ctx,uint16_t qid)3262 bnxt_if_timer(if_ctx_t ctx, uint16_t qid)
3263 {
3264 
3265 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3266 	uint64_t ticks_now = ticks;
3267 
3268         /* Schedule bnxt_update_admin_status() once per sec */
3269 	if (ticks_now - softc->admin_ticks >= hz) {
3270 		softc->admin_ticks = ticks_now;
3271 		iflib_admin_intr_deferred(ctx);
3272 	}
3273 
3274 	return;
3275 }
3276 
3277 static void inline
bnxt_do_enable_intr(struct bnxt_cp_ring * cpr)3278 bnxt_do_enable_intr(struct bnxt_cp_ring *cpr)
3279 {
3280 	struct bnxt_softc *softc = cpr->ring.softc;
3281 
3282 	if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
3283 		return;
3284 
3285 	if (BNXT_CHIP_P5(softc))
3286 		softc->db_ops.bnxt_db_nq(cpr, 1);
3287 	else
3288 		softc->db_ops.bnxt_db_rx_cq(cpr, 1);
3289 }
3290 
3291 static void inline
bnxt_do_disable_intr(struct bnxt_cp_ring * cpr)3292 bnxt_do_disable_intr(struct bnxt_cp_ring *cpr)
3293 {
3294 	struct bnxt_softc *softc = cpr->ring.softc;
3295 
3296 	if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
3297 		return;
3298 
3299 	if (BNXT_CHIP_P5(softc))
3300 		softc->db_ops.bnxt_db_nq(cpr, 0);
3301 	else
3302 		softc->db_ops.bnxt_db_rx_cq(cpr, 0);
3303 }
3304 
3305 /* Enable all interrupts */
3306 static void
bnxt_intr_enable(if_ctx_t ctx)3307 bnxt_intr_enable(if_ctx_t ctx)
3308 {
3309 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3310 	int i;
3311 
3312 	bnxt_do_enable_intr(&softc->def_cp_ring);
3313 	for (i = 0; i < softc->nrxqsets; i++)
3314 		if (BNXT_CHIP_P5(softc))
3315 			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
3316 		else
3317 			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);
3318 
3319 	return;
3320 }
3321 
3322 /* Enable interrupt for a single queue */
static int
bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);

	/*
	 * On P5 chips TX completions are signalled through the per-queue NQ;
	 * older chips re-arm the TX completion ring directly.
	 * NOTE(review): the non-P5 path uses the bnxt_db_rx_cq doorbell op
	 * on a TX completion ring -- presumably the CQ doorbell format is
	 * shared between RX and TX; confirm against the db_ops definitions.
	 */
	if (BNXT_CHIP_P5(softc))
		softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
	else
		softc->db_ops.bnxt_db_rx_cq(&softc->tx_cp_rings[qid], 1);

	return 0;
}
3335 
3336 static void
bnxt_process_cmd_cmpl(struct bnxt_softc * softc,hwrm_cmpl_t * cmd_cmpl)3337 bnxt_process_cmd_cmpl(struct bnxt_softc *softc, hwrm_cmpl_t *cmd_cmpl)
3338 {
3339 	device_printf(softc->dev, "cmd sequence number %d\n",
3340 			cmd_cmpl->sequence_id);
3341 	return;
3342 }
3343 
3344 static void
bnxt_process_async_msg(struct bnxt_cp_ring * cpr,tx_cmpl_t * cmpl)3345 bnxt_process_async_msg(struct bnxt_cp_ring *cpr, tx_cmpl_t *cmpl)
3346 {
3347 	struct bnxt_softc *softc = cpr->ring.softc;
3348 	uint16_t type = cmpl->flags_type & TX_CMPL_TYPE_MASK;
3349 
3350 	switch (type) {
3351 	case HWRM_CMPL_TYPE_HWRM_DONE:
3352 		bnxt_process_cmd_cmpl(softc, (hwrm_cmpl_t *)cmpl);
3353 		break;
3354 	case HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT:
3355 		bnxt_handle_async_event(softc, (cmpl_base_t *) cmpl);
3356 		break;
3357 	default:
3358 		device_printf(softc->dev, "%s:%d Unhandled async message %x\n",
3359 				__FUNCTION__, __LINE__, type);
3360 		break;
3361 	}
3362 }
3363 
3364 void
process_nq(struct bnxt_softc * softc,uint16_t nqid)3365 process_nq(struct bnxt_softc *softc, uint16_t nqid)
3366 {
3367 	struct bnxt_cp_ring *cpr = &softc->nq_rings[nqid];
3368 	nq_cn_t *cmp = (nq_cn_t *) cpr->ring.vaddr;
3369 	bool v_bit = cpr->v_bit;
3370 	uint32_t cons = cpr->cons;
3371 	uint16_t nq_type, nqe_cnt = 0;
3372 
3373 	while (1) {
3374 		if (!NQ_VALID(&cmp[cons], v_bit))
3375 			goto done;
3376 
3377 		nq_type = NQ_CN_TYPE_MASK & cmp[cons].type;
3378 
3379 		if (nq_type != NQ_CN_TYPE_CQ_NOTIFICATION)
3380 			 bnxt_process_async_msg(cpr, (tx_cmpl_t *)&cmp[cons]);
3381 
3382 		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
3383 		nqe_cnt++;
3384 	}
3385 done:
3386 	if (nqe_cnt) {
3387 		cpr->cons = cons;
3388 		cpr->v_bit = v_bit;
3389 	}
3390 }
3391 
3392 static int
bnxt_rx_queue_intr_enable(if_ctx_t ctx,uint16_t qid)3393 bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
3394 {
3395 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3396 
3397 	if (BNXT_CHIP_P5(softc)) {
3398 		process_nq(softc, qid);
3399 		softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
3400 	}
3401 	softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[qid], 1);
3402         return 0;
3403 }
3404 
3405 /* Disable all interrupts */
3406 static void
bnxt_disable_intr(if_ctx_t ctx)3407 bnxt_disable_intr(if_ctx_t ctx)
3408 {
3409 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3410 	int i;
3411 
3412 	/*
3413 	 * NOTE: These TX interrupts should never get enabled, so don't
3414 	 * update the index
3415 	 */
3416 	for (i = 0; i < softc->nrxqsets; i++)
3417 		if (BNXT_CHIP_P5(softc))
3418 			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 0);
3419 		else
3420 			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 0);
3421 
3422 
3423 	return;
3424 }
3425 
3426 static int
bnxt_msix_intr_assign(if_ctx_t ctx,int msix)3427 bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
3428 {
3429 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3430 	struct bnxt_cp_ring *ring;
3431 	struct if_irq *irq;
3432 	uint16_t id;
3433 	int rc;
3434 	int i;
3435 	char irq_name[16];
3436 
3437 	if (BNXT_CHIP_P5(softc))
3438 		goto skip_default_cp;
3439 
3440 	rc = iflib_irq_alloc_generic(ctx, &softc->def_cp_ring.irq,
3441 	    softc->def_cp_ring.ring.id + 1, IFLIB_INTR_ADMIN,
3442 	    bnxt_handle_def_cp, softc, 0, "def_cp");
3443 	if (rc) {
3444 		device_printf(iflib_get_dev(ctx),
3445 		    "Failed to register default completion ring handler\n");
3446 		return rc;
3447 	}
3448 
3449 skip_default_cp:
3450 	for (i=0; i<softc->scctx->isc_nrxqsets; i++) {
3451 		if (BNXT_CHIP_P5(softc)) {
3452 			irq = &softc->nq_rings[i].irq;
3453 			id = softc->nq_rings[i].ring.id;
3454 			ring = &softc->nq_rings[i];
3455 		} else {
3456 			irq = &softc->rx_cp_rings[i].irq;
3457 			id = softc->rx_cp_rings[i].ring.id ;
3458 			ring = &softc->rx_cp_rings[i];
3459 		}
3460 		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
3461 		rc = iflib_irq_alloc_generic(ctx, irq, id + 1, IFLIB_INTR_RX,
3462 				bnxt_handle_isr, ring, i, irq_name);
3463 		if (rc) {
3464 			device_printf(iflib_get_dev(ctx),
3465 			    "Failed to register RX completion ring handler\n");
3466 			i--;
3467 			goto fail;
3468 		}
3469 	}
3470 
3471 	for (i=0; i<softc->scctx->isc_ntxqsets; i++)
3472 		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, "tx_cp");
3473 
3474 	return rc;
3475 
3476 fail:
3477 	for (; i>=0; i--)
3478 		iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
3479 	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
3480 	return rc;
3481 }
3482 
3483 /*
3484  * We're explicitly allowing duplicates here.  They will need to be
3485  * removed as many times as they are added.
3486  */
3487 static void
bnxt_vlan_register(if_ctx_t ctx,uint16_t vtag)3488 bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag)
3489 {
3490 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3491 	struct bnxt_vlan_tag *new_tag;
3492 
3493 	new_tag = malloc(sizeof(struct bnxt_vlan_tag), M_DEVBUF, M_NOWAIT);
3494 	if (new_tag == NULL)
3495 		return;
3496 	new_tag->tag = vtag;
3497 	new_tag->filter_id = -1;
3498 	SLIST_INSERT_HEAD(&softc->vnic_info.vlan_tags, new_tag, next);
3499 };
3500 
3501 static void
bnxt_vlan_unregister(if_ctx_t ctx,uint16_t vtag)3502 bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
3503 {
3504 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3505 	struct bnxt_vlan_tag *vlan_tag;
3506 
3507 	SLIST_FOREACH(vlan_tag, &softc->vnic_info.vlan_tags, next) {
3508 		if (vlan_tag->tag == vtag) {
3509 			SLIST_REMOVE(&softc->vnic_info.vlan_tags, vlan_tag,
3510 			    bnxt_vlan_tag, next);
3511 			free(vlan_tag, M_DEVBUF);
3512 			break;
3513 		}
3514 	}
3515 }
3516 
3517 static int
bnxt_wol_config(if_ctx_t ctx)3518 bnxt_wol_config(if_ctx_t ctx)
3519 {
3520 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3521 	if_t ifp = iflib_get_ifp(ctx);
3522 
3523 	if (!softc)
3524 		return -EBUSY;
3525 
3526 	if (!bnxt_wol_supported(softc))
3527 		return -ENOTSUP;
3528 
3529 	if (if_getcapenable(ifp) & IFCAP_WOL_MAGIC) {
3530 		if (!softc->wol) {
3531 			if (bnxt_hwrm_alloc_wol_fltr(softc))
3532 				return -EBUSY;
3533 			softc->wol = 1;
3534 		}
3535 	} else {
3536 		if (softc->wol) {
3537 			if (bnxt_hwrm_free_wol_fltr(softc))
3538 				return -EBUSY;
3539 			softc->wol = 0;
3540 		}
3541 	}
3542 
3543 	return 0;
3544 }
3545 
3546 static bool
bnxt_if_needs_restart(if_ctx_t ctx __unused,enum iflib_restart_event event)3547 bnxt_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
3548 {
3549 	switch (event) {
3550 	case IFLIB_RESTART_VLAN_CONFIG:
3551 	default:
3552 		return (false);
3553 	}
3554 }
3555 
3556 static int
bnxt_shutdown(if_ctx_t ctx)3557 bnxt_shutdown(if_ctx_t ctx)
3558 {
3559 	bnxt_wol_config(ctx);
3560 	return 0;
3561 }
3562 
3563 static int
bnxt_suspend(if_ctx_t ctx)3564 bnxt_suspend(if_ctx_t ctx)
3565 {
3566 	bnxt_wol_config(ctx);
3567 	return 0;
3568 }
3569 
3570 static int
bnxt_resume(if_ctx_t ctx)3571 bnxt_resume(if_ctx_t ctx)
3572 {
3573 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3574 
3575 	bnxt_get_wol_settings(softc);
3576 	return 0;
3577 }
3578 
3579 static int
bnxt_priv_ioctl(if_ctx_t ctx,u_long command,caddr_t data)3580 bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
3581 {
3582 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3583 	struct ifreq *ifr = (struct ifreq *)data;
3584 	struct bnxt_ioctl_header *ioh;
3585 	size_t iol;
3586 	int rc = ENOTSUP;
3587 	struct bnxt_ioctl_data iod_storage, *iod = &iod_storage;
3588 
3589 	switch (command) {
3590 	case SIOCGPRIVATE_0:
3591 		if ((rc = priv_check(curthread, PRIV_DRIVER)) != 0)
3592 			goto exit;
3593 
3594 		ioh = ifr_buffer_get_buffer(ifr);
3595 		iol = ifr_buffer_get_length(ifr);
3596 		if (iol > sizeof(iod_storage))
3597 			return (EINVAL);
3598 
3599 		if ((rc = copyin(ioh, iod, iol)) != 0)
3600 			goto exit;
3601 
3602 		switch (iod->hdr.type) {
3603 		case BNXT_HWRM_NVM_FIND_DIR_ENTRY:
3604 		{
3605 			struct bnxt_ioctl_hwrm_nvm_find_dir_entry *find =
3606 			    &iod->find;
3607 
3608 			rc = bnxt_hwrm_nvm_find_dir_entry(softc, find->type,
3609 			    &find->ordinal, find->ext, &find->index,
3610 			    find->use_index, find->search_opt,
3611 			    &find->data_length, &find->item_length,
3612 			    &find->fw_ver);
3613 			if (rc) {
3614 				iod->hdr.rc = rc;
3615 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3616 				    sizeof(ioh->rc));
3617 			} else {
3618 				iod->hdr.rc = 0;
3619 				rc = copyout(iod, ioh, iol);
3620 			}
3621 
3622 			goto exit;
3623 		}
3624 		case BNXT_HWRM_NVM_READ:
3625 		{
3626 			struct bnxt_ioctl_hwrm_nvm_read *rd = &iod->read;
3627 			struct iflib_dma_info dma_data;
3628 			size_t offset;
3629 			size_t remain;
3630 			size_t csize;
3631 
3632 			/*
3633 			 * Some HWRM versions can't read more than 0x8000 bytes
3634 			 */
3635 			rc = iflib_dma_alloc(softc->ctx,
3636 			    min(rd->length, 0x8000), &dma_data, BUS_DMA_NOWAIT);
3637 			if (rc)
3638 				break;
3639 			for (remain = rd->length, offset = 0;
3640 			    remain && offset < rd->length; offset += 0x8000) {
3641 				csize = min(remain, 0x8000);
3642 				rc = bnxt_hwrm_nvm_read(softc, rd->index,
3643 				    rd->offset + offset, csize, &dma_data);
3644 				if (rc) {
3645 					iod->hdr.rc = rc;
3646 					rc = copyout(&iod->hdr.rc, &ioh->rc,
3647 					    sizeof(ioh->rc));
3648 					break;
3649 				} else {
3650 					rc = copyout(dma_data.idi_vaddr,
3651 					    rd->data + offset, csize);
3652 					iod->hdr.rc = rc;
3653 				}
3654 				remain -= csize;
3655 			}
3656 			if (rc == 0)
3657 				rc = copyout(iod, ioh, iol);
3658 
3659 			iflib_dma_free(&dma_data);
3660 			goto exit;
3661 		}
3662 		case BNXT_HWRM_FW_RESET:
3663 		{
3664 			struct bnxt_ioctl_hwrm_fw_reset *rst =
3665 			    &iod->reset;
3666 
3667 			rc = bnxt_hwrm_fw_reset(softc, rst->processor,
3668 			    &rst->selfreset);
3669 			if (rc) {
3670 				iod->hdr.rc = rc;
3671 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3672 				    sizeof(ioh->rc));
3673 			} else {
3674 				iod->hdr.rc = 0;
3675 				rc = copyout(iod, ioh, iol);
3676 			}
3677 
3678 			goto exit;
3679 		}
3680 		case BNXT_HWRM_FW_QSTATUS:
3681 		{
3682 			struct bnxt_ioctl_hwrm_fw_qstatus *qstat =
3683 			    &iod->status;
3684 
3685 			rc = bnxt_hwrm_fw_qstatus(softc, qstat->processor,
3686 			    &qstat->selfreset);
3687 			if (rc) {
3688 				iod->hdr.rc = rc;
3689 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3690 				    sizeof(ioh->rc));
3691 			} else {
3692 				iod->hdr.rc = 0;
3693 				rc = copyout(iod, ioh, iol);
3694 			}
3695 
3696 			goto exit;
3697 		}
3698 		case BNXT_HWRM_NVM_WRITE:
3699 		{
3700 			struct bnxt_ioctl_hwrm_nvm_write *wr =
3701 			    &iod->write;
3702 
3703 			rc = bnxt_hwrm_nvm_write(softc, wr->data, true,
3704 			    wr->type, wr->ordinal, wr->ext, wr->attr,
3705 			    wr->option, wr->data_length, wr->keep,
3706 			    &wr->item_length, &wr->index);
3707 			if (rc) {
3708 				iod->hdr.rc = rc;
3709 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3710 				    sizeof(ioh->rc));
3711 			}
3712 			else {
3713 				iod->hdr.rc = 0;
3714 				rc = copyout(iod, ioh, iol);
3715 			}
3716 
3717 			goto exit;
3718 		}
3719 		case BNXT_HWRM_NVM_ERASE_DIR_ENTRY:
3720 		{
3721 			struct bnxt_ioctl_hwrm_nvm_erase_dir_entry *erase =
3722 			    &iod->erase;
3723 
3724 			rc = bnxt_hwrm_nvm_erase_dir_entry(softc, erase->index);
3725 			if (rc) {
3726 				iod->hdr.rc = rc;
3727 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3728 				    sizeof(ioh->rc));
3729 			} else {
3730 				iod->hdr.rc = 0;
3731 				rc = copyout(iod, ioh, iol);
3732 			}
3733 
3734 			goto exit;
3735 		}
3736 		case BNXT_HWRM_NVM_GET_DIR_INFO:
3737 		{
3738 			struct bnxt_ioctl_hwrm_nvm_get_dir_info *info =
3739 			    &iod->dir_info;
3740 
3741 			rc = bnxt_hwrm_nvm_get_dir_info(softc, &info->entries,
3742 			    &info->entry_length);
3743 			if (rc) {
3744 				iod->hdr.rc = rc;
3745 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3746 				    sizeof(ioh->rc));
3747 			} else {
3748 				iod->hdr.rc = 0;
3749 				rc = copyout(iod, ioh, iol);
3750 			}
3751 
3752 			goto exit;
3753 		}
3754 		case BNXT_HWRM_NVM_GET_DIR_ENTRIES:
3755 		{
3756 			struct bnxt_ioctl_hwrm_nvm_get_dir_entries *get =
3757 			    &iod->dir_entries;
3758 			struct iflib_dma_info dma_data;
3759 
3760 			rc = iflib_dma_alloc(softc->ctx, get->max_size,
3761 			    &dma_data, BUS_DMA_NOWAIT);
3762 			if (rc)
3763 				break;
3764 			rc = bnxt_hwrm_nvm_get_dir_entries(softc, &get->entries,
3765 			    &get->entry_length, &dma_data);
3766 			if (rc) {
3767 				iod->hdr.rc = rc;
3768 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3769 				    sizeof(ioh->rc));
3770 			} else {
3771 				rc = copyout(dma_data.idi_vaddr, get->data,
3772 				    get->entry_length * get->entries);
3773 				iod->hdr.rc = rc;
3774 				if (rc == 0)
3775 					rc = copyout(iod, ioh, iol);
3776 			}
3777 			iflib_dma_free(&dma_data);
3778 
3779 			goto exit;
3780 		}
3781 		case BNXT_HWRM_NVM_VERIFY_UPDATE:
3782 		{
3783 			struct bnxt_ioctl_hwrm_nvm_verify_update *vrfy =
3784 			    &iod->verify;
3785 
3786 			rc = bnxt_hwrm_nvm_verify_update(softc, vrfy->type,
3787 			    vrfy->ordinal, vrfy->ext);
3788 			if (rc) {
3789 				iod->hdr.rc = rc;
3790 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3791 				    sizeof(ioh->rc));
3792 			} else {
3793 				iod->hdr.rc = 0;
3794 				rc = copyout(iod, ioh, iol);
3795 			}
3796 
3797 			goto exit;
3798 		}
3799 		case BNXT_HWRM_NVM_INSTALL_UPDATE:
3800 		{
3801 			struct bnxt_ioctl_hwrm_nvm_install_update *inst =
3802 			    &iod->install;
3803 
3804 			rc = bnxt_hwrm_nvm_install_update(softc,
3805 			    inst->install_type, &inst->installed_items,
3806 			    &inst->result, &inst->problem_item,
3807 			    &inst->reset_required);
3808 			if (rc) {
3809 				iod->hdr.rc = rc;
3810 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3811 				    sizeof(ioh->rc));
3812 			} else {
3813 				iod->hdr.rc = 0;
3814 				rc = copyout(iod, ioh, iol);
3815 			}
3816 
3817 			goto exit;
3818 		}
3819 		case BNXT_HWRM_NVM_MODIFY:
3820 		{
3821 			struct bnxt_ioctl_hwrm_nvm_modify *mod = &iod->modify;
3822 
3823 			rc = bnxt_hwrm_nvm_modify(softc, mod->index,
3824 			    mod->offset, mod->data, true, mod->length);
3825 			if (rc) {
3826 				iod->hdr.rc = rc;
3827 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3828 				    sizeof(ioh->rc));
3829 			} else {
3830 				iod->hdr.rc = 0;
3831 				rc = copyout(iod, ioh, iol);
3832 			}
3833 
3834 			goto exit;
3835 		}
3836 		case BNXT_HWRM_FW_GET_TIME:
3837 		{
3838 			struct bnxt_ioctl_hwrm_fw_get_time *gtm =
3839 			    &iod->get_time;
3840 
3841 			rc = bnxt_hwrm_fw_get_time(softc, &gtm->year,
3842 			    &gtm->month, &gtm->day, &gtm->hour, &gtm->minute,
3843 			    &gtm->second, &gtm->millisecond, &gtm->zone);
3844 			if (rc) {
3845 				iod->hdr.rc = rc;
3846 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3847 				    sizeof(ioh->rc));
3848 			} else {
3849 				iod->hdr.rc = 0;
3850 				rc = copyout(iod, ioh, iol);
3851 			}
3852 
3853 			goto exit;
3854 		}
3855 		case BNXT_HWRM_FW_SET_TIME:
3856 		{
3857 			struct bnxt_ioctl_hwrm_fw_set_time *stm =
3858 			    &iod->set_time;
3859 
3860 			rc = bnxt_hwrm_fw_set_time(softc, stm->year,
3861 			    stm->month, stm->day, stm->hour, stm->minute,
3862 			    stm->second, stm->millisecond, stm->zone);
3863 			if (rc) {
3864 				iod->hdr.rc = rc;
3865 				rc = copyout(&iod->hdr.rc, &ioh->rc,
3866 				    sizeof(ioh->rc));
3867 			} else {
3868 				iod->hdr.rc = 0;
3869 				rc = copyout(iod, ioh, iol);
3870 			}
3871 
3872 			goto exit;
3873 		}
3874 		}
3875 		break;
3876 	}
3877 
3878 exit:
3879 	return rc;
3880 }
3881 
3882 static int
bnxt_i2c_req(if_ctx_t ctx,struct ifi2creq * i2c)3883 bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c)
3884 {
3885 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3886 	uint8_t *data = i2c->data;
3887 	int rc;
3888 
3889 	/* No point in going further if phy status indicates
3890 	 * module is not inserted or if it is powered down or
3891 	 * if it is of type 10GBase-T
3892 	 */
3893 	if (softc->link_info.module_status >
3894 		HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG)
3895 		return -EOPNOTSUPP;
3896 
3897 	/* This feature is not supported in older firmware versions */
3898 	if (!BNXT_CHIP_P5(softc) ||
3899 	    (softc->hwrm_spec_code < 0x10202))
3900 		return -EOPNOTSUPP;
3901 
3902 
3903 	rc = bnxt_read_sfp_module_eeprom_info(softc, I2C_DEV_ADDR_A0, 0, 0, 0,
3904 		i2c->offset, i2c->len, data);
3905 
3906 	return rc;
3907 }
3908 
3909 /*
3910  * Support functions
3911  */
3912 static int
bnxt_probe_phy(struct bnxt_softc * softc)3913 bnxt_probe_phy(struct bnxt_softc *softc)
3914 {
3915 	struct bnxt_link_info *link_info = &softc->link_info;
3916 	int rc = 0;
3917 
3918 	softc->phy_flags = 0;
3919 	rc = bnxt_hwrm_phy_qcaps(softc);
3920 	if (rc) {
3921 		device_printf(softc->dev,
3922 			      "Probe phy can't get phy capabilities (rc: %x)\n", rc);
3923 		return rc;
3924 	}
3925 
3926 	rc = bnxt_update_link(softc, false);
3927 	if (rc) {
3928 		device_printf(softc->dev,
3929 		    "Probe phy can't update link (rc: %x)\n", rc);
3930 		return (rc);
3931 	}
3932 
3933 	bnxt_get_port_module_status(softc);
3934 
3935 	/*initialize the ethool setting copy with NVM settings */
3936 	if (link_info->auto_mode != HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
3937 		link_info->autoneg |= BNXT_AUTONEG_SPEED;
3938 
3939 	link_info->req_duplex = link_info->duplex_setting;
3940 
3941 	/* NRZ link speed */
3942 	if (link_info->autoneg & BNXT_AUTONEG_SPEED)
3943 		link_info->req_link_speed = link_info->auto_link_speeds;
3944 	else
3945 		link_info->req_link_speed = link_info->force_link_speed;
3946 
3947 	/* PAM4 link speed */
3948 	if (link_info->auto_pam4_link_speeds)
3949 		link_info->req_link_speed = link_info->auto_pam4_link_speeds;
3950 	if (link_info->force_pam4_link_speed)
3951 		link_info->req_link_speed = link_info->force_pam4_link_speed;
3952 
3953 	return (rc);
3954 }
3955 
3956 static void
add_media(struct bnxt_softc * softc,uint8_t media_type,uint16_t supported,uint16_t supported_pam4)3957 add_media(struct bnxt_softc *softc, uint8_t media_type, uint16_t supported,
3958 	  uint16_t supported_pam4)
3959 {
3960 	switch (media_type) {
3961 		case BNXT_MEDIA_CR:
3962 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_CP);
3963 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_100G, IFM_100G_CP2);
3964 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_CR4_PAM4);
3965 			BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_CR4);
3966 			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_CR2);
3967 			BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_CR4);
3968 			BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_CR);
3969 			BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_CR1);
3970 			BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_CX);
3971 			break;
3972 
3973 		case BNXT_MEDIA_LR:
3974 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_LR);
3975 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_LR4);
3976 			BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_LR4);
3977 			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_LR2);
3978 			BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_LR4);
3979 			BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_LR);
3980 			BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_LR);
3981 			BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_LX);
3982 			break;
3983 
3984 		case BNXT_MEDIA_SR:
3985 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_SR);
3986 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_100G, IFM_100G_SR2);
3987 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_SR4);
3988 			BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_SR4);
3989 			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_SR2);
3990 			BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_SR4);
3991 			BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_SR);
3992 			BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_SR);
3993 			BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SX);
3994 			break;
3995 
3996 		case BNXT_MEDIA_KR:
3997 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_KR_PAM4);
3998 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_100G, IFM_100G_KR2_PAM4);
3999 			BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_KR4_PAM4);
4000 			BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_KR4);
4001 			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_KR2);
4002 			BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_KR4);
4003 			BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_KR4);
4004 			BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_KR);
4005 			BNXT_IFMEDIA_ADD(supported, SPEEDS_20GB, IFM_20G_KR2);
4006 			BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
4007 			BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
4008 			break;
4009 
4010 		default:
4011 			break;
4012 
4013 	}
4014 	return;
4015 
4016 }
4017 
/*
 * Populate the ifmedia list for the port from the firmware-reported PHY
 * type and supported speed masks.  Auto is always registered; NPAR
 * functions expose only Auto.  PHY types that map onto one of the four
 * generic media families (CR/LR/SR/KR) defer to add_media(); the
 * remaining types register their fixed entries inline and return.
 */
static void
bnxt_add_media_types(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	uint16_t supported = 0, supported_pam4 = 0;
	uint8_t phy_type = get_phy_type(softc), media_type;

	supported = link_info->support_speeds;
	supported_pam4 = link_info->support_pam4_speeds;

	/* Auto is always supported */
	ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);

	/* NPAR (partitioned) functions only advertise Auto. */
	if (softc->flags & BNXT_FLAG_NPAR)
		return;

	/* Collapse the firmware PHY type into a generic media family. */
	switch (phy_type) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASECR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
		media_type = BNXT_MEDIA_CR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASELR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
		media_type = BNXT_MEDIA_LR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
		media_type = BNXT_MEDIA_SR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
		media_type = BNXT_MEDIA_KR;
		break;

	/* The types below have no generic family; add entries directly. */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_ACC);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_AOC);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_XLAUI);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_XLAUI_AC);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GBHD, IFM_1000_CX);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_T);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_T);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_T);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_100MB, IFM_100_T);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10MB, IFM_10_T);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_KX);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SGMII);
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
		/* Only Autoneg is supported for TYPE_UNKNOWN */
		return;

        default:
		/* Only Autoneg is supported for new phy type values */
		device_printf(softc->dev, "phy type %d not supported by driver\n", phy_type);
		return;
	}

	/* add_media is invoked twice, once with a firmware speed mask of 0 and a valid
	 * value for both NRZ and PAM4 sig mode. This ensures accurate display of all
	 * supported medias and currently configured media in the "ifconfig -m" output
	 */

	if (link_info->sig_mode == BNXT_SIG_MODE_PAM4) {
		add_media(softc, media_type, supported, 0);
		add_media(softc, media_type, 0, supported_pam4);
	} else {
		add_media(softc, media_type, 0, supported_pam4);
		add_media(softc, media_type, supported, 0);
	}

	return;
}
4131 
4132 static int
bnxt_map_bar(struct bnxt_softc * softc,struct bnxt_bar_info * bar,int bar_num,bool shareable)4133 bnxt_map_bar(struct bnxt_softc *softc, struct bnxt_bar_info *bar, int bar_num, bool shareable)
4134 {
4135 	uint32_t	flag;
4136 
4137 	if (bar->res != NULL) {
4138 		device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
4139 		return EDOOFUS;
4140 	}
4141 
4142 	bar->rid = PCIR_BAR(bar_num);
4143 	flag = RF_ACTIVE;
4144 	if (shareable)
4145 		flag |= RF_SHAREABLE;
4146 
4147 	if ((bar->res =
4148 		bus_alloc_resource_any(softc->dev,
4149 			   SYS_RES_MEMORY,
4150 			   &bar->rid,
4151 			   flag)) == NULL) {
4152 		device_printf(softc->dev,
4153 		    "PCI BAR%d mapping failure\n", bar_num);
4154 		return (ENXIO);
4155 	}
4156 	bar->tag = rman_get_bustag(bar->res);
4157 	bar->handle = rman_get_bushandle(bar->res);
4158 	bar->size = rman_get_size(bar->res);
4159 
4160 	return 0;
4161 }
4162 
4163 static int
bnxt_pci_mapping(struct bnxt_softc * softc)4164 bnxt_pci_mapping(struct bnxt_softc *softc)
4165 {
4166 	int rc;
4167 
4168 	rc = bnxt_map_bar(softc, &softc->hwrm_bar, 0, true);
4169 	if (rc)
4170 		return rc;
4171 
4172 	rc = bnxt_map_bar(softc, &softc->doorbell_bar, 2, false);
4173 
4174 	return rc;
4175 }
4176 
4177 static void
bnxt_pci_mapping_free(struct bnxt_softc * softc)4178 bnxt_pci_mapping_free(struct bnxt_softc *softc)
4179 {
4180 	if (softc->hwrm_bar.res != NULL)
4181 		bus_release_resource(softc->dev, SYS_RES_MEMORY,
4182 		    softc->hwrm_bar.rid, softc->hwrm_bar.res);
4183 	softc->hwrm_bar.res = NULL;
4184 
4185 	if (softc->doorbell_bar.res != NULL)
4186 		bus_release_resource(softc->dev, SYS_RES_MEMORY,
4187 		    softc->doorbell_bar.rid, softc->doorbell_bar.res);
4188 	softc->doorbell_bar.res = NULL;
4189 }
4190 
4191 static int
bnxt_update_link(struct bnxt_softc * softc,bool chng_link_state)4192 bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state)
4193 {
4194 	struct bnxt_link_info *link_info = &softc->link_info;
4195 	uint8_t link_up = link_info->link_up;
4196 	int rc = 0;
4197 
4198 	rc = bnxt_hwrm_port_phy_qcfg(softc);
4199 	if (rc)
4200 		goto exit;
4201 
4202 	/* TODO: need to add more logic to report VF link */
4203 	if (chng_link_state) {
4204 		if (link_info->phy_link_status ==
4205 		    HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
4206 			link_info->link_up = 1;
4207 		else
4208 			link_info->link_up = 0;
4209 		if (link_up != link_info->link_up)
4210 			bnxt_report_link(softc);
4211 	} else {
4212 		/* always link down if not require to update link state */
4213 		link_info->link_up = 0;
4214 	}
4215 
4216 exit:
4217 	return rc;
4218 }
4219 
4220 #define ETHTOOL_SPEED_1000		1000
4221 #define ETHTOOL_SPEED_10000		10000
4222 #define ETHTOOL_SPEED_20000		20000
4223 #define ETHTOOL_SPEED_25000		25000
4224 #define ETHTOOL_SPEED_40000		40000
4225 #define ETHTOOL_SPEED_50000		50000
4226 #define ETHTOOL_SPEED_100000		100000
4227 #define ETHTOOL_SPEED_200000		200000
4228 #define ETHTOOL_SPEED_UNKNOWN		-1
4229 
4230 static u32
bnxt_fw_to_ethtool_speed(u16 fw_link_speed)4231 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
4232 {
4233 	switch (fw_link_speed) {
4234 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
4235 		return ETHTOOL_SPEED_1000;
4236 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
4237 		return ETHTOOL_SPEED_10000;
4238 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
4239 		return ETHTOOL_SPEED_20000;
4240 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
4241 		return ETHTOOL_SPEED_25000;
4242 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
4243 		return ETHTOOL_SPEED_40000;
4244 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
4245 		return ETHTOOL_SPEED_50000;
4246 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
4247 		return ETHTOOL_SPEED_100000;
4248 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
4249 		return ETHTOOL_SPEED_200000;
4250 	default:
4251 		return ETHTOOL_SPEED_UNKNOWN;
4252 	}
4253 }
4254 
/*
 * Log a link state/attribute change and push it to iflib, then refresh
 * the ifmedia list.  Duplicate reports are suppressed: nothing is
 * printed when the link is still down, or when it is still up with
 * unchanged duplex, PHY type and flow-control settings.
 */
void
bnxt_report_link(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	const char *duplex = NULL, *flow_ctrl = NULL;
	const char *signal_mode = "";

	/* Keep the ulp/RoCE edev's notion of link speed in sync. */
	if(softc->edev)
		softc->edev->espeed =
		    bnxt_fw_to_ethtool_speed(link_info->link_speed);

	/* Suppress reports when nothing observable changed. */
	if (link_info->link_up == link_info->last_link_up) {
		if (!link_info->link_up)
			return;
		if ((link_info->duplex == link_info->last_duplex) &&
		    (link_info->phy_type == link_info->last_phy_type) &&
                    (!(BNXT_IS_FLOW_CTRL_CHANGED(link_info))))
			return;
	}

	if (link_info->link_up) {
		if (link_info->duplex ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
			duplex = "full duplex";
		else
			duplex = "half duplex";
		if (link_info->flow_ctrl.tx & link_info->flow_ctrl.rx)
			flow_ctrl = "FC - receive & transmit";
		else if (link_info->flow_ctrl.tx)
			flow_ctrl = "FC - transmit";
		else if (link_info->flow_ctrl.rx)
			flow_ctrl = "FC - receive";
		else
			flow_ctrl = "FC - none";

		/* Decode NRZ/PAM4 signalling only when firmware says it's valid. */
		if (softc->link_info.phy_qcfg_resp.option_flags &
		    HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
			uint8_t sig_mode = softc->link_info.active_fec_sig_mode &
				      HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_MASK;
			switch (sig_mode) {
			case BNXT_SIG_MODE_NRZ:
				signal_mode = "(NRZ) ";
				break;
			case BNXT_SIG_MODE_PAM4:
				signal_mode = "(PAM4) ";
				break;
			default:
				break;
			}
		link_info->sig_mode = sig_mode;
		}

		iflib_link_state_change(softc->ctx, LINK_STATE_UP,
		    IF_Gbps(100));
		/* link_speed is in units of 100 Mbps, hence the * 100. */
		device_printf(softc->dev, "Link is UP %s %s, %s - %d Mbps \n", duplex, signal_mode,
		    flow_ctrl, (link_info->link_speed * 100));
	} else {
		iflib_link_state_change(softc->ctx, LINK_STATE_DOWN,
		    bnxt_get_baudrate(&softc->link_info));
		device_printf(softc->dev, "Link is Down\n");
	}

	/* Remember what was reported so the next call can deduplicate. */
	link_info->last_link_up = link_info->link_up;
	link_info->last_duplex = link_info->duplex;
	link_info->last_phy_type = link_info->phy_type;
	link_info->last_flow_ctrl.tx = link_info->flow_ctrl.tx;
	link_info->last_flow_ctrl.rx = link_info->flow_ctrl.rx;
	link_info->last_flow_ctrl.autoneg = link_info->flow_ctrl.autoneg;
	/* update media types */
	ifmedia_removeall(softc->media);
	bnxt_add_media_types(softc);
	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
}
4328 
4329 static int
bnxt_handle_isr(void * arg)4330 bnxt_handle_isr(void *arg)
4331 {
4332 	struct bnxt_cp_ring *cpr = arg;
4333 	struct bnxt_softc *softc = cpr->ring.softc;
4334 
4335 	cpr->int_count++;
4336 	/* Disable further interrupts for this queue */
4337 	if (!BNXT_CHIP_P5(softc))
4338 		softc->db_ops.bnxt_db_rx_cq(cpr, 0);
4339 
4340 	return FILTER_SCHEDULE_THREAD;
4341 }
4342 
4343 static int
bnxt_handle_def_cp(void * arg)4344 bnxt_handle_def_cp(void *arg)
4345 {
4346 	struct bnxt_softc *softc = arg;
4347 
4348 	softc->db_ops.bnxt_db_rx_cq(&softc->def_cp_ring, 0);
4349 	iflib_config_task_enqueue(softc->ctx, &softc->def_cp_task);
4350 	return FILTER_HANDLED;
4351 }
4352 
4353 static void
bnxt_clear_ids(struct bnxt_softc * softc)4354 bnxt_clear_ids(struct bnxt_softc *softc)
4355 {
4356 	int i;
4357 
4358 	softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
4359 	softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4360 	softc->def_nq_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
4361 	softc->def_nq_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4362 	for (i = 0; i < softc->ntxqsets; i++) {
4363 		softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4364 		softc->tx_cp_rings[i].ring.phys_id =
4365 		    (uint16_t)HWRM_NA_SIGNATURE;
4366 		softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4367 
4368 		if (!softc->nq_rings)
4369 			continue;
4370 		softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4371 		softc->nq_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4372 	}
4373 	for (i = 0; i < softc->nrxqsets; i++) {
4374 		softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4375 		softc->rx_cp_rings[i].ring.phys_id =
4376 		    (uint16_t)HWRM_NA_SIGNATURE;
4377 		softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4378 		softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4379 		softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
4380 	}
4381 	softc->vnic_info.filter_id = -1;
4382 	softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
4383 	softc->vnic_info.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
4384 	memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
4385 	    softc->vnic_info.rss_grp_tbl.idi_size);
4386 }
4387 
4388 static void
bnxt_mark_cpr_invalid(struct bnxt_cp_ring * cpr)4389 bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
4390 {
4391 	struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
4392 	int i;
4393 
4394 	for (i = 0; i < cpr->ring.ring_size; i++)
4395 		cmp[i].info3_v = !cpr->v_bit;
4396 }
4397 
/*
 * Decode a firmware ERROR_REPORT async event (error type in data1,
 * details in data2) and log a human-readable message.  Pure reporting:
 * no driver state is changed here.
 */
static void bnxt_event_error_report(struct bnxt_softc *softc, u32 data1, u32 data2)
{
	u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);

	switch (err_type) {
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
		device_printf(softc->dev,
			      "1PPS: Received invalid signal on pin%u from the external source. Please fix the signal and reconfigure the pin\n",
			      BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
		device_printf(softc->dev,
			      "Pause Storm detected!\n");
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
		device_printf(softc->dev,
			      "One or more MMIO doorbells dropped by the device! epoch: 0x%x\n",
			      BNXT_EVENT_DBR_EPOCH(data1));
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM: {
		/* data1 carries the write/erase distinction, data2 the address. */
		const char *nvm_err_str;

		if (EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1))
			nvm_err_str = "nvm write error";
		else if (EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1))
			nvm_err_str = "nvm erase error";
		else
			nvm_err_str = "unrecognized nvm error";

		device_printf(softc->dev,
			      "%s reported at address 0x%x\n", nvm_err_str,
			      (u32)EVENT_DATA2_NVM_ERR_ADDR(data2));
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
		char *threshold_type;
		char *dir_str;

		/* Severity of the crossed threshold, from data1. */
		switch (EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)) {
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
			threshold_type = "warning";
			break;
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
			threshold_type = "critical";
			break;
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
			threshold_type = "fatal";
			break;
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
			threshold_type = "shutdown";
			break;
		default:
			device_printf(softc->dev,
				      "Unknown Thermal threshold type event\n");
			return;
		}
		if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1))
			dir_str = "above";
		else
			dir_str = "below";
		device_printf(softc->dev,
			      "Chip temperature has gone %s the %s thermal threshold!\n",
			      dir_str, threshold_type);
		device_printf(softc->dev,
			      "Temperature (In Celsius), Current: %u, threshold: %u\n",
			      BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
			      BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
		device_printf(softc->dev,
			      "Speed change is not supported with dual rate transceivers on this board\n");
		break;

	default:
	device_printf(softc->dev,
		      "FW reported unknown error type: %u, data1: 0x%x data2: 0x%x\n",
		      err_type, data1, data2);
		break;
	}
}
4479 
/*
 * Dispatch one firmware async-event completion from the default
 * completion ring.  Link events refresh media state; RESET_NOTIFY and
 * ERROR_RECOVERY drive the firmware-reset / health-watchdog state
 * machine.  Events that fall through the switch also queue the
 * slow-path work task; every event is finally forwarded to the ulp
 * (RoCE) driver via bnxt_ulp_async_events().
 */
static void
bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
{
	struct hwrm_async_event_cmpl *ae = (void *)cmpl;
	uint16_t async_id = le16toh(ae->event_id);
	struct ifmediareq ifmr;
	char *type_str;
	char *status_desc;
	struct bnxt_fw_health *fw_health;
	u32 data1 = le32toh(ae->event_data1);
	u32 data2 = le32toh(ae->event_data2);

	switch (async_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		/* P5: defer to the link-change bit; older chips poll now. */
		if (BNXT_CHIP_P5(softc))
			bit_set(softc->state_bv, BNXT_STATE_LINK_CHANGE);
		else
			bnxt_media_status(softc->ctx, &ifmr);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
		/* Log-only event; skip the sp work queueing below. */
		bnxt_event_error_report(softc, data1, data2);
		goto async_event_process_exit;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE:
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
		/* Firmware announced an impending reset; classify it. */
		type_str = "Solicited";

		if (!softc->fw_health)
			goto async_event_process_exit;

		softc->fw_reset_timestamp = jiffies;
		/* timestamp_lo/hi carry min/max wait in deciseconds. */
		softc->fw_reset_min_dsecs = ae->timestamp_lo;
		if (!softc->fw_reset_min_dsecs)
			softc->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
		softc->fw_reset_max_dsecs = le16toh(ae->timestamp_hi);
		if (!softc->fw_reset_max_dsecs)
			softc->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &softc->state);
		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
			type_str = "Fatal";
			softc->fw_health->fatalities++;
			set_bit(BNXT_STATE_FW_FATAL_COND, &softc->state);
		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
			type_str = "Non-fatal";
			softc->fw_health->survivals++;
			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &softc->state);
		}
		device_printf(softc->dev,
			   "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
			   type_str, data1, data2,
			   softc->fw_reset_min_dsecs * 100,
			   softc->fw_reset_max_dsecs * 100);
		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &softc->sp_event);
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
		/* Firmware (re)configured the driver-side health watchdog. */
		fw_health = softc->fw_health;
		status_desc = "healthy";
		u32 status;

		if (!fw_health)
			goto async_event_process_exit;

		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
			fw_health->enabled = false;
			device_printf(softc->dev, "Driver recovery watchdog is disabled\n");
			break;
		}
		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
		fw_health->tmr_multiplier =
			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
				     HZ * 10);
		fw_health->tmr_counter = fw_health->tmr_multiplier;
		/* Snapshot heartbeat only on the disabled->enabled edge. */
		if (!fw_health->enabled)
			fw_health->last_fw_heartbeat =
				bnxt_fw_health_readl(softc, BNXT_FW_HEARTBEAT_REG);
		fw_health->last_fw_reset_cnt =
			bnxt_fw_health_readl(softc, BNXT_FW_RESET_CNT_REG);
		status = bnxt_fw_health_readl(softc, BNXT_FW_HEALTH_REG);
		if (status != BNXT_FW_STATUS_HEALTHY)
			status_desc = "unhealthy";
		device_printf(softc->dev,
			   "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
			   fw_health->primary ? "primary" : "backup", status,
			   status_desc, fw_health->last_fw_reset_cnt);
		if (!fw_health->enabled) {
			/* Make sure tmr_counter is set and seen by
			 * bnxt_health_check() before setting enabled
			 */
			smp_mb();
			fw_health->enabled = true;
		}
		goto async_event_process_exit;
	}

	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
		device_printf(softc->dev,
		    "Unhandled async completion type %u\n", async_id);
		break;
	default:
		device_printf(softc->dev,
		    "Unknown async completion type %u\n", async_id);
		break;
	}
	bnxt_queue_sp_work(softc);

async_event_process_exit:
	bnxt_ulp_async_events(softc, ae);
}
4607 
4608 static void
bnxt_def_cp_task(void * context,int pending)4609 bnxt_def_cp_task(void *context, int pending)
4610 {
4611 	if_ctx_t ctx = context;
4612 	struct bnxt_softc *softc = iflib_get_softc(ctx);
4613 	struct bnxt_cp_ring *cpr = &softc->def_cp_ring;
4614 
4615 	/* Handle completions on the default completion ring */
4616 	struct cmpl_base *cmpl;
4617 	uint32_t cons = cpr->cons;
4618 	bool v_bit = cpr->v_bit;
4619 	bool last_v_bit;
4620 	uint32_t last_cons;
4621 	uint16_t type;
4622 
4623 	for (;;) {
4624 		last_cons = cons;
4625 		last_v_bit = v_bit;
4626 		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
4627 		cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];
4628 
4629 		if (!CMP_VALID(cmpl, v_bit))
4630 			break;
4631 
4632 		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
4633 		switch (type) {
4634 		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
4635 			bnxt_handle_async_event(softc, cmpl);
4636 			break;
4637 		case CMPL_BASE_TYPE_TX_L2:
4638 		case CMPL_BASE_TYPE_RX_L2:
4639 		case CMPL_BASE_TYPE_RX_AGG:
4640 		case CMPL_BASE_TYPE_RX_TPA_START:
4641 		case CMPL_BASE_TYPE_RX_TPA_END:
4642 		case CMPL_BASE_TYPE_STAT_EJECT:
4643 		case CMPL_BASE_TYPE_HWRM_DONE:
4644 		case CMPL_BASE_TYPE_HWRM_FWD_REQ:
4645 		case CMPL_BASE_TYPE_HWRM_FWD_RESP:
4646 		case CMPL_BASE_TYPE_CQ_NOTIFICATION:
4647 		case CMPL_BASE_TYPE_SRQ_EVENT:
4648 		case CMPL_BASE_TYPE_DBQ_EVENT:
4649 		case CMPL_BASE_TYPE_QP_EVENT:
4650 		case CMPL_BASE_TYPE_FUNC_EVENT:
4651 			device_printf(softc->dev,
4652 			    "Unhandled completion type %u\n", type);
4653 			break;
4654 		default:
4655 			device_printf(softc->dev,
4656 			    "Unknown completion type %u\n", type);
4657 			break;
4658 		}
4659 	}
4660 
4661 	cpr->cons = last_cons;
4662 	cpr->v_bit = last_v_bit;
4663 	softc->db_ops.bnxt_db_rx_cq(cpr, 1);
4664 }
4665 
4666 uint8_t
get_phy_type(struct bnxt_softc * softc)4667 get_phy_type(struct bnxt_softc *softc)
4668 {
4669 	struct bnxt_link_info *link_info = &softc->link_info;
4670 	uint8_t phy_type = link_info->phy_type;
4671 	uint16_t supported;
4672 
4673 	if (phy_type != HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN)
4674 		return phy_type;
4675 
4676 	/* Deduce the phy type from the media type and supported speeds */
4677 	supported = link_info->support_speeds;
4678 
4679 	if (link_info->media_type ==
4680 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP)
4681 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET;
4682 	if (link_info->media_type ==
4683 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC) {
4684 		if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
4685 			return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX;
4686 		if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
4687 			return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR;
4688 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR;
4689 	}
4690 	if (link_info->media_type ==
4691 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE)
4692 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR;
4693 
4694 	return phy_type;
4695 }
4696 
4697 bool
bnxt_check_hwrm_version(struct bnxt_softc * softc)4698 bnxt_check_hwrm_version(struct bnxt_softc *softc)
4699 {
4700 	char buf[16];
4701 
4702 	sprintf(buf, "%hhu.%hhu.%hhu", softc->ver_info->hwrm_min_major,
4703 	    softc->ver_info->hwrm_min_minor, softc->ver_info->hwrm_min_update);
4704 	if (softc->ver_info->hwrm_min_major > softc->ver_info->hwrm_if_major) {
4705 		device_printf(softc->dev,
4706 		    "WARNING: HWRM version %s is too old (older than %s)\n",
4707 		    softc->ver_info->hwrm_if_ver, buf);
4708 		return false;
4709 	}
4710 	else if(softc->ver_info->hwrm_min_major ==
4711 	    softc->ver_info->hwrm_if_major) {
4712 		if (softc->ver_info->hwrm_min_minor >
4713 		    softc->ver_info->hwrm_if_minor) {
4714 			device_printf(softc->dev,
4715 			    "WARNING: HWRM version %s is too old (older than %s)\n",
4716 			    softc->ver_info->hwrm_if_ver, buf);
4717 			return false;
4718 		}
4719 		else if (softc->ver_info->hwrm_min_minor ==
4720 		    softc->ver_info->hwrm_if_minor) {
4721 			if (softc->ver_info->hwrm_min_update >
4722 			    softc->ver_info->hwrm_if_update) {
4723 				device_printf(softc->dev,
4724 				    "WARNING: HWRM version %s is too old (older than %s)\n",
4725 				    softc->ver_info->hwrm_if_ver, buf);
4726 				return false;
4727 			}
4728 		}
4729 	}
4730 	return true;
4731 }
4732 
4733 static uint64_t
bnxt_get_baudrate(struct bnxt_link_info * link)4734 bnxt_get_baudrate(struct bnxt_link_info *link)
4735 {
4736 	switch (link->link_speed) {
4737 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
4738 		return IF_Mbps(100);
4739 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
4740 		return IF_Gbps(1);
4741 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
4742 		return IF_Gbps(2);
4743 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
4744 		return IF_Mbps(2500);
4745 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
4746 		return IF_Gbps(10);
4747 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
4748 		return IF_Gbps(20);
4749 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
4750 		return IF_Gbps(25);
4751 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
4752 		return IF_Gbps(40);
4753 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
4754 		return IF_Gbps(50);
4755 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
4756 		return IF_Gbps(100);
4757 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
4758 		return IF_Mbps(10);
4759 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
4760 		return IF_Gbps(200);
4761 	}
4762 	return IF_Gbps(100);
4763 }
4764 
4765 static void
bnxt_get_wol_settings(struct bnxt_softc * softc)4766 bnxt_get_wol_settings(struct bnxt_softc *softc)
4767 {
4768 	uint16_t wol_handle = 0;
4769 
4770 	if (!bnxt_wol_supported(softc))
4771 		return;
4772 
4773 	do {
4774 		wol_handle = bnxt_hwrm_get_wol_fltrs(softc, wol_handle);
4775 	} while (wol_handle && wol_handle != BNXT_NO_MORE_WOL_FILTERS);
4776 }
4777