xref: /freebsd/sys/dev/bnxt/bnxt_en/if_bnxt.c (revision 6450d937955fcd1ab9034c49d53306e882c4a281)
1 /*-
2  * Broadcom NetXtreme-C/E network driver.
3  *
4  * Copyright (c) 2016 Broadcom, All Rights Reserved.
5  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/socket.h>
31 #include <sys/kernel.h>
32 #include <sys/bus.h>
33 #include <sys/module.h>
34 #include <sys/rman.h>
35 #include <sys/endian.h>
36 #include <sys/sockio.h>
37 #include <sys/priv.h>
38 
39 #include <machine/bus.h>
40 #include <machine/resource.h>
41 
42 #include <dev/pci/pcireg.h>
43 
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_var.h>
48 #include <net/ethernet.h>
49 #include <net/iflib.h>
50 
51 #include <linux/pci.h>
52 #include <linux/kmod.h>
53 #include <linux/module.h>
54 #include <linux/delay.h>
55 #include <linux/idr.h>
56 #include <linux/netdevice.h>
57 #include <linux/etherdevice.h>
58 #include <linux/rcupdate.h>
59 #include "opt_inet.h"
60 #include "opt_inet6.h"
61 #include "opt_rss.h"
62 
63 #include "ifdi_if.h"
64 
65 #include "bnxt.h"
66 #include "bnxt_hwrm.h"
67 #include "bnxt_ioctl.h"
68 #include "bnxt_sysctl.h"
69 #include "hsi_struct_def.h"
70 #include "bnxt_mgmt.h"
71 #include "bnxt_ulp.h"
72 #include "bnxt_auxbus_compat.h"
73 
74 /*
75  * PCI Device ID Table
76  */
77 
/* iflib PCI probe table: every supported NetXtreme-C/E/Nitro device ID. */
static const pci_vendor_info_t bnxt_vendor_info_array[] =
{
    PVID(BROADCOM_VENDOR_ID, BCM57301,
	"Broadcom BCM57301 NetXtreme-C 10Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57302,
	"Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57304,
	"Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57311,
	"Broadcom BCM57311 NetXtreme-C 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57312,
	"Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57314,
	"Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57402,
	"Broadcom BCM57402 NetXtreme-E 10Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57402_NPAR,
	"Broadcom BCM57402 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57404,
	"Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57404_NPAR,
	"Broadcom BCM57404 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57406,
	"Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57406_NPAR,
	"Broadcom BCM57406 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57407,
	"Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57407_NPAR,
	"Broadcom BCM57407 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57407_SFP,
	"Broadcom BCM57407 NetXtreme-E 25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57412,
	"Broadcom BCM57412 NetXtreme-E 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR1,
	"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR2,
	"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57414,
	"Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR1,
	"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR2,
	"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416,
	"Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR1,
	"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR2,
	"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_SFP,
	"Broadcom BCM57416 NetXtreme-E 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57417,
	"Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR1,
	"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR2,
	"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_SFP,
	"Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57454,
	"Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM58700,
	"Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57508,
	"Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57504,
	"Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57504_NPAR,
	"Broadcom BCM57504 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57502,
	"Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57608,
	"Broadcom BCM57608 NetXtreme-E 25Gb/50Gb/100Gb/200Gb/400Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57604,
	"Broadcom BCM57604 NetXtreme-E 25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57602,
	"Broadcom BCM57602 NetXtreme-E 25Gb/50Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57601,
	"Broadcom BCM57601 NetXtreme-E 25Gb/50Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF1,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF2,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF3,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF1,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF2,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF3,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    /* required last entry */

    PVID_END
};
174 
175 /*
176  * Function prototypes
177  */
178 
179 SLIST_HEAD(softc_list, bnxt_softc_list) pf_list;
180 int bnxt_num_pfs = 0;
181 
182 void
183 process_nq(struct bnxt_softc *softc, uint16_t nqid);
184 static void *bnxt_register(device_t dev);
185 
186 /* Soft queue setup and teardown */
187 static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
188     uint64_t *paddrs, int ntxqs, int ntxqsets);
189 static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
190     uint64_t *paddrs, int nrxqs, int nrxqsets);
191 static void bnxt_queues_free(if_ctx_t ctx);
192 
193 /* Device setup and teardown */
194 static int bnxt_attach_pre(if_ctx_t ctx);
195 static int bnxt_attach_post(if_ctx_t ctx);
196 static int bnxt_detach(if_ctx_t ctx);
197 
198 /* Device configuration */
199 static void bnxt_init(if_ctx_t ctx);
200 static void bnxt_stop(if_ctx_t ctx);
201 static void bnxt_multi_set(if_ctx_t ctx);
202 static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu);
203 static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
204 static int bnxt_media_change(if_ctx_t ctx);
205 static int bnxt_promisc_set(if_ctx_t ctx, int flags);
206 static uint64_t	bnxt_get_counter(if_ctx_t, ift_counter);
207 static void bnxt_update_admin_status(if_ctx_t ctx);
208 static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid);
209 
210 /* Interrupt enable / disable */
211 static void bnxt_intr_enable(if_ctx_t ctx);
212 static int bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
213 static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
214 static void bnxt_disable_intr(if_ctx_t ctx);
215 static int bnxt_msix_intr_assign(if_ctx_t ctx, int msix);
216 
217 /* vlan support */
218 static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag);
219 static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag);
220 
221 /* ioctl */
222 static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
223 
224 static int bnxt_shutdown(if_ctx_t ctx);
225 static int bnxt_suspend(if_ctx_t ctx);
226 static int bnxt_resume(if_ctx_t ctx);
227 
228 /* Internal support functions */
229 static int bnxt_probe_phy(struct bnxt_softc *softc);
230 static void bnxt_add_media_types(struct bnxt_softc *softc);
231 static int bnxt_pci_mapping(struct bnxt_softc *softc);
232 static void bnxt_pci_mapping_free(struct bnxt_softc *softc);
233 static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state);
234 static int bnxt_handle_def_cp(void *arg);
235 static int bnxt_handle_isr(void *arg);
236 static void bnxt_clear_ids(struct bnxt_softc *softc);
237 static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr);
238 static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr);
239 static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr);
240 static void bnxt_def_cp_task(void *context, int pending);
241 static void bnxt_handle_async_event(struct bnxt_softc *softc,
242     struct cmpl_base *cmpl);
243 static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link);
244 static void bnxt_get_wol_settings(struct bnxt_softc *softc);
245 static int bnxt_wol_config(if_ctx_t ctx);
246 static bool bnxt_if_needs_restart(if_ctx_t, enum iflib_restart_event);
247 static int bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c);
248 static void bnxt_get_port_module_status(struct bnxt_softc *softc);
249 static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc);
250 static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc);
251 static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay);
252 void bnxt_queue_sp_work(struct bnxt_softc *bp);
253 
254 void bnxt_fw_reset(struct bnxt_softc *bp);
255 /*
256  * Device Interface Declaration
257  */
258 
static device_method_t bnxt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, bnxt_register),
	/* Everything past registration is delegated to the iflib framework. */
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};
270 
/* newbus driver glue attaching bnxt to the PCI bus. */
static driver_t bnxt_driver = {
	"bnxt", bnxt_methods, sizeof(struct bnxt_softc),
};

DRIVER_MODULE(bnxt, pci, bnxt_driver, 0, 0);

MODULE_LICENSE("Dual BSD/GPL");
/* Load-order dependencies: PCI bus, ethernet, iflib, and linuxkpi shims. */
MODULE_DEPEND(if_bnxt, pci, 1, 1, 1);
MODULE_DEPEND(if_bnxt, ether, 1, 1, 1);
MODULE_DEPEND(if_bnxt, iflib, 1, 1, 1);
MODULE_DEPEND(if_bnxt, linuxkpi, 1, 1, 1);
MODULE_VERSION(if_bnxt, 1);

/* Export the PCI ID table so devmatch(8) can auto-load the module. */
IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array);
285 
286 void writel_fbsd(struct bnxt_softc *bp, u32, u8, u32);
287 u32 readl_fbsd(struct bnxt_softc *bp, u32, u8);
288 
/*
 * Linux-compat 32-bit register read.  bar_idx zero selects the
 * doorbell BAR; any non-zero index selects the HWRM BAR.
 */
u32 readl_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx)
{

	if (bar_idx)
		return (bus_space_read_4(bp->hwrm_bar.tag,
		    bp->hwrm_bar.handle, reg_off));
	return (bus_space_read_4(bp->doorbell_bar.tag,
	    bp->doorbell_bar.handle, reg_off));
}
297 
/*
 * Linux-compat 32-bit register write.  The value is converted to
 * little-endian before hitting the bus.  bar_idx zero selects the
 * doorbell BAR; any non-zero index selects the HWRM BAR.
 */
void writel_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx, u32 val)
{
	u32 le_val = htole32(val);

	if (bar_idx)
		bus_space_write_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle,
		    reg_off, le_val);
	else
		bus_space_write_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle,
		    reg_off, le_val);
}
306 
307 static DEFINE_IDA(bnxt_aux_dev_ids);
308 
/* iflib device-interface table: maps each ifdi_* entry point to its bnxt handler. */
static device_method_t bnxt_iflib_methods[] = {
	/* Queue memory setup/teardown */
	DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, bnxt_queues_free),

	/* Device attach/detach */
	DEVMETHOD(ifdi_attach_pre, bnxt_attach_pre),
	DEVMETHOD(ifdi_attach_post, bnxt_attach_post),
	DEVMETHOD(ifdi_detach, bnxt_detach),

	/* Runtime configuration */
	DEVMETHOD(ifdi_init, bnxt_init),
	DEVMETHOD(ifdi_stop, bnxt_stop),
	DEVMETHOD(ifdi_multi_set, bnxt_multi_set),
	DEVMETHOD(ifdi_mtu_set, bnxt_mtu_set),
	DEVMETHOD(ifdi_media_status, bnxt_media_status),
	DEVMETHOD(ifdi_media_change, bnxt_media_change),
	DEVMETHOD(ifdi_promisc_set, bnxt_promisc_set),
	DEVMETHOD(ifdi_get_counter, bnxt_get_counter),
	DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status),
	DEVMETHOD(ifdi_timer, bnxt_if_timer),

	/* Interrupt management */
	DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr),
	DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign),

	/* VLAN */
	DEVMETHOD(ifdi_vlan_register, bnxt_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, bnxt_vlan_unregister),

	DEVMETHOD(ifdi_priv_ioctl, bnxt_priv_ioctl),

	/* Power management and misc */
	DEVMETHOD(ifdi_suspend, bnxt_suspend),
	DEVMETHOD(ifdi_shutdown, bnxt_shutdown),
	DEVMETHOD(ifdi_resume, bnxt_resume),
	DEVMETHOD(ifdi_i2c_req, bnxt_i2c_req),

	DEVMETHOD(ifdi_needs_restart, bnxt_if_needs_restart),

	DEVMETHOD_END
};
349 
/* Driver description referenced by the shared context below. */
static driver_t bnxt_iflib_driver = {
	"bnxt", bnxt_iflib_methods, sizeof(struct bnxt_softc)
};
353 
354 /*
355  * iflib shared context
356  */
357 
#define BNXT_DRIVER_VERSION	"230.0.133.0"
const char bnxt_driver_version[] = BNXT_DRIVER_VERSION;
extern struct if_txrx bnxt_txrx;
/* Shared-context template handed to iflib from bnxt_register(). */
static struct if_shared_ctx bnxt_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_driver = &bnxt_iflib_driver,
	.isc_nfl = 2,				// Number of Free Lists
	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),

	// Only use a single segment to avoid page size constraints
	.isc_rx_nsegments = 1,
	// Three rings per queue set; the alloc routines use slots
	// 0 = completion, 1 = descriptor ring, 2 = AG ring (RX) / NQ (TX)
	.isc_ntxqs = 3,
	.isc_nrxqs = 3,
	.isc_nrxd_min = {16, 16, 16},
	.isc_nrxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 8,
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd),
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd)},
	.isc_nrxd_max = {BNXT_MAX_RXD, BNXT_MAX_RXD, BNXT_MAX_RXD},
	.isc_ntxd_min = {16, 16, 16},
	.isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2,
	    PAGE_SIZE / sizeof(struct tx_bd_short),
	    /* NQ depth 4096 */
	    PAGE_SIZE / sizeof(struct cmpl_base) * 16},
	.isc_ntxd_max = {BNXT_MAX_TXD, BNXT_MAX_TXD, BNXT_MAX_TXD},

	.isc_admin_intrcnt = BNXT_ROCE_IRQ_COUNT,
	.isc_vendor_info = bnxt_vendor_info_array,
	.isc_driver_version = bnxt_driver_version,
};
394 
395 #define PCI_SUBSYSTEM_ID	0x2e
396 static struct workqueue_struct *bnxt_pf_wq;
397 
398 extern void bnxt_destroy_irq(struct bnxt_softc *softc);
399 
400 /*
401  * Device Methods
402  */
403 
404 static void *
bnxt_register(device_t dev)405 bnxt_register(device_t dev)
406 {
407 	return (&bnxt_sctx_init);
408 }
409 
410 static void
bnxt_nq_alloc(struct bnxt_softc * softc,int nqsets)411 bnxt_nq_alloc(struct bnxt_softc *softc, int nqsets)
412 {
413 
414 	if (softc->nq_rings)
415 		return;
416 
417 	softc->nq_rings = malloc(sizeof(struct bnxt_cp_ring) * nqsets,
418 	    M_DEVBUF, M_NOWAIT | M_ZERO);
419 }
420 
421 static void
bnxt_nq_free(struct bnxt_softc * softc)422 bnxt_nq_free(struct bnxt_softc *softc)
423 {
424 
425 	if (softc->nq_rings)
426 		free(softc->nq_rings, M_DEVBUF);
427 	softc->nq_rings = NULL;
428 }
429 
430 
431 static void
bnxt_set_db_mask(struct bnxt_softc * bp,struct bnxt_ring * db,u32 ring_type)432 bnxt_set_db_mask(struct bnxt_softc *bp, struct bnxt_ring *db,
433 		 u32 ring_type)
434 {
435 	if (BNXT_CHIP_P7(bp)) {
436 		db->db_epoch_mask = db->db_ring_mask + 1;
437 		db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
438 
439 	}
440 }
441 
442 /*
443  * Device Dependent Configuration Functions
444 */
445 
446 /* Soft queue setup and teardown */
447 static int
bnxt_tx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int ntxqs,int ntxqsets)448 bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
449     uint64_t *paddrs, int ntxqs, int ntxqsets)
450 {
451 	struct bnxt_softc *softc;
452 	int i;
453 	int rc;
454 
455 	softc = iflib_get_softc(ctx);
456 
457 	if (BNXT_CHIP_P5_PLUS(softc)) {
458 		bnxt_nq_alloc(softc, ntxqsets);
459 		if (!softc->nq_rings) {
460 			device_printf(iflib_get_dev(ctx),
461 					"unable to allocate NQ rings\n");
462 			rc = ENOMEM;
463 			goto nq_alloc_fail;
464 		}
465 	}
466 
467 	softc->tx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * ntxqsets,
468 	    M_DEVBUF, M_NOWAIT | M_ZERO);
469 	if (!softc->tx_cp_rings) {
470 		device_printf(iflib_get_dev(ctx),
471 		    "unable to allocate TX completion rings\n");
472 		rc = ENOMEM;
473 		goto cp_alloc_fail;
474 	}
475 	softc->tx_rings = malloc(sizeof(struct bnxt_ring) * ntxqsets,
476 	    M_DEVBUF, M_NOWAIT | M_ZERO);
477 	if (!softc->tx_rings) {
478 		device_printf(iflib_get_dev(ctx),
479 		    "unable to allocate TX rings\n");
480 		rc = ENOMEM;
481 		goto ring_alloc_fail;
482 	}
483 
484 	for (i=0; i < ntxqsets; i++) {
485 		rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
486 				&softc->tx_stats[i], 0);
487 		if (rc)
488 			goto dma_alloc_fail;
489 		bus_dmamap_sync(softc->tx_stats[i].idi_tag, softc->tx_stats[i].idi_map,
490 				BUS_DMASYNC_PREREAD);
491 	}
492 
493 	for (i = 0; i < ntxqsets; i++) {
494 		/* Set up the completion ring */
495 		softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
496 		softc->tx_cp_rings[i].ring.phys_id =
497 		    (uint16_t)HWRM_NA_SIGNATURE;
498 		softc->tx_cp_rings[i].ring.softc = softc;
499 		softc->tx_cp_rings[i].ring.idx = i;
500 		softc->tx_cp_rings[i].ring.id =
501 		    (softc->scctx->isc_nrxqsets * 2) + 1 + i;
502 		softc->tx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
503 			softc->legacy_db_size: softc->tx_cp_rings[i].ring.id * 0x80;
504 		softc->tx_cp_rings[i].ring.ring_size =
505 		    softc->scctx->isc_ntxd[0];
506 		softc->tx_cp_rings[i].ring.db_ring_mask =
507 		    softc->tx_cp_rings[i].ring.ring_size - 1;
508 		softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs];
509 		softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs];
510 
511 
512 		/* Set up the TX ring */
513 		softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
514 		softc->tx_rings[i].softc = softc;
515 		softc->tx_rings[i].idx = i;
516 		softc->tx_rings[i].id =
517 		    (softc->scctx->isc_nrxqsets * 2) + 1 + i;
518 		softc->tx_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
519 			softc->legacy_db_size : softc->tx_rings[i].id * 0x80;
520 		softc->tx_rings[i].ring_size = softc->scctx->isc_ntxd[1];
521 		softc->tx_rings[i].db_ring_mask = softc->tx_rings[i].ring_size - 1;
522 		softc->tx_rings[i].vaddr = vaddrs[i * ntxqs + 1];
523 		softc->tx_rings[i].paddr = paddrs[i * ntxqs + 1];
524 
525 		bnxt_create_tx_sysctls(softc, i);
526 
527 		if (BNXT_CHIP_P5_PLUS(softc)) {
528 			/* Set up the Notification ring (NQ) */
529 			softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
530 			softc->nq_rings[i].ring.phys_id =
531 				(uint16_t)HWRM_NA_SIGNATURE;
532 			softc->nq_rings[i].ring.softc = softc;
533 			softc->nq_rings[i].ring.idx = i;
534 			softc->nq_rings[i].ring.id = i;
535 			softc->nq_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
536 				softc->legacy_db_size : softc->nq_rings[i].ring.id * 0x80;
537 			softc->nq_rings[i].ring.ring_size = softc->scctx->isc_ntxd[2];
538 			softc->nq_rings[i].ring.db_ring_mask = softc->nq_rings[i].ring.ring_size - 1;
539 			softc->nq_rings[i].ring.vaddr = vaddrs[i * ntxqs + 2];
540 			softc->nq_rings[i].ring.paddr = paddrs[i * ntxqs + 2];
541 			softc->nq_rings[i].type = Q_TYPE_TX;
542 		}
543 	}
544 
545 	softc->ntxqsets = ntxqsets;
546 	return rc;
547 
548 dma_alloc_fail:
549 	for (i = i - 1; i >= 0; i--)
550 		iflib_dma_free(&softc->tx_stats[i]);
551 	free(softc->tx_rings, M_DEVBUF);
552 ring_alloc_fail:
553 	free(softc->tx_cp_rings, M_DEVBUF);
554 cp_alloc_fail:
555 	bnxt_nq_free(softc);
556 nq_alloc_fail:
557 	return rc;
558 }
559 
560 static void
bnxt_queues_free(if_ctx_t ctx)561 bnxt_queues_free(if_ctx_t ctx)
562 {
563 	struct bnxt_softc *softc = iflib_get_softc(ctx);
564 	int i;
565 
566 	// Free TX queues
567 	for (i=0; i<softc->ntxqsets; i++)
568 		iflib_dma_free(&softc->tx_stats[i]);
569 	free(softc->tx_rings, M_DEVBUF);
570 	softc->tx_rings = NULL;
571 	free(softc->tx_cp_rings, M_DEVBUF);
572 	softc->tx_cp_rings = NULL;
573 	softc->ntxqsets = 0;
574 
575 	// Free RX queues
576 	for (i=0; i<softc->nrxqsets; i++)
577 		iflib_dma_free(&softc->rx_stats[i]);
578 	iflib_dma_free(&softc->hw_tx_port_stats);
579 	iflib_dma_free(&softc->hw_rx_port_stats);
580 	iflib_dma_free(&softc->hw_tx_port_stats_ext);
581 	iflib_dma_free(&softc->hw_rx_port_stats_ext);
582 	free(softc->grp_info, M_DEVBUF);
583 	free(softc->ag_rings, M_DEVBUF);
584 	free(softc->rx_rings, M_DEVBUF);
585 	free(softc->rx_cp_rings, M_DEVBUF);
586 	bnxt_nq_free(softc);
587 }
588 
589 static int
bnxt_rx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int nrxqs,int nrxqsets)590 bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
591     uint64_t *paddrs, int nrxqs, int nrxqsets)
592 {
593 	struct bnxt_softc *softc;
594 	int i;
595 	int rc;
596 
597 	softc = iflib_get_softc(ctx);
598 
599 	softc->rx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * nrxqsets,
600 	    M_DEVBUF, M_NOWAIT | M_ZERO);
601 	if (!softc->rx_cp_rings) {
602 		device_printf(iflib_get_dev(ctx),
603 		    "unable to allocate RX completion rings\n");
604 		rc = ENOMEM;
605 		goto cp_alloc_fail;
606 	}
607 	softc->rx_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
608 	    M_DEVBUF, M_NOWAIT | M_ZERO);
609 	if (!softc->rx_rings) {
610 		device_printf(iflib_get_dev(ctx),
611 		    "unable to allocate RX rings\n");
612 		rc = ENOMEM;
613 		goto ring_alloc_fail;
614 	}
615 	softc->ag_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
616 	    M_DEVBUF, M_NOWAIT | M_ZERO);
617 	if (!softc->ag_rings) {
618 		device_printf(iflib_get_dev(ctx),
619 		    "unable to allocate aggregation rings\n");
620 		rc = ENOMEM;
621 		goto ag_alloc_fail;
622 	}
623 	softc->grp_info = malloc(sizeof(struct bnxt_grp_info) * nrxqsets,
624 	    M_DEVBUF, M_NOWAIT | M_ZERO);
625 	if (!softc->grp_info) {
626 		device_printf(iflib_get_dev(ctx),
627 		    "unable to allocate ring groups\n");
628 		rc = ENOMEM;
629 		goto grp_alloc_fail;
630 	}
631 
632 	for (i=0; i < nrxqsets; i++) {
633 		rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
634 				&softc->rx_stats[i], 0);
635 		if (rc)
636 			goto hw_stats_alloc_fail;
637 		bus_dmamap_sync(softc->rx_stats[i].idi_tag, softc->rx_stats[i].idi_map,
638 				BUS_DMASYNC_PREREAD);
639 	}
640 
641 /*
642  * Additional 512 bytes for future expansion.
643  * To prevent corruption when loaded with newer firmwares with added counters.
644  * This can be deleted when there will be no further additions of counters.
645  */
646 #define BNXT_PORT_STAT_PADDING  512
647 
648 	rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats) + BNXT_PORT_STAT_PADDING,
649 	    &softc->hw_rx_port_stats, 0);
650 	if (rc)
651 		goto hw_port_rx_stats_alloc_fail;
652 
653 	bus_dmamap_sync(softc->hw_rx_port_stats.idi_tag,
654             softc->hw_rx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
655 
656 
657 	rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats) + BNXT_PORT_STAT_PADDING,
658 	    &softc->hw_tx_port_stats, 0);
659 	if (rc)
660 		goto hw_port_tx_stats_alloc_fail;
661 
662 	bus_dmamap_sync(softc->hw_tx_port_stats.idi_tag,
663             softc->hw_tx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
664 
665 	softc->rx_port_stats = (void *) softc->hw_rx_port_stats.idi_vaddr;
666 	softc->tx_port_stats = (void *) softc->hw_tx_port_stats.idi_vaddr;
667 
668 
669 	rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats_ext),
670 		&softc->hw_rx_port_stats_ext, 0);
671 	if (rc)
672 		goto hw_port_rx_stats_ext_alloc_fail;
673 
674 	bus_dmamap_sync(softc->hw_rx_port_stats_ext.idi_tag,
675 	    softc->hw_rx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
676 
677 	rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats_ext),
678 		&softc->hw_tx_port_stats_ext, 0);
679 	if (rc)
680 		goto hw_port_tx_stats_ext_alloc_fail;
681 
682 	bus_dmamap_sync(softc->hw_tx_port_stats_ext.idi_tag,
683 	    softc->hw_tx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
684 
685 	softc->rx_port_stats_ext = (void *) softc->hw_rx_port_stats_ext.idi_vaddr;
686 	softc->tx_port_stats_ext = (void *) softc->hw_tx_port_stats_ext.idi_vaddr;
687 
688 	for (i = 0; i < nrxqsets; i++) {
689 		/* Allocation the completion ring */
690 		softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
691 		softc->rx_cp_rings[i].ring.phys_id =
692 		    (uint16_t)HWRM_NA_SIGNATURE;
693 		softc->rx_cp_rings[i].ring.softc = softc;
694 		softc->rx_cp_rings[i].ring.idx = i;
695 		softc->rx_cp_rings[i].ring.id = i + 1;
696 		softc->rx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
697 			softc->legacy_db_size : softc->rx_cp_rings[i].ring.id * 0x80;
698 		/*
699 		 * If this ring overflows, RX stops working.
700 		 */
701 		softc->rx_cp_rings[i].ring.ring_size =
702 		    softc->scctx->isc_nrxd[0];
703 		softc->rx_cp_rings[i].ring.db_ring_mask =
704 		    softc->rx_cp_rings[i].ring.ring_size - 1;
705 
706 		softc->rx_cp_rings[i].ring.vaddr = vaddrs[i * nrxqs];
707 		softc->rx_cp_rings[i].ring.paddr = paddrs[i * nrxqs];
708 
709 		/* Allocate the RX ring */
710 		softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
711 		softc->rx_rings[i].softc = softc;
712 		softc->rx_rings[i].idx = i;
713 		softc->rx_rings[i].id = i + 1;
714 		softc->rx_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
715 			softc->legacy_db_size : softc->rx_rings[i].id * 0x80;
716 		softc->rx_rings[i].ring_size = softc->scctx->isc_nrxd[1];
717 		softc->rx_rings[i].db_ring_mask =
718 			softc->rx_rings[i].ring_size -1;
719 		softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1];
720 		softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1];
721 
722 		/* Allocate the TPA start buffer */
723 		softc->rx_rings[i].tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) *
724 	    		(RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT),
725 	    		M_DEVBUF, M_NOWAIT | M_ZERO);
726 		if (softc->rx_rings[i].tpa_start == NULL) {
727 			rc = -ENOMEM;
728 			device_printf(softc->dev,
729 					"Unable to allocate space for TPA\n");
730 			goto tpa_alloc_fail;
731 		}
732 		/* Allocate the AG ring */
733 		softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
734 		softc->ag_rings[i].softc = softc;
735 		softc->ag_rings[i].idx = i;
736 		softc->ag_rings[i].id = nrxqsets + i + 1;
737 		softc->ag_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
738 			softc->legacy_db_size : softc->ag_rings[i].id * 0x80;
739 		softc->ag_rings[i].ring_size = softc->scctx->isc_nrxd[2];
740 		softc->ag_rings[i].db_ring_mask = softc->ag_rings[i].ring_size - 1;
741 		softc->ag_rings[i].vaddr = vaddrs[i * nrxqs + 2];
742 		softc->ag_rings[i].paddr = paddrs[i * nrxqs + 2];
743 
744 		/* Allocate the ring group */
745 		softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
746 		softc->grp_info[i].stats_ctx =
747 		    softc->rx_cp_rings[i].stats_ctx_id;
748 		softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
749 		softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
750 		softc->grp_info[i].cp_ring_id =
751 		    softc->rx_cp_rings[i].ring.phys_id;
752 
753 		bnxt_create_rx_sysctls(softc, i);
754 	}
755 
756 	/*
757 	 * When SR-IOV is enabled, avoid each VF sending PORT_QSTATS
758          * HWRM every sec with which firmware timeouts can happen
759          */
760 	if (BNXT_PF(softc))
761 		bnxt_create_port_stats_sysctls(softc);
762 
763 	/* And finally, the VNIC */
764 	softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
765 	softc->vnic_info.filter_id = -1;
766 	softc->vnic_info.def_ring_grp = (uint16_t)HWRM_NA_SIGNATURE;
767 	softc->vnic_info.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
768 	softc->vnic_info.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
769 	softc->vnic_info.rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
770 		HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
771 	softc->vnic_info.mc_list_count = 0;
772 	softc->vnic_info.flags = BNXT_VNIC_FLAG_DEFAULT;
773 	rc = iflib_dma_alloc(ctx, BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN,
774 	    &softc->vnic_info.mc_list, 0);
775 	if (rc)
776 		goto mc_list_alloc_fail;
777 
778 	/* The VNIC RSS Hash Key */
779 	rc = iflib_dma_alloc(ctx, HW_HASH_KEY_SIZE,
780 	    &softc->vnic_info.rss_hash_key_tbl, 0);
781 	if (rc)
782 		goto rss_hash_alloc_fail;
783 	bus_dmamap_sync(softc->vnic_info.rss_hash_key_tbl.idi_tag,
784 	    softc->vnic_info.rss_hash_key_tbl.idi_map,
785 	    BUS_DMASYNC_PREWRITE);
786 	memcpy(softc->vnic_info.rss_hash_key_tbl.idi_vaddr,
787 	    softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE);
788 
789 	/* Allocate the RSS tables */
790 	rc = iflib_dma_alloc(ctx, HW_HASH_INDEX_SIZE * sizeof(uint16_t),
791 	    &softc->vnic_info.rss_grp_tbl, 0);
792 	if (rc)
793 		goto rss_grp_alloc_fail;
794 	bus_dmamap_sync(softc->vnic_info.rss_grp_tbl.idi_tag,
795 	    softc->vnic_info.rss_grp_tbl.idi_map,
796 	    BUS_DMASYNC_PREWRITE);
797 	memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
798 	    softc->vnic_info.rss_grp_tbl.idi_size);
799 
800 	softc->nrxqsets = nrxqsets;
801 	return rc;
802 
803 rss_grp_alloc_fail:
804 	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
805 rss_hash_alloc_fail:
806 	iflib_dma_free(&softc->vnic_info.mc_list);
807 mc_list_alloc_fail:
808 	for (i = i - 1; i >= 0; i--) {
809 		if (softc->rx_rings[i].tpa_start)
810 			free(softc->rx_rings[i].tpa_start, M_DEVBUF);
811 	}
812 tpa_alloc_fail:
813 	iflib_dma_free(&softc->hw_tx_port_stats_ext);
814 hw_port_tx_stats_ext_alloc_fail:
815 	iflib_dma_free(&softc->hw_rx_port_stats_ext);
816 hw_port_rx_stats_ext_alloc_fail:
817 	iflib_dma_free(&softc->hw_tx_port_stats);
818 hw_port_tx_stats_alloc_fail:
819 	iflib_dma_free(&softc->hw_rx_port_stats);
820 hw_port_rx_stats_alloc_fail:
821 	for (i=0; i < nrxqsets; i++) {
822 		if (softc->rx_stats[i].idi_vaddr)
823 			iflib_dma_free(&softc->rx_stats[i]);
824 	}
825 hw_stats_alloc_fail:
826 	free(softc->grp_info, M_DEVBUF);
827 grp_alloc_fail:
828 	free(softc->ag_rings, M_DEVBUF);
829 ag_alloc_fail:
830 	free(softc->rx_rings, M_DEVBUF);
831 ring_alloc_fail:
832 	free(softc->rx_cp_rings, M_DEVBUF);
833 cp_alloc_fail:
834 	return rc;
835 }
836 
/*
 * Free the DMA buffer used for HWRM short command requests, if one was
 * allocated, and clear the stale vaddr so a later free is a no-op.
 */
static void bnxt_free_hwrm_short_cmd_req(struct bnxt_softc *softc)
{
	if (softc->hwrm_short_cmd_req_addr.idi_vaddr)
		iflib_dma_free(&softc->hwrm_short_cmd_req_addr);
	softc->hwrm_short_cmd_req_addr.idi_vaddr = NULL;
}
843 
bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc * softc)844 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc)
845 {
846 	int rc;
847 
848 	rc = iflib_dma_alloc(softc->ctx, softc->hwrm_max_req_len,
849 	    &softc->hwrm_short_cmd_req_addr, BUS_DMA_NOWAIT);
850 
851 	return rc;
852 }
853 
bnxt_free_ring(struct bnxt_softc * softc,struct bnxt_ring_mem_info * rmem)854 static void bnxt_free_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
855 {
856 	int i;
857 
858 	for (i = 0; i < rmem->nr_pages; i++) {
859 		if (!rmem->pg_arr[i].idi_vaddr)
860 			continue;
861 
862 		iflib_dma_free(&rmem->pg_arr[i]);
863 		rmem->pg_arr[i].idi_vaddr = NULL;
864 	}
865 	if (rmem->pg_tbl.idi_vaddr) {
866 		iflib_dma_free(&rmem->pg_tbl);
867 		rmem->pg_tbl.idi_vaddr = NULL;
868 
869 	}
870 	if (rmem->vmem_size && *rmem->vmem) {
871 		free(*rmem->vmem, M_DEVBUF);
872 		*rmem->vmem = NULL;
873 	}
874 }
875 
bnxt_init_ctx_mem(struct bnxt_ctx_mem_type * ctxm,void * p,int len)876 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
877 {
878 	u8 init_val = ctxm->init_value;
879 	u16 offset = ctxm->init_offset;
880 	u8 *p2 = p;
881 	int i;
882 
883 	if (!init_val)
884 		return;
885 	if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
886 		memset(p, init_val, len);
887 		return;
888 	}
889 	for (i = 0; i < len; i += ctxm->entry_size)
890 		*(p2 + i + offset) = init_val;
891 }
892 
/*
 * Allocate the DMA backing pages for a ring and, for multi-page or
 * multi-level rings, a page table whose PTEs point at them.  Returns 0
 * or -ENOMEM; on failure, partially allocated memory is left for the
 * caller to release via bnxt_free_ring().
 */
static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
{
	uint64_t valid_bit = 0;
	int i;
	int rc;

	/* Hardware PTEs must carry the VALID bit for these layouts. */
	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;

	/* Indirect layout: allocate the page-table page first (8 bytes
	 * per PTE, or a full page when the flag requests it).
	 */
	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl.idi_vaddr) {
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;

		rc = iflib_dma_alloc(softc->ctx, pg_tbl_size, &rmem->pg_tbl, 0);
		if (rc)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		uint64_t extra_bits = valid_bit;
		uint64_t *ptr;

		rc = iflib_dma_alloc(softc->ctx, rmem->page_size, &rmem->pg_arr[i], 0);
		if (rc)
			return -ENOMEM;

		/* Pre-fill context memory when the type requires it. */
		if (rmem->ctx_mem)
			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i].idi_vaddr,
					rmem->page_size);

		if (rmem->nr_pages > 1 || rmem->depth > 0) {
			/* Ring PTEs flag the last two entries so hardware
			 * can detect wrap-around.
			 */
			if (i == rmem->nr_pages - 2 &&
					(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
					(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;

			ptr = (void *) rmem->pg_tbl.idi_vaddr;
			ptr[i]  = htole64(rmem->pg_arr[i].idi_paddr | extra_bits);
		}
	}

	/* Optional host-side shadow memory associated with the ring. */
	if (rmem->vmem_size) {
		*rmem->vmem = malloc(rmem->vmem_size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
	return 0;
}
945 
946 
947 #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES		\
948 	(HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |		\
949 	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ |	\
950 	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ |		\
951 	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC |	\
952 	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
953 
/*
 * Allocate one block of firmware context memory: point the embedded
 * ring_mem at ctx_pg's page array and allocate its pages with VALID
 * PTEs via bnxt_alloc_ring().
 */
static int bnxt_alloc_ctx_mem_blk(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	rmem->page_size = BNXT_PAGE_SIZE;
	rmem->pg_arr = ctx_pg->ctx_arr;
	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
	/* Deeper layouts use a full page for the page table itself. */
	if (rmem->depth >= 1)
		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;

	return bnxt_alloc_ring(softc, rmem);
}
967 
/*
 * Allocate the backing store page tables for one context region of
 * mem_size bytes.  Regions that exceed MAX_CTX_PAGES (or request
 * depth > 1) use a two-level layout: a top-level block whose pages each
 * serve as the page table of a nested bnxt_ctx_pg_info.  Returns 0,
 * -EINVAL, or -ENOMEM; partially built tables are released later by
 * bnxt_free_ctx_pg_tbls().
 */
static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
	int rc;

	if (!mem_size)
		return -EINVAL;

	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
		ctx_pg->nr_pages = 0;
		return -EINVAL;
	}
	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
		int nr_tbls, i;

		rmem->depth = 2;
		/* Array of pointers to the second-level page-info blocks. */
		ctx_pg->ctx_pg_tbl = kzalloc(MAX_CTX_PAGES * sizeof(ctx_pg),
					      GFP_KERNEL);
		if (!ctx_pg->ctx_pg_tbl)
			return -ENOMEM;
		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
		rmem->nr_pages = nr_tbls;
		/* Top level: one page per second-level table. */
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
		if (rc)
			return rc;
		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
			if (!pg_tbl)
				return -ENOMEM;
			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
			rmem = &pg_tbl->ring_mem;
			/* The i-th top-level page doubles as this nested
			 * table's page table (DMA info copied by value).
			 */
			memcpy(&rmem->pg_tbl, &ctx_pg->ctx_arr[i], sizeof(struct iflib_dma_info));
			rmem->depth = 1;
			rmem->nr_pages = MAX_CTX_PAGES;
			rmem->ctx_mem = ctxm;
			/* The final table may be only partially populated. */
			if (i == (nr_tbls - 1)) {
				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;

				if (rem)
					rmem->nr_pages = rem;
			}
			rc = bnxt_alloc_ctx_mem_blk(softc, pg_tbl);
			if (rc)
				break;
		}
	} else {
		/* Single-level layout. */
		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
		if (rmem->nr_pages > 1 || depth)
			rmem->depth = 1;
		rmem->ctx_mem = ctxm;
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
	}
	return rc;
}
1027 
/*
 * Free all backing store pages of one context region, including any
 * second-level tables built by bnxt_alloc_ctx_pg_tbls().
 */
static void bnxt_free_ctx_pg_tbls(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
	    ctx_pg->ctx_pg_tbl) {
		int i, nr_tbls = rmem->nr_pages;

		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;
			struct bnxt_ring_mem_info *rmem2;

			pg_tbl = ctx_pg->ctx_pg_tbl[i];
			if (!pg_tbl)
				continue;
			rmem2 = &pg_tbl->ring_mem;
			bnxt_free_ring(softc, rmem2);
			/* rmem2->pg_tbl aliases ctx_arr[i] (copied at alloc
			 * time) and was freed above; clear the stale vaddr so
			 * the top-level bnxt_free_ring() below does not free
			 * the same page again via pg_arr.
			 */
			ctx_pg->ctx_arr[i].idi_vaddr = NULL;
			free(pg_tbl , M_DEVBUF);
			ctx_pg->ctx_pg_tbl[i] = NULL;
		}
		kfree(ctx_pg->ctx_pg_tbl);
		ctx_pg->ctx_pg_tbl = NULL;
	}
	bnxt_free_ring(softc, rmem);
	ctx_pg->nr_pages = 0;
}
1056 
/*
 * Size and allocate the page tables for one context memory type.  The
 * requested entry count is rounded to the type's entry granularity and
 * clamped to its min/max; one page-table set is built per instance in
 * the instance bitmap (default one).  Marks the type valid on success.
 */
static int bnxt_setup_ctxm_pg_tbls(struct bnxt_softc *softc,
				   struct bnxt_ctx_mem_type *ctxm, u32 entries,
				   u8 pg_lvl)
{
	struct bnxt_ctx_pg_info *pg = ctxm->pg_info;
	struct bnxt_ctx_mem_type *init = NULL;
	int inst, num_inst = 1, rc = 0;
	u32 bytes;

	if (ctxm->entry_size == 0 || pg == NULL)
		return -EINVAL;

	if (ctxm->instance_bmap)
		num_inst = hweight32(ctxm->instance_bmap);

	/* Honor the firmware's entry granularity and bounds. */
	if (ctxm->entry_multiple)
		entries = roundup(entries, ctxm->entry_multiple);
	entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
	bytes = entries * ctxm->entry_size;

	/* Only pass the type through when it requires init. */
	if (ctxm->init_value)
		init = ctxm;

	for (inst = 0; inst < num_inst; inst++) {
		pg[inst].entries = entries;
		rc = bnxt_alloc_ctx_pg_tbls(softc, &pg[inst], bytes, pg_lvl,
					    init);
		if (rc)
			break;
	}
	if (rc == 0)
		ctxm->mem_valid = 1;
	return rc;
}
1082 
bnxt_free_ctx_mem(struct bnxt_softc * softc)1083 static void bnxt_free_ctx_mem(struct bnxt_softc *softc)
1084 {
1085 	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
1086 	u16 type;
1087 
1088 	if (!ctx)
1089 		return;
1090 
1091 	for (type = 0; type < BNXT_CTX_MAX; type++) {
1092 		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
1093 		struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
1094 		int i, n = 1;
1095 
1096 		if (!ctx_pg)
1097 			continue;
1098 		if (ctxm->instance_bmap)
1099 			n = hweight32(ctxm->instance_bmap);
1100 		for (i = 0; i < n; i++)
1101 			bnxt_free_ctx_pg_tbls(softc, &ctx_pg[i]);
1102 
1103 		kfree(ctx_pg);
1104 		ctxm->pg_info = NULL;
1105 	}
1106 
1107 	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
1108 	kfree(ctx);
1109 	softc->ctx_mem = NULL;
1110 }
1111 
/*
 * Configure backing store context memory using the V2 firmware
 * interface.  On the PF, trace buffer regions are set up first (they
 * are optional; failures are logged and skipped).  The region flagged
 * "last" tells firmware no further configuration calls will follow.
 */
static int
bnxt_backing_store_cfg_v2(struct bnxt_softc *softc, u32 ena)
{
	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
	struct bnxt_ctx_mem_type *ctxm;
	u16 last_type = BNXT_CTX_INV;
	int rc = 0;
	u16 type;

	if (BNXT_PF(softc)) {
		for (type = BNXT_CTX_SRT_TRACE; type <= BNXT_CTX_ROCE_HWRM_TRACE; type++) {
			ctxm = &ctx->ctx_arr[type];
			if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID))
				continue;
			rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
			if (rc) {
				/* Trace buffers are best-effort. */
				device_printf(softc->dev, "Unable to setup ctx page for type:0x%x.\n", type);
				rc = 0;
				continue;
			}
			/* ckp TODO: this is trace buffer related stuff, so keeping it disabled now. needs revisit */
			//bnxt_bs_trace_init(bp, ctxm, type - BNXT_CTX_SRT_TRACE);
			last_type = type;
		}
	}

	/* No trace region claimed "last": derive it from the enables. */
	if (last_type == BNXT_CTX_INV) {
		if (!ena)
			return 0;
		else if (ena & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM)
			last_type = BNXT_CTX_MAX - 1;
		else
			last_type = BNXT_CTX_L2_MAX - 1;
	}
	ctx->ctx_arr[last_type].last = 1;

	/* Push every valid region to firmware; abort on first error. */
	for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
		ctxm = &ctx->ctx_arr[type];

		if (!ctxm->mem_valid)
			continue;
		rc = bnxt_hwrm_func_backing_store_cfg_v2(softc, ctxm, ctxm->last);
		if (rc)
			return rc;
	}
	return 0;
}
1159 
bnxt_alloc_ctx_mem(struct bnxt_softc * softc)1160 static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
1161 {
1162 	struct bnxt_ctx_pg_info *ctx_pg;
1163 	struct bnxt_ctx_mem_type *ctxm;
1164 	struct bnxt_ctx_mem_info *ctx;
1165 	u32 l2_qps, qp1_qps, max_qps;
1166 	u32 ena, entries_sp, entries;
1167 	u32 srqs, max_srqs, min;
1168 	u32 num_mr, num_ah;
1169 	u32 extra_srqs = 0;
1170 	u32 extra_qps = 0;
1171 	u8 pg_lvl = 1;
1172 	int i, rc;
1173 
1174 	if (!BNXT_CHIP_P5_PLUS(softc))
1175 		return 0;
1176 
1177 	rc = bnxt_hwrm_func_backing_store_qcaps(softc);
1178 	if (rc) {
1179 		device_printf(softc->dev, "Failed querying context mem capability, rc = %d.\n",
1180 			   rc);
1181 		return rc;
1182 	}
1183 	ctx = softc->ctx_mem;
1184 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
1185 		return 0;
1186 
1187 	ena = 0;
1188 	if (BNXT_VF(softc))
1189 		goto skip_legacy;
1190 
1191 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
1192 	l2_qps = ctxm->qp_l2_entries;
1193 	qp1_qps = ctxm->qp_qp1_entries;
1194 	max_qps = ctxm->max_entries;
1195 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
1196 	srqs = ctxm->srq_l2_entries;
1197 	max_srqs = ctxm->max_entries;
1198 	if (softc->flags & BNXT_FLAG_ROCE_CAP) {
1199 		pg_lvl = 2;
1200 		extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
1201 		extra_srqs = min_t(u32, 8192, max_srqs - srqs);
1202 	}
1203 
1204 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
1205 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps,
1206 				     pg_lvl);
1207 	if (rc)
1208 		return rc;
1209 
1210 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
1211 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, srqs + extra_srqs, pg_lvl);
1212 	if (rc)
1213 		return rc;
1214 
1215 	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
1216 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->cq_l2_entries +
1217 				     extra_qps * 2, pg_lvl);
1218 	if (rc)
1219 		return rc;
1220 
1221 	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
1222 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
1223 	if (rc)
1224 		return rc;
1225 
1226 	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
1227 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
1228 	if (rc)
1229 		return rc;
1230 
1231 	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
1232 		goto skip_rdma;
1233 
1234 	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
1235 	ctx_pg = ctxm->pg_info;
1236 	/* 128K extra is needed to accomodate static AH context
1237 	 * allocation by f/w.
1238 	 */
1239 	num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
1240 	num_ah = min_t(u32, num_mr, 1024 * 128);
1241 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, num_mr + num_ah, 2);
1242 	if (rc)
1243 		return rc;
1244 	ctx_pg->entries = num_mr + num_ah;
1245 	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV;
1246 	if (ctxm->mrav_num_entries_units)
1247 		ctx_pg->entries =
1248 			((num_mr / ctxm->mrav_num_entries_units) << 16) |
1249 			 (num_ah / ctxm->mrav_num_entries_units);
1250 
1251 	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
1252 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps, 1);
1253 	if (rc)
1254 		return rc;
1255 	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM;
1256 
1257 skip_rdma:
1258 	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
1259 	min = ctxm->min_entries;
1260 	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
1261 		     2 * (extra_qps + qp1_qps) + min;
1262 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries_sp, 2);
1263 		if (rc)
1264 			return rc;
1265 
1266 	ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
1267 	entries = l2_qps + 2 * (extra_qps + qp1_qps);
1268 	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries, 2);
1269 	if (rc)
1270 		return rc;
1271 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
1272 		if (i < BNXT_MAX_TQM_LEGACY_RINGS)
1273 			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
1274 		else
1275 			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8;
1276 	}
1277 	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
1278 
1279 skip_legacy:
1280 	if (BNXT_CHIP_P7(softc)) {
1281 		if (softc->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
1282 			rc = bnxt_backing_store_cfg_v2(softc, ena);
1283 	} else {
1284 		rc = bnxt_hwrm_func_backing_store_cfg(softc, ena);
1285 	}
1286 	if (rc) {
1287 		device_printf(softc->dev, "Failed configuring context mem, rc = %d.\n",
1288 			      rc);
1289 		return rc;
1290 	}
1291 	ctx->flags |= BNXT_CTX_FLAG_INITED;
1292 
1293 	return 0;
1294 }
1295 
1296 /*
1297  * If we update the index, a write barrier is needed after the write to ensure
1298  * the completion ring has space before the RX/TX ring does.  Since we can't
1299  * make the RX and AG doorbells covered by the same barrier without remapping
 * MSI-X vectors, we create the barrier over the entire doorbell bar.
1301  * TODO: Remap the MSI-X vectors to allow a barrier to only cover the doorbells
1302  *       for a single ring group.
1303  *
1304  * A barrier of just the size of the write is used to ensure the ordering
1305  * remains correct and no writes are lost.
1306  */
1307 
/*
 * Write the RX producer index to the ring's doorbell (legacy 32-bit
 * doorbell format).  The write barrier precedes the doorbell write;
 * see the block comment above for the barrier strategy.
 */
static void bnxt_cuw_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
			htole32(RX_DOORBELL_KEY_RX | idx));
}
1318 
/*
 * Write the TX producer index to the ring's doorbell (legacy 32-bit
 * doorbell format), with a write barrier before the doorbell write.
 */
static void bnxt_cuw_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
			htole32(TX_DOORBELL_KEY_TX | idx));
}
1329 
/*
 * Update the completion ring doorbell (legacy format), optionally
 * unmasking the interrupt.  cons == UINT32_MAX means no entry has been
 * consumed yet, so IDX_VALID is omitted.  The trailing barrier spans
 * the whole doorbell BAR so this update is ordered before any
 * subsequent RX/TX doorbell (see the block comment above).
 */
static void bnxt_cuw_db_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 4,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole32(CMPL_DOORBELL_KEY_CMPL |
				((cpr->cons == UINT32_MAX) ? 0 :
				 (cpr->cons | CMPL_DOORBELL_IDX_VALID)) |
				((enable_irq) ? 0 : CMPL_DOORBELL_MASK)));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1345 
/*
 * Ring the RX doorbell in the P5 ("Thor") 64-bit format: L2 path, SRQ
 * type, ring's phys_id in the XID field, producer index in idx.
 */
static void bnxt_thor_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
			htole64((DBR_PATH_L2 | DBR_TYPE_SRQ | idx) |
				((uint64_t)ring->phys_id << DBR_XID_SFT)));
}
1357 
/*
 * Ring the TX doorbell in the P5 ("Thor") 64-bit format: L2 path, SQ
 * type, ring's phys_id in the XID field, producer index in idx.
 */
static void bnxt_thor_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
			htole64((DBR_PATH_L2 | DBR_TYPE_SQ | idx) |
				((uint64_t)ring->phys_id << DBR_XID_SFT)));
}
1369 
/*
 * Update the RX completion queue doorbell (P5 format).  Writes the
 * index one past the last consumed entry (UINT32_MAX means nothing
 * consumed yet, index 0), arming the CQ interrupt when enable_irq is
 * set.  The trailing barrier covers the whole doorbell BAR to order
 * this write before any subsequent ring doorbells.
 */
static void bnxt_thor_db_rx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	if (cons == UINT32_MAX)
		cons = 0;
	else
		cons = RING_NEXT(&cpr->ring, cons);

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1395 
/*
 * Update the TX completion queue doorbell (P5 format), arming the CQ
 * interrupt when enable_irq is set.
 *
 * NOTE(review): unlike bnxt_thor_db_rx_cq(), cons == UINT32_MAX is not
 * special-cased and cons is not advanced with RING_NEXT here — confirm
 * the TX CP ring's cons is always valid when this is called.
 */
static void bnxt_thor_db_tx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1416 
/*
 * Update the notification queue doorbell (P5 format), arming the NQ
 * interrupt when enable_irq is set.
 */
static void bnxt_thor_db_nq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
			DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
		((enable_irq) ? DBC_DBC_TYPE_NQ_ARM: DBC_DBC_TYPE_NQ);

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1437 
/*
 * Ring the RX doorbell in the P7 ("Thor2") format: as Thor but with
 * the VALID bit and a per-index epoch bit folded in via DB_RING_IDX.
 * Out-of-range indices are rejected with a debug message.
 */
static void
bnxt_thor2_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
	uint64_t db_val;

	if (idx >= ring->ring_size) {
		device_printf(ring->softc->dev, "%s: BRCM DBG: idx: %d crossed boundary\n", __func__, idx);
		return;
	}

	db_val = ((DBR_PATH_L2 | DBR_TYPE_SRQ | DBR_VALID | idx) |
				((uint64_t)ring->phys_id << DBR_XID_SFT));

	/* Add the PI index */
	db_val |= DB_RING_IDX(ring, idx, ring->epoch_arr[idx]);

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
			htole64(db_val));
}
1461 
/*
 * Ring the TX doorbell in the P7 ("Thor2") format: SQ type with the
 * VALID bit and per-index epoch bit via DB_RING_IDX.  Out-of-range
 * indices are rejected with a debug message.
 */
static void
bnxt_thor2_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
	uint64_t db_val;

	if (idx >= ring->ring_size) {
		device_printf(ring->softc->dev, "%s: BRCM DBG: idx: %d crossed boundary\n", __func__, idx);
		return;
	}

	db_val = ((DBR_PATH_L2 | DBR_TYPE_SQ | DBR_VALID | idx) |
				((uint64_t)ring->phys_id << DBR_XID_SFT));

	/* Add the PI index */
	db_val |= DB_RING_IDX(ring, idx, ring->epoch_arr[idx]);

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
			htole64(db_val));
}
1485 
/*
 * Update the RX completion queue doorbell (P7 format).  When arming
 * the interrupt the ring's toggle bits are included; cons of
 * UINT32_MAX (nothing consumed yet) is treated as 0.  The trailing
 * barrier spans the whole doorbell BAR for ordering against ring
 * doorbells.
 */
static void
bnxt_thor2_db_rx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	u64 db_msg = { 0 };
	uint32_t cons = cpr->raw_cons;
	uint32_t toggle = 0;

	if (cons == UINT32_MAX)
		cons = 0;

	if (enable_irq == true)
		toggle = cpr->toggle;

	db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
			DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);

	if (enable_irq)
		db_msg |= DBR_TYPE_CQ_ARMALL;
	else
		db_msg |= DBR_TYPE_CQ;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1516 
/*
 * Update the TX completion queue doorbell (P7 format); toggle bits are
 * included when arming the interrupt.  The trailing barrier spans the
 * whole doorbell BAR for ordering against ring doorbells.
 */
static void
bnxt_thor2_db_tx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	u64 db_msg = { 0 };
	uint32_t cons = cpr->raw_cons;
	uint32_t toggle = 0;

	if (enable_irq == true)
		toggle = cpr->toggle;

	db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
			DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);

	if (enable_irq)
		db_msg |= DBR_TYPE_CQ_ARMALL;
	else
		db_msg |= DBR_TYPE_CQ;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1544 
/*
 * Update the notification queue doorbell (P7 format): NQ_ARM with
 * toggle bits when enabling the interrupt, NQ_MASK when disabling.
 */
static void
bnxt_thor2_db_nq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	u64 db_msg = { 0 };
	uint32_t cons = cpr->raw_cons;
	uint32_t toggle = 0;

	if (enable_irq == true)
		toggle = cpr->toggle;

	db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
			DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);

	if (enable_irq)
		db_msg |= DBR_TYPE_NQ_ARM;
	else
		db_msg |= DBR_TYPE_NQ_MASK;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
			BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
			htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
			BUS_SPACE_BARRIER_WRITE);
}
1572 
/*
 * Look up a registered PF softc, either by interface name (when
 * dev_name is non-NULL) or by PCI domain/bus/devfn.  Returns NULL when
 * no match is found.
 */
struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn, char *dev_name)
{
	struct bnxt_softc_list *entry = NULL;

	SLIST_FOREACH(entry, &pf_list, next) {
		struct bnxt_softc *candidate = entry->softc;

		if (dev_name != NULL) {
			/* Match on the interface name. */
			if (strncmp(dev_name,
			    if_name(iflib_get_ifp(candidate->ctx)),
			    BNXT_MAX_STR) == 0)
				return candidate;
			continue;
		}

		/* Match on PCI domain/bus/devfn. */
		if (domain == candidate->domain &&
		    bus == candidate->bus &&
		    dev_fn == candidate->dev_fn)
			return candidate;
	}

	return NULL;
}
1594 
1595 
bnxt_verify_asym_queues(struct bnxt_softc * softc)1596 static void bnxt_verify_asym_queues(struct bnxt_softc *softc)
1597 {
1598 	uint8_t i, lltc = 0;
1599 
1600 	if (!softc->max_lltc)
1601 		return;
1602 
1603 	/* Verify that lossless TX and RX queues are in the same index */
1604 	for (i = 0; i < softc->max_tc; i++) {
1605 		if (BNXT_LLQ(softc->tx_q_info[i].queue_profile) &&
1606 		    BNXT_LLQ(softc->rx_q_info[i].queue_profile))
1607 			lltc++;
1608 	}
1609 	softc->max_lltc = min(softc->max_lltc, lltc);
1610 }
1611 
/*
 * Probe firmware liveness with an HWRM_VER_GET.  Returns 0 when the
 * firmware responds and is ready, -EAGAIN when it responds but reports
 * DEV_NOT_RDY, or the send error otherwise.
 */
static int bnxt_hwrm_poll(struct bnxt_softc *bp)
{
	struct hwrm_ver_get_output	*resp =
	    (void *)bp->hwrm_cmd_resp.idi_vaddr;
	struct hwrm_ver_get_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = _hwrm_send_message(bp, &req, sizeof(req));
	if (rc)
		return rc;

	if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
		rc = -EAGAIN;

	return rc;
}
1634 
/* Acquire rtnl from the sp task without deadlocking against bnxt_close(). */
static void bnxt_rtnl_lock_sp(struct bnxt_softc *bp)
{
	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
	 * set.  If the device is being closed, bnxt_close() may be holding
	 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
	 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
	 */
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_lock();
}
1645 
/* Counterpart of bnxt_rtnl_lock_sp(): re-mark the sp task as running
 * before dropping rtnl.
 */
static void bnxt_rtnl_unlock_sp(struct bnxt_softc *bp)
{
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_unlock();
}
1651 
/* Quiesce the device after a fatal firmware error: mask interrupts and
 * disable the PCI device so no further DMA can occur.
 */
static void bnxt_fw_fatal_close(struct bnxt_softc *softc)
{
	bnxt_disable_intr(softc->ctx);
	if (pci_is_enabled(softc->pdev))
		pci_disable_device(softc->pdev);
}
1658 
/*
 * Read one firmware health register.  The register descriptor encodes
 * both its type (PCI config space, GRC window, BAR0 or BAR1) and its
 * offset; GRC registers are read through their pre-mapped window
 * offset.  The RESET_INPROG register is additionally masked per the
 * firmware-provided mask.
 */
static u32 bnxt_fw_health_readl(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->regs[reg_idx];
	u32 reg_type, reg_off, val = 0;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_read_config_dword(bp->pdev, reg_off, &val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		reg_off = fw_health->mapped_regs[reg_idx];
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		val = readl_fbsd(bp, reg_off, 0);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		val = readl_fbsd(bp, reg_off, 2);
		break;
	}
	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
		val &= fw_health->fw_reset_inprog_reg_mask;
	return val;
}
1685 
/*
 * Tear the device down in preparation for a firmware reset: stop ULPs,
 * quiesce DMA if firmware is in a fatal state, stop the interface,
 * unregister the driver with firmware, free per-ring interrupts,
 * disable the PCI device, and release context memory.
 */
static void bnxt_fw_reset_close(struct bnxt_softc *bp)
{
	int i;
	bnxt_ulp_stop(bp);
	/* When firmware is in fatal state, quiesce device and disable
	 * bus master to prevent any potential bad DMAs before freeing
	 * kernel memory.
	 */
	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
		u16 val = 0;

		/* An all-ones config read means the device is gone, so
		 * there is no point waiting the minimum reset time.
		 */
		val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
		if (val == 0xffff) {
			bp->fw_reset_min_dsecs = 0;
		}
		bnxt_fw_fatal_close(bp);
	}

	iflib_request_reset(bp->ctx);
	bnxt_stop(bp->ctx);
	bnxt_hwrm_func_drv_unrgtr(bp, false);

	/* P5+ chips take interrupts on NQs; older chips on RX CP rings. */
	for (i = bp->nrxqsets-1; i>=0; i--) {
		if (BNXT_CHIP_P5_PLUS(bp))
			iflib_irq_free(bp->ctx, &bp->nq_rings[i].irq);
		else
			iflib_irq_free(bp->ctx, &bp->rx_cp_rings[i].irq);

	}
	if (pci_is_enabled(bp->pdev))
		pci_disable_device(bp->pdev);
	pci_disable_busmaster(bp->dev);
	bnxt_free_ctx_mem(bp);
}
1720 
is_bnxt_fw_ok(struct bnxt_softc * bp)1721 static bool is_bnxt_fw_ok(struct bnxt_softc *bp)
1722 {
1723 	struct bnxt_fw_health *fw_health = bp->fw_health;
1724 	bool no_heartbeat = false, has_reset = false;
1725 	u32 val;
1726 
1727 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
1728 	if (val == fw_health->last_fw_heartbeat)
1729 		no_heartbeat = true;
1730 
1731 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
1732 	if (val != fw_health->last_fw_reset_cnt)
1733 		has_reset = true;
1734 
1735 	if (!no_heartbeat && has_reset)
1736 		return true;
1737 
1738 	return false;
1739 }
1740 
bnxt_fw_reset(struct bnxt_softc * bp)1741 void bnxt_fw_reset(struct bnxt_softc *bp)
1742 {
1743 	bnxt_rtnl_lock_sp(bp);
1744 	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
1745 	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
1746 		int tmo;
1747 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
1748 		bnxt_fw_reset_close(bp);
1749 
1750 		if ((bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)) {
1751 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
1752 			tmo = HZ / 10;
1753 		} else {
1754 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
1755 			tmo = bp->fw_reset_min_dsecs * HZ /10;
1756 		}
1757 		bnxt_queue_fw_reset_work(bp, tmo);
1758 	}
1759 	bnxt_rtnl_unlock_sp(bp);
1760 }
1761 
bnxt_queue_fw_reset_work(struct bnxt_softc * bp,unsigned long delay)1762 static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay)
1763 {
1764 	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1765 		return;
1766 
1767 	if (BNXT_PF(bp))
1768 		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1769 	else
1770 		schedule_delayed_work(&bp->fw_reset_task, delay);
1771 }
1772 
bnxt_queue_sp_work(struct bnxt_softc * bp)1773 void bnxt_queue_sp_work(struct bnxt_softc *bp)
1774 {
1775 	if (BNXT_PF(bp))
1776 		queue_work(bnxt_pf_wq, &bp->sp_task);
1777 	else
1778 		schedule_work(&bp->sp_task);
1779 }
1780 
/*
 * Execute one step of the FW-provided host reset sequence: write
 * fw_reset_seq_vals[reg_idx] to the register described by
 * fw_reset_seq_regs[reg_idx], then honor the per-step delay.
 */
static void bnxt_fw_reset_writel(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
	u32 reg_type, reg_off, delay_msecs;

	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		/* Register lives in PCI config space. */
		pci_write_config_dword(bp->pdev, reg_off, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		/* Program a GRC window to the register's page, then fall
		 * through and write via the window region at BAR0 + 0x2000.
		 */
		writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4, 0, reg_off & BNXT_GRC_BASE_MASK);
		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		writel_fbsd(bp, reg_off, 0, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		writel_fbsd(bp, reg_off, 2, val);
		break;
	}
	if (delay_msecs) {
		/* Config-space read flushes the posted write before waiting. */
		pci_read_config_dword(bp->pdev, 0, &val);
		msleep(delay_msecs);
	}
}
1811 
bnxt_reset_all(struct bnxt_softc * bp)1812 static void bnxt_reset_all(struct bnxt_softc *bp)
1813 {
1814 	struct bnxt_fw_health *fw_health = bp->fw_health;
1815 	int i, rc;
1816 
1817 	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
1818 		bp->fw_reset_timestamp = jiffies;
1819 		return;
1820 	}
1821 
1822 	if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST) {
1823 		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
1824 			bnxt_fw_reset_writel(bp, i);
1825 	} else if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) {
1826 		struct hwrm_fw_reset_input req = {0};
1827 
1828 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET);
1829 		req.target_id = htole16(HWRM_TARGET_ID_KONG);
1830 		req.embedded_proc_type = HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
1831 		req.selfrst_status = HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
1832 		req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
1833 		rc = hwrm_send_message(bp, &req, sizeof(req));
1834 
1835 		if (rc != -ENODEV)
1836 			device_printf(bp->dev, "Unable to reset FW rc=%d\n", rc);
1837 	}
1838 	bp->fw_reset_timestamp = jiffies;
1839 }
1840 
__bnxt_alloc_fw_health(struct bnxt_softc * bp)1841 static int __bnxt_alloc_fw_health(struct bnxt_softc *bp)
1842 {
1843 	if (bp->fw_health)
1844 		return 0;
1845 
1846 	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
1847 	if (!bp->fw_health)
1848 		return -ENOMEM;
1849 
1850 	mutex_init(&bp->fw_health->lock);
1851 	return 0;
1852 }
1853 
bnxt_alloc_fw_health(struct bnxt_softc * bp)1854 static int bnxt_alloc_fw_health(struct bnxt_softc *bp)
1855 {
1856 	int rc;
1857 
1858 	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
1859 	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
1860 		return 0;
1861 
1862 	rc = __bnxt_alloc_fw_health(bp);
1863 	if (rc) {
1864 		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
1865 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
1866 		return rc;
1867 	}
1868 
1869 	return 0;
1870 }
1871 
/*
 * Point the FW-health GRC window at the GRC page containing 'reg' so
 * the monitoring registers can be read directly through BAR0.
 */
static inline void __bnxt_map_fw_health_reg(struct bnxt_softc *bp, u32 reg)
{
	writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + BNXT_FW_HEALTH_WIN_MAP_OFF, 0, reg & BNXT_GRC_BASE_MASK);
}
1876 
bnxt_map_fw_health_regs(struct bnxt_softc * bp)1877 static int bnxt_map_fw_health_regs(struct bnxt_softc *bp)
1878 {
1879 	struct bnxt_fw_health *fw_health = bp->fw_health;
1880 	u32 reg_base = 0xffffffff;
1881 	int i;
1882 
1883 	bp->fw_health->status_reliable = false;
1884 	bp->fw_health->resets_reliable = false;
1885 	/* Only pre-map the monitoring GRC registers using window 3 */
1886 	for (i = 0; i < 4; i++) {
1887 		u32 reg = fw_health->regs[i];
1888 
1889 		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
1890 			continue;
1891 		if (reg_base == 0xffffffff)
1892 			reg_base = reg & BNXT_GRC_BASE_MASK;
1893 		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
1894 			return -ERANGE;
1895 		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
1896 	}
1897 	bp->fw_health->status_reliable = true;
1898 	bp->fw_health->resets_reliable = true;
1899 	if (reg_base == 0xffffffff)
1900 		return 0;
1901 
1902 	__bnxt_map_fw_health_reg(bp, reg_base);
1903 	return 0;
1904 }
1905 
bnxt_inv_fw_health_reg(struct bnxt_softc * bp)1906 static void bnxt_inv_fw_health_reg(struct bnxt_softc *bp)
1907 {
1908 	struct bnxt_fw_health *fw_health = bp->fw_health;
1909 	u32 reg_type;
1910 
1911 	if (!fw_health)
1912 		return;
1913 
1914 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
1915 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
1916 		fw_health->status_reliable = false;
1917 
1918 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
1919 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
1920 		fw_health->resets_reliable = false;
1921 }
1922 
/*
 * Query the FW error-recovery configuration (HWRM_ERROR_RECOVERY_QCFG)
 * and cache the health/heartbeat register locations, wait intervals and
 * the host-driven reset write sequence in bp->fw_health.  On any
 * failure the BNXT_FW_CAP_ERROR_RECOVERY capability is cleared.
 */
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	struct hwrm_error_recovery_qcfg_output *resp =
	    (void *)bp->hwrm_cmd_resp.idi_vaddr;
	struct hwrm_error_recovery_qcfg_input req = {0};
	int rc, i;

	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG);
	rc = _hwrm_send_message(bp, &req, sizeof(req));

	if (rc)
		goto err_recovery_out;
	fw_health->flags = le32toh(resp->flags);
	/* CO_CPU-driven recovery requires the KONG mailbox channel. */
	if ((fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) &&
	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	/* Intervals below are in deciseconds, as reported by FW. */
	fw_health->polling_dsecs = le32toh(resp->driver_polling_freq);
	fw_health->master_func_wait_dsecs =
		le32toh(resp->master_func_wait_period);
	fw_health->normal_func_wait_dsecs =
		le32toh(resp->normal_func_wait_period);
	fw_health->post_reset_wait_dsecs =
		le32toh(resp->master_func_wait_period_after_reset);
	fw_health->post_reset_max_wait_dsecs =
		le32toh(resp->max_bailout_time_after_reset);
	fw_health->regs[BNXT_FW_HEALTH_REG] =
		le32toh(resp->fw_health_status_reg);
	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
		le32toh(resp->fw_heartbeat_reg);
	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
		le32toh(resp->fw_reset_cnt_reg);
	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
		le32toh(resp->reset_inprogress_reg);
	fw_health->fw_reset_inprog_reg_mask =
		le32toh(resp->reset_inprogress_reg_mask);
	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
	/* NOTE(review): 16 looks like the fw_reset_seq_* array bound —
	 * confirm against the struct definition and consider a named
	 * constant.
	 */
	if (fw_health->fw_reset_seq_cnt >= 16) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
		fw_health->fw_reset_seq_regs[i] =
			le32toh(resp->reset_reg[i]);
		fw_health->fw_reset_seq_vals[i] =
			le32toh(resp->reset_reg_val[i]);
		fw_health->fw_reset_seq_delay_msec[i] =
			le32toh(resp->delay_after_reset[i]);
	}
err_recovery_out:
	if (!rc)
		rc = bnxt_map_fw_health_regs(bp);
	if (rc)
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
	return rc;
}
1984 
bnxt_drv_rgtr(struct bnxt_softc * bp)1985 static int bnxt_drv_rgtr(struct bnxt_softc *bp)
1986 {
1987 	int rc;
1988 
1989 	/* determine whether we can support error recovery before
1990 	 * registering with FW
1991 	 */
1992 	if (bnxt_alloc_fw_health(bp)) {
1993 		device_printf(bp->dev, "no memory for firmware error recovery\n");
1994 	} else {
1995 		rc = bnxt_hwrm_error_recovery_qcfg(bp);
1996 		if (rc)
1997 			device_printf(bp->dev, "hwrm query error recovery failure rc: %d\n",
1998 				    rc);
1999 	}
2000 	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);  //sumit dbg: revisit the params
2001 	if (rc)
2002 		return -ENODEV;
2003 	return 0;
2004 }
2005 
bnxt_fw_reset_timeout(struct bnxt_softc * bp)2006 static bool bnxt_fw_reset_timeout(struct bnxt_softc *bp)
2007 {
2008 	return time_after(jiffies, bp->fw_reset_timestamp +
2009 			  (bp->fw_reset_max_dsecs * HZ / 10));
2010 }
2011 
/*
 * Re-open the device after a firmware reset: re-query queue config and
 * capabilities, re-register with FW, re-allocate context memory, and
 * bring the iflib interface back up.
 */
static int bnxt_open(struct bnxt_softc *bp)
{
	int rc = 0;
	/* NOTE(review): rc from this call is overwritten below without
	 * being checked — confirm whether NVM query failure should be
	 * fatal here.
	 */
	if (BNXT_PF(bp))
		rc = bnxt_hwrm_nvm_get_dev_info(bp, &bp->nvm_info->mfg_id,
			&bp->nvm_info->device_id, &bp->nvm_info->sector_size,
			&bp->nvm_info->size, &bp->nvm_info->reserved_size,
			&bp->nvm_info->available_size);

	/* Get the queue config */
	rc = bnxt_hwrm_queue_qportcfg(bp, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
	if (rc) {
		device_printf(bp->dev, "reinit: hwrm qportcfg (tx) failed\n");
		return rc;
	}
	if (bp->is_asym_q) {
		/* Asymmetric queues: RX config differs from TX. */
		rc = bnxt_hwrm_queue_qportcfg(bp,
					      HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
		if (rc) {
			device_printf(bp->dev, "re-init: hwrm qportcfg (rx)  failed\n");
			return rc;
		}
		bnxt_verify_asym_queues(bp);
	} else {
		/* Symmetric queues: mirror the TX config to RX. */
		bp->rx_max_q = bp->tx_max_q;
		memcpy(bp->rx_q_info, bp->tx_q_info, sizeof(bp->rx_q_info));
		memcpy(bp->rx_q_ids, bp->tx_q_ids, sizeof(bp->rx_q_ids));
	}
	/* Get the HW capabilities */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	/* Register the driver with the FW */
	rc = bnxt_drv_rgtr(bp);
	if (rc)
		return rc;
	if (bp->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(bp);
		if (rc) {
			device_printf(bp->dev, "attach: alloc_ctx_mem failed\n");
			return rc;
		}
		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
	}

	if (BNXT_CHIP_P5_PLUS(bp))
		bnxt_hwrm_reserve_pf_rings(bp);
	/* Get the current configuration of this function */
	rc = bnxt_hwrm_func_qcfg(bp);
	if (rc) {
		device_printf(bp->dev, "re-init: hwrm func qcfg failed\n");
		return rc;
	}

	bnxt_msix_intr_assign(bp->ctx, 0);
	bnxt_init(bp->ctx);
	bnxt_intr_enable(bp->ctx);

	/* If FW reset was detected (rather than driver-initiated),
	 * restart the ULPs here.
	 */
	if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
		if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
			bnxt_ulp_start(bp, 0);
		}
	}

	device_printf(bp->dev, "Network interface is UP and operational\n");

	return rc;
}
bnxt_fw_reset_abort(struct bnxt_softc * bp,int rc)2083 static void bnxt_fw_reset_abort(struct bnxt_softc *bp, int rc)
2084 {
2085 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
2086 	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
2087 		bnxt_ulp_start(bp, rc);
2088 	}
2089 	bp->fw_reset_state = 0;
2090 }
2091 
/*
 * FW reset state machine, run as (delayed) work.  Each state either
 * re-queues itself to poll again, advances (falling through to the
 * next case), or aborts.  States:
 *   POLL_FW_DOWN -> RESET_FW -> ENABLE_DEV -> POLL_FW -> OPENING
 */
static void bnxt_fw_reset_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, fw_reset_task.work);
	int rc = 0;

	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		device_printf(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
		return;
	}

	switch (bp->fw_reset_state) {
	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
		u32 val;

		/* Wait for FW to report shutdown (or give up on timeout). */
		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
		    !bnxt_fw_reset_timeout(bp)) {
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}

		/* Non-primary functions don't drive the reset; they just
		 * wait out the normal-function window, then re-enable.
		 */
		if (!bp->fw_health->primary) {
			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;

			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
			return;
		}
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	}
		fallthrough;
	case BNXT_FW_RESET_STATE_RESET_FW:
		/* Primary function actually resets the FW, then waits the
		 * minimum post-reset period.
		 */
		bnxt_reset_all(bp);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
		return;
	case BNXT_FW_RESET_STATE_ENABLE_DEV:
		bnxt_inv_fw_health_reg(bp);
		/* After a fatal reset, poll until PCI config space comes
		 * back (an all-ones read means it is still gone).
		 */
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
		    !bp->fw_reset_min_dsecs) {
			u16 val;

			val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
			if (val == 0xffff) {
				if (bnxt_fw_reset_timeout(bp)) {
					device_printf(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
					rc = -ETIMEDOUT;
					goto fw_reset_abort;
				}
				bnxt_queue_fw_reset_work(bp, HZ / 1000);
				return;
			}
		}
		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
		if (!pci_is_enabled(bp->pdev)) {
			if (pci_enable_device(bp->pdev)) {
				device_printf(bp->dev, "Cannot re-enable PCI device\n");
				rc = -ENODEV;
				goto fw_reset_abort;
			}
		}
		pci_set_master(bp->pdev);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
		fallthrough;
	case BNXT_FW_RESET_STATE_POLL_FW:
		/* Use a short HWRM timeout while polling for FW readiness. */
		bp->hwrm_cmd_timeo = SHORT_HWRM_CMD_TIMEOUT;
		rc = bnxt_hwrm_poll(bp);
		if (rc) {
			if (bnxt_fw_reset_timeout(bp)) {
				device_printf(bp->dev, "Firmware reset aborted\n");
				goto fw_reset_abort_status;
			}
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}
		bp->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
		fallthrough;
	case BNXT_FW_RESET_STATE_OPENING:
		rc = bnxt_open(bp);
		if (rc) {
			device_printf(bp->dev, "bnxt_open() failed during FW reset\n");
			bnxt_fw_reset_abort(bp, rc);
			rtnl_unlock();
			return;
		}

		/* Re-baseline the reset counter for future health checks. */
		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
		    bp->fw_health->enabled) {
			bp->fw_health->last_fw_reset_cnt =
				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
		}
		bp->fw_reset_state = 0;
		/* Order the state writes above before clearing IN_FW_RESET. */
		smp_mb__before_atomic();
		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_ulp_start(bp, 0);
		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
		set_bit(BNXT_STATE_OPEN, &bp->state);
		rtnl_unlock();
	}
	return;

fw_reset_abort_status:
	if (bp->fw_health->status_reliable ||
	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);

		device_printf(bp->dev, "fw_health_status 0x%x\n", sts);
	}
fw_reset_abort:
	rtnl_lock();
	bnxt_fw_reset_abort(bp, rc);
	rtnl_unlock();
}
2207 
bnxt_force_fw_reset(struct bnxt_softc * bp)2208 static void bnxt_force_fw_reset(struct bnxt_softc *bp)
2209 {
2210 	struct bnxt_fw_health *fw_health = bp->fw_health;
2211 	u32 wait_dsecs;
2212 
2213 	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
2214 	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
2215 		return;
2216 	bnxt_fw_reset_close(bp);
2217 	wait_dsecs = fw_health->master_func_wait_dsecs;
2218 	if (fw_health->primary) {
2219 		if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
2220 			wait_dsecs = 0;
2221 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
2222 	} else {
2223 		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
2224 		wait_dsecs = fw_health->normal_func_wait_dsecs;
2225 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
2226 	}
2227 
2228 	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
2229 	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
2230 	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
2231 }
2232 
/*
 * Handle a firmware fatal exception: mark the fatal condition before
 * taking the lock, then force a reset.
 */
static void bnxt_fw_exception(struct bnxt_softc *bp)
{
	device_printf(bp->dev, "Detected firmware fatal condition, initiating reset\n");
	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
	bnxt_rtnl_lock_sp(bp);
	bnxt_force_fw_reset(bp);
	bnxt_rtnl_unlock_sp(bp);
}
2241 
__bnxt_fw_recover(struct bnxt_softc * bp)2242 static void __bnxt_fw_recover(struct bnxt_softc *bp)
2243 {
2244 	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
2245 	    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
2246 		bnxt_fw_reset(bp);
2247 	else
2248 		bnxt_fw_exception(bp);
2249 }
2250 
bnxt_devlink_health_fw_report(struct bnxt_softc * bp)2251 static void bnxt_devlink_health_fw_report(struct bnxt_softc *bp)
2252 {
2253 	struct bnxt_fw_health *fw_health = bp->fw_health;
2254 
2255 	if (!fw_health)
2256 		return;
2257 
2258 	if (!fw_health->fw_reporter) {
2259 		__bnxt_fw_recover(bp);
2260 		return;
2261 	}
2262 }
2263 
/*
 * Slow-path task: handles deferred FW reset/exception events queued in
 * bp->sp_event.  Brackets its work with BNXT_STATE_IN_SP_TASK, using
 * memory barriers to order the flag against the OPEN-state check.
 */
static void bnxt_sp_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, sp_task);

	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	smp_mb__after_atomic();
	/* Bail if the device closed after this task was queued. */
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
		return;
	}

	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
		/* FW asked for a reset: route fatal conditions through the
		 * health reporter, others straight to reset.
		 */
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
		    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
			bnxt_devlink_health_fw_report(bp);
		else
			bnxt_fw_reset(bp);
	}

	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
		/* Only escalate if FW did not recover on its own. */
		if (!is_bnxt_fw_ok(bp))
			bnxt_devlink_health_fw_report(bp);
	}
	smp_mb__before_atomic();
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
2290 
2291 /* Device setup and teardown */
/*
 * iflib attach (pre-interrupt) entry point: map the PCI device, bring
 * up the HWRM channel, query FW version/queue config/capabilities,
 * register with FW, size the iflib rings, and create sysctl nodes.
 * On failure, unwinds via the labeled cleanup chain at the bottom.
 */
static int
bnxt_attach_pre(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx;
	int rc = 0;

	softc->ctx = ctx;
	softc->dev = iflib_get_dev(ctx);
	softc->media = iflib_get_media(ctx);
	softc->scctx = iflib_get_softc_ctx(ctx);
	softc->sctx = iflib_get_sctx(ctx);
	scctx = softc->scctx;

	/* TODO: Better way of detecting NPAR/VF is needed */
	switch (pci_get_device(softc->dev)) {
	case BCM57402_NPAR:
	case BCM57404_NPAR:
	case BCM57406_NPAR:
	case BCM57407_NPAR:
	case BCM57412_NPAR1:
	case BCM57412_NPAR2:
	case BCM57414_NPAR1:
	case BCM57414_NPAR2:
	case BCM57416_NPAR1:
	case BCM57416_NPAR2:
	case BCM57504_NPAR:
		softc->flags |= BNXT_FLAG_NPAR;
		break;
	case NETXTREME_C_VF1:
	case NETXTREME_C_VF2:
	case NETXTREME_C_VF3:
	case NETXTREME_E_VF1:
	case NETXTREME_E_VF2:
	case NETXTREME_E_VF3:
		softc->flags |= BNXT_FLAG_VF;
		break;
	}

	softc->domain = pci_get_domain(softc->dev);
	softc->bus = pci_get_bus(softc->dev);
	softc->slot = pci_get_slot(softc->dev);
	softc->function = pci_get_function(softc->dev);
	softc->dev_fn = PCI_DEVFN(softc->slot, softc->function);

	/* Track all attached instances on a global list. */
	if (bnxt_num_pfs == 0)
		  SLIST_INIT(&pf_list);
	bnxt_num_pfs++;
	softc->list.softc = softc;
	SLIST_INSERT_HEAD(&pf_list, &softc->list, next);

	pci_enable_busmaster(softc->dev);

	if (bnxt_pci_mapping(softc)) {
		device_printf(softc->dev, "PCI mapping failed\n");
		rc = ENXIO;
		goto pci_map_fail;
	}

	/* linuxkpi shim pdev used by the Linux-derived code paths. */
	softc->pdev = kzalloc(sizeof(*softc->pdev), GFP_KERNEL);
	if (!softc->pdev) {
		device_printf(softc->dev, "pdev alloc failed\n");
		rc = -ENOMEM;
		goto free_pci_map;
	}

	rc = linux_pci_attach_device(softc->dev, NULL, NULL, softc->pdev);
	if (rc) {
		device_printf(softc->dev, "Failed to attach Linux PCI device 0x%x\n", rc);
		goto pci_attach_fail;
	}

	/* HWRM setup/init */
	BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev));
	rc = bnxt_alloc_hwrm_dma_mem(softc);
	if (rc)
		goto dma_fail;

	/* Get firmware version and compare with driver */
	softc->ver_info = malloc(sizeof(struct bnxt_ver_info),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (softc->ver_info == NULL) {
		rc = ENOMEM;
		device_printf(softc->dev,
		    "Unable to allocate space for version info\n");
		goto ver_alloc_fail;
	}
	/* Default minimum required HWRM version */
	softc->ver_info->hwrm_min_major = HWRM_VERSION_MAJOR;
	softc->ver_info->hwrm_min_minor = HWRM_VERSION_MINOR;
	softc->ver_info->hwrm_min_update = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_ver_get(softc);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm ver get failed\n");
		goto ver_fail;
	}

	/* Now perform a function reset */
	/* NOTE(review): rc from func_reset is not checked before being
	 * overwritten below — confirm whether a failed reset should abort
	 * the attach.
	 */
	rc = bnxt_hwrm_func_reset(softc);

	if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
	    softc->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
		rc = bnxt_alloc_hwrm_short_cmd_req(softc);
		if (rc)
			goto hwrm_short_cmd_alloc_fail;
	}

	/* Classify the chip generation from the reported chip number. */
	if ((softc->ver_info->chip_num == BCM57508) ||
	    (softc->ver_info->chip_num == BCM57504) ||
	    (softc->ver_info->chip_num == BCM57504_NPAR) ||
	    (softc->ver_info->chip_num == BCM57502) ||
	    (softc->ver_info->chip_num == BCM57601) ||
	    (softc->ver_info->chip_num == BCM57602) ||
	    (softc->ver_info->chip_num == BCM57604))
		softc->flags |= BNXT_FLAG_CHIP_P5;

	if (softc->ver_info->chip_num == BCM57608)
		softc->flags |= BNXT_FLAG_CHIP_P7;

	softc->flags |= BNXT_FLAG_TPA;

	/* NOTE(review): the unconditional P5_PLUS clear below makes this
	 * rev/metal-qualified clear redundant — verify intent.
	 */
	if (BNXT_CHIP_P5_PLUS(softc) && (!softc->ver_info->chip_rev) &&
			(!softc->ver_info->chip_metal))
		softc->flags &= ~BNXT_FLAG_TPA;

	if (BNXT_CHIP_P5_PLUS(softc))
		softc->flags &= ~BNXT_FLAG_TPA;

	/* Get NVRAM info */
	if (BNXT_PF(softc)) {
		/* First PF creates the shared driver workqueue. */
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				device_printf(softc->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto nvm_alloc_fail;
			}
		}

		softc->nvm_info = malloc(sizeof(struct bnxt_nvram_info),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (softc->nvm_info == NULL) {
			rc = ENOMEM;
			device_printf(softc->dev,
			    "Unable to allocate space for NVRAM info\n");
			goto nvm_alloc_fail;
		}

		rc = bnxt_hwrm_nvm_get_dev_info(softc, &softc->nvm_info->mfg_id,
		    &softc->nvm_info->device_id, &softc->nvm_info->sector_size,
		    &softc->nvm_info->size, &softc->nvm_info->reserved_size,
		    &softc->nvm_info->available_size);
	}

	/* Select doorbell ops for the chip generation. */
	if (BNXT_CHIP_P5(softc)) {
		softc->db_ops.bnxt_db_tx = bnxt_thor_db_tx;
		softc->db_ops.bnxt_db_rx = bnxt_thor_db_rx;
		softc->db_ops.bnxt_db_rx_cq = bnxt_thor_db_rx_cq;
		softc->db_ops.bnxt_db_tx_cq = bnxt_thor_db_tx_cq;
		softc->db_ops.bnxt_db_nq = bnxt_thor_db_nq;
	} else if (BNXT_CHIP_P7(softc)) {
		softc->db_ops.bnxt_db_tx = bnxt_thor2_db_tx;
		softc->db_ops.bnxt_db_rx = bnxt_thor2_db_rx;
		softc->db_ops.bnxt_db_rx_cq = bnxt_thor2_db_rx_cq;
		softc->db_ops.bnxt_db_tx_cq = bnxt_thor2_db_tx_cq;
		softc->db_ops.bnxt_db_nq = bnxt_thor2_db_nq;
	} else {
		softc->db_ops.bnxt_db_tx = bnxt_cuw_db_tx;
		softc->db_ops.bnxt_db_rx = bnxt_cuw_db_rx;
		softc->db_ops.bnxt_db_rx_cq = bnxt_cuw_db_cq;
		softc->db_ops.bnxt_db_tx_cq = bnxt_cuw_db_cq;
	}


	/* Get the queue config */
	rc = bnxt_hwrm_queue_qportcfg(softc, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm qportcfg (tx) failed\n");
		goto failed;
	}
	if (softc->is_asym_q) {
		rc = bnxt_hwrm_queue_qportcfg(softc,
					      HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
		if (rc) {
			device_printf(softc->dev, "attach: hwrm qportcfg (rx)  failed\n");
			/* NOTE(review): returns without running the cleanup
			 * chain (goto failed) — earlier allocations leak.
			 * Verify and consider 'goto failed'.
			 */
			return rc;
		}
		bnxt_verify_asym_queues(softc);
	} else {
		softc->rx_max_q = softc->tx_max_q;
		memcpy(softc->rx_q_info, softc->tx_q_info, sizeof(softc->rx_q_info));
		memcpy(softc->rx_q_ids, softc->tx_q_ids, sizeof(softc->rx_q_ids));
	}

	/* Get the HW capabilities */
	rc = bnxt_hwrm_func_qcaps(softc);
	if (rc)
		goto failed;

	/*
	 * Register the driver with the FW
	 * Register the async events with the FW
	 */
	rc = bnxt_drv_rgtr(softc);
	if (rc)
		goto failed;

	if (softc->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(softc);
		if (rc) {
			device_printf(softc->dev, "attach: alloc_ctx_mem failed\n");
			/* NOTE(review): same leak as above — bypasses the
			 * cleanup chain; verify and consider 'goto failed'.
			 */
			return rc;
		}
		rc = bnxt_hwrm_func_resc_qcaps(softc, true);
		if (!rc)
			softc->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
	}

	/* Get the current configuration of this function */
	rc = bnxt_hwrm_func_qcfg(softc);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
		goto failed;
	}

	iflib_set_mac(ctx, softc->func.mac_addr);

	scctx->isc_txrx = &bnxt_txrx;
	scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP |
	    CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO);
	scctx->isc_capabilities = scctx->isc_capenable =
	    /* These are translated to hwassit bits */
	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
	    /* These are checked by iflib */
	    IFCAP_LRO | IFCAP_VLAN_HWFILTER |
	    /* These are part of the iflib mask */
	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
	    /* These likely get lost... */
	    IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;

	if (bnxt_wol_supported(softc))
		scctx->isc_capabilities |= IFCAP_WOL_MAGIC;
	bnxt_get_wol_settings(softc);
	if (softc->wol)
		scctx->isc_capenable |= IFCAP_WOL_MAGIC;

	/* Get the queue config */
	/* NOTE(review): duplicate bnxt_get_wol_settings() call (also made
	 * just above) — verify whether one can be dropped.
	 */
	bnxt_get_wol_settings(softc);
	if (BNXT_CHIP_P5_PLUS(softc))
		bnxt_hwrm_reserve_pf_rings(softc);
	rc = bnxt_hwrm_func_qcfg(softc);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
		goto failed;
	}

	bnxt_clear_ids(softc);
	/* NOTE(review): rc was already checked above and bnxt_clear_ids()
	 * does not set it — this check looks dead; verify.
	 */
	if (rc)
		goto failed;

	/* Now set up iflib sc */
	scctx->isc_tx_nsegments = 31,
	scctx->isc_tx_tso_segments_max = 31;
	scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE;
	scctx->isc_vectors = softc->func.max_cp_rings;
	scctx->isc_min_frame_size = BNXT_MIN_FRAME_SIZE;
	scctx->isc_txrx = &bnxt_txrx;

	if (scctx->isc_nrxd[0] <
	    ((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2]))
		device_printf(softc->dev,
		    "WARNING: nrxd0 (%d) should be at least 4 * nrxd1 (%d) + nrxd2 (%d).  Driver may be unstable\n",
		    scctx->isc_nrxd[0], scctx->isc_nrxd[1], scctx->isc_nrxd[2]);
	if (scctx->isc_ntxd[0] < scctx->isc_ntxd[1] * 2)
		device_printf(softc->dev,
		    "WARNING: ntxd0 (%d) should be at least 2 * ntxd1 (%d).  Driver may be unstable\n",
		    scctx->isc_ntxd[0], scctx->isc_ntxd[1]);
	scctx->isc_txqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_ntxd[0];
	scctx->isc_txqsizes[1] = sizeof(struct tx_bd_short) *
	    scctx->isc_ntxd[1];
	scctx->isc_txqsizes[2] = sizeof(struct cmpl_base) * scctx->isc_ntxd[2];
	scctx->isc_rxqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_nrxd[0];
	scctx->isc_rxqsizes[1] = sizeof(struct rx_prod_pkt_bd) *
	    scctx->isc_nrxd[1];
	scctx->isc_rxqsizes[2] = sizeof(struct rx_prod_pkt_bd) *
	    scctx->isc_nrxd[2];

	/* Bound queue-set counts by MSI-X vectors and FW-allocated rings. */
	scctx->isc_nrxqsets_max = min(pci_msix_count(softc->dev)-1,
	    softc->fn_qcfg.alloc_completion_rings - 1);
	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
	    softc->fn_qcfg.alloc_rx_rings);
	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
	    softc->fn_qcfg.alloc_vnics);
	scctx->isc_ntxqsets_max = min(softc->fn_qcfg.alloc_tx_rings,
	    softc->fn_qcfg.alloc_completion_rings - scctx->isc_nrxqsets_max - 1);

	scctx->isc_rss_table_size = HW_HASH_INDEX_SIZE;
	scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;

	/* iflib will map and release this bar */
	scctx->isc_msix_bar = pci_msix_table_bar(softc->dev);

        /*
         * Default settings for HW LRO (TPA):
         *  Disable HW LRO by default
         *  Can be enabled after taking care of 'packet forwarding'
         */
	if (softc->flags & BNXT_FLAG_TPA) {
		softc->hw_lro.enable = 0;
		softc->hw_lro.is_mode_gro = 0;
		softc->hw_lro.max_agg_segs = 5; /* 2^5 = 32 segs */
		softc->hw_lro.max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX;
		softc->hw_lro.min_agg_len = 512;
	}

	/* Allocate the default completion ring */
	softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
	softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	softc->def_cp_ring.ring.softc = softc;
	softc->def_cp_ring.ring.id = 0;
	softc->def_cp_ring.ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
		softc->legacy_db_size : softc->def_cp_ring.ring.id * 0x80;
	softc->def_cp_ring.ring.ring_size = PAGE_SIZE /
	    sizeof(struct cmpl_base);
	softc->def_cp_ring.ring.db_ring_mask = softc->def_cp_ring.ring.ring_size -1 ;
	/* NOTE(review): rc from iflib_dma_alloc() is not checked before
	 * the vaddr/paddr are consumed — verify.
	 */
	rc = iflib_dma_alloc(ctx,
	    sizeof(struct cmpl_base) * softc->def_cp_ring.ring.ring_size,
	    &softc->def_cp_ring_mem, 0);
	softc->def_cp_ring.ring.vaddr = softc->def_cp_ring_mem.idi_vaddr;
	softc->def_cp_ring.ring.paddr = softc->def_cp_ring_mem.idi_paddr;
	iflib_config_task_init(ctx, &softc->def_cp_task, bnxt_def_cp_task);

	rc = bnxt_init_sysctl_ctx(softc);
	if (rc)
		goto init_sysctl_failed;
	if (BNXT_PF(softc)) {
		rc = bnxt_create_nvram_sysctls(softc->nvm_info);
		if (rc)
			goto failed;
	}

	/* Random RSS key; hash over IPv4/IPv6 TCP and UDP flows. */
	arc4rand(softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE, 0);
	softc->vnic_info.rss_hash_type =
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
	rc = bnxt_create_config_sysctls_pre(softc);
	if (rc)
		goto failed;

	rc = bnxt_create_hw_lro_sysctls(softc);
	if (rc)
		goto failed;

	rc = bnxt_create_pause_fc_sysctls(softc);
	if (rc)
		goto failed;

	rc = bnxt_create_dcb_sysctls(softc);
	if (rc)
		goto failed;

	set_bit(BNXT_STATE_OPEN, &softc->state);
	INIT_WORK(&softc->sp_task, bnxt_sp_task);
	INIT_DELAYED_WORK(&softc->fw_reset_task, bnxt_fw_reset_task);

	/* Initialize the vlan list */
	SLIST_INIT(&softc->vnic_info.vlan_tags);
	softc->vnic_info.vlan_tag_list.idi_vaddr = NULL;
	softc->state_bv = bit_alloc(BNXT_STATE_MAX, M_DEVBUF,
			M_WAITOK|M_ZERO);

	return (rc);

failed:
	bnxt_free_sysctl_ctx(softc);
init_sysctl_failed:
	bnxt_hwrm_func_drv_unrgtr(softc, false);
	if (BNXT_PF(softc))
		free(softc->nvm_info, M_DEVBUF);
nvm_alloc_fail:
	bnxt_free_hwrm_short_cmd_req(softc);
hwrm_short_cmd_alloc_fail:
ver_fail:
	free(softc->ver_info, M_DEVBUF);
ver_alloc_fail:
	bnxt_free_hwrm_dma_mem(softc);
dma_fail:
	BNXT_HWRM_LOCK_DESTROY(softc);
	if (softc->pdev)
		linux_pci_detach_device(softc->pdev);
pci_attach_fail:
	kfree(softc->pdev);
	softc->pdev = NULL;
free_pci_map:
	bnxt_pci_mapping_free(softc);
pci_map_fail:
	pci_disable_busmaster(softc->dev);
	return (rc);
}
2699 
/*
 * Second-stage attach, run by iflib after queues are set up: probe the
 * PHY, register media types, size RX buffers from the current MTU, and
 * bring up DCB and the ROCE auxiliary device.  Returns 0 on success or
 * the bnxt_probe_phy() error.  Note the success path also falls through
 * the "failed" label with rc == 0.
 */
static int
bnxt_attach_post(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	int rc;

	softc->ifp = ifp;
	bnxt_create_config_sysctls_post(softc);

	/* Update link state etc... */
	rc = bnxt_probe_phy(softc);
	if (rc)
		goto failed;

	/* Needs to be done after probing the phy */
	bnxt_create_ver_sysctls(softc);
	ifmedia_removeall(softc->media);
	bnxt_add_media_types(softc);
	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);

	/* Max frame covers the MTU plus Ethernet header and CRC. */
	softc->scctx->isc_max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN +
	    ETHER_CRC_LEN;

	/* RX buffers never exceed one device page. */
	softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
	bnxt_dcb_init(softc);
	bnxt_rdma_aux_device_init(softc);

failed:
	return rc;
}
2731 
/*
 * Device detach: undo bnxt_attach()/bnxt_attach_post() in reverse order.
 * Tears down the ROCE aux device and deferred work first, then firmware
 * state, interrupts, DMA memory, and finally the PCI mapping.  Always
 * returns 0.
 */
static int
bnxt_detach(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_vlan_tag *tag;
	struct bnxt_vlan_tag *tmp;
	int i;

	bnxt_rdma_aux_device_uninit(softc);
	/* Make sure no deferred work runs against a half-torn-down softc. */
	cancel_delayed_work_sync(&softc->fw_reset_task);
	cancel_work_sync(&softc->sp_task);
	bnxt_dcb_free(softc);
	SLIST_REMOVE(&pf_list, &softc->list, bnxt_softc_list, next);
	bnxt_num_pfs--;
	/* Program wake-on-LAN state before the function is reset. */
	bnxt_wol_config(ctx);
	bnxt_do_disable_intr(&softc->def_cp_ring);
	bnxt_free_sysctl_ctx(softc);
	bnxt_hwrm_func_reset(softc);
	bnxt_free_ctx_mem(softc);
	bnxt_clear_ids(softc);
	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
	/* We need to free() these here... */
	for (i = softc->nrxqsets-1; i>=0; i--) {
		/* P5+ chips take their per-queue IRQ on the NQ, older on the CQ. */
		if (BNXT_CHIP_P5_PLUS(softc))
			iflib_irq_free(ctx, &softc->nq_rings[i].irq);
		else
			iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);

	}
	iflib_dma_free(&softc->vnic_info.mc_list);
	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
	iflib_dma_free(&softc->vnic_info.rss_grp_tbl);
	if (softc->vnic_info.vlan_tag_list.idi_vaddr)
		iflib_dma_free(&softc->vnic_info.vlan_tag_list);
	/* Safe-walk the VLAN tag list since entries are freed as we go. */
	SLIST_FOREACH_SAFE(tag, &softc->vnic_info.vlan_tags, next, tmp)
		free(tag, M_DEVBUF);
	iflib_dma_free(&softc->def_cp_ring_mem);
	for (i = 0; i < softc->nrxqsets; i++)
		free(softc->rx_rings[i].tpa_start, M_DEVBUF);
	free(softc->ver_info, M_DEVBUF);
	/* nvm_info is only allocated for physical functions. */
	if (BNXT_PF(softc))
		free(softc->nvm_info, M_DEVBUF);

	bnxt_hwrm_func_drv_unrgtr(softc, false);
	bnxt_free_hwrm_dma_mem(softc);
	bnxt_free_hwrm_short_cmd_req(softc);
	BNXT_HWRM_LOCK_DESTROY(softc);

	/* Last PF gone: the shared PF workqueue can be destroyed. */
	if (!bnxt_num_pfs && bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);

	if (softc->pdev)
		linux_pci_detach_device(softc->pdev);
	free(softc->state_bv, M_DEVBUF);
	pci_disable_busmaster(softc->dev);
	bnxt_pci_mapping_free(softc);

	return 0;
}
2791 
/*
 * Release every firmware-side resource allocated by bnxt_init() via
 * individual HWRM free commands: default CQ, then per-TX-queue rings and
 * stat contexts, then filters/VNIC/RSS context, then per-RX-queue ring
 * groups, rings, NQs (P5+), and stat contexts.  Used on P5+ chips where
 * a whole-function reset is not issued (see bnxt_func_reset()).  Any
 * failure aborts the remaining frees.
 */
static void
bnxt_hwrm_resource_free(struct bnxt_softc *softc)
{
	int i, rc = 0;

	/* Default completion ring first; it has no stat context of its own. */
	rc = bnxt_hwrm_ring_free(softc,
			HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
			&softc->def_cp_ring.ring,
			(uint16_t)HWRM_NA_SIGNATURE);
	if (rc)
		goto fail;

	for (i = 0; i < softc->ntxqsets; i++) {
		/* TX ring is freed before its completion ring. */
		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				&softc->tx_rings[i],
				softc->tx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->tx_cp_rings[i].ring,
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->tx_cp_rings[i]);
		if (rc)
			goto fail;
	}
	/* L2 filters and the VNIC must go before the RSS context. */
	rc = bnxt_hwrm_free_filter(softc);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_free(softc, &softc->vnic_info);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_ctx_free(softc, softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	for (i = 0; i < softc->nrxqsets; i++) {
		/* Ring group references the rings, so it is freed first. */
		rc = bnxt_hwrm_ring_grp_free(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
				&softc->ag_rings[i],
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
				&softc->rx_rings[i],
				softc->rx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->rx_cp_rings[i].ring,
				(uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		/* Notification queues only exist on P5+ chips. */
		if (BNXT_CHIP_P5_PLUS(softc)) {
			rc = bnxt_hwrm_ring_free(softc,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
					&softc->nq_rings[i].ring,
					(uint16_t)HWRM_NA_SIGNATURE);
			if (rc)
				goto fail;
		}

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->rx_cp_rings[i]);
		if (rc)
			goto fail;
	}

fail:
	return;
}
2878 
2879 
/*
 * Reset the function's firmware state.  P5+ chips tear down each HWRM
 * resource individually; earlier chips issue one firmware function reset.
 */
static void
bnxt_func_reset(struct bnxt_softc *softc)
{

	if (BNXT_CHIP_P5_PLUS(softc))
		bnxt_hwrm_resource_free(softc);
	else
		bnxt_hwrm_func_reset(softc);
}
2892 
/*
 * Populate the RSS indirection table, spreading hash buckets round-robin
 * across the RX queues.  On P5+ each table slot consumes two 16-bit
 * entries (RX ring id followed by its completion ring id), so the loop
 * advances i twice per iteration in that case; on older chips each slot
 * is a single ring-group id.
 * NOTE(review): the P5+ path assumes HW_HASH_INDEX_SIZE is even, else the
 * inner rgt[i] write would land one entry past the table -- confirm.
 */
static void
bnxt_rss_grp_tbl_init(struct bnxt_softc *softc)
{
	uint16_t *rgt = (uint16_t *) softc->vnic_info.rss_grp_tbl.idi_vaddr;
	int i, j;

	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
		if (BNXT_CHIP_P5_PLUS(softc)) {
			rgt[i++] = htole16(softc->rx_rings[j].phys_id);
			rgt[i] = htole16(softc->rx_cp_rings[j].ring.phys_id);
		} else {
			rgt[i] = htole16(softc->grp_info[j].grp_id);
		}
		/* Wrap back to queue 0 once every RX queue has been used. */
		if (++j == softc->nrxqsets)
			j = 0;
	}
}
2910 
bnxt_get_port_module_status(struct bnxt_softc * softc)2911 static void bnxt_get_port_module_status(struct bnxt_softc *softc)
2912 {
2913 	struct bnxt_link_info *link_info = &softc->link_info;
2914 	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
2915 	uint8_t module_status;
2916 
2917 	if (bnxt_update_link(softc, false))
2918 		return;
2919 
2920 	module_status = link_info->module_status;
2921 	switch (module_status) {
2922 	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX:
2923 	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN:
2924 	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG:
2925 		device_printf(softc->dev, "Unqualified SFP+ module detected on port %d\n",
2926 			    softc->pf.port_id);
2927 		if (softc->hwrm_spec_code >= 0x10201) {
2928 			device_printf(softc->dev, "Module part number %s\n",
2929 				    resp->phy_vendor_partnumber);
2930 		}
2931 		if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX)
2932 			device_printf(softc->dev, "TX is disabled\n");
2933 		if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN)
2934 			device_printf(softc->dev, "SFP+ module is shutdown\n");
2935 	}
2936 }
2937 
bnxt_aux_dev_free(struct bnxt_softc * softc)2938 static void bnxt_aux_dev_free(struct bnxt_softc *softc)
2939 {
2940 	kfree(softc->aux_dev);
2941 	softc->aux_dev = NULL;
2942 }
2943 
/*
 * Allocate a zeroed ROCE auxiliary device descriptor.  Returns the
 * descriptor or ERR_PTR(-ENOMEM) on allocation failure; callers check
 * with IS_ERR_OR_NULL().  The softc argument is currently unused.
 * NOTE(review): the unconditional 2 s sleep looks like a settle delay
 * for the aux bus -- confirm it is still required.
 */
static struct bnxt_aux_dev *bnxt_aux_dev_init(struct bnxt_softc *softc)
{
	struct bnxt_aux_dev *bnxt_adev;

	msleep(1000 * 2);
	bnxt_adev = kzalloc(sizeof(*bnxt_adev), GFP_KERNEL);
	if (!bnxt_adev)
		return ERR_PTR(-ENOMEM);

	return bnxt_adev;
}
2955 
/*
 * Tear down the ROCE auxiliary device created by
 * bnxt_rdma_aux_device_init(): delete it from the aux bus, return its
 * ida id, and free the descriptor.
 */
static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc)
{
	struct bnxt_aux_dev *bnxt_adev = softc->aux_dev;

	/* Skip if no auxiliary device init was done. */
	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
		return;

	if (IS_ERR_OR_NULL(bnxt_adev))
		return;

	bnxt_rdma_aux_device_del(softc);

	/* A negative id means the ida allocation never succeeded. */
	if (bnxt_adev->id >= 0)
		ida_free(&bnxt_aux_dev_ids, bnxt_adev->id);

	bnxt_aux_dev_free(softc);
}
2974 
bnxt_rdma_aux_device_init(struct bnxt_softc * softc)2975 static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc)
2976 {
2977 	int rc;
2978 
2979 	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
2980 		return;
2981 
2982 	softc->aux_dev = bnxt_aux_dev_init(softc);
2983 	if (IS_ERR_OR_NULL(softc->aux_dev)) {
2984 		device_printf(softc->dev, "Failed to init auxiliary device for ROCE\n");
2985 		goto skip_aux_init;
2986 	}
2987 
2988 	softc->aux_dev->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
2989 	if (softc->aux_dev->id < 0) {
2990 		device_printf(softc->dev, "ida alloc failed for ROCE auxiliary device\n");
2991 		bnxt_aux_dev_free(softc);
2992 		goto skip_aux_init;
2993 	}
2994 
2995 	msleep(1000 * 2);
2996 	/* If aux bus init fails, continue with netdev init. */
2997 	rc = bnxt_rdma_aux_device_add(softc);
2998 	if (rc) {
2999 		device_printf(softc->dev, "Failed to add auxiliary device for ROCE\n");
3000 		msleep(1000 * 2);
3001 		ida_free(&bnxt_aux_dev_ids, softc->aux_dev->id);
3002 	}
3003 	device_printf(softc->dev, "%s:%d Added auxiliary device (id %d) for ROCE \n",
3004 		      __func__, __LINE__, softc->aux_dev->id);
3005 skip_aux_init:
3006 	return;
3007 }
3008 
3009 /* Device configuration */
/*
 * Bring the device up: reset (or stop) the function, then allocate all
 * firmware resources in order -- default CQ (pre-P5 only), per-RX-queue
 * stat contexts/NQs/CQs/RX/AGG rings and ring groups, the async CR, the
 * VNIC with its RSS context, filters and TPA config, and finally the
 * per-TX-queue stat contexts/CQs/TX rings.  On any failure all firmware
 * state is torn back down via bnxt_func_reset()/bnxt_clear_ids().
 */
static void
bnxt_init(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifmediareq ifmr;
	int i;
	int rc;

	if (!BNXT_CHIP_P5_PLUS(softc)) {
		rc = bnxt_hwrm_func_reset(softc);
		if (rc)
			return;
	} else if (softc->is_dev_init) {
		/* P5+ re-init path: free resources instead of a func reset. */
		bnxt_stop(ctx);
	}

	softc->is_dev_init = true;
	bnxt_clear_ids(softc);

	/* P5+ chips deliver async events on the NQ; no default CQ needed. */
	if (BNXT_CHIP_P5_PLUS(softc))
		goto skip_def_cp_ring;
	/* Allocate the default completion ring */
	softc->def_cp_ring.cons = UINT32_MAX;
	softc->def_cp_ring.v_bit = 1;
	bnxt_mark_cpr_invalid(&softc->def_cp_ring);
	rc = bnxt_hwrm_ring_alloc(softc,
			HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
			&softc->def_cp_ring.ring);
	bnxt_set_db_mask(softc, &softc->def_cp_ring.ring,
			HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
	if (rc)
		goto fail;
skip_def_cp_ring:
	for (i = 0; i < softc->nrxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->rx_cp_rings[i],
		    softc->rx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5_PLUS(softc)) {
			/* Allocate the NQ */
			softc->nq_rings[i].cons = 0;
			softc->nq_rings[i].raw_cons = 0;
			softc->nq_rings[i].v_bit = 1;
			softc->nq_rings[i].last_idx = UINT32_MAX;
			bnxt_mark_cpr_invalid(&softc->nq_rings[i]);
			rc = bnxt_hwrm_ring_alloc(softc,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
					&softc->nq_rings[i].ring);
			bnxt_set_db_mask(softc, &softc->nq_rings[i].ring,
					HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ);
			if (rc)
				goto fail;

			/* Arm the NQ doorbell so notifications start flowing. */
			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
		}
		/* Allocate the completion ring */
		softc->rx_cp_rings[i].cons = UINT32_MAX;
		softc->rx_cp_rings[i].raw_cons = UINT32_MAX;
		softc->rx_cp_rings[i].v_bit = 1;
		softc->rx_cp_rings[i].last_idx = UINT32_MAX;
		softc->rx_cp_rings[i].toggle = 0;
		bnxt_mark_cpr_invalid(&softc->rx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->rx_cp_rings[i].ring);
		bnxt_set_db_mask(softc, &softc->rx_cp_rings[i].ring,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5_PLUS(softc))
			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);

		/* Allocate the RX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->rx_rings[i]);
		bnxt_set_db_mask(softc, &softc->rx_rings[i],
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->rx_rings[i], 0);

		/* Allocate the AG ring */
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
				&softc->ag_rings[i]);
		bnxt_set_db_mask(softc, &softc->ag_rings[i],
				HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->ag_rings[i], 0);

		/* Allocate the ring group */
		softc->grp_info[i].stats_ctx =
		    softc->rx_cp_rings[i].stats_ctx_id;
		softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
		softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
		softc->grp_info[i].cp_ring_id =
		    softc->rx_cp_rings[i].ring.phys_id;
		rc = bnxt_hwrm_ring_grp_alloc(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;
	}

	/* And now set the default CP / NQ ring for the async */
	rc = bnxt_cfg_async_cr(softc);
	if (rc)
		goto fail;

	/* Allocate the VNIC RSS context */
	rc = bnxt_hwrm_vnic_ctx_alloc(softc, &softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	/* Allocate the vnic */
	softc->vnic_info.def_ring_grp = softc->grp_info[0].grp_id;
	softc->vnic_info.mru = softc->scctx->isc_max_frame_size;
	rc = bnxt_hwrm_vnic_alloc(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_set_hds(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_set_filter(softc);
	if (rc)
		goto fail;

	/* Fill the RSS indirection table before pushing the RSS config. */
	bnxt_rss_grp_tbl_init(softc);

	rc = bnxt_hwrm_rss_cfg(softc, &softc->vnic_info,
	    softc->vnic_info.rss_hash_type);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_tpa_cfg(softc);
	if (rc)
		goto fail;

	for (i = 0; i < softc->ntxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->tx_cp_rings[i],
		    softc->tx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		/* Allocate the completion ring */
		softc->tx_cp_rings[i].cons = UINT32_MAX;
		softc->tx_cp_rings[i].raw_cons = UINT32_MAX;
		softc->tx_cp_rings[i].v_bit = 1;
		softc->tx_cp_rings[i].toggle = 0;
		bnxt_mark_cpr_invalid(&softc->tx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				&softc->tx_cp_rings[i].ring);
		bnxt_set_db_mask(softc, &softc->tx_cp_rings[i].ring,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5_PLUS(softc))
			softc->db_ops.bnxt_db_tx_cq(&softc->tx_cp_rings[i], 1);

		/* Allocate the TX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
				HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				&softc->tx_rings[i]);
		bnxt_set_db_mask(softc, &softc->tx_rings[i],
				HWRM_RING_ALLOC_INPUT_RING_TYPE_TX);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_tx(&softc->tx_rings[i], 0);
	}

	bnxt_do_enable_intr(&softc->def_cp_ring);
	bnxt_get_port_module_status(softc);
	bnxt_media_status(softc->ctx, &ifmr);
	bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
	return;

fail:
	/* Unwind all firmware state allocated above. */
	bnxt_func_reset(softc);
	bnxt_clear_ids(softc);
	return;
}
3199 
3200 static void
bnxt_stop(if_ctx_t ctx)3201 bnxt_stop(if_ctx_t ctx)
3202 {
3203 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3204 
3205 	softc->is_dev_init = false;
3206 	bnxt_do_disable_intr(&softc->def_cp_ring);
3207 	bnxt_func_reset(softc);
3208 	bnxt_clear_ids(softc);
3209 	return;
3210 }
3211 
3212 static u_int
bnxt_copy_maddr(void * arg,struct sockaddr_dl * sdl,u_int cnt)3213 bnxt_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
3214 {
3215 	uint8_t *mta = arg;
3216 
3217 	if (cnt == BNXT_MAX_MC_ADDRS)
3218 		return (1);
3219 
3220 	bcopy(LLADDR(sdl), &mta[cnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
3221 
3222 	return (1);
3223 }
3224 
/*
 * Reprogram the HW multicast filter from the interface's multicast list.
 * When the list exceeds BNXT_MAX_MC_ADDRS the device is switched to
 * receive-all-multicast instead of an exact filter.
 */
static void
bnxt_multi_set(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	uint8_t *mta;
	int mcnt;

	/* Gather up to BNXT_MAX_MC_ADDRS addresses into the DMA-able table. */
	mta = softc->vnic_info.mc_list.idi_vaddr;
	bzero(mta, softc->vnic_info.mc_list.idi_size);
	mcnt = if_foreach_llmaddr(ifp, bnxt_copy_maddr, mta);

	if (mcnt > BNXT_MAX_MC_ADDRS) {
		softc->vnic_info.rx_mask |=
		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
		bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
	} else {
		softc->vnic_info.rx_mask &=
		    ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
		/* Flush the list to the device before the HWRM command. */
		bus_dmamap_sync(softc->vnic_info.mc_list.idi_tag,
		    softc->vnic_info.mc_list.idi_map, BUS_DMASYNC_PREWRITE);
		softc->vnic_info.mc_list_count = mcnt;
		softc->vnic_info.rx_mask |=
		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		if (bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info))
			device_printf(softc->dev,
			    "set_multi: rx_mask set failed\n");
	}
}
3254 
3255 static int
bnxt_mtu_set(if_ctx_t ctx,uint32_t mtu)3256 bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu)
3257 {
3258 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3259 
3260 	if (mtu > BNXT_MAX_MTU)
3261 		return EINVAL;
3262 
3263 	softc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3264 	softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
3265 	return 0;
3266 }
3267 
/*
 * iflib media-status hook: refresh link state from firmware and fill in
 * ifmr with link validity/activity, duplex, the active media word
 * (matched by baudrate against the registered media list), and pause
 * flags.  Also logs a link-state change via bnxt_report_link().
 */
static void
bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_link_info *link_info = &softc->link_info;
	struct ifmedia_entry *next;
	uint64_t target_baudrate = bnxt_get_baudrate(link_info);
	int active_media = IFM_UNKNOWN;

	bnxt_update_link(softc, true);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (link_info->link_up)
		ifmr->ifm_status |= IFM_ACTIVE;
	else
		ifmr->ifm_status &= ~IFM_ACTIVE;

	if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

        /*
         * Go through the list of supported media which got prepared
         * as part of bnxt_add_media_types() using api ifmedia_add().
         * The first entry whose baudrate matches the current link
         * speed is reported as the active media.
         */
	LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
		if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
			active_media = next->ifm_media;
			break;
		}
	}
	ifmr->ifm_active |= active_media;

	if (link_info->flow_ctrl.rx)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (link_info->flow_ctrl.tx)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	bnxt_report_link(softc);
	return;
}
3312 
3313 static int
bnxt_media_change(if_ctx_t ctx)3314 bnxt_media_change(if_ctx_t ctx)
3315 {
3316 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3317 	struct ifmedia *ifm = iflib_get_media(ctx);
3318 	struct ifmediareq ifmr;
3319 	int rc;
3320 	struct bnxt_link_info *link_info = &softc->link_info;
3321 
3322 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3323 		return EINVAL;
3324 
3325 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
3326 	case IFM_100_T:
3327 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3328 		link_info->req_link_speed =
3329 		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB;
3330 		break;
3331 	case IFM_1000_KX:
3332 	case IFM_1000_SGMII:
3333 	case IFM_1000_CX:
3334 	case IFM_1000_SX:
3335 	case IFM_1000_LX:
3336 
3337 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3338 
3339 		if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) {
3340 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB;
3341 
3342 		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_1GB) {
3343 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_1GB;
3344 			link_info->force_speed2_nrz = true;
3345 		}
3346 
3347 		break;
3348 
3349 	case IFM_2500_KX:
3350 	case IFM_2500_T:
3351 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3352 		link_info->req_link_speed =
3353 		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB;
3354 		break;
3355 	case IFM_10G_CR1:
3356 	case IFM_10G_KR:
3357 	case IFM_10G_LR:
3358 	case IFM_10G_SR:
3359 
3360 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3361 
3362 		if (link_info->support_speeds & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB) {
3363 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
3364 
3365 		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_10GB) {
3366 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_10GB;
3367 			link_info->force_speed2_nrz = true;
3368 		}
3369 
3370 		break;
3371 	case IFM_20G_KR2:
3372 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3373 		link_info->req_link_speed =
3374 		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB;
3375 		break;
3376 	case IFM_25G_CR:
3377 	case IFM_25G_KR:
3378 	case IFM_25G_SR:
3379 	case IFM_25G_LR:
3380 
3381 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3382 
3383 		if (link_info->support_speeds & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB) {
3384 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;
3385 
3386 		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_25GB) {
3387 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_25GB;
3388 			link_info->force_speed2_nrz = true;
3389 		}
3390 
3391 		break;
3392 
3393 	case IFM_40G_CR4:
3394 	case IFM_40G_KR4:
3395 	case IFM_40G_LR4:
3396 	case IFM_40G_SR4:
3397 	case IFM_40G_XLAUI:
3398 	case IFM_40G_XLAUI_AC:
3399 
3400 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3401 
3402 		if (link_info->support_speeds & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB) {
3403 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
3404 
3405 		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_40GB) {
3406 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_40GB;
3407 			link_info->force_speed2_nrz = true;
3408 		}
3409 
3410 		break;
3411 
3412 	case IFM_50G_CR2:
3413 	case IFM_50G_KR2:
3414 	case IFM_50G_KR4:
3415 	case IFM_50G_SR2:
3416 	case IFM_50G_LR2:
3417 
3418 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3419 
3420 		if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) {
3421 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
3422 
3423 		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB) {
3424 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB;
3425 			link_info->force_speed2_nrz = true;
3426 		}
3427 
3428 		break;
3429 
3430 	case IFM_50G_CP:
3431 	case IFM_50G_LR:
3432 	case IFM_50G_SR:
3433 	case IFM_50G_KR_PAM4:
3434 
3435 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3436 
3437 		if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) {
3438 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB;
3439 			link_info->force_pam4_speed = true;
3440 
3441 		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB_PAM4_56) {
3442 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB_PAM4_56;
3443 			link_info->force_pam4_56_speed2 = true;
3444 		}
3445 
3446 		break;
3447 
3448 	case IFM_100G_CR4:
3449 	case IFM_100G_KR4:
3450 	case IFM_100G_LR4:
3451 	case IFM_100G_SR4:
3452 	case IFM_100G_AUI4:
3453 
3454 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3455 
3456 		if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) {
3457 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
3458 
3459 		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB) {
3460 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB;
3461 			link_info->force_speed2_nrz = true;
3462 		}
3463 
3464 		break;
3465 
3466 	case IFM_100G_CP2:
3467 	case IFM_100G_SR2:
3468 	case IFM_100G_KR2_PAM4:
3469 	case IFM_100G_AUI2:
3470 
3471 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3472 
3473 		if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) {
3474 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB;
3475 			link_info->force_pam4_speed = true;
3476 
3477 		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_56) {
3478 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_56;
3479 			link_info->force_pam4_56_speed2 = true;
3480 		}
3481 
3482 		break;
3483 
3484 	case IFM_100G_KR_PAM4:
3485 	case IFM_100G_CR_PAM4:
3486 	case IFM_100G_DR:
3487 	case IFM_100G_AUI2_AC:
3488 
3489 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3490 
3491 		if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_112) {
3492 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_112;
3493 			link_info->force_pam4_112_speed2 = true;
3494 		}
3495 
3496 		break;
3497 
3498 	case IFM_200G_SR4:
3499 	case IFM_200G_FR4:
3500 	case IFM_200G_LR4:
3501 	case IFM_200G_DR4:
3502 	case IFM_200G_CR4_PAM4:
3503 	case IFM_200G_KR4_PAM4:
3504 
3505 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3506 
3507 		if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) {
3508 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
3509 			link_info->force_pam4_speed = true;
3510 
3511 		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_56) {
3512 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_56;
3513 			link_info->force_pam4_56_speed2 = true;
3514 		}
3515 
3516 		break;
3517 
3518 	case IFM_200G_AUI4:
3519 
3520 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3521 
3522 		if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_112) {
3523 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_112;
3524 			link_info->force_pam4_112_speed2 = true;
3525 		}
3526 
3527 		break;
3528 
3529 	case IFM_400G_FR8:
3530 	case IFM_400G_LR8:
3531 	case IFM_400G_AUI8:
3532 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3533 
3534 		if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_56) {
3535 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_56;
3536 			link_info->force_pam4_56_speed2 = true;
3537 		}
3538 
3539 		break;
3540 
3541 	case IFM_400G_AUI8_AC:
3542 	case IFM_400G_DR4:
3543 		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3544 
3545 		if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_112) {
3546 			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112;
3547 			link_info->force_pam4_112_speed2 = true;
3548 		}
3549 
3550 		break;
3551 
3552 	case IFM_1000_T:
3553 		link_info->advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
3554 		link_info->autoneg |= BNXT_AUTONEG_SPEED;
3555 		break;
3556 	case IFM_10G_T:
3557 		link_info->advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
3558 		link_info->autoneg |= BNXT_AUTONEG_SPEED;
3559 		break;
3560 	default:
3561 		device_printf(softc->dev,
3562 		    "Unsupported media type!  Using auto\n");
3563 		/* Fall-through */
3564 	case IFM_AUTO:
3565 		// Auto
3566 		link_info->autoneg |= BNXT_AUTONEG_SPEED;
3567 		break;
3568 	}
3569 
3570 	rc = bnxt_hwrm_set_link_setting(softc, true, true, true);
3571 	bnxt_media_status(softc->ctx, &ifmr);
3572 	return rc;
3573 }
3574 
/*
 * iflib promiscuous-mode hook: reflect IFF_PROMISC/IFF_ALLMULTI (and a
 * multicast list that overflows the HW filter) into the VNIC RX mask and
 * push it to firmware.  Returns the HWRM command status.
 */
static int
bnxt_promisc_set(if_ctx_t ctx, int flags)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	int rc;

	/* Too many groups for the HW filter also forces all-multicast. */
	if (if_getflags(ifp) & IFF_ALLMULTI ||
	    if_llmaddr_count(ifp) > BNXT_MAX_MC_ADDRS)
		softc->vnic_info.rx_mask |=
		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	else
		softc->vnic_info.rx_mask &=
		    ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;

	if (if_getflags(ifp) & IFF_PROMISC)
		softc->vnic_info.rx_mask |=
		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS |
		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
	else
		/*
		 * NOTE(review): only PROMISCUOUS is cleared here; the
		 * ANYVLAN_NONVLAN bit set above stays set after promiscuous
		 * mode is turned off -- confirm this is intentional.
		 */
		softc->vnic_info.rx_mask &=
		    ~(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS);

	rc = bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);

	return rc;
}
3602 
3603 static uint64_t
bnxt_get_counter(if_ctx_t ctx,ift_counter cnt)3604 bnxt_get_counter(if_ctx_t ctx, ift_counter cnt)
3605 {
3606 	if_t ifp = iflib_get_ifp(ctx);
3607 
3608 	if (cnt < IFCOUNTERS)
3609 		return if_get_counter_default(ifp, cnt);
3610 
3611 	return 0;
3612 }
3613 
3614 static void
bnxt_update_admin_status(if_ctx_t ctx)3615 bnxt_update_admin_status(if_ctx_t ctx)
3616 {
3617 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3618 
3619 	/*
3620 	 * When SR-IOV is enabled, avoid each VF sending this HWRM
3621 	 * request every sec with which firmware timeouts can happen
3622 	 */
3623 	if (!BNXT_PF(softc))
3624 		return;
3625 
3626 	bnxt_hwrm_port_qstats(softc);
3627 
3628 	if (BNXT_CHIP_P5_PLUS(softc) &&
3629 	    (softc->flags & BNXT_FLAG_FW_CAP_EXT_STATS))
3630 		bnxt_hwrm_port_qstats_ext(softc);
3631 
3632 	if (BNXT_CHIP_P5_PLUS(softc)) {
3633 		struct ifmediareq ifmr;
3634 
3635 		if (bit_test(softc->state_bv, BNXT_STATE_LINK_CHANGE)) {
3636 			bit_clear(softc->state_bv, BNXT_STATE_LINK_CHANGE);
3637 			bnxt_media_status(softc->ctx, &ifmr);
3638 		}
3639 	}
3640 
3641 	return;
3642 }
3643 
3644 static void
bnxt_if_timer(if_ctx_t ctx,uint16_t qid)3645 bnxt_if_timer(if_ctx_t ctx, uint16_t qid)
3646 {
3647 
3648 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3649 	uint64_t ticks_now = ticks;
3650 
3651         /* Schedule bnxt_update_admin_status() once per sec */
3652 	if (ticks_now - softc->admin_ticks >= hz) {
3653 		softc->admin_ticks = ticks_now;
3654 		iflib_admin_intr_deferred(ctx);
3655 	}
3656 
3657 	return;
3658 }
3659 
3660 static void inline
bnxt_do_enable_intr(struct bnxt_cp_ring * cpr)3661 bnxt_do_enable_intr(struct bnxt_cp_ring *cpr)
3662 {
3663 	struct bnxt_softc *softc = cpr->ring.softc;
3664 
3665 
3666 	if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
3667 		return;
3668 
3669 	if (BNXT_CHIP_P5_PLUS(softc))
3670 		softc->db_ops.bnxt_db_nq(cpr, 1);
3671 	else
3672 		softc->db_ops.bnxt_db_rx_cq(cpr, 1);
3673 }
3674 
3675 static void inline
bnxt_do_disable_intr(struct bnxt_cp_ring * cpr)3676 bnxt_do_disable_intr(struct bnxt_cp_ring *cpr)
3677 {
3678 	struct bnxt_softc *softc = cpr->ring.softc;
3679 
3680 	if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
3681 		return;
3682 
3683 	if (BNXT_CHIP_P5_PLUS(softc))
3684 		softc->db_ops.bnxt_db_nq(cpr, 0);
3685 	else
3686 		softc->db_ops.bnxt_db_rx_cq(cpr, 0);
3687 }
3688 
3689 /* Enable all interrupts */
3690 static void
bnxt_intr_enable(if_ctx_t ctx)3691 bnxt_intr_enable(if_ctx_t ctx)
3692 {
3693 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3694 	int i;
3695 
3696 	bnxt_do_enable_intr(&softc->def_cp_ring);
3697 	for (i = 0; i < softc->nrxqsets; i++)
3698 		if (BNXT_CHIP_P5_PLUS(softc))
3699 			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
3700 		else
3701 			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);
3702 
3703 	return;
3704 }
3705 
3706 /* Enable interrupt for a single queue */
3707 static int
bnxt_tx_queue_intr_enable(if_ctx_t ctx,uint16_t qid)3708 bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
3709 {
3710 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3711 
3712 	if (BNXT_CHIP_P5_PLUS(softc))
3713 		softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
3714 	else
3715 		softc->db_ops.bnxt_db_rx_cq(&softc->tx_cp_rings[qid], 1);
3716 
3717 	return 0;
3718 }
3719 
3720 static void
bnxt_process_cmd_cmpl(struct bnxt_softc * softc,hwrm_cmpl_t * cmd_cmpl)3721 bnxt_process_cmd_cmpl(struct bnxt_softc *softc, hwrm_cmpl_t *cmd_cmpl)
3722 {
3723 	device_printf(softc->dev, "cmd sequence number %d\n",
3724 			cmd_cmpl->sequence_id);
3725 	return;
3726 }
3727 
3728 static void
bnxt_process_async_msg(struct bnxt_cp_ring * cpr,tx_cmpl_t * cmpl)3729 bnxt_process_async_msg(struct bnxt_cp_ring *cpr, tx_cmpl_t *cmpl)
3730 {
3731 	struct bnxt_softc *softc = cpr->ring.softc;
3732 	uint16_t type = cmpl->flags_type & TX_CMPL_TYPE_MASK;
3733 
3734 	switch (type) {
3735 	case HWRM_CMPL_TYPE_HWRM_DONE:
3736 		bnxt_process_cmd_cmpl(softc, (hwrm_cmpl_t *)cmpl);
3737 		break;
3738 	case HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT:
3739 		bnxt_handle_async_event(softc, (cmpl_base_t *) cmpl);
3740 		break;
3741 	default:
3742 		device_printf(softc->dev, "%s:%d Unhandled async message %x\n",
3743 				__FUNCTION__, __LINE__, type);
3744 		break;
3745 	}
3746 }
3747 
/*
 * Drain the notification queue (NQ) for queue set 'nqid'.
 *
 * Walks valid NQ entries starting at the cached consumer index.  CQ
 * notification entries update the TX/RX completion-ring toggle state;
 * anything else is dispatched as an async/HWRM message.  The cached
 * consumer index, raw consumer count, and valid-phase bit are written
 * back only if at least one entry was consumed.  The NQ doorbell is
 * NOT rung here; callers re-arm it themselves.
 */
void
process_nq(struct bnxt_softc *softc, uint16_t nqid)
{
	struct bnxt_cp_ring *cpr = &softc->nq_rings[nqid];
	nq_cn_t *cmp = (nq_cn_t *) cpr->ring.vaddr;
	struct bnxt_cp_ring *tx_cpr = &softc->tx_cp_rings[nqid];
	struct bnxt_cp_ring *rx_cpr = &softc->rx_cp_rings[nqid];
	/* Work on local copies; commit to cpr only if entries were consumed. */
	bool v_bit = cpr->v_bit;
	uint32_t cons = cpr->cons;
	uint32_t raw_cons = cpr->raw_cons;
	uint16_t nq_type, nqe_cnt = 0;

	while (1) {
		/* Stop at the first entry whose phase bit doesn't match. */
		if (!NQ_VALID(&cmp[cons], v_bit)) {
			goto done;
		}

		nq_type = NQ_CN_TYPE_MASK & cmp[cons].type;

		if (NQE_CN_TYPE(nq_type) != NQ_CN_TYPE_CQ_NOTIFICATION) {
			 bnxt_process_async_msg(cpr, (tx_cmpl_t *)&cmp[cons]);
		} else {
			/* CQ notification: record the new toggle for both
			 * the TX and RX completion rings of this queue set. */
			tx_cpr->toggle = NQE_CN_TOGGLE(cmp[cons].type);
			rx_cpr->toggle = NQE_CN_TOGGLE(cmp[cons].type);
		}

		/* Advance consumer; flips v_bit on ring wrap. */
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		raw_cons++;
		nqe_cnt++;
	}
done:
	if (nqe_cnt) {
		cpr->cons = cons;
		cpr->raw_cons = raw_cons;
		cpr->v_bit = v_bit;
	}
}
3785 
3786 static int
bnxt_rx_queue_intr_enable(if_ctx_t ctx,uint16_t qid)3787 bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
3788 {
3789 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3790 
3791 	if (BNXT_CHIP_P5_PLUS(softc)) {
3792 		process_nq(softc, qid);
3793 		softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
3794 	}
3795 	softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[qid], 1);
3796         return 0;
3797 }
3798 
3799 /* Disable all interrupts */
3800 static void
bnxt_disable_intr(if_ctx_t ctx)3801 bnxt_disable_intr(if_ctx_t ctx)
3802 {
3803 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3804 	int i;
3805 
3806 	/*
3807 	 * NOTE: These TX interrupts should never get enabled, so don't
3808 	 * update the index
3809 	 */
3810 	for (i = 0; i < softc->nrxqsets; i++)
3811 		if (BNXT_CHIP_P5_PLUS(softc))
3812 			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 0);
3813 		else
3814 			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 0);
3815 
3816 
3817 	return;
3818 }
3819 
3820 static int
bnxt_msix_intr_assign(if_ctx_t ctx,int msix)3821 bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
3822 {
3823 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3824 	struct bnxt_cp_ring *ring;
3825 	struct if_irq *irq;
3826 	uint16_t id;
3827 	int rc;
3828 	int i;
3829 	char irq_name[16];
3830 
3831 	if (BNXT_CHIP_P5_PLUS(softc))
3832 		goto skip_default_cp;
3833 
3834 	rc = iflib_irq_alloc_generic(ctx, &softc->def_cp_ring.irq,
3835 	    softc->def_cp_ring.ring.id + 1, IFLIB_INTR_ADMIN,
3836 	    bnxt_handle_def_cp, softc, 0, "def_cp");
3837 	if (rc) {
3838 		device_printf(iflib_get_dev(ctx),
3839 		    "Failed to register default completion ring handler\n");
3840 		return rc;
3841 	}
3842 
3843 skip_default_cp:
3844 	for (i=0; i<softc->scctx->isc_nrxqsets; i++) {
3845 		if (BNXT_CHIP_P5_PLUS(softc)) {
3846 			irq = &softc->nq_rings[i].irq;
3847 			id = softc->nq_rings[i].ring.id;
3848 			ring = &softc->nq_rings[i];
3849 		} else {
3850 			irq = &softc->rx_cp_rings[i].irq;
3851 			id = softc->rx_cp_rings[i].ring.id ;
3852 			ring = &softc->rx_cp_rings[i];
3853 		}
3854 		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
3855 		rc = iflib_irq_alloc_generic(ctx, irq, id + 1, IFLIB_INTR_RX,
3856 				bnxt_handle_isr, ring, i, irq_name);
3857 		if (rc) {
3858 			device_printf(iflib_get_dev(ctx),
3859 			    "Failed to register RX completion ring handler\n");
3860 			i--;
3861 			goto fail;
3862 		}
3863 	}
3864 
3865 	for (i=0; i<softc->scctx->isc_ntxqsets; i++)
3866 		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, "tx_cp");
3867 
3868 	return rc;
3869 
3870 fail:
3871 	for (; i>=0; i--)
3872 		iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
3873 	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
3874 	return rc;
3875 }
3876 
3877 /*
3878  * We're explicitly allowing duplicates here.  They will need to be
3879  * removed as many times as they are added.
3880  */
3881 static void
bnxt_vlan_register(if_ctx_t ctx,uint16_t vtag)3882 bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag)
3883 {
3884 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3885 	struct bnxt_vlan_tag *new_tag;
3886 
3887 	new_tag = malloc(sizeof(struct bnxt_vlan_tag), M_DEVBUF, M_NOWAIT);
3888 	if (new_tag == NULL)
3889 		return;
3890 	new_tag->tag = vtag;
3891 	new_tag->filter_id = -1;
3892 	SLIST_INSERT_HEAD(&softc->vnic_info.vlan_tags, new_tag, next);
3893 };
3894 
3895 static void
bnxt_vlan_unregister(if_ctx_t ctx,uint16_t vtag)3896 bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
3897 {
3898 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3899 	struct bnxt_vlan_tag *vlan_tag;
3900 
3901 	SLIST_FOREACH(vlan_tag, &softc->vnic_info.vlan_tags, next) {
3902 		if (vlan_tag->tag == vtag) {
3903 			SLIST_REMOVE(&softc->vnic_info.vlan_tags, vlan_tag,
3904 			    bnxt_vlan_tag, next);
3905 			free(vlan_tag, M_DEVBUF);
3906 			break;
3907 		}
3908 	}
3909 }
3910 
3911 static int
bnxt_wol_config(if_ctx_t ctx)3912 bnxt_wol_config(if_ctx_t ctx)
3913 {
3914 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3915 	if_t ifp = iflib_get_ifp(ctx);
3916 
3917 	if (!softc)
3918 		return -EBUSY;
3919 
3920 	if (!bnxt_wol_supported(softc))
3921 		return -ENOTSUP;
3922 
3923 	if (if_getcapenable(ifp) & IFCAP_WOL_MAGIC) {
3924 		if (!softc->wol) {
3925 			if (bnxt_hwrm_alloc_wol_fltr(softc))
3926 				return -EBUSY;
3927 			softc->wol = 1;
3928 		}
3929 	} else {
3930 		if (softc->wol) {
3931 			if (bnxt_hwrm_free_wol_fltr(softc))
3932 				return -EBUSY;
3933 			softc->wol = 0;
3934 		}
3935 	}
3936 
3937 	return 0;
3938 }
3939 
3940 static bool
bnxt_if_needs_restart(if_ctx_t ctx __unused,enum iflib_restart_event event)3941 bnxt_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
3942 {
3943 	switch (event) {
3944 	case IFLIB_RESTART_VLAN_CONFIG:
3945 	default:
3946 		return (false);
3947 	}
3948 }
3949 
3950 static int
bnxt_shutdown(if_ctx_t ctx)3951 bnxt_shutdown(if_ctx_t ctx)
3952 {
3953 	bnxt_wol_config(ctx);
3954 	return 0;
3955 }
3956 
3957 static int
bnxt_suspend(if_ctx_t ctx)3958 bnxt_suspend(if_ctx_t ctx)
3959 {
3960 	bnxt_wol_config(ctx);
3961 	return 0;
3962 }
3963 
3964 static int
bnxt_resume(if_ctx_t ctx)3965 bnxt_resume(if_ctx_t ctx)
3966 {
3967 	struct bnxt_softc *softc = iflib_get_softc(ctx);
3968 
3969 	bnxt_get_wol_settings(softc);
3970 	return 0;
3971 }
3972 
/*
 * Handle driver-private ioctls (SIOCGPRIVATE_0): NVM directory access,
 * NVM read/write/erase/modify, firmware reset/status, firmware update
 * install/verify, and firmware clock get/set.  The ioctl payload is a
 * struct bnxt_ioctl_header-led union copied in from userspace; on
 * completion either the whole request (with results) or just the rc
 * field is copied back out.  Requires PRIV_DRIVER privilege.
 *
 * NOTE(review): iol is only bounded above (<= sizeof(iod_storage));
 * there is no check that iol covers the header or the union member the
 * request type implies, so a short request reads uninitialized bytes
 * of iod_storage — confirm whether the consumers guarantee the length.
 */
static int
bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifreq *ifr = (struct ifreq *)data;
	struct bnxt_ioctl_header *ioh;
	size_t iol;
	int rc = ENOTSUP;
	struct bnxt_ioctl_data iod_storage, *iod = &iod_storage;

	switch (command) {
	case SIOCGPRIVATE_0:
		/* Privileged operation: reject unprivileged callers. */
		if ((rc = priv_check(curthread, PRIV_DRIVER)) != 0)
			goto exit;

		ioh = ifr_buffer_get_buffer(ifr);
		iol = ifr_buffer_get_length(ifr);
		if (iol > sizeof(iod_storage))
			return (EINVAL);

		if ((rc = copyin(ioh, iod, iol)) != 0)
			goto exit;

		switch (iod->hdr.type) {
		case BNXT_HWRM_NVM_FIND_DIR_ENTRY:
		{
			struct bnxt_ioctl_hwrm_nvm_find_dir_entry *find =
			    &iod->find;

			rc = bnxt_hwrm_nvm_find_dir_entry(softc, find->type,
			    &find->ordinal, find->ext, &find->index,
			    find->use_index, find->search_opt,
			    &find->data_length, &find->item_length,
			    &find->fw_ver);
			/* On failure only the rc field is copied back;
			 * on success the whole request with results is. */
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_READ:
		{
			struct bnxt_ioctl_hwrm_nvm_read *rd = &iod->read;
			struct iflib_dma_info dma_data;
			size_t offset;
			size_t remain;
			size_t csize;

			/*
			 * Some HWRM versions can't read more than 0x8000 bytes
			 */
			rc = iflib_dma_alloc(softc->ctx,
			    min(rd->length, 0x8000), &dma_data, BUS_DMA_NOWAIT);
			if (rc)
				break;
			/* Read in <= 0x8000-byte chunks, copying each chunk
			 * directly out to the caller's buffer. */
			for (remain = rd->length, offset = 0;
			    remain && offset < rd->length; offset += 0x8000) {
				csize = min(remain, 0x8000);
				rc = bnxt_hwrm_nvm_read(softc, rd->index,
				    rd->offset + offset, csize, &dma_data);
				if (rc) {
					iod->hdr.rc = rc;
					rc = copyout(&iod->hdr.rc, &ioh->rc,
					    sizeof(ioh->rc));
					break;
				} else {
					rc = copyout(dma_data.idi_vaddr,
					    rd->data + offset, csize);
					iod->hdr.rc = rc;
				}
				remain -= csize;
			}
			if (rc == 0)
				rc = copyout(iod, ioh, iol);

			iflib_dma_free(&dma_data);
			goto exit;
		}
		case BNXT_HWRM_FW_RESET:
		{
			struct bnxt_ioctl_hwrm_fw_reset *rst =
			    &iod->reset;

			rc = bnxt_hwrm_fw_reset(softc, rst->processor,
			    &rst->selfreset);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_FW_QSTATUS:
		{
			struct bnxt_ioctl_hwrm_fw_qstatus *qstat =
			    &iod->status;

			rc = bnxt_hwrm_fw_qstatus(softc, qstat->processor,
			    &qstat->selfreset);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_WRITE:
		{
			struct bnxt_ioctl_hwrm_nvm_write *wr =
			    &iod->write;

			rc = bnxt_hwrm_nvm_write(softc, wr->data, true,
			    wr->type, wr->ordinal, wr->ext, wr->attr,
			    wr->option, wr->data_length, wr->keep,
			    &wr->item_length, &wr->index);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			}
			else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_ERASE_DIR_ENTRY:
		{
			struct bnxt_ioctl_hwrm_nvm_erase_dir_entry *erase =
			    &iod->erase;

			rc = bnxt_hwrm_nvm_erase_dir_entry(softc, erase->index);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_GET_DIR_INFO:
		{
			struct bnxt_ioctl_hwrm_nvm_get_dir_info *info =
			    &iod->dir_info;

			rc = bnxt_hwrm_nvm_get_dir_info(softc, &info->entries,
			    &info->entry_length);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_GET_DIR_ENTRIES:
		{
			struct bnxt_ioctl_hwrm_nvm_get_dir_entries *get =
			    &iod->dir_entries;
			struct iflib_dma_info dma_data;

			/* Directory entries are DMAed from firmware, then
			 * copied out to the caller-supplied buffer. */
			rc = iflib_dma_alloc(softc->ctx, get->max_size,
			    &dma_data, BUS_DMA_NOWAIT);
			if (rc)
				break;
			rc = bnxt_hwrm_nvm_get_dir_entries(softc, &get->entries,
			    &get->entry_length, &dma_data);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				rc = copyout(dma_data.idi_vaddr, get->data,
				    get->entry_length * get->entries);
				iod->hdr.rc = rc;
				if (rc == 0)
					rc = copyout(iod, ioh, iol);
			}
			iflib_dma_free(&dma_data);

			goto exit;
		}
		case BNXT_HWRM_NVM_VERIFY_UPDATE:
		{
			struct bnxt_ioctl_hwrm_nvm_verify_update *vrfy =
			    &iod->verify;

			rc = bnxt_hwrm_nvm_verify_update(softc, vrfy->type,
			    vrfy->ordinal, vrfy->ext);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_INSTALL_UPDATE:
		{
			struct bnxt_ioctl_hwrm_nvm_install_update *inst =
			    &iod->install;

			rc = bnxt_hwrm_nvm_install_update(softc,
			    inst->install_type, &inst->installed_items,
			    &inst->result, &inst->problem_item,
			    &inst->reset_required);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_MODIFY:
		{
			struct bnxt_ioctl_hwrm_nvm_modify *mod = &iod->modify;

			rc = bnxt_hwrm_nvm_modify(softc, mod->index,
			    mod->offset, mod->data, true, mod->length);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_FW_GET_TIME:
		{
			struct bnxt_ioctl_hwrm_fw_get_time *gtm =
			    &iod->get_time;

			rc = bnxt_hwrm_fw_get_time(softc, &gtm->year,
			    &gtm->month, &gtm->day, &gtm->hour, &gtm->minute,
			    &gtm->second, &gtm->millisecond, &gtm->zone);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_FW_SET_TIME:
		{
			struct bnxt_ioctl_hwrm_fw_set_time *stm =
			    &iod->set_time;

			rc = bnxt_hwrm_fw_set_time(softc, stm->year,
			    stm->month, stm->day, stm->hour, stm->minute,
			    stm->second, stm->millisecond, stm->zone);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		}
		break;
	}

exit:
	return rc;
}
4275 
4276 static int
bnxt_i2c_req(if_ctx_t ctx,struct ifi2creq * i2c)4277 bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c)
4278 {
4279 	struct bnxt_softc *softc = iflib_get_softc(ctx);
4280 	uint8_t *data = i2c->data;
4281 	int rc;
4282 
4283 	/* No point in going further if phy status indicates
4284 	 * module is not inserted or if it is powered down or
4285 	 * if it is of type 10GBase-T
4286 	 */
4287 	if (softc->link_info.module_status >
4288 		HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG)
4289 		return -EOPNOTSUPP;
4290 
4291 	/* This feature is not supported in older firmware versions */
4292 	if (!BNXT_CHIP_P5_PLUS(softc) ||
4293 	    (softc->hwrm_spec_code < 0x10202))
4294 		return -EOPNOTSUPP;
4295 
4296 
4297 	rc = bnxt_read_sfp_module_eeprom_info(softc, I2C_DEV_ADDR_A0, 0, 0, 0,
4298 		i2c->offset, i2c->len, data);
4299 
4300 	return rc;
4301 }
4302 
4303 /*
4304  * Support functions
4305  */
4306 static int
bnxt_probe_phy(struct bnxt_softc * softc)4307 bnxt_probe_phy(struct bnxt_softc *softc)
4308 {
4309 	struct bnxt_link_info *link_info = &softc->link_info;
4310 	int rc = 0;
4311 
4312 	softc->phy_flags = 0;
4313 	rc = bnxt_hwrm_phy_qcaps(softc);
4314 	if (rc) {
4315 		device_printf(softc->dev,
4316 			      "Probe phy can't get phy capabilities (rc: %x)\n", rc);
4317 		return rc;
4318 	}
4319 
4320 	rc = bnxt_update_link(softc, false);
4321 	if (rc) {
4322 		device_printf(softc->dev,
4323 		    "Probe phy can't update link (rc: %x)\n", rc);
4324 		return (rc);
4325 	}
4326 
4327 	bnxt_get_port_module_status(softc);
4328 
4329 	/*initialize the ethool setting copy with NVM settings */
4330 	if (link_info->auto_mode != HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
4331 		link_info->autoneg |= BNXT_AUTONEG_SPEED;
4332 
4333 	link_info->req_duplex = link_info->duplex_setting;
4334 
4335 	/* NRZ link speed */
4336 	if (link_info->autoneg & BNXT_AUTONEG_SPEED)
4337 		link_info->req_link_speed = link_info->auto_link_speeds;
4338 	else
4339 		link_info->req_link_speed = link_info->force_link_speed;
4340 
4341 	/* PAM4 link speed */
4342 	if (link_info->auto_pam4_link_speeds)
4343 		link_info->req_link_speed = link_info->auto_pam4_link_speeds;
4344 	if (link_info->force_pam4_link_speed)
4345 		link_info->req_link_speed = link_info->force_pam4_link_speed;
4346 
4347 	return (rc);
4348 }
4349 
4350 static void
add_media(struct bnxt_softc * softc,u8 media_type,u16 supported_NRZ_speeds,u16 supported_pam4_speeds,u16 supported_speeds2)4351 add_media(struct bnxt_softc *softc, u8 media_type, u16 supported_NRZ_speeds,
4352 	  u16 supported_pam4_speeds, u16 supported_speeds2)
4353 {
4354 
4355 	switch (media_type) {
4356 		case BNXT_MEDIA_CR:
4357 
4358 			BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_CP);
4359 			BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_CP2);
4360 			BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_CR4_PAM4);
4361 
4362 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_CR4);
4363 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_CR2);
4364 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_CR4);
4365 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_CR);
4366 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_CR1);
4367 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_CX);
4368 			/* thor2 nrz*/
4369 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_CR4);
4370 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_CR2);
4371 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_CR4);
4372 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_CR);
4373 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_CR1);
4374 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_CX);
4375 			/* thor2 PAM56 */
4376 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_CP);
4377 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_CP2);
4378 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_CR4_PAM4);
4379 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_AUI8);
4380 			/* thor2 PAM112 */
4381 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_CR_PAM4);
4382 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
4383 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);
4384 
4385 			break;
4386 
4387 		case BNXT_MEDIA_LR:
4388 			BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_LR);
4389 			BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_LR4);
4390 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_LR4);
4391 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_LR2);
4392 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_LR4);
4393 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_LR);
4394 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_LR);
4395 			/* thor2 nrz*/
4396 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_LR4);
4397 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_LR2);
4398 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_LR4);
4399 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_LR);
4400 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_LR);
4401 			/* thor2 PAM56 */
4402 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_LR);
4403 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_AUI2);
4404 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_LR4);
4405 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_LR8);
4406 			/* thor2 PAM112 */
4407 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
4408 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
4409 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);
4410 
4411 			break;
4412 
4413 		case BNXT_MEDIA_SR:
4414 			BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_SR);
4415 			BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_SR2);
4416 			BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_SR4);
4417 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_SR4);
4418 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_SR2);
4419 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_SR4);
4420 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_SR);
4421 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_SR);
4422 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_SX);
4423 			/* thor2 nrz*/
4424 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_SR4);
4425 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_SR2);
4426 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_SR4);
4427 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_SR);
4428 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_SR);
4429 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_SX);
4430 			/* thor2 PAM56 */
4431 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_SR);
4432 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_SR2);
4433 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_SR4);
4434 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_AUI8);
4435 			/* thor2 PAM112 */
4436 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
4437 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
4438 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_DR4);
4439 			break;
4440 
4441 		case BNXT_MEDIA_ER:
4442 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_ER4);
4443 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_AUI4);
4444 			/* thor2 PAM56 */
4445 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_LR);
4446 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_AUI2);
4447 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_LR4);
4448 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_FR8);
4449 			/* thor2 PAM112 */
4450 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
4451 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4_AC);
4452 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);
4453 			break;
4454 
4455 		case BNXT_MEDIA_KR:
4456 			BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_KR_PAM4);
4457 			BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_KR2_PAM4);
4458 			BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_KR4_PAM4);
4459 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_KR4);
4460 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_KR2);
4461 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_KR4);
4462 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_KR4);
4463 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_KR);
4464 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_20GB, IFM_20G_KR2);
4465 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_KR);
4466 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_KX);
4467 			break;
4468 
4469 		case BNXT_MEDIA_AC:
4470 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_ACC);
4471 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_AOC);
4472 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_XLAUI);
4473 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_XLAUI_AC);
4474 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_ACC);
4475 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_AOC);
4476 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_XLAUI);
4477 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_XLAUI_AC);
4478 			break;
4479 
4480 		case BNXT_MEDIA_BASECX:
4481 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_CX);
4482 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_CX);
4483 			break;
4484 
4485 		case BNXT_MEDIA_BASET:
4486 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_T);
4487 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_2_5GB, IFM_2500_T);
4488 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_T);
4489 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100MB, IFM_100_T);
4490 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10MB, IFM_10_T);
4491 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_T);
4492 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_T);
4493 			break;
4494 
4495 		case BNXT_MEDIA_BASEKX:
4496 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_KR);
4497 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_2_5GB, IFM_2500_KX);
4498 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_KX);
4499 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_KR);
4500 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_KX);
4501 			break;
4502 
4503 		case BNXT_MEDIA_BASESGMII:
4504 			BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_SGMII);
4505 			BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_SGMII);
4506 			break;
4507 
4508 		default:
4509 			break;
4510 
4511 	}
4512 	return;
4513 
4514 }
4515 
/*
 * Populate softc->media with the ifmedia entries supported by the
 * current PHY.  The firmware PHY type is first collapsed into a media
 * class (CR/LR/SR/ER/KR/...), then add_media() expands that class into
 * one ifmedia entry per supported speed for the active signaling mode
 * (NRZ, PAM4, or PAM4-112).  IFM_AUTO is always added first; NPAR
 * functions advertise only IFM_AUTO.
 */
static void
bnxt_add_media_types(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	uint16_t supported_NRZ_speeds = 0, supported_pam4_speeds = 0, supported_speeds2 = 0;
	uint8_t phy_type = get_phy_type(softc), media_type;

	/* Speed masks come straight from the last PORT_PHY_QCFG response. */
	supported_NRZ_speeds = link_info->support_speeds;
	supported_speeds2 = link_info->support_speeds2;
	supported_pam4_speeds = link_info->support_pam4_speeds;

	/* Auto is always supported */
	ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);

	if (softc->flags & BNXT_FLAG_NPAR)
		return;

	/* Collapse the firmware PHY type into a driver media class. */
	switch (phy_type) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASECR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASECR8:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASECR4:

		media_type = BNXT_MEDIA_CR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASELR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASELR8:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASELR4:

		media_type = BNXT_MEDIA_LR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASESR8:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASESR4:

		media_type = BNXT_MEDIA_SR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASEER:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER8:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER4:

		media_type = BNXT_MEDIA_ER;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
		media_type = BNXT_MEDIA_KR;
		break;

	/*
	 * NOTE(review): the five cases below assign media_type and then
	 * return, so the assignment is dead and no speed-specific media
	 * entries are ever added for these PHYs — only IFM_AUTO.
	 * add_media() visibly handles at least BNXT_MEDIA_BASEKX and
	 * BNXT_MEDIA_BASESGMII; confirm whether these should 'break'
	 * instead of 'return'.
	 */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
		media_type = BNXT_MEDIA_AC;
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX:
		media_type = BNXT_MEDIA_BASECX;
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
		media_type = BNXT_MEDIA_BASET;
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
		media_type = BNXT_MEDIA_BASEKX;
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
		media_type = BNXT_MEDIA_BASESGMII;
		return;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
		/* Only Autoneg is supported for TYPE_UNKNOWN */
		return;

        default:
		/* Only Autoneg is supported for new phy type values */
		device_printf(softc->dev, "phy type %d not supported by driver\n", phy_type);
		return;
	}

	/*
	 * Expand the media class into per-speed entries.  For NRZ/PAM4,
	 * fall back to the speeds2 mask when the mode-specific mask is
	 * empty (newer firmware reports speeds only via speeds2).
	 */
	switch (link_info->sig_mode) {
	case BNXT_SIG_MODE_NRZ:
		if (supported_NRZ_speeds != 0)
			add_media(softc, media_type, supported_NRZ_speeds, 0, 0);
		else
			add_media(softc, media_type, 0, 0, supported_speeds2);
		break;
	case BNXT_SIG_MODE_PAM4:
		if (supported_pam4_speeds != 0)
			add_media(softc, media_type, 0, supported_pam4_speeds, 0);
		else
			add_media(softc, media_type, 0, 0, supported_speeds2);
		break;
	case BNXT_SIG_MODE_PAM4_112:
		add_media(softc, media_type, 0, 0, supported_speeds2);
		break;
	}

	return;

}
4661 
4662 static int
bnxt_map_bar(struct bnxt_softc * softc,struct bnxt_bar_info * bar,int bar_num,bool shareable)4663 bnxt_map_bar(struct bnxt_softc *softc, struct bnxt_bar_info *bar, int bar_num, bool shareable)
4664 {
4665 	uint32_t	flag;
4666 
4667 	if (bar->res != NULL) {
4668 		device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
4669 		return EDOOFUS;
4670 	}
4671 
4672 	bar->rid = PCIR_BAR(bar_num);
4673 	flag = RF_ACTIVE;
4674 	if (shareable)
4675 		flag |= RF_SHAREABLE;
4676 
4677 	if ((bar->res =
4678 		bus_alloc_resource_any(softc->dev,
4679 			   SYS_RES_MEMORY,
4680 			   &bar->rid,
4681 			   flag)) == NULL) {
4682 		device_printf(softc->dev,
4683 		    "PCI BAR%d mapping failure\n", bar_num);
4684 		return (ENXIO);
4685 	}
4686 	bar->tag = rman_get_bustag(bar->res);
4687 	bar->handle = rman_get_bushandle(bar->res);
4688 	bar->size = rman_get_size(bar->res);
4689 
4690 	return 0;
4691 }
4692 
4693 static int
bnxt_pci_mapping(struct bnxt_softc * softc)4694 bnxt_pci_mapping(struct bnxt_softc *softc)
4695 {
4696 	int rc;
4697 
4698 	rc = bnxt_map_bar(softc, &softc->hwrm_bar, 0, true);
4699 	if (rc)
4700 		return rc;
4701 
4702 	rc = bnxt_map_bar(softc, &softc->doorbell_bar, 2, false);
4703 
4704 	return rc;
4705 }
4706 
4707 static void
bnxt_pci_mapping_free(struct bnxt_softc * softc)4708 bnxt_pci_mapping_free(struct bnxt_softc *softc)
4709 {
4710 	if (softc->hwrm_bar.res != NULL)
4711 		bus_release_resource(softc->dev, SYS_RES_MEMORY,
4712 		    softc->hwrm_bar.rid, softc->hwrm_bar.res);
4713 	softc->hwrm_bar.res = NULL;
4714 
4715 	if (softc->doorbell_bar.res != NULL)
4716 		bus_release_resource(softc->dev, SYS_RES_MEMORY,
4717 		    softc->doorbell_bar.rid, softc->doorbell_bar.res);
4718 	softc->doorbell_bar.res = NULL;
4719 }
4720 
4721 static int
bnxt_update_link(struct bnxt_softc * softc,bool chng_link_state)4722 bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state)
4723 {
4724 	struct bnxt_link_info *link_info = &softc->link_info;
4725 	uint8_t link_up = link_info->link_up;
4726 	int rc = 0;
4727 
4728 	rc = bnxt_hwrm_port_phy_qcfg(softc);
4729 	if (rc)
4730 		goto exit;
4731 
4732 	/* TODO: need to add more logic to report VF link */
4733 	if (chng_link_state) {
4734 		if (link_info->phy_link_status ==
4735 		    HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
4736 			link_info->link_up = 1;
4737 		else
4738 			link_info->link_up = 0;
4739 		if (link_up != link_info->link_up)
4740 			bnxt_report_link(softc);
4741 	} else {
4742 		/* always link down if not require to update link state */
4743 		link_info->link_up = 0;
4744 	}
4745 
4746 exit:
4747 	return rc;
4748 }
4749 
4750 #define ETHTOOL_SPEED_1000		1000
4751 #define ETHTOOL_SPEED_10000		10000
4752 #define ETHTOOL_SPEED_20000		20000
4753 #define ETHTOOL_SPEED_25000		25000
4754 #define ETHTOOL_SPEED_40000		40000
4755 #define ETHTOOL_SPEED_50000		50000
4756 #define ETHTOOL_SPEED_100000		100000
4757 #define ETHTOOL_SPEED_200000		200000
4758 #define ETHTOOL_SPEED_UNKNOWN		-1
4759 
4760 static u32
bnxt_fw_to_ethtool_speed(u16 fw_link_speed)4761 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
4762 {
4763 	switch (fw_link_speed) {
4764 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
4765 		return ETHTOOL_SPEED_1000;
4766 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
4767 		return ETHTOOL_SPEED_10000;
4768 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
4769 		return ETHTOOL_SPEED_20000;
4770 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
4771 		return ETHTOOL_SPEED_25000;
4772 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
4773 		return ETHTOOL_SPEED_40000;
4774 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
4775 		return ETHTOOL_SPEED_50000;
4776 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
4777 		return ETHTOOL_SPEED_100000;
4778 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
4779 		return ETHTOOL_SPEED_200000;
4780 	default:
4781 		return ETHTOOL_SPEED_UNKNOWN;
4782 	}
4783 }
4784 
4785 void
bnxt_report_link(struct bnxt_softc * softc)4786 bnxt_report_link(struct bnxt_softc *softc)
4787 {
4788 	struct bnxt_link_info *link_info = &softc->link_info;
4789 	const char *duplex = NULL, *flow_ctrl = NULL;
4790 	const char *signal_mode = "";
4791 
4792 	if(softc->edev)
4793 		softc->edev->espeed =
4794 		    bnxt_fw_to_ethtool_speed(link_info->link_speed);
4795 
4796 	if (link_info->link_up == link_info->last_link_up) {
4797 		if (!link_info->link_up)
4798 			return;
4799 		if ((link_info->duplex == link_info->last_duplex) &&
4800 		    (link_info->phy_type == link_info->last_phy_type) &&
4801                     (!(BNXT_IS_FLOW_CTRL_CHANGED(link_info))))
4802 			return;
4803 	}
4804 
4805 	if (link_info->link_up) {
4806 		if (link_info->duplex ==
4807 		    HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
4808 			duplex = "full duplex";
4809 		else
4810 			duplex = "half duplex";
4811 		if (link_info->flow_ctrl.tx & link_info->flow_ctrl.rx)
4812 			flow_ctrl = "FC - receive & transmit";
4813 		else if (link_info->flow_ctrl.tx)
4814 			flow_ctrl = "FC - transmit";
4815 		else if (link_info->flow_ctrl.rx)
4816 			flow_ctrl = "FC - receive";
4817 		else
4818 			flow_ctrl = "FC - none";
4819 
4820 		if (softc->link_info.phy_qcfg_resp.option_flags &
4821 		    HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
4822 			uint8_t sig_mode = softc->link_info.active_fec_sig_mode &
4823 				      HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_MASK;
4824 			switch (sig_mode) {
4825 			case BNXT_SIG_MODE_NRZ:
4826 				signal_mode = "(NRZ) ";
4827 				break;
4828 			case BNXT_SIG_MODE_PAM4:
4829 				signal_mode = "(PAM4 56Gbps) ";
4830 				break;
4831 			case BNXT_SIG_MODE_PAM4_112:
4832 				signal_mode = "(PAM4 112Gbps) ";
4833 				break;
4834 			default:
4835 				break;
4836 			}
4837 		link_info->sig_mode = sig_mode;
4838 		}
4839 
4840 		iflib_link_state_change(softc->ctx, LINK_STATE_UP,
4841 		    IF_Gbps(100));
4842 		device_printf(softc->dev, "Link is UP %s %s, %s - %d Mbps \n", duplex, signal_mode,
4843 		    flow_ctrl, (link_info->link_speed * 100));
4844 	} else {
4845 		iflib_link_state_change(softc->ctx, LINK_STATE_DOWN,
4846 		    bnxt_get_baudrate(&softc->link_info));
4847 		device_printf(softc->dev, "Link is Down\n");
4848 	}
4849 
4850 	link_info->last_link_up = link_info->link_up;
4851 	link_info->last_duplex = link_info->duplex;
4852 	link_info->last_phy_type = link_info->phy_type;
4853 	link_info->last_flow_ctrl.tx = link_info->flow_ctrl.tx;
4854 	link_info->last_flow_ctrl.rx = link_info->flow_ctrl.rx;
4855 	link_info->last_flow_ctrl.autoneg = link_info->flow_ctrl.autoneg;
4856 	/* update media types */
4857 	ifmedia_removeall(softc->media);
4858 	bnxt_add_media_types(softc);
4859 	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
4860 }
4861 
4862 static int
bnxt_handle_isr(void * arg)4863 bnxt_handle_isr(void *arg)
4864 {
4865 	struct bnxt_cp_ring *cpr = arg;
4866 	struct bnxt_softc *softc = cpr->ring.softc;
4867 
4868 	cpr->int_count++;
4869 	/* Disable further interrupts for this queue */
4870 	if (!BNXT_CHIP_P5_PLUS(softc))
4871 		softc->db_ops.bnxt_db_rx_cq(cpr, 0);
4872 
4873 	return FILTER_SCHEDULE_THREAD;
4874 }
4875 
4876 static int
bnxt_handle_def_cp(void * arg)4877 bnxt_handle_def_cp(void *arg)
4878 {
4879 	struct bnxt_softc *softc = arg;
4880 
4881 	softc->db_ops.bnxt_db_rx_cq(&softc->def_cp_ring, 0);
4882 	iflib_config_task_enqueue(softc->ctx, &softc->def_cp_task);
4883 	return FILTER_HANDLED;
4884 }
4885 
4886 static void
bnxt_clear_ids(struct bnxt_softc * softc)4887 bnxt_clear_ids(struct bnxt_softc *softc)
4888 {
4889 	int i;
4890 
4891 	softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
4892 	softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4893 	softc->def_nq_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
4894 	softc->def_nq_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4895 	for (i = 0; i < softc->ntxqsets; i++) {
4896 		softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4897 		softc->tx_cp_rings[i].ring.phys_id =
4898 		    (uint16_t)HWRM_NA_SIGNATURE;
4899 		softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4900 
4901 		if (!softc->nq_rings)
4902 			continue;
4903 		softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4904 		softc->nq_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4905 	}
4906 	for (i = 0; i < softc->nrxqsets; i++) {
4907 		softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4908 		softc->rx_cp_rings[i].ring.phys_id =
4909 		    (uint16_t)HWRM_NA_SIGNATURE;
4910 		softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4911 		softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4912 		softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
4913 	}
4914 	softc->vnic_info.filter_id = -1;
4915 	softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
4916 	softc->vnic_info.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
4917 	memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
4918 	    softc->vnic_info.rss_grp_tbl.idi_size);
4919 }
4920 
4921 static void
bnxt_mark_cpr_invalid(struct bnxt_cp_ring * cpr)4922 bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
4923 {
4924 	struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
4925 	int i;
4926 
4927 	for (i = 0; i < cpr->ring.ring_size; i++)
4928 		cmp[i].info3_v = !cpr->v_bit;
4929 }
4930 
/*
 * Decode and log an HWRM "error report" async event.
 *
 * data1/data2 are the raw event payload words; the error type encoded
 * in data1 selects how the rest of the payload is interpreted.  This
 * routine only logs — no recovery action is taken here.
 */
static void bnxt_event_error_report(struct bnxt_softc *softc, u32 data1, u32 data2)
{
	u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);

	switch (err_type) {
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
		device_printf(softc->dev,
			      "1PPS: Received invalid signal on pin%u from the external source. Please fix the signal and reconfigure the pin\n",
			      BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
		device_printf(softc->dev,
			      "Pause Storm detected!\n");
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
		device_printf(softc->dev,
			      "One or more MMIO doorbells dropped by the device! epoch: 0x%x\n",
			      BNXT_EVENT_DBR_EPOCH(data1));
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM: {
		const char *nvm_err_str;

		if (EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1))
			nvm_err_str = "nvm write error";
		else if (EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1))
			nvm_err_str = "nvm erase error";
		else
			nvm_err_str = "unrecognized nvm error";

		device_printf(softc->dev,
			      "%s reported at address 0x%x\n", nvm_err_str,
			      (u32)EVENT_DATA2_NVM_ERR_ADDR(data2));
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
		/* const-qualified: these only ever point at string literals */
		const char *threshold_type;
		const char *dir_str;

		switch (EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)) {
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
			threshold_type = "warning";
			break;
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
			threshold_type = "critical";
			break;
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
			threshold_type = "fatal";
			break;
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
			threshold_type = "shutdown";
			break;
		default:
			device_printf(softc->dev,
				      "Unknown Thermal threshold type event\n");
			return;
		}
		if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1))
			dir_str = "above";
		else
			dir_str = "below";
		device_printf(softc->dev,
			      "Chip temperature has gone %s the %s thermal threshold!\n",
			      dir_str, threshold_type);
		device_printf(softc->dev,
			      "Temperature (In Celsius), Current: %u, threshold: %u\n",
			      BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
			      BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
		device_printf(softc->dev,
			      "Speed change is not supported with dual rate transceivers on this board\n");
		break;

	default:
		device_printf(softc->dev,
			      "FW reported unknown error type: %u, data1: 0x%x data2: 0x%x\n",
			      err_type, data1, data2);
		break;
	}
}
5012 
/*
 * Decode one asynchronous event completion from the default completion
 * ring and react to it (link changes, firmware reset / error-recovery
 * notifications, error reports).  Events that are fully handled here
 * jump to async_event_process_exit, skipping bnxt_queue_sp_work();
 * everything else schedules the slow-path work task.  All events are
 * also forwarded to bnxt_ulp_async_events() at the end.
 */
static void
bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
{
	struct hwrm_async_event_cmpl *ae = (void *)cmpl;
	uint16_t async_id = le16toh(ae->event_id);
	struct ifmediareq ifmr;	/* scratch; filled but not read here */
	char *type_str;
	char *status_desc;
	struct bnxt_fw_health *fw_health;
	u32 data1 = le32toh(ae->event_data1);
	u32 data2 = le32toh(ae->event_data2);

	switch (async_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		/*
		 * On P5+ only set a flag; the actual re-query happens
		 * later.  Older chips refresh the media status inline.
		 */
		if (BNXT_CHIP_P5_PLUS(softc))
			bit_set(softc->state_bv, BNXT_STATE_LINK_CHANGE);
		else
			bnxt_media_status(softc->ctx, &ifmr);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
		bnxt_event_error_report(softc, data1, data2);
		goto async_event_process_exit;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE:
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
		/* Firmware announced an impending reset. */
		type_str = "Solicited";

		if (!softc->fw_health)
			goto async_event_process_exit;

		/*
		 * Record the reset window (in deciseconds) reported in
		 * the event timestamps, substituting defaults for zero.
		 */
		softc->fw_reset_timestamp = jiffies;
		softc->fw_reset_min_dsecs = ae->timestamp_lo;
		if (!softc->fw_reset_min_dsecs)
			softc->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
		softc->fw_reset_max_dsecs = le16toh(ae->timestamp_hi);
		if (!softc->fw_reset_max_dsecs)
			softc->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &softc->state);
		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
			type_str = "Fatal";
			softc->fw_health->fatalities++;
			set_bit(BNXT_STATE_FW_FATAL_COND, &softc->state);
		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
			type_str = "Non-fatal";
			softc->fw_health->survivals++;
			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &softc->state);
		}
		device_printf(softc->dev,
			   "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
			   type_str, data1, data2,
			   softc->fw_reset_min_dsecs * 100,
			   softc->fw_reset_max_dsecs * 100);
		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &softc->sp_event);
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
		/* Firmware (re)configured the error-recovery watchdog. */
		fw_health = softc->fw_health;
		status_desc = "healthy";
		u32 status;

		if (!fw_health)
			goto async_event_process_exit;

		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
			fw_health->enabled = false;
			device_printf(softc->dev, "Driver recovery watchdog is disabled\n");
			break;
		}
		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
		fw_health->tmr_multiplier =
			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
				     HZ * 10);
		fw_health->tmr_counter = fw_health->tmr_multiplier;
		/* Seed the heartbeat only on the disabled->enabled edge. */
		if (!fw_health->enabled)
			fw_health->last_fw_heartbeat =
				bnxt_fw_health_readl(softc, BNXT_FW_HEARTBEAT_REG);
		fw_health->last_fw_reset_cnt =
			bnxt_fw_health_readl(softc, BNXT_FW_RESET_CNT_REG);
		status = bnxt_fw_health_readl(softc, BNXT_FW_HEALTH_REG);
		if (status != BNXT_FW_STATUS_HEALTHY)
			status_desc = "unhealthy";
		device_printf(softc->dev,
			   "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
			   fw_health->primary ? "primary" : "backup", status,
			   status_desc, fw_health->last_fw_reset_cnt);
		if (!fw_health->enabled) {
			/* Make sure tmr_counter is set and seen by
			 * bnxt_health_check() before setting enabled
			 */
			smp_mb();
			fw_health->enabled = true;
		}
		goto async_event_process_exit;
	}

	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
		/* Known event IDs the driver does not act on. */
		device_printf(softc->dev,
		    "Unhandled async completion type %u\n", async_id);
		break;
	default:
		dev_dbg(softc->dev, "Unknown Async event completion type %u\n",
			async_id);
		break;
	}
	bnxt_queue_sp_work(softc);

async_event_process_exit:
	bnxt_ulp_async_events(softc, ae);
}
5140 
/*
 * Deferred (config task) handler for the default completion ring.
 *
 * Walks the ring from the stored consumer index, dispatching async
 * event completions to bnxt_handle_async_event() and logging any other
 * completion types.  The walk is speculative: last_cons/last_v_bit
 * always track the last entry known valid, so when CMP_VALID fails the
 * consumer index is restored to that entry rather than the invalid one.
 * Finally the doorbell is rung with arm=1 (presumably re-enabling the
 * ring's interrupt, mirroring the arm=0 masking in the ISR).
 */
static void
bnxt_def_cp_task(void *context, int pending)
{
	if_ctx_t ctx = context;
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_cp_ring *cpr = &softc->def_cp_ring;

	/* Handle completions on the default completion ring */
	struct cmpl_base *cmpl;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	bool last_v_bit;
	uint32_t last_cons;
	uint16_t type;

	for (;;) {
		/* Remember the last entry proven valid before advancing. */
		last_cons = cons;
		last_v_bit = v_bit;
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];

		/* Stop at the first entry hardware has not written yet. */
		if (!CMP_VALID(cmpl, v_bit))
			break;

		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_handle_async_event(softc, cmpl);
			break;
		case CMPL_BASE_TYPE_TX_L2:
		case CMPL_BASE_TYPE_RX_L2:
		case CMPL_BASE_TYPE_RX_L2_V3:
		case CMPL_BASE_TYPE_RX_AGG:
		case CMPL_BASE_TYPE_RX_TPA_START:
		case CMPL_BASE_TYPE_RX_TPA_START_V3:
		case CMPL_BASE_TYPE_RX_TPA_END:
		case CMPL_BASE_TYPE_STAT_EJECT:
		case CMPL_BASE_TYPE_HWRM_DONE:
		case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		case CMPL_BASE_TYPE_HWRM_FWD_RESP:
		case CMPL_BASE_TYPE_CQ_NOTIFICATION:
		case CMPL_BASE_TYPE_SRQ_EVENT:
		case CMPL_BASE_TYPE_DBQ_EVENT:
		case CMPL_BASE_TYPE_QP_EVENT:
		case CMPL_BASE_TYPE_FUNC_EVENT:
			/* Recognized types not expected on this ring. */
			dev_dbg(softc->dev, "Unhandled Async event completion type %u\n",
				type);
			break;
		default:
			dev_dbg(softc->dev, "Unknown Async event completion type %u\n",
				type);
			break;
		}
	}

	/* Restore position of the last valid entry and re-arm the ring. */
	cpr->cons = last_cons;
	cpr->v_bit = last_v_bit;
	softc->db_ops.bnxt_db_rx_cq(cpr, 1);
}
5200 
5201 uint8_t
get_phy_type(struct bnxt_softc * softc)5202 get_phy_type(struct bnxt_softc *softc)
5203 {
5204 	struct bnxt_link_info *link_info = &softc->link_info;
5205 	uint8_t phy_type = link_info->phy_type;
5206 	uint16_t supported;
5207 
5208 	if (phy_type != HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN)
5209 		return phy_type;
5210 
5211 	/* Deduce the phy type from the media type and supported speeds */
5212 	supported = link_info->support_speeds;
5213 
5214 	if (link_info->media_type ==
5215 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP)
5216 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET;
5217 	if (link_info->media_type ==
5218 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC) {
5219 		if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
5220 			return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX;
5221 		if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
5222 			return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR;
5223 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR;
5224 	}
5225 	if (link_info->media_type ==
5226 	    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE)
5227 		return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR;
5228 
5229 	return phy_type;
5230 }
5231 
5232 bool
bnxt_check_hwrm_version(struct bnxt_softc * softc)5233 bnxt_check_hwrm_version(struct bnxt_softc *softc)
5234 {
5235 	char buf[16];
5236 
5237 	sprintf(buf, "%hhu.%hhu.%hhu", softc->ver_info->hwrm_min_major,
5238 	    softc->ver_info->hwrm_min_minor, softc->ver_info->hwrm_min_update);
5239 	if (softc->ver_info->hwrm_min_major > softc->ver_info->hwrm_if_major) {
5240 		device_printf(softc->dev,
5241 		    "WARNING: HWRM version %s is too old (older than %s)\n",
5242 		    softc->ver_info->hwrm_if_ver, buf);
5243 		return false;
5244 	}
5245 	else if(softc->ver_info->hwrm_min_major ==
5246 	    softc->ver_info->hwrm_if_major) {
5247 		if (softc->ver_info->hwrm_min_minor >
5248 		    softc->ver_info->hwrm_if_minor) {
5249 			device_printf(softc->dev,
5250 			    "WARNING: HWRM version %s is too old (older than %s)\n",
5251 			    softc->ver_info->hwrm_if_ver, buf);
5252 			return false;
5253 		}
5254 		else if (softc->ver_info->hwrm_min_minor ==
5255 		    softc->ver_info->hwrm_if_minor) {
5256 			if (softc->ver_info->hwrm_min_update >
5257 			    softc->ver_info->hwrm_if_update) {
5258 				device_printf(softc->dev,
5259 				    "WARNING: HWRM version %s is too old (older than %s)\n",
5260 				    softc->ver_info->hwrm_if_ver, buf);
5261 				return false;
5262 			}
5263 		}
5264 	}
5265 	return true;
5266 }
5267 
5268 static uint64_t
bnxt_get_baudrate(struct bnxt_link_info * link)5269 bnxt_get_baudrate(struct bnxt_link_info *link)
5270 {
5271 	switch (link->link_speed) {
5272 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
5273 		return IF_Mbps(100);
5274 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
5275 		return IF_Gbps(1);
5276 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
5277 		return IF_Gbps(2);
5278 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
5279 		return IF_Mbps(2500);
5280 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
5281 		return IF_Gbps(10);
5282 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
5283 		return IF_Gbps(20);
5284 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
5285 		return IF_Gbps(25);
5286 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
5287 		return IF_Gbps(40);
5288 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
5289 		return IF_Gbps(50);
5290 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
5291 		return IF_Gbps(100);
5292 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
5293 		return IF_Mbps(10);
5294 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
5295 		return IF_Gbps(200);
5296 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_400GB:
5297 		return IF_Gbps(400);
5298 	}
5299 	return IF_Gbps(100);
5300 }
5301 
5302 static void
bnxt_get_wol_settings(struct bnxt_softc * softc)5303 bnxt_get_wol_settings(struct bnxt_softc *softc)
5304 {
5305 	uint16_t wol_handle = 0;
5306 
5307 	if (!bnxt_wol_supported(softc))
5308 		return;
5309 
5310 	do {
5311 		wol_handle = bnxt_hwrm_get_wol_fltrs(softc, wol_handle);
5312 	} while (wol_handle && wol_handle != BNXT_NO_MORE_WOL_FILTERS);
5313 }
5314