1 /*-
2 * Broadcom NetXtreme-C/E network driver.
3 *
4 * Copyright (c) 2016 Broadcom, All Rights Reserved.
5 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/param.h>
30 #include <sys/socket.h>
31 #include <sys/kernel.h>
32 #include <sys/bus.h>
33 #include <sys/module.h>
34 #include <sys/rman.h>
35 #include <sys/endian.h>
36 #include <sys/sockio.h>
37 #include <sys/priv.h>
38
39 #include <machine/bus.h>
40 #include <machine/resource.h>
41
42 #include <dev/pci/pcireg.h>
43
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_var.h>
48 #include <net/ethernet.h>
49 #include <net/iflib.h>
50
51 #define WANT_NATIVE_PCI_GET_SLOT
52 #include <linux/pci.h>
53 #include <linux/kmod.h>
54 #include <linux/module.h>
55 #include <linux/delay.h>
56 #include <linux/idr.h>
57 #include <linux/netdevice.h>
58 #include <linux/etherdevice.h>
59 #include <linux/rcupdate.h>
60 #include "opt_inet.h"
61 #include "opt_inet6.h"
62 #include "opt_rss.h"
63
64 #include "ifdi_if.h"
65
66 #include "bnxt.h"
67 #include "bnxt_hwrm.h"
68 #include "bnxt_ioctl.h"
69 #include "bnxt_sysctl.h"
70 #include "hsi_struct_def.h"
71 #include "bnxt_mgmt.h"
72 #include "bnxt_ulp.h"
73 #include "bnxt_auxbus_compat.h"
74
75 /*
76 * PCI Device ID Table
77 */
78
static const pci_vendor_info_t bnxt_vendor_info_array[] =
{
    /* Physical function (PF) devices, including NPAR partitions */
    PVID(BROADCOM_VENDOR_ID, BCM57301,
	"Broadcom BCM57301 NetXtreme-C 10Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57302,
	"Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57304,
	"Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57311,
	"Broadcom BCM57311 NetXtreme-C 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57312,
	"Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57314,
	"Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57402,
	"Broadcom BCM57402 NetXtreme-E 10Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57402_NPAR,
	"Broadcom BCM57402 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57404,
	"Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57404_NPAR,
	"Broadcom BCM57404 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57406,
	"Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57406_NPAR,
	"Broadcom BCM57406 NetXtreme-E Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57407,
	"Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57407_NPAR,
	"Broadcom BCM57407 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57407_SFP,
	"Broadcom BCM57407 NetXtreme-E 25Gb Ethernet Controller"),
    PVID(BROADCOM_VENDOR_ID, BCM57412,
	"Broadcom BCM57412 NetXtreme-E 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR1,
	"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR2,
	"Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57414,
	"Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR1,
	"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR2,
	"Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416,
	"Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR1,
	"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR2,
	"Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57416_SFP,
	"Broadcom BCM57416 NetXtreme-E 10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57417,
	"Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR1,
	"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR2,
	"Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57417_SFP,
	"Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57454,
	"Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM58700,
	"Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57508,
	"Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57504,
	"Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57504_NPAR,
	"Broadcom BCM57504 NetXtreme-E Ethernet Partition"),
    PVID(BROADCOM_VENDOR_ID, BCM57502,
	"Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57608,
	"Broadcom BCM57608 NetXtreme-E 25Gb/50Gb/100Gb/200Gb/400Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57604,
	"Broadcom BCM57604 NetXtreme-E 25Gb/50Gb/100Gb/200Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57602,
	"Broadcom BCM57602 NetXtreme-E 25Gb/50Gb Ethernet"),
    PVID(BROADCOM_VENDOR_ID, BCM57601,
	"Broadcom BCM57601 NetXtreme-E 25Gb/50Gb Ethernet"),
    /* Virtual function (VF) devices; see bnxt_is_vf_device() */
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF1,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF2,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF3,
	"Broadcom NetXtreme-C Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF1,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF2,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF3,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF4,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF5,
	"Broadcom NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_P5_VF1,
	"Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_P5_VF2,
	"Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_P5_VF_HV1,
	"Broadcom NetXtreme-C Virtual Function for Hyper-V"),
    PVID(BROADCOM_VENDOR_ID, NETXTREME_E_P5_VF_HV2,
	"Broadcom NetXtreme-C Virtual Function for Hyper-V"),
    PVID(BROADCOM_VENDOR_ID, E_P7_VF,
	"Broadcom BCM5760X Virtual Function"),
    /* required last entry */

    PVID_END
};
189
190 /*
191 * Function prototypes
192 */
193
194 SLIST_HEAD(softc_list, bnxt_softc_list) pf_list;
195 int bnxt_num_pfs = 0;
196
197 void
198 process_nq(struct bnxt_softc *softc, uint16_t nqid);
199 static void *bnxt_register(device_t dev);
200
201 /* Soft queue setup and teardown */
202 static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
203 uint64_t *paddrs, int ntxqs, int ntxqsets);
204 static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
205 uint64_t *paddrs, int nrxqs, int nrxqsets);
206 static void bnxt_queues_free(if_ctx_t ctx);
207
208 /* Device setup and teardown */
209 static int bnxt_attach_pre(if_ctx_t ctx);
210 static int bnxt_attach_post(if_ctx_t ctx);
211 static int bnxt_detach(if_ctx_t ctx);
212
213 /* Device configuration */
214 static void bnxt_init(if_ctx_t ctx);
215 static void bnxt_stop(if_ctx_t ctx);
216 static void bnxt_multi_set(if_ctx_t ctx);
217 static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu);
218 static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
219 static int bnxt_media_change(if_ctx_t ctx);
220 static int bnxt_promisc_set(if_ctx_t ctx, int flags);
221 static uint64_t bnxt_get_counter(if_ctx_t, ift_counter);
222 static void bnxt_update_admin_status(if_ctx_t ctx);
223 static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid);
224
225 /* Interrupt enable / disable */
226 static void bnxt_intr_enable(if_ctx_t ctx);
227 static int bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
228 static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
229 static void bnxt_disable_intr(if_ctx_t ctx);
230 static int bnxt_msix_intr_assign(if_ctx_t ctx, int msix);
231
232 /* vlan support */
233 static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag);
234 static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag);
235
236 /* ioctl */
237 static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
238
239 static int bnxt_shutdown(if_ctx_t ctx);
240 static int bnxt_suspend(if_ctx_t ctx);
241 static int bnxt_resume(if_ctx_t ctx);
242
243 /* Internal support functions */
244 static int bnxt_probe_phy(struct bnxt_softc *softc);
245 static void bnxt_add_media_types(struct bnxt_softc *softc);
246 static int bnxt_pci_mapping(struct bnxt_softc *softc);
247 static void bnxt_pci_mapping_free(struct bnxt_softc *softc);
248 static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state);
249 static int bnxt_handle_def_cp(void *arg);
250 static int bnxt_handle_isr(void *arg);
251 static void bnxt_clear_ids(struct bnxt_softc *softc);
252 static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr);
253 static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr);
254 static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr);
255 static void bnxt_def_cp_task(void *context, int pending);
256 static void bnxt_handle_async_event(struct bnxt_softc *softc,
257 struct cmpl_base *cmpl);
258 static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link);
259 static void bnxt_get_wol_settings(struct bnxt_softc *softc);
260 static int bnxt_wol_config(if_ctx_t ctx);
261 static bool bnxt_if_needs_restart(if_ctx_t, enum iflib_restart_event);
262 static int bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c);
263 static void bnxt_get_port_module_status(struct bnxt_softc *softc);
264 static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc);
265 static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc);
266 static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay);
267 void bnxt_queue_sp_work(struct bnxt_softc *bp);
268
269 void bnxt_fw_reset(struct bnxt_softc *bp);
270 /*
271 * Device Interface Declaration
272 */
273
/*
 * Newbus device methods.  Other than device_register (which hands iflib
 * the shared-context template), everything is delegated to iflib.
 */
static device_method_t bnxt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, bnxt_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
	/* SR-IOV entry points, also routed through iflib */
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
	DEVMETHOD_END
};
290
/* Newbus driver glue: attaches "bnxt" to the PCI bus. */
static driver_t bnxt_driver = {
	"bnxt", bnxt_methods, sizeof(struct bnxt_softc),
};

DRIVER_MODULE(bnxt, pci, bnxt_driver, 0, 0);

MODULE_LICENSE("Dual BSD/GPL");
/* Module dependencies: PCI bus, ethernet, iflib framework, linuxkpi shims. */
MODULE_DEPEND(if_bnxt, pci, 1, 1, 1);
MODULE_DEPEND(if_bnxt, ether, 1, 1, 1);
MODULE_DEPEND(if_bnxt, iflib, 1, 1, 1);
MODULE_DEPEND(if_bnxt, linuxkpi, 1, 1, 1);
MODULE_VERSION(if_bnxt, 1);

/* Export the PCI ID table so devmatch(8) can autoload the module. */
IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array);
305
306 void writel_fbsd(struct bnxt_softc *bp, u32, u8, u32);
307 u32 readl_fbsd(struct bnxt_softc *bp, u32, u8);
308
/*
 * Read a 32-bit register.  bar_idx selects the BAR: 0 reads through the
 * doorbell BAR, anything else through the HWRM BAR.
 */
u32 readl_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx)
{

	if (bar_idx)
		return bus_space_read_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle, reg_off);

	return bus_space_read_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle, reg_off);
}
317
/*
 * Write a 32-bit register as little-endian.  bar_idx selects the BAR:
 * 0 writes through the doorbell BAR, anything else through the HWRM BAR.
 */
void writel_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx, u32 val)
{

	if (bar_idx)
		bus_space_write_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle, reg_off, htole32(val));
	else
		bus_space_write_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle, reg_off, htole32(val));
}
326
327 static DEFINE_IDA(bnxt_aux_dev_ids);
328
/* iflib driver interface: maps ifdi_* callbacks to bnxt implementations. */
static device_method_t bnxt_iflib_methods[] = {
	/* Queue setup/teardown */
	DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, bnxt_queues_free),

	/* Device attach/detach */
	DEVMETHOD(ifdi_attach_pre, bnxt_attach_pre),
	DEVMETHOD(ifdi_attach_post, bnxt_attach_post),
	DEVMETHOD(ifdi_detach, bnxt_detach),

	/* Runtime configuration and status */
	DEVMETHOD(ifdi_init, bnxt_init),
	DEVMETHOD(ifdi_stop, bnxt_stop),
	DEVMETHOD(ifdi_multi_set, bnxt_multi_set),
	DEVMETHOD(ifdi_mtu_set, bnxt_mtu_set),
	DEVMETHOD(ifdi_media_status, bnxt_media_status),
	DEVMETHOD(ifdi_media_change, bnxt_media_change),
	DEVMETHOD(ifdi_promisc_set, bnxt_promisc_set),
	DEVMETHOD(ifdi_get_counter, bnxt_get_counter),
	DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status),
	DEVMETHOD(ifdi_timer, bnxt_if_timer),

	/* Interrupt management */
	DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr),
	DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign),

	/* VLAN filter registration */
	DEVMETHOD(ifdi_vlan_register, bnxt_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, bnxt_vlan_unregister),

	DEVMETHOD(ifdi_priv_ioctl, bnxt_priv_ioctl),

	/* Power management and module EEPROM access */
	DEVMETHOD(ifdi_suspend, bnxt_suspend),
	DEVMETHOD(ifdi_shutdown, bnxt_shutdown),
	DEVMETHOD(ifdi_resume, bnxt_resume),
	DEVMETHOD(ifdi_i2c_req, bnxt_i2c_req),

	DEVMETHOD(ifdi_needs_restart, bnxt_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, bnxt_iov_init),
	DEVMETHOD(ifdi_iov_uninit, bnxt_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, bnxt_iov_vf_add),
#endif
	DEVMETHOD_END
};
373
/* Driver description handed to iflib via the shared-context templates. */
static driver_t bnxt_iflib_driver = {
	"bnxt", bnxt_iflib_methods, sizeof(struct bnxt_softc)
};

/*
 * iflib shared context
 */
#define BNXT_DRIVER_VERSION "230.0.133.0"
const char bnxt_driver_version[] = BNXT_DRIVER_VERSION;

/* One-time banner printed from bnxt_register(). */
static char drv_version_msg[] =
	"Broadcom NetXtreme-C/E Ethernet Driver if_bnxt" \
	" v" BNXT_DRIVER_VERSION;

/* TX/RX fast-path entry points, defined in the txrx source file. */
extern struct if_txrx bnxt_txrx;
389
/*
 * Base iflib shared-context template (also the basis for the VF variant).
 * Three rings per TX queue set and per RX queue set; per the queue-alloc
 * routines: TX index 0 = completion, 1 = TX, 2 = NQ; RX index 0 =
 * completion, 1 = RX, 2 = aggregation.
 */
static struct if_shared_ctx bnxt_sctx_template = {
	.isc_magic = IFLIB_MAGIC,
	.isc_driver = &bnxt_iflib_driver,
	.isc_nfl = 2,				// 2 free lists: RX + aggregation
	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_nsegments = 1,
	.isc_ntxqs = 3,				// completion, TX, NQ
	.isc_nrxqs = 3,				// completion, RX, aggregation
	.isc_nrxd_min = {16, 16, 16},
	.isc_nrxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 8,
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd),
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd)},
	.isc_nrxd_max = {BNXT_MAX_RXD, BNXT_MAX_RXD, BNXT_MAX_RXD},
	.isc_ntxd_min = {16, 16, 16},
	.isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2,
	    PAGE_SIZE / sizeof(struct tx_bd_short),
	    PAGE_SIZE / sizeof(struct cmpl_base) * 16},
	.isc_ntxd_max = {BNXT_MAX_TXD, BNXT_MAX_TXD, BNXT_MAX_TXD},
	.isc_vendor_info = bnxt_vendor_info_array,
	.isc_driver_version = bnxt_driver_version,
};
418
/*
 * P7 (BCM57608) shared-context template.  Identical to bnxt_sctx_template
 * except isc_ntxd_min is raised from 16 to 128 entries per TX ring type.
 */
static struct if_shared_ctx bnxt_sctx_template_p7 = {
	.isc_magic = IFLIB_MAGIC,
	.isc_driver = &bnxt_iflib_driver,
	.isc_nfl = 2,				// 2 free lists: RX + aggregation
	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_nsegments = 1,
	.isc_ntxqs = 3,				// completion, TX, NQ
	.isc_nrxqs = 3,				// completion, RX, aggregation
	.isc_nrxd_min = {16, 16, 16},
	.isc_nrxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 8,
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd),
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd)},
	.isc_nrxd_max = {BNXT_MAX_RXD, BNXT_MAX_RXD, BNXT_MAX_RXD},
	.isc_ntxd_min = {128, 128, 128},	// larger minimum than base template
	.isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2,
	    PAGE_SIZE / sizeof(struct tx_bd_short),
	    PAGE_SIZE / sizeof(struct cmpl_base) * 16},
	.isc_ntxd_max = {BNXT_MAX_TXD, BNXT_MAX_TXD, BNXT_MAX_TXD},
	.isc_vendor_info = bnxt_vendor_info_array,
	.isc_driver_version = bnxt_driver_version,
};
447
448 static struct if_shared_ctx bnxt_sctx_pf_init;
449 static struct if_shared_ctx bnxt_sctx_vf_init;
450 static bool sctx_initialized = false;
451
452 static inline void
bnxt_init_sctx_variants(uint16_t device_id)453 bnxt_init_sctx_variants(uint16_t device_id)
454 {
455 if (device_id == BCM57608)
456 bnxt_sctx_pf_init = bnxt_sctx_template_p7;
457 else
458 bnxt_sctx_pf_init = bnxt_sctx_template;
459
460 bnxt_sctx_pf_init.isc_admin_intrcnt = BNXT_ROCE_IRQ_COUNT;
461
462 bnxt_sctx_vf_init = bnxt_sctx_template;
463 bnxt_sctx_vf_init.isc_flags |= IFLIB_IS_VF;
464 }
465
466 static inline bool
bnxt_is_vf_device(uint16_t device_id)467 bnxt_is_vf_device(uint16_t device_id)
468 {
469 switch (device_id) {
470 case NETXTREME_C_VF1:
471 case NETXTREME_C_VF2:
472 case NETXTREME_C_VF3:
473 case NETXTREME_E_VF1:
474 case NETXTREME_E_VF2:
475 case NETXTREME_E_VF3:
476 case NETXTREME_E_VF4:
477 case NETXTREME_E_VF5:
478 case NETXTREME_E_P5_VF1:
479 case NETXTREME_E_P5_VF2:
480 case NETXTREME_E_P5_VF_HV1:
481 case NETXTREME_E_P5_VF_HV2:
482 case E_P7_VF:
483 return true;
484 default:
485 return false;
486 }
487 }
488
489 void
bnxt_set_flags_by_devid(struct bnxt_softc * softc)490 bnxt_set_flags_by_devid(struct bnxt_softc *softc)
491 {
492 uint16_t device_id = pci_get_device(softc->dev);
493
494 if (bnxt_is_vf_device(device_id))
495 softc->flags |= BNXT_FLAG_VF;
496
497 switch (device_id) {
498 case BCM57402_NPAR:
499 case BCM57404_NPAR:
500 case BCM57406_NPAR:
501 case BCM57407_NPAR:
502 case BCM57412_NPAR1:
503 case BCM57412_NPAR2:
504 case BCM57414_NPAR1:
505 case BCM57414_NPAR2:
506 case BCM57416_NPAR1:
507 case BCM57416_NPAR2:
508 softc->flags |= BNXT_FLAG_NPAR;
509 break;
510 }
511 }
512
513 #define PCI_SUBSYSTEM_ID 0x2e
514 static struct workqueue_struct *bnxt_pf_wq;
515
516 extern void bnxt_destroy_irq(struct bnxt_softc *softc);
517
518 /*
519 * Device Methods
520 */
521
522 static void *
bnxt_register(device_t dev)523 bnxt_register(device_t dev)
524 {
525 uint16_t vendor_id = pci_get_vendor(dev);
526 uint16_t device_id = pci_get_device(dev);
527
528 if (vendor_id != BROADCOM_VENDOR_ID)
529 return NULL;
530
531 if (!sctx_initialized) {
532 printf("if_bnxt: %s\n", drv_version_msg);
533 sctx_initialized = true;
534 }
535
536 bnxt_init_sctx_variants(device_id);
537
538 if (bnxt_is_vf_device(device_id))
539 return &bnxt_sctx_vf_init;
540
541 return &bnxt_sctx_pf_init;
542 }
543
544 static void
bnxt_nq_alloc(struct bnxt_softc * softc,int nqsets)545 bnxt_nq_alloc(struct bnxt_softc *softc, int nqsets)
546 {
547
548 if (softc->nq_rings)
549 return;
550
551 softc->nq_rings = malloc(sizeof(struct bnxt_cp_ring) * nqsets,
552 M_DEVBUF, M_NOWAIT | M_ZERO);
553 }
554
555 static void
bnxt_nq_free(struct bnxt_softc * softc)556 bnxt_nq_free(struct bnxt_softc *softc)
557 {
558
559 if (softc->nq_rings)
560 free(softc->nq_rings, M_DEVBUF);
561 softc->nq_rings = NULL;
562 }
563
564
565 static void
bnxt_set_db_mask(struct bnxt_softc * bp,struct bnxt_ring * db,u32 ring_type)566 bnxt_set_db_mask(struct bnxt_softc *bp, struct bnxt_ring *db,
567 u32 ring_type)
568 {
569 if (BNXT_CHIP_P7(bp)) {
570 db->db_epoch_mask = db->db_ring_mask + 1;
571 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
572
573 }
574 }
575
576 /*
577 * Device Dependent Configuration Functions
578 */
579
580 /* Soft queue setup and teardown */
/*
 * ifdi_tx_queues_alloc: allocate per-TX-queue-set software state.  iflib
 * has already allocated the ring memory; vaddrs/paddrs hold ntxqs rings
 * per set (index 0 = completion, 1 = TX, 2 = NQ).  For each set this
 * allocates a stats DMA buffer and initializes the completion ring, the
 * TX ring, and (on P5+ chips) the notification queue.  Returns 0 on
 * success or ENOMEM, unwinding all partial allocations on failure.
 */
static int
bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct bnxt_softc *softc;
	int i;
	int rc;

	softc = iflib_get_softc(ctx);

	/* NQ rings exist only on P5+ chips; bnxt_nq_alloc() is idempotent. */
	if (BNXT_CHIP_P5_PLUS(softc)) {
		bnxt_nq_alloc(softc, ntxqsets);
		if (!softc->nq_rings) {
			device_printf(iflib_get_dev(ctx),
			    "unable to allocate NQ rings\n");
			rc = ENOMEM;
			goto nq_alloc_fail;
		}
	}

	softc->tx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * ntxqsets,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!softc->tx_cp_rings) {
		device_printf(iflib_get_dev(ctx),
		    "unable to allocate TX completion rings\n");
		rc = ENOMEM;
		goto cp_alloc_fail;
	}
	softc->tx_rings = malloc(sizeof(struct bnxt_ring) * ntxqsets,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!softc->tx_rings) {
		device_printf(iflib_get_dev(ctx),
		    "unable to allocate TX rings\n");
		rc = ENOMEM;
		goto ring_alloc_fail;
	}

	/* Per-queue hardware stats buffers, DMA-visible to the firmware. */
	for (i=0; i < ntxqsets; i++) {
		rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
		    &softc->tx_stats[i], 0);
		if (rc)
			goto dma_alloc_fail;
		bus_dmamap_sync(softc->tx_stats[i].idi_tag, softc->tx_stats[i].idi_map,
		    BUS_DMASYNC_PREREAD);
	}

	for (i = 0; i < ntxqsets; i++) {
		/* Set up the completion ring */
		softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
		softc->tx_cp_rings[i].ring.phys_id =
		    (uint16_t)HWRM_NA_SIGNATURE;
		softc->tx_cp_rings[i].ring.softc = softc;
		softc->tx_cp_rings[i].ring.idx = i;
		/*
		 * Ring ids 1..2*nrxqsets are taken by the RX completion and
		 * aggregation rings (see bnxt_rx_queues_alloc), so TX ids
		 * start after them.
		 */
		softc->tx_cp_rings[i].ring.id =
		    (softc->scctx->isc_nrxqsets * 2) + 1 + i;
		/*
		 * P5+ chips share one legacy doorbell region; older chips
		 * space doorbells 0x80 bytes apart per ring id.
		 */
		softc->tx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
		    softc->legacy_db_size: softc->tx_cp_rings[i].ring.id * 0x80;
		softc->tx_cp_rings[i].ring.ring_size =
		    softc->scctx->isc_ntxd[0];
		softc->tx_cp_rings[i].ring.db_ring_mask =
		    softc->tx_cp_rings[i].ring.ring_size - 1;
		softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs];
		softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs];


		/* Set up the TX ring */
		softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
		softc->tx_rings[i].softc = softc;
		softc->tx_rings[i].idx = i;
		softc->tx_rings[i].id =
		    (softc->scctx->isc_nrxqsets * 2) + 1 + i;
		softc->tx_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
		    softc->legacy_db_size : softc->tx_rings[i].id * 0x80;
		softc->tx_rings[i].ring_size = softc->scctx->isc_ntxd[1];
		softc->tx_rings[i].db_ring_mask = softc->tx_rings[i].ring_size - 1;
		softc->tx_rings[i].vaddr = vaddrs[i * ntxqs + 1];
		softc->tx_rings[i].paddr = paddrs[i * ntxqs + 1];

		bnxt_create_tx_sysctls(softc, i);

		if (BNXT_CHIP_P5_PLUS(softc)) {
			/* Set up the Notification ring (NQ) */
			softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
			softc->nq_rings[i].ring.phys_id =
			    (uint16_t)HWRM_NA_SIGNATURE;
			softc->nq_rings[i].ring.softc = softc;
			softc->nq_rings[i].ring.idx = i;
			softc->nq_rings[i].ring.id = i;
			softc->nq_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
			    softc->legacy_db_size : softc->nq_rings[i].ring.id * 0x80;
			softc->nq_rings[i].ring.ring_size = softc->scctx->isc_ntxd[2];
			softc->nq_rings[i].ring.db_ring_mask = softc->nq_rings[i].ring.ring_size - 1;
			softc->nq_rings[i].ring.vaddr = vaddrs[i * ntxqs + 2];
			softc->nq_rings[i].ring.paddr = paddrs[i * ntxqs + 2];
			softc->nq_rings[i].type = Q_TYPE_TX;
		}
	}

	softc->ntxqsets = ntxqsets;
	/*
	 * rc is 0 here, left by the last successful iflib_dma_alloc()
	 * (assumes ntxqsets >= 1 — TODO confirm iflib guarantees this).
	 */
	return rc;

dma_alloc_fail:
	/* Unwind the stats buffers allocated before the failing iteration. */
	for (i = i - 1; i >= 0; i--)
		iflib_dma_free(&softc->tx_stats[i]);
	free(softc->tx_rings, M_DEVBUF);
ring_alloc_fail:
	free(softc->tx_cp_rings, M_DEVBUF);
cp_alloc_fail:
	bnxt_nq_free(softc);
nq_alloc_fail:
	return rc;
}
693
694 static void
bnxt_queues_free(if_ctx_t ctx)695 bnxt_queues_free(if_ctx_t ctx)
696 {
697 struct bnxt_softc *softc = iflib_get_softc(ctx);
698 int i;
699
700 // Free TX queues
701 for (i=0; i<softc->ntxqsets; i++)
702 iflib_dma_free(&softc->tx_stats[i]);
703 free(softc->tx_rings, M_DEVBUF);
704 softc->tx_rings = NULL;
705 free(softc->tx_cp_rings, M_DEVBUF);
706 softc->tx_cp_rings = NULL;
707 softc->ntxqsets = 0;
708
709 // Free RX queues
710 for (i=0; i<softc->nrxqsets; i++)
711 iflib_dma_free(&softc->rx_stats[i]);
712 iflib_dma_free(&softc->hw_tx_port_stats);
713 iflib_dma_free(&softc->hw_rx_port_stats);
714 iflib_dma_free(&softc->hw_tx_port_stats_ext);
715 iflib_dma_free(&softc->hw_rx_port_stats_ext);
716 free(softc->grp_info, M_DEVBUF);
717 free(softc->ag_rings, M_DEVBUF);
718 free(softc->rx_rings, M_DEVBUF);
719 free(softc->rx_cp_rings, M_DEVBUF);
720 bnxt_nq_free(softc);
721 }
722
723 static int
bnxt_rx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int nrxqs,int nrxqsets)724 bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
725 uint64_t *paddrs, int nrxqs, int nrxqsets)
726 {
727 struct bnxt_softc *softc;
728 int i;
729 int rc;
730
731 softc = iflib_get_softc(ctx);
732
733 softc->rx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * nrxqsets,
734 M_DEVBUF, M_NOWAIT | M_ZERO);
735 if (!softc->rx_cp_rings) {
736 device_printf(iflib_get_dev(ctx),
737 "unable to allocate RX completion rings\n");
738 rc = ENOMEM;
739 goto cp_alloc_fail;
740 }
741 softc->rx_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
742 M_DEVBUF, M_NOWAIT | M_ZERO);
743 if (!softc->rx_rings) {
744 device_printf(iflib_get_dev(ctx),
745 "unable to allocate RX rings\n");
746 rc = ENOMEM;
747 goto ring_alloc_fail;
748 }
749 softc->ag_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
750 M_DEVBUF, M_NOWAIT | M_ZERO);
751 if (!softc->ag_rings) {
752 device_printf(iflib_get_dev(ctx),
753 "unable to allocate aggregation rings\n");
754 rc = ENOMEM;
755 goto ag_alloc_fail;
756 }
757 softc->grp_info = malloc(sizeof(struct bnxt_grp_info) * nrxqsets,
758 M_DEVBUF, M_NOWAIT | M_ZERO);
759 if (!softc->grp_info) {
760 device_printf(iflib_get_dev(ctx),
761 "unable to allocate ring groups\n");
762 rc = ENOMEM;
763 goto grp_alloc_fail;
764 }
765
766 for (i=0; i < nrxqsets; i++) {
767 rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
768 &softc->rx_stats[i], 0);
769 if (rc)
770 goto hw_stats_alloc_fail;
771 bus_dmamap_sync(softc->rx_stats[i].idi_tag, softc->rx_stats[i].idi_map,
772 BUS_DMASYNC_PREREAD);
773 }
774
775 /*
776 * Additional 512 bytes for future expansion.
777 * To prevent corruption when loaded with newer firmwares with added counters.
778 * This can be deleted when there will be no further additions of counters.
779 */
780 #define BNXT_PORT_STAT_PADDING 512
781
782 rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats) + BNXT_PORT_STAT_PADDING,
783 &softc->hw_rx_port_stats, 0);
784 if (rc)
785 goto hw_port_rx_stats_alloc_fail;
786
787 bus_dmamap_sync(softc->hw_rx_port_stats.idi_tag,
788 softc->hw_rx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
789
790
791 rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats) + BNXT_PORT_STAT_PADDING,
792 &softc->hw_tx_port_stats, 0);
793 if (rc)
794 goto hw_port_tx_stats_alloc_fail;
795
796 bus_dmamap_sync(softc->hw_tx_port_stats.idi_tag,
797 softc->hw_tx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
798
799 softc->rx_port_stats = (void *) softc->hw_rx_port_stats.idi_vaddr;
800 softc->tx_port_stats = (void *) softc->hw_tx_port_stats.idi_vaddr;
801
802
803 rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats_ext),
804 &softc->hw_rx_port_stats_ext, 0);
805 if (rc)
806 goto hw_port_rx_stats_ext_alloc_fail;
807
808 bus_dmamap_sync(softc->hw_rx_port_stats_ext.idi_tag,
809 softc->hw_rx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
810
811 rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats_ext),
812 &softc->hw_tx_port_stats_ext, 0);
813 if (rc)
814 goto hw_port_tx_stats_ext_alloc_fail;
815
816 bus_dmamap_sync(softc->hw_tx_port_stats_ext.idi_tag,
817 softc->hw_tx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
818
819 softc->rx_port_stats_ext = (void *) softc->hw_rx_port_stats_ext.idi_vaddr;
820 softc->tx_port_stats_ext = (void *) softc->hw_tx_port_stats_ext.idi_vaddr;
821
822 for (i = 0; i < nrxqsets; i++) {
823 /* Allocation the completion ring */
824 softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
825 softc->rx_cp_rings[i].ring.phys_id =
826 (uint16_t)HWRM_NA_SIGNATURE;
827 softc->rx_cp_rings[i].ring.softc = softc;
828 softc->rx_cp_rings[i].ring.idx = i;
829 softc->rx_cp_rings[i].ring.id = i + 1;
830 softc->rx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
831 softc->legacy_db_size : softc->rx_cp_rings[i].ring.id * 0x80;
832 /*
833 * If this ring overflows, RX stops working.
834 */
835 softc->rx_cp_rings[i].ring.ring_size =
836 softc->scctx->isc_nrxd[0];
837 softc->rx_cp_rings[i].ring.db_ring_mask =
838 softc->rx_cp_rings[i].ring.ring_size - 1;
839
840 softc->rx_cp_rings[i].ring.vaddr = vaddrs[i * nrxqs];
841 softc->rx_cp_rings[i].ring.paddr = paddrs[i * nrxqs];
842
843 /* Allocate the RX ring */
844 softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
845 softc->rx_rings[i].softc = softc;
846 softc->rx_rings[i].idx = i;
847 softc->rx_rings[i].id = i + 1;
848 softc->rx_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
849 softc->legacy_db_size : softc->rx_rings[i].id * 0x80;
850 softc->rx_rings[i].ring_size = softc->scctx->isc_nrxd[1];
851 softc->rx_rings[i].db_ring_mask =
852 softc->rx_rings[i].ring_size -1;
853 softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1];
854 softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1];
855
856 /* Allocate the TPA start buffer */
857 softc->rx_rings[i].tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) *
858 (RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT),
859 M_DEVBUF, M_NOWAIT | M_ZERO);
860 if (softc->rx_rings[i].tpa_start == NULL) {
861 rc = -ENOMEM;
862 device_printf(softc->dev,
863 "Unable to allocate space for TPA\n");
864 goto tpa_alloc_fail;
865 }
866 /* Allocate the AG ring */
867 softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
868 softc->ag_rings[i].softc = softc;
869 softc->ag_rings[i].idx = i;
870 softc->ag_rings[i].id = nrxqsets + i + 1;
871 softc->ag_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
872 softc->legacy_db_size : softc->ag_rings[i].id * 0x80;
873 softc->ag_rings[i].ring_size = softc->scctx->isc_nrxd[2];
874 softc->ag_rings[i].db_ring_mask = softc->ag_rings[i].ring_size - 1;
875 softc->ag_rings[i].vaddr = vaddrs[i * nrxqs + 2];
876 softc->ag_rings[i].paddr = paddrs[i * nrxqs + 2];
877
878 /* Allocate the ring group */
879 softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
880 softc->grp_info[i].stats_ctx =
881 softc->rx_cp_rings[i].stats_ctx_id;
882 softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
883 softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
884 softc->grp_info[i].cp_ring_id =
885 softc->rx_cp_rings[i].ring.phys_id;
886
887 bnxt_create_rx_sysctls(softc, i);
888 }
889
890 /*
891 * When SR-IOV is enabled, avoid each VF sending PORT_QSTATS
892 * HWRM every sec with which firmware timeouts can happen
893 */
894 if (BNXT_PF(softc))
895 bnxt_create_port_stats_sysctls(softc);
896
897 /* And finally, the VNIC */
898 softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
899 softc->vnic_info.filter_id = -1;
900 softc->vnic_info.def_ring_grp = (uint16_t)HWRM_NA_SIGNATURE;
901 softc->vnic_info.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
902 softc->vnic_info.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
903 softc->vnic_info.rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
904 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
905 softc->vnic_info.mc_list_count = 0;
906 softc->vnic_info.flags = BNXT_VNIC_FLAG_DEFAULT;
907 rc = iflib_dma_alloc(ctx, BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN,
908 &softc->vnic_info.mc_list, 0);
909 if (rc)
910 goto mc_list_alloc_fail;
911
912 /* The VNIC RSS Hash Key */
913 rc = iflib_dma_alloc(ctx, HW_HASH_KEY_SIZE,
914 &softc->vnic_info.rss_hash_key_tbl, 0);
915 if (rc)
916 goto rss_hash_alloc_fail;
917 bus_dmamap_sync(softc->vnic_info.rss_hash_key_tbl.idi_tag,
918 softc->vnic_info.rss_hash_key_tbl.idi_map,
919 BUS_DMASYNC_PREWRITE);
920 memcpy(softc->vnic_info.rss_hash_key_tbl.idi_vaddr,
921 softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE);
922
923 /* Allocate the RSS tables */
924 rc = iflib_dma_alloc(ctx, HW_HASH_INDEX_SIZE * sizeof(uint16_t),
925 &softc->vnic_info.rss_grp_tbl, 0);
926 if (rc)
927 goto rss_grp_alloc_fail;
928 bus_dmamap_sync(softc->vnic_info.rss_grp_tbl.idi_tag,
929 softc->vnic_info.rss_grp_tbl.idi_map,
930 BUS_DMASYNC_PREWRITE);
931 memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
932 softc->vnic_info.rss_grp_tbl.idi_size);
933
934 softc->nrxqsets = nrxqsets;
935 return rc;
936
937 rss_grp_alloc_fail:
938 iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
939 rss_hash_alloc_fail:
940 iflib_dma_free(&softc->vnic_info.mc_list);
941 mc_list_alloc_fail:
942 for (i = i - 1; i >= 0; i--) {
943 if (softc->rx_rings[i].tpa_start)
944 free(softc->rx_rings[i].tpa_start, M_DEVBUF);
945 }
946 tpa_alloc_fail:
947 iflib_dma_free(&softc->hw_tx_port_stats_ext);
948 hw_port_tx_stats_ext_alloc_fail:
949 iflib_dma_free(&softc->hw_rx_port_stats_ext);
950 hw_port_rx_stats_ext_alloc_fail:
951 iflib_dma_free(&softc->hw_tx_port_stats);
952 hw_port_tx_stats_alloc_fail:
953 iflib_dma_free(&softc->hw_rx_port_stats);
954 hw_port_rx_stats_alloc_fail:
955 for (i=0; i < nrxqsets; i++) {
956 if (softc->rx_stats[i].idi_vaddr)
957 iflib_dma_free(&softc->rx_stats[i]);
958 }
959 hw_stats_alloc_fail:
960 free(softc->grp_info, M_DEVBUF);
961 grp_alloc_fail:
962 free(softc->ag_rings, M_DEVBUF);
963 ag_alloc_fail:
964 free(softc->rx_rings, M_DEVBUF);
965 ring_alloc_fail:
966 free(softc->rx_cp_rings, M_DEVBUF);
967 cp_alloc_fail:
968 return rc;
969 }
970
bnxt_free_hwrm_short_cmd_req(struct bnxt_softc * softc)971 static void bnxt_free_hwrm_short_cmd_req(struct bnxt_softc *softc)
972 {
973 if (softc->hwrm_short_cmd_req_addr.idi_vaddr)
974 iflib_dma_free(&softc->hwrm_short_cmd_req_addr);
975 softc->hwrm_short_cmd_req_addr.idi_vaddr = NULL;
976 }
977
bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc * softc)978 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc)
979 {
980 int rc;
981
982 rc = iflib_dma_alloc(softc->ctx, softc->hwrm_max_req_len,
983 &softc->hwrm_short_cmd_req_addr, BUS_DMA_NOWAIT);
984
985 return rc;
986 }
987
bnxt_free_ring(struct bnxt_softc * softc,struct bnxt_ring_mem_info * rmem)988 static void bnxt_free_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
989 {
990 int i;
991
992 for (i = 0; i < rmem->nr_pages; i++) {
993 if (!rmem->pg_arr[i].idi_vaddr)
994 continue;
995
996 iflib_dma_free(&rmem->pg_arr[i]);
997 rmem->pg_arr[i].idi_vaddr = NULL;
998 }
999 if (rmem->pg_tbl.idi_vaddr) {
1000 iflib_dma_free(&rmem->pg_tbl);
1001 rmem->pg_tbl.idi_vaddr = NULL;
1002
1003 }
1004 if (rmem->vmem_size && *rmem->vmem) {
1005 free(*rmem->vmem, M_DEVBUF);
1006 *rmem->vmem = NULL;
1007 }
1008 }
1009
bnxt_init_ctx_mem(struct bnxt_ctx_mem_type * ctxm,void * p,int len)1010 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
1011 {
1012 u8 init_val = ctxm->init_value;
1013 u16 offset = ctxm->init_offset;
1014 u8 *p2 = p;
1015 int i;
1016
1017 if (!init_val)
1018 return;
1019 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
1020 memset(p, init_val, len);
1021 return;
1022 }
1023 for (i = 0; i < len; i += ctxm->entry_size)
1024 *(p2 + i + offset) = init_val;
1025 }
1026
/*
 * Allocate the DMA pages backing a ring described by rmem.  Rings that
 * span multiple pages (or request an indirection level) get a page
 * table first, and each data page's bus address is written into it
 * tagged with the PTU valid/next-to-last/last bits the hardware
 * expects.  Optionally allocates the host-only software ring (vmem).
 *
 * Returns 0 or -ENOMEM; partially allocated pages are left for the
 * caller to release via bnxt_free_ring().
 */
static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
{
	uint64_t valid_bit = 0;
	int i;
	int rc;

	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;

	/* Indirect layout: allocate the page table unless it already exists. */
	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl.idi_vaddr) {
		size_t pg_tbl_size = rmem->nr_pages * 8;	/* 8 bytes per PTE */

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;

		rc = iflib_dma_alloc(softc->ctx, pg_tbl_size, &rmem->pg_tbl, 0);
		if (rc)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		uint64_t extra_bits = valid_bit;
		uint64_t *ptr;

		rc = iflib_dma_alloc(softc->ctx, rmem->page_size, &rmem->pg_arr[i], 0);
		if (rc)
			return -ENOMEM;

		/* Pre-fill context pages with the firmware's init pattern. */
		if (rmem->ctx_mem)
			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i].idi_vaddr,
					  rmem->page_size);

		if (rmem->nr_pages > 1 || rmem->depth > 0) {
			/* Ring PTEs mark the final two entries specially. */
			if (i == rmem->nr_pages - 2 &&
			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;

			/* Publish the page's bus address in the page table. */
			ptr = (void *) rmem->pg_tbl.idi_vaddr;
			ptr[i] = htole64(rmem->pg_arr[i].idi_paddr | extra_bits);
		}
	}

	if (rmem->vmem_size) {
		/* Host-only software ring state. */
		*rmem->vmem = malloc(rmem->vmem_size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
	return 0;
}
1079
1080
1081 #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES \
1082 (HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP | \
1083 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ | \
1084 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ | \
1085 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC | \
1086 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
1087
bnxt_alloc_ctx_mem_blk(struct bnxt_softc * softc,struct bnxt_ctx_pg_info * ctx_pg)1088 static int bnxt_alloc_ctx_mem_blk(struct bnxt_softc *softc,
1089 struct bnxt_ctx_pg_info *ctx_pg)
1090 {
1091 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
1092
1093 rmem->page_size = BNXT_PAGE_SIZE;
1094 rmem->pg_arr = ctx_pg->ctx_arr;
1095 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
1096 if (rmem->depth >= 1)
1097 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
1098
1099 return bnxt_alloc_ring(softc, rmem);
1100 }
1101
/*
 * Allocate page tables for one context page group of mem_size bytes.
 * Small groups use a flat or single-level layout; groups that exceed
 * MAX_CTX_PAGES, or that request depth > 1, use a two-level layout in
 * which a top-level table's pages serve as the PTE pages of
 * second-level bnxt_ctx_pg_info blocks.  Returns 0 or negative errno;
 * partially built tables are left for bnxt_free_ctx_pg_tbls().
 */
static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
	int rc;

	if (!mem_size)
		return -EINVAL;

	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
		ctx_pg->nr_pages = 0;
		return -EINVAL;
	}
	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
		int nr_tbls, i;

		rmem->depth = 2;
		/* Pointer array addressing the second-level page groups
		 * (sizeof(ctx_pg) is the pointer size, which is correct here).
		 */
		ctx_pg->ctx_pg_tbl = kzalloc(MAX_CTX_PAGES * sizeof(ctx_pg),
					     GFP_KERNEL);
		if (!ctx_pg->ctx_pg_tbl)
			return -ENOMEM;
		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
		rmem->nr_pages = nr_tbls;
		/* Allocate the top-level table pages. */
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
		if (rc)
			return rc;
		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
			if (!pg_tbl)
				return -ENOMEM;
			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
			/* NOTE: rmem now aliases the second-level group. */
			rmem = &pg_tbl->ring_mem;
			/* The i'th top-level page is this group's PTE page. */
			memcpy(&rmem->pg_tbl, &ctx_pg->ctx_arr[i], sizeof(struct iflib_dma_info));
			rmem->depth = 1;
			rmem->nr_pages = MAX_CTX_PAGES;
			rmem->ctx_mem = ctxm;
			if (i == (nr_tbls - 1)) {
				/* Final table only covers the remainder. */
				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;

				if (rem)
					rmem->nr_pages = rem;
			}
			rc = bnxt_alloc_ctx_mem_blk(softc, pg_tbl);
			if (rc)
				break;
		}
	} else {
		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
		if (rmem->nr_pages > 1 || depth)
			rmem->depth = 1;
		rmem->ctx_mem = ctxm;
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
	}
	return rc;
}
1161
bnxt_free_ctx_pg_tbls(struct bnxt_softc * softc,struct bnxt_ctx_pg_info * ctx_pg)1162 static void bnxt_free_ctx_pg_tbls(struct bnxt_softc *softc,
1163 struct bnxt_ctx_pg_info *ctx_pg)
1164 {
1165 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
1166
1167 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
1168 ctx_pg->ctx_pg_tbl) {
1169 int i, nr_tbls = rmem->nr_pages;
1170
1171 for (i = 0; i < nr_tbls; i++) {
1172 struct bnxt_ctx_pg_info *pg_tbl;
1173 struct bnxt_ring_mem_info *rmem2;
1174
1175 pg_tbl = ctx_pg->ctx_pg_tbl[i];
1176 if (!pg_tbl)
1177 continue;
1178 rmem2 = &pg_tbl->ring_mem;
1179 bnxt_free_ring(softc, rmem2);
1180 ctx_pg->ctx_arr[i].idi_vaddr = NULL;
1181 free(pg_tbl , M_DEVBUF);
1182 ctx_pg->ctx_pg_tbl[i] = NULL;
1183 }
1184 kfree(ctx_pg->ctx_pg_tbl);
1185 ctx_pg->ctx_pg_tbl = NULL;
1186 }
1187 bnxt_free_ring(softc, rmem);
1188 ctx_pg->nr_pages = 0;
1189 }
1190
/*
 * Size and allocate the page tables for one context memory type.  The
 * requested entry count is rounded to the type's multiple and clamped
 * into [min_entries, max_entries]; one page group is built per
 * instance in the type's instance bitmap.  Marks the type valid on
 * success.  Returns 0 or negative errno.
 */
static int bnxt_setup_ctxm_pg_tbls(struct bnxt_softc *softc,
				   struct bnxt_ctx_mem_type *ctxm, u32 entries,
				   u8 pg_lvl)
{
	struct bnxt_ctx_pg_info *pg = ctxm->pg_info;
	int inst, ninst = 1, rc = 0;
	u32 mem_size;

	if (ctxm->entry_size == 0 || pg == NULL)
		return -EINVAL;
	if (ctxm->instance_bmap)
		ninst = hweight32(ctxm->instance_bmap);
	if (ctxm->entry_multiple)
		entries = roundup(entries, ctxm->entry_multiple);
	entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
	mem_size = entries * ctxm->entry_size;

	for (inst = 0; inst < ninst && rc == 0; inst++) {
		pg[inst].entries = entries;
		rc = bnxt_alloc_ctx_pg_tbls(softc, &pg[inst], mem_size, pg_lvl,
		    ctxm->init_value ? ctxm : NULL);
	}
	if (rc == 0)
		ctxm->mem_valid = 1;
	return rc;
}
1216
bnxt_free_ctx_mem(struct bnxt_softc * softc)1217 static void bnxt_free_ctx_mem(struct bnxt_softc *softc)
1218 {
1219 struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
1220 u16 type;
1221
1222 if (!ctx)
1223 return;
1224
1225 for (type = 0; type < BNXT_CTX_MAX; type++) {
1226 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
1227 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
1228 int i, n = 1;
1229
1230 if (!ctx_pg)
1231 continue;
1232 if (ctxm->instance_bmap)
1233 n = hweight32(ctxm->instance_bmap);
1234 for (i = 0; i < n; i++)
1235 bnxt_free_ctx_pg_tbls(softc, &ctx_pg[i]);
1236
1237 kfree(ctx_pg);
1238 ctxm->pg_info = NULL;
1239 }
1240
1241 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
1242 kfree(ctx);
1243 softc->ctx_mem = NULL;
1244 }
1245
/*
 * Push the backing-store configuration to firmware using the V2
 * interface, one HWRM call per valid context type.  On the PF, the
 * trace-buffer context types are sized and allocated first;
 * failures there are logged and skipped (best-effort), not fatal.
 */
static int
bnxt_backing_store_cfg_v2(struct bnxt_softc *softc, u32 ena)
{
	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
	struct bnxt_ctx_mem_type *ctxm;
	u16 last_type = BNXT_CTX_INV;
	int rc = 0;
	u16 type;

	if (BNXT_PF(softc)) {
		for (type = BNXT_CTX_SRT_TRACE; type <= BNXT_CTX_ROCE_HWRM_TRACE; type++) {
			ctxm = &ctx->ctx_arr[type];
			if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID))
				continue;
			rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
			if (rc) {
				device_printf(softc->dev, "Unable to setup ctx page for type:0x%x.\n", type);
				/* Trace buffers are best-effort; keep going. */
				rc = 0;
				continue;
			}
			/* TODO: trace buffer support is disabled for now; needs revisit. */
			//bnxt_bs_trace_init(bp, ctxm, type - BNXT_CTX_SRT_TRACE);
			last_type = type;
		}
	}

	/* Pick the type that must carry the "last" flag toward firmware. */
	if (last_type == BNXT_CTX_INV) {
		if (!ena)
			return 0;
		else if (ena & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM)
			last_type = BNXT_CTX_MAX - 1;
		else
			last_type = BNXT_CTX_L2_MAX - 1;
	}
	ctx->ctx_arr[last_type].last = 1;

	/* Send one backing-store-cfg call per valid context type. */
	for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
		ctxm = &ctx->ctx_arr[type];

		if (!ctxm->mem_valid)
			continue;
		rc = bnxt_hwrm_func_backing_store_cfg_v2(softc, ctxm, ctxm->last);
		if (rc)
			return rc;
	}
	return 0;
}
1293
/*
 * Allocate and configure firmware backing-store context memory for
 * P5+ chips.  Queries the firmware's requirements, sizes each context
 * type (QP, SRQ, CQ, VNIC, STAT, the slow/fast-path TQM rings, and —
 * when RoCE is capable — MR/AV and TIM), allocates the page tables,
 * and pushes the configuration to firmware.  Returns 0 when backing
 * store is not needed or already initialized.
 */
static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
{
	struct bnxt_ctx_pg_info *ctx_pg;
	struct bnxt_ctx_mem_type *ctxm;
	struct bnxt_ctx_mem_info *ctx;
	u32 l2_qps, qp1_qps, max_qps;
	u32 ena, entries_sp, entries;
	u32 srqs, max_srqs, min;
	u32 num_mr, num_ah;
	u32 extra_srqs = 0;
	u32 extra_qps = 0;
	u8 pg_lvl = 1;
	int i, rc;

	/* Only P5+ chips use host-backed context memory. */
	if (!BNXT_CHIP_P5_PLUS(softc))
		return 0;

	rc = bnxt_hwrm_func_backing_store_qcaps(softc);
	if (rc) {
		device_printf(softc->dev, "Failed querying context mem capability, rc = %d.\n",
			      rc);
		return rc;
	}
	ctx = softc->ctx_mem;
	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
		return 0;

	ena = 0;
	/* VFs skip sizing of the legacy context types. */
	if (BNXT_VF(softc))
		goto skip_legacy;

	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
	l2_qps = ctxm->qp_l2_entries;
	qp1_qps = ctxm->qp_qp1_entries;
	max_qps = ctxm->max_entries;
	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
	srqs = ctxm->srq_l2_entries;
	max_srqs = ctxm->max_entries;
	if (softc->flags & BNXT_FLAG_ROCE_CAP) {
		/* RoCE needs extra QPs/SRQs and deeper page tables. */
		pg_lvl = 2;
		if (BNXT_SW_RES_LMT(softc)) {
			extra_qps = max_qps - l2_qps - qp1_qps;
			extra_srqs = max_srqs - srqs;
		} else {
			extra_qps = min_t(uint32_t, 65536, max_qps - l2_qps - qp1_qps);
			extra_srqs = min_t(uint32_t, 8192, max_srqs - srqs);
		}
	}

	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps,
				     pg_lvl);
	if (rc)
		return rc;

	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, srqs + extra_srqs, pg_lvl);
	if (rc)
		return rc;

	/* CQs: two per extra QP on top of the L2 CQ count. */
	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->cq_l2_entries +
				     extra_qps * 2, pg_lvl);
	if (rc)
		return rc;

	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
	if (rc)
		return rc;

	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
	if (rc)
		return rc;

	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
		goto skip_rdma;

	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
	ctx_pg = ctxm->pg_info;
	/* 128K extra is needed to accommodate static AH context
	 * allocation by f/w.
	 */
	num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
	num_ah = min_t(u32, num_mr, 1024 * 128);
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, num_mr + num_ah, 2);
	if (rc)
		return rc;
	ctx_pg->entries = num_mr + num_ah;
	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV;
	/* When firmware counts MR/AV in units, pack both counts into
	 * one field: MR units in the high 16 bits, AH units in the low.
	 */
	if (ctxm->mrav_num_entries_units)
		ctx_pg->entries =
			((num_mr / ctxm->mrav_num_entries_units) << 16) |
			(num_ah / ctxm->mrav_num_entries_units);

	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps, 1);
	if (rc)
		return rc;
	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM;

skip_rdma:
	/* Slow-path TQM entries. */
	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
	min = ctxm->min_entries;
	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
		     2 * (extra_qps + qp1_qps) + min;
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries_sp, 2);
	if (rc)
		return rc;

	/* Fast-path TQM entries. */
	ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
	entries = l2_qps + 2 * (extra_qps + qp1_qps);
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries, 2);
	if (rc)
		return rc;
	/* Enable one TQM ring bit per ring; rings past the legacy limit
	 * all share the RING8 enable bit.
	 */
	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
		if (i < BNXT_MAX_TQM_LEGACY_RINGS)
			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
		else
			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8;
	}
	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;

skip_legacy:
	/* P7 uses the V2 config interface when firmware supports it. */
	if (BNXT_CHIP_P7(softc)) {
		if (softc->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
			rc = bnxt_backing_store_cfg_v2(softc, ena);
	} else {
		rc = bnxt_hwrm_func_backing_store_cfg(softc, ena);
	}
	if (rc) {
		device_printf(softc->dev, "Failed configuring context mem, rc = %d.\n",
			      rc);
		return rc;
	}
	ctx->flags |= BNXT_CTX_FLAG_INITED;

	return 0;
}
1434
/*
 * If we update the index, a write barrier is needed after the write to ensure
 * the completion ring has space before the RX/TX ring does. Since we can't
 * make the RX and AG doorbells covered by the same barrier without remapping
 * MSI-X vectors, we create the barrier over the entire doorbell bar.
 * TODO: Remap the MSI-X vectors to allow a barrier to only cover the doorbells
 * for a single ring group.
 *
 * A barrier of just the size of the write is used to ensure the ordering
 * remains correct and no writes are lost.
 */
1446
/*
 * Legacy (pre-P5) RX producer doorbell: publish the new producer index
 * to the ring's doorbell register.
 */
static void bnxt_cuw_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	/* Order prior ring-memory writes before the doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole32(RX_DOORBELL_KEY_RX | idx));
}
1457
/*
 * Legacy (pre-P5) TX producer doorbell: publish the new producer index
 * to the ring's doorbell register.
 */
static void bnxt_cuw_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	/* Order prior ring-memory writes before the doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole32(TX_DOORBELL_KEY_TX | idx));
}
1468
/*
 * Legacy (pre-P5) completion-queue doorbell: acknowledge the consumer
 * index (when one has been seen) and optionally unmask the interrupt.
 * A cons value of UINT32_MAX means no completion has been consumed yet.
 */
static void bnxt_cuw_db_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole32(CMPL_DOORBELL_KEY_CMPL |
	    ((cpr->cons == UINT32_MAX) ? 0 :
	    (cpr->cons | CMPL_DOORBELL_IDX_VALID)) |
	    ((enable_irq) ? 0 : CMPL_DOORBELL_MASK)));
	/* Barrier over the whole doorbell BAR; see the block comment above. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1484
/*
 * P5 (thor) RX producer doorbell: 64-bit write carrying the ring's
 * physical id and the new producer index.
 */
static void bnxt_thor_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	/* Order prior ring-memory writes before the doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole64((DBR_PATH_L2 | DBR_TYPE_SRQ | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT)));
}
1496
/*
 * P5 (thor) TX producer doorbell: 64-bit write carrying the ring's
 * physical id and the new producer index.
 */
static void bnxt_thor_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	/* Order prior ring-memory writes before the doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole64((DBR_PATH_L2 | DBR_TYPE_SQ | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT)));
}
1508
/*
 * P5 (thor) RX completion doorbell: acknowledge the next consumer
 * index; ARMALL re-enables the interrupt.  A cons of UINT32_MAX means
 * no completion has been consumed yet.
 */
static void bnxt_thor_db_rx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	if (cons == UINT32_MAX)
		cons = 0;
	else
		cons = RING_NEXT(&cpr->ring, cons);

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
	    DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
	    ((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	/* Order ring updates before ringing the doorbell. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	/* Barrier over the whole doorbell BAR; see the block comment above. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1534
/*
 * P5 (thor) TX completion doorbell: acknowledge the current consumer
 * index; ARMALL re-enables the interrupt.
 */
static void bnxt_thor_db_tx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
	    DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
	    ((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	/* Order ring updates before ringing the doorbell. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	/* Barrier over the whole doorbell BAR; see the block comment above. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1555
/*
 * P5 (thor) notification-queue doorbell: acknowledge the consumer
 * index; NQ_ARM re-enables the interrupt.
 */
static void bnxt_thor_db_nq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
	    DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
	    ((enable_irq) ? DBC_DBC_TYPE_NQ_ARM: DBC_DBC_TYPE_NQ);

	/* Order ring updates before ringing the doorbell. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	/* Barrier over the whole doorbell BAR; see the block comment above. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1576
/*
 * P7 (thor2) RX producer doorbell: 64-bit write carrying the ring's
 * physical id, the producer index, and the per-index epoch bit.
 * Out-of-range indexes are logged and dropped rather than rung.
 */
static void
bnxt_thor2_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
	uint64_t db_val;

	if (idx >= ring->ring_size) {
		device_printf(ring->softc->dev, "%s: BRCM DBG: idx: %d crossed boundary\n", __func__, idx);
		return;
	}

	db_val = ((DBR_PATH_L2 | DBR_TYPE_SRQ | DBR_VALID | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT));

	/* Add the PI index */
	db_val |= DB_RING_IDX(ring, idx, ring->epoch_arr[idx]);

	/* Order prior ring-memory writes before the doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole64(db_val));
}
1600
/*
 * P7 (thor2) TX producer doorbell: 64-bit write carrying the ring's
 * physical id, the producer index, and the per-index epoch bit.
 * Out-of-range indexes are logged and dropped rather than rung.
 */
static void
bnxt_thor2_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
	uint64_t db_val;

	if (idx >= ring->ring_size) {
		device_printf(ring->softc->dev, "%s: BRCM DBG: idx: %d crossed boundary\n", __func__, idx);
		return;
	}

	db_val = ((DBR_PATH_L2 | DBR_TYPE_SQ | DBR_VALID | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT));

	/* Add the PI index */
	db_val |= DB_RING_IDX(ring, idx, ring->epoch_arr[idx]);

	/* Order prior ring-memory writes before the doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole64(db_val));
}
1624
/*
 * P7 (thor2) RX completion doorbell: acknowledge the raw consumer
 * index with the toggle bit; ARMALL re-enables the interrupt.  A raw
 * cons of UINT32_MAX means no completion has been consumed yet.
 */
static void
bnxt_thor2_db_rx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	u64 db_msg = { 0 };
	uint32_t cons = cpr->raw_cons;
	uint32_t toggle = 0;

	if (cons == UINT32_MAX)
		cons = 0;

	/* The toggle bit is only meaningful when re-arming. */
	if (enable_irq == true)
		toggle = cpr->toggle;

	db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
	    DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);

	if (enable_irq)
		db_msg |= DBR_TYPE_CQ_ARMALL;
	else
		db_msg |= DBR_TYPE_CQ;

	/* Order ring updates before ringing the doorbell. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	/* Barrier over the whole doorbell BAR; see the block comment above. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1655
/*
 * P7 (thor2) TX completion doorbell: acknowledge the raw consumer
 * index with the toggle bit; ARMALL re-enables the interrupt.
 */
static void
bnxt_thor2_db_tx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	u64 db_msg = { 0 };
	uint32_t cons = cpr->raw_cons;
	uint32_t toggle = 0;

	/* The toggle bit is only meaningful when re-arming. */
	if (enable_irq == true)
		toggle = cpr->toggle;

	db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
	    DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);

	if (enable_irq)
		db_msg |= DBR_TYPE_CQ_ARMALL;
	else
		db_msg |= DBR_TYPE_CQ;

	/* Order ring updates before ringing the doorbell. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	/* Barrier over the whole doorbell BAR; see the block comment above. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1683
/*
 * P7 (thor2) notification-queue doorbell: acknowledge the raw consumer
 * index with the toggle bit; NQ_ARM re-enables the interrupt, NQ_MASK
 * leaves it masked.
 */
static void
bnxt_thor2_db_nq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	u64 db_msg = { 0 };
	uint32_t cons = cpr->raw_cons;
	uint32_t toggle = 0;

	/* The toggle bit is only meaningful when re-arming. */
	if (enable_irq == true)
		toggle = cpr->toggle;

	db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
	    DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);

	if (enable_irq)
		db_msg |= DBR_TYPE_NQ_ARM;
	else
		db_msg |= DBR_TYPE_NQ_MASK;

	/* Order ring updates before ringing the doorbell. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	/* Barrier over the whole doorbell BAR; see the block comment above. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1711
/*
 * Look up a softc on the PF list, either by interface name (when
 * dev_name is non-NULL) or by PCI domain/bus/device-function tuple.
 * Returns NULL when no entry matches.
 */
struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn, char *dev_name)
{
	struct bnxt_softc_list *entry = NULL;

	SLIST_FOREACH(entry, &pf_list, next) {
		struct bnxt_softc *sc = entry->softc;

		if (dev_name != NULL) {
			/* Match on the interface name. */
			if (strncmp(dev_name, if_name(iflib_get_ifp(sc->ctx)),
			    BNXT_MAX_STR) == 0)
				return sc;
			continue;
		}
		/* Match on domain/bus/device-function. */
		if (domain == sc->domain && bus == sc->bus &&
		    dev_fn == sc->dev_fn)
			return sc;
	}

	return NULL;
}
1733
1734
bnxt_verify_asym_queues(struct bnxt_softc * softc)1735 static void bnxt_verify_asym_queues(struct bnxt_softc *softc)
1736 {
1737 uint8_t i, lltc = 0;
1738
1739 if (!softc->max_lltc)
1740 return;
1741
1742 /* Verify that lossless TX and RX queues are in the same index */
1743 for (i = 0; i < softc->max_tc; i++) {
1744 if (BNXT_LLQ(softc->tx_q_info[i].queue_profile) &&
1745 BNXT_LLQ(softc->rx_q_info[i].queue_profile))
1746 lltc++;
1747 }
1748 softc->max_lltc = min(softc->max_lltc, lltc);
1749 }
1750
bnxt_hwrm_poll(struct bnxt_softc * bp)1751 static int bnxt_hwrm_poll(struct bnxt_softc *bp)
1752 {
1753 struct hwrm_ver_get_output *resp =
1754 (void *)bp->hwrm_cmd_resp.idi_vaddr;
1755 struct hwrm_ver_get_input req = {0};
1756 int rc;
1757
1758 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET);
1759
1760 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
1761 req.hwrm_intf_min = HWRM_VERSION_MINOR;
1762 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
1763
1764 rc = _hwrm_send_message(bp, &req, sizeof(req));
1765 if (rc)
1766 return rc;
1767
1768 if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
1769 rc = -EAGAIN;
1770
1771 return rc;
1772 }
1773
/*
 * Drop the slow-path-task marker, then take rtnl.
 *
 * We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
 * set.  If the device is being closed, bnxt_close() may be holding
 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
 */
static void bnxt_rtnl_lock_sp(struct bnxt_softc *bp)
{
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_lock();
}
1784
/*
 * Re-mark the slow-path task as running, then release rtnl.
 * Counterpart of bnxt_rtnl_lock_sp().
 */
static void bnxt_rtnl_unlock_sp(struct bnxt_softc *bp)
{
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_unlock();
}
1790
/*
 * Quiesce the device after a fatal firmware error: mask interrupts
 * first, then disable the PCI device to stop further DMA.
 */
static void bnxt_fw_fatal_close(struct bnxt_softc *softc)
{
	bnxt_disable_intr(softc->ctx);
	if (pci_is_enabled(softc->pdev))
		pci_disable_device(softc->pdev);
}
1797
/*
 * Read one firmware health register.  The register descriptor encodes
 * both the access type (PCI config space, GRC window, BAR0, BAR1) and
 * the offset.  The reset-in-progress register is masked down to the
 * bits firmware actually reports.
 */
static u32 bnxt_fw_health_readl(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->regs[reg_idx];
	u32 reg_type, reg_off, val = 0;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		/* Register lives in PCI config space. */
		pci_read_config_dword(bp->pdev, reg_off, &val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		/* GRC registers are read through a pre-mapped window,
		 * then accessed like BAR0.
		 */
		reg_off = fw_health->mapped_regs[reg_idx];
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		val = readl_fbsd(bp, reg_off, 0);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		val = readl_fbsd(bp, reg_off, 2);
		break;
	}
	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
		val &= fw_health->fw_reset_inprog_reg_mask;
	return val;
}
1824
/*
 * Quiesce and tear down the device in preparation for a firmware
 * reset: stop the ULP, silence the device if firmware is in a fatal
 * state, request an iflib reset, stop the interface, unregister the
 * driver from firmware, free the per-ring interrupts, and release
 * the PCI device and context memory.
 */
static void bnxt_fw_reset_close(struct bnxt_softc *bp)
{
	int i;
	bnxt_ulp_stop(bp);
	/* When firmware is in fatal state, quiesce device and disable
	 * bus master to prevent any potential bad DMAs before freeing
	 * kernel memory.
	 */
	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
		u16 val = 0;

		/* An all-ones read means PCI config space is gone;
		 * skip the minimum post-reset wait in that case. */
		val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
		if (val == 0xffff) {
			bp->fw_reset_min_dsecs = 0;
		}
		bnxt_fw_fatal_close(bp);
	}

	iflib_request_reset(bp->ctx);
	bnxt_stop(bp->ctx);
	bnxt_hwrm_func_drv_unrgtr(bp, false);

	/* Free the per-queue interrupts in reverse allocation order;
	 * P5+ chips use NQ rings, older chips the RX completion rings. */
	for (i = bp->nrxqsets-1; i>=0; i--) {
		if (BNXT_CHIP_P5_PLUS(bp))
			iflib_irq_free(bp->ctx, &bp->nq_rings[i].irq);
		else
			iflib_irq_free(bp->ctx, &bp->rx_cp_rings[i].irq);

	}
	if (pci_is_enabled(bp->pdev))
		pci_disable_device(bp->pdev);
	pci_disable_busmaster(bp->dev);
	bnxt_free_ctx_mem(bp);
}
1859
is_bnxt_fw_ok(struct bnxt_softc * bp)1860 static bool is_bnxt_fw_ok(struct bnxt_softc *bp)
1861 {
1862 struct bnxt_fw_health *fw_health = bp->fw_health;
1863 bool no_heartbeat = false, has_reset = false;
1864 u32 val;
1865
1866 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
1867 if (val == fw_health->last_fw_heartbeat)
1868 no_heartbeat = true;
1869
1870 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
1871 if (val != fw_health->last_fw_reset_cnt)
1872 has_reset = true;
1873
1874 if (!no_heartbeat && has_reset)
1875 return true;
1876
1877 return false;
1878 }
1879
bnxt_fw_reset(struct bnxt_softc * bp)1880 void bnxt_fw_reset(struct bnxt_softc *bp)
1881 {
1882 bnxt_rtnl_lock_sp(bp);
1883 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
1884 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
1885 int tmo;
1886 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
1887 bnxt_fw_reset_close(bp);
1888
1889 if ((bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)) {
1890 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
1891 tmo = HZ / 10;
1892 } else {
1893 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
1894 tmo = bp->fw_reset_min_dsecs * HZ /10;
1895 }
1896 bnxt_queue_fw_reset_work(bp, tmo);
1897 }
1898 bnxt_rtnl_unlock_sp(bp);
1899 }
1900
bnxt_queue_fw_reset_work(struct bnxt_softc * bp,unsigned long delay)1901 static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay)
1902 {
1903 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1904 return;
1905
1906 if (BNXT_PF(bp))
1907 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1908 else
1909 schedule_delayed_work(&bp->fw_reset_task, delay);
1910 }
1911
bnxt_queue_sp_work(struct bnxt_softc * bp)1912 void bnxt_queue_sp_work(struct bnxt_softc *bp)
1913 {
1914 if (BNXT_PF(bp))
1915 queue_work(bnxt_pf_wq, &bp->sp_task);
1916 else
1917 schedule_work(&bp->sp_task);
1918 }
1919
/*
 * Execute step reg_idx of the firmware-provided host reset sequence:
 * write the pre-programmed value to the pre-programmed register
 * (location encoded in the sequence entry), then optionally delay.
 */
static void bnxt_fw_reset_writel(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
	u32 reg_type, reg_off, delay_msecs;

	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		/* Register lives in PCI config space. */
		pci_write_config_dword(bp->pdev, reg_off, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		/* Aim a GRC window at the target base, then fall through
		 * and perform the write through the window in BAR0. */
		writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4, 0, reg_off & BNXT_GRC_BASE_MASK);
		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		writel_fbsd(bp, reg_off, 0, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		writel_fbsd(bp, reg_off, 2, val);
		break;
	}
	if (delay_msecs) {
		/* Config read flushes the posted write before the
		 * firmware-mandated delay. */
		pci_read_config_dword(bp->pdev, 0, &val);
		msleep(delay_msecs);
	}
}
1950
/*
 * Trigger the actual firmware reset using whichever mechanism the
 * firmware advertised:
 *  - ERR_RECOVER_RELOAD: firmware reloads itself, nothing to do here;
 *  - host-driven (FLAGS_HOST): replay the register write sequence
 *    obtained from HWRM_ERROR_RECOVERY_QCFG;
 *  - co-CPU (FLAGS_CO_CPU): ask the KONG co-processor to reset the
 *    chip via HWRM_FW_RESET.
 * Always records the reset timestamp used later for timeout checks.
 */
static void bnxt_reset_all(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	int i, rc;

	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
		bp->fw_reset_timestamp = jiffies;
		return;
	}

	if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST) {
		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
			bnxt_fw_reset_writel(bp, i);
	} else if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) {
		struct hwrm_fw_reset_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET);
		req.target_id = htole16(HWRM_TARGET_ID_KONG);
		req.embedded_proc_type = HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
		req.selfrst_status = HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
		req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
		rc = hwrm_send_message(bp, &req, sizeof(req));

		/* NOTE(review): this logs for any rc other than -ENODEV,
		 * including rc == 0 (success).  Matches upstream behavior,
		 * but confirm whether success should be silent. */
		if (rc != -ENODEV)
			device_printf(bp->dev, "Unable to reset FW rc=%d\n", rc);
	}
	bp->fw_reset_timestamp = jiffies;
}
1979
__bnxt_alloc_fw_health(struct bnxt_softc * bp)1980 static int __bnxt_alloc_fw_health(struct bnxt_softc *bp)
1981 {
1982 if (bp->fw_health)
1983 return 0;
1984
1985 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
1986 if (!bp->fw_health)
1987 return -ENOMEM;
1988
1989 mutex_init(&bp->fw_health->lock);
1990 return 0;
1991 }
1992
bnxt_alloc_fw_health(struct bnxt_softc * bp)1993 static int bnxt_alloc_fw_health(struct bnxt_softc *bp)
1994 {
1995 int rc;
1996
1997 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
1998 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
1999 return 0;
2000
2001 rc = __bnxt_alloc_fw_health(bp);
2002 if (rc) {
2003 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
2004 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
2005 return rc;
2006 }
2007
2008 return 0;
2009 }
2010
/*
 * Point the GRC window reserved for health monitoring at the GRC base
 * (reg masked by BNXT_GRC_BASE_MASK) so the health registers become
 * readable through BAR0.
 */
static inline void __bnxt_map_fw_health_reg(struct bnxt_softc *bp, u32 reg)
{
	writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + BNXT_FW_HEALTH_WIN_MAP_OFF, 0, reg & BNXT_GRC_BASE_MASK);
}
2015
bnxt_map_fw_health_regs(struct bnxt_softc * bp)2016 static int bnxt_map_fw_health_regs(struct bnxt_softc *bp)
2017 {
2018 struct bnxt_fw_health *fw_health = bp->fw_health;
2019 u32 reg_base = 0xffffffff;
2020 int i;
2021
2022 bp->fw_health->status_reliable = false;
2023 bp->fw_health->resets_reliable = false;
2024 /* Only pre-map the monitoring GRC registers using window 3 */
2025 for (i = 0; i < 4; i++) {
2026 u32 reg = fw_health->regs[i];
2027
2028 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
2029 continue;
2030 if (reg_base == 0xffffffff)
2031 reg_base = reg & BNXT_GRC_BASE_MASK;
2032 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
2033 return -ERANGE;
2034 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
2035 }
2036 bp->fw_health->status_reliable = true;
2037 bp->fw_health->resets_reliable = true;
2038 if (reg_base == 0xffffffff)
2039 return 0;
2040
2041 __bnxt_map_fw_health_reg(bp, reg_base);
2042 return 0;
2043 }
2044
bnxt_inv_fw_health_reg(struct bnxt_softc * bp)2045 static void bnxt_inv_fw_health_reg(struct bnxt_softc *bp)
2046 {
2047 struct bnxt_fw_health *fw_health = bp->fw_health;
2048 u32 reg_type;
2049
2050 if (!fw_health)
2051 return;
2052
2053 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
2054 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
2055 fw_health->status_reliable = false;
2056
2057 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
2058 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
2059 fw_health->resets_reliable = false;
2060 }
2061
/*
 * Query firmware's error-recovery configuration
 * (HWRM_ERROR_RECOVERY_QCFG) and cache it in bp->fw_health: polling
 * and wait intervals, the locations of the health, heartbeat,
 * reset-count and reset-in-progress registers, and (for host-driven
 * recovery) the register write sequence used to reset the chip.
 * On any failure the ERROR_RECOVERY capability is cleared so the rest
 * of the driver stops relying on it.  Returns 0 or a negative errno.
 */
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	struct hwrm_error_recovery_qcfg_output *resp =
	    (void *)bp->hwrm_cmd_resp.idi_vaddr;
	struct hwrm_error_recovery_qcfg_input req = {0};
	int rc, i;

	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG);
	rc = _hwrm_send_message(bp, &req, sizeof(req));

	if (rc)
		goto err_recovery_out;
	fw_health->flags = le32toh(resp->flags);
	/* Co-CPU driven recovery requires the KONG mailbox channel. */
	if ((fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) &&
	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	fw_health->polling_dsecs = le32toh(resp->driver_polling_freq);
	fw_health->master_func_wait_dsecs =
	    le32toh(resp->master_func_wait_period);
	fw_health->normal_func_wait_dsecs =
	    le32toh(resp->normal_func_wait_period);
	fw_health->post_reset_wait_dsecs =
	    le32toh(resp->master_func_wait_period_after_reset);
	fw_health->post_reset_max_wait_dsecs =
	    le32toh(resp->max_bailout_time_after_reset);
	fw_health->regs[BNXT_FW_HEALTH_REG] =
	    le32toh(resp->fw_health_status_reg);
	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
	    le32toh(resp->fw_heartbeat_reg);
	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
	    le32toh(resp->fw_reset_cnt_reg);
	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
	    le32toh(resp->reset_inprogress_reg);
	fw_health->fw_reset_inprog_reg_mask =
	    le32toh(resp->reset_inprogress_reg_mask);
	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
	/* Bound check: 16 is presumably the fw_reset_seq_* array size —
	 * TODO(review) confirm against the struct definition. */
	if (fw_health->fw_reset_seq_cnt >= 16) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
		fw_health->fw_reset_seq_regs[i] =
		    le32toh(resp->reset_reg[i]);
		fw_health->fw_reset_seq_vals[i] =
		    le32toh(resp->reset_reg_val[i]);
		fw_health->fw_reset_seq_delay_msec[i] =
		    le32toh(resp->delay_after_reset[i]);
	}
err_recovery_out:
	/* On success, pre-map any GRC-based health registers. */
	if (!rc)
		rc = bnxt_map_fw_health_regs(bp);
	if (rc)
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
	return rc;
}
2123
/*
 * Register this driver instance with firmware.  First probe whether
 * error recovery can be supported (allocate fw_health and query the
 * recovery config); failures there are logged but non-fatal.  The
 * HWRM_FUNC_DRV_RGTR itself is mandatory; any failure is mapped to
 * -ENODEV.
 */
static int bnxt_drv_rgtr(struct bnxt_softc *bp)
{
	int rc;

	/* determine whether we can support error recovery before
	 * registering with FW
	 */
	if (bnxt_alloc_fw_health(bp)) {
		device_printf(bp->dev, "no memory for firmware error recovery\n");
	} else {
		rc = bnxt_hwrm_error_recovery_qcfg(bp);
		if (rc)
			device_printf(bp->dev, "hwrm query error recovery failure rc: %d\n",
			    rc);
	}
	/* TODO(review): revisit the registration parameters (NULL, 0,
	 * false) — inherited debug note from the original author. */
	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
	if (rc)
		return -ENODEV;
	return 0;
}
2144
bnxt_fw_reset_timeout(struct bnxt_softc * bp)2145 static bool bnxt_fw_reset_timeout(struct bnxt_softc *bp)
2146 {
2147 return time_after(jiffies, bp->fw_reset_timestamp +
2148 (bp->fw_reset_max_dsecs * HZ / 10));
2149 }
2150
/*
 * Bring the device back up after a firmware reset: re-read NVRAM info
 * (PF only), re-fetch queue configuration and capabilities,
 * re-register the driver with firmware, re-allocate context memory,
 * reserve rings (P5+), and re-assign/enable interrupts.
 * Returns 0 on success or the first fatal HWRM error.
 */
static int bnxt_open(struct bnxt_softc *bp)
{
	int rc = 0;
	/* NOTE(review): rc from this call is overwritten below without
	 * being checked — confirm whether a failure here should abort. */
	if (BNXT_PF(bp))
		rc = bnxt_hwrm_nvm_get_dev_info(bp, &bp->nvm_info->mfg_id,
		    &bp->nvm_info->device_id, &bp->nvm_info->sector_size,
		    &bp->nvm_info->size, &bp->nvm_info->reserved_size,
		    &bp->nvm_info->available_size);

	/* Get the queue config */
	rc = bnxt_hwrm_queue_qportcfg(bp, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
	if (rc) {
		device_printf(bp->dev, "reinit: hwrm qportcfg (tx) failed\n");
		return rc;
	}
	if (bp->is_asym_q) {
		/* TX and RX queue configs differ: fetch RX separately and
		 * cross-check the lossless TX/RX queue pairing. */
		rc = bnxt_hwrm_queue_qportcfg(bp,
		    HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
		if (rc) {
			device_printf(bp->dev, "re-init: hwrm qportcfg (rx) failed\n");
			return rc;
		}
		bnxt_verify_asym_queues(bp);
	} else {
		/* Symmetric queues: RX config simply mirrors TX. */
		bp->rx_max_q = bp->tx_max_q;
		memcpy(bp->rx_q_info, bp->tx_q_info, sizeof(bp->rx_q_info));
		memcpy(bp->rx_q_ids, bp->tx_q_ids, sizeof(bp->rx_q_ids));
	}
	/* Get the HW capabilities */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	/* Register the driver with the FW */
	rc = bnxt_drv_rgtr(bp);
	if (rc)
		return rc;
	if (bp->hwrm_spec_code >= 0x10803) {
		/* Newer HWRM spec: allocate backing-store context memory
		 * and prefer the resource-based ring manager if supported. */
		rc = bnxt_alloc_ctx_mem(bp);
		if (rc) {
			device_printf(bp->dev, "attach: alloc_ctx_mem failed\n");
			return rc;
		}
		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
	}

	if (BNXT_CHIP_P5_PLUS(bp))
		bnxt_hwrm_reserve_rings(bp);

	/* Get the current configuration of this function */
	rc = bnxt_hwrm_func_qcfg(bp);
	if (rc) {
		device_printf(bp->dev, "re-init: hwrm func qcfg failed\n");
		return rc;
	}

	bnxt_msix_intr_assign(bp->ctx, 0);
	bnxt_init(bp->ctx);
	bnxt_intr_enable(bp->ctx);

	if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
		/* Reset was detected rather than driver-initiated: restart
		 * the ULP here since the reset task won't do it for us. */
		if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
			bnxt_ulp_start(bp, 0);
		}
	}

	device_printf(bp->dev, "Network interface is UP and operational\n");

	return rc;
}
/*
 * Abandon an in-progress firmware reset: clear IN_FW_RESET, restart
 * the ULP with the error code (unless the state machine was in
 * POLL_VF), and return the reset state machine to idle.
 */
static void bnxt_fw_reset_abort(struct bnxt_softc *bp, int rc)
{
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
		bnxt_ulp_start(bp, rc);
	}
	bp->fw_reset_state = 0;
}
2231
/*
 * Delayed-work handler driving the firmware-reset state machine
 * (bp->fw_reset_state).  States flow:
 *   POLL_FW_DOWN -> RESET_FW -> ENABLE_DEV -> POLL_FW -> OPENING
 * Each state either advances (sometimes deliberately falling through
 * to the next case) or re-queues itself with a delay.  Must only run
 * while BNXT_STATE_IN_FW_RESET is set.
 */
static void bnxt_fw_reset_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, fw_reset_task.work);
	int rc = 0;

	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		device_printf(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
		return;
	}

	switch (bp->fw_reset_state) {
	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
		u32 val;

		/* Wait (up to the reset timeout) for firmware to report
		 * that it has shut down before resetting it. */
		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
		    !bnxt_fw_reset_timeout(bp)) {
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}

		if (!bp->fw_health->primary) {
			/* Non-primary functions wait for the primary to do
			 * the reset, then proceed to re-enable the device. */
			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;

			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
			return;
		}
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	}
	fallthrough;
	case BNXT_FW_RESET_STATE_RESET_FW:
		bnxt_reset_all(bp);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
		return;
	case BNXT_FW_RESET_STATE_ENABLE_DEV:
		bnxt_inv_fw_health_reg(bp);
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
		    !bp->fw_reset_min_dsecs) {
			u16 val;

			/* All-ones config read: config space still invalid;
			 * keep polling quickly until the timeout expires. */
			val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
			if (val == 0xffff) {
				if (bnxt_fw_reset_timeout(bp)) {
					device_printf(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
					rc = -ETIMEDOUT;
					goto fw_reset_abort;
				}
				bnxt_queue_fw_reset_work(bp, HZ / 1000);
				return;
			}
		}
		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
		if (!pci_is_enabled(bp->pdev)) {
			if (pci_enable_device(bp->pdev)) {
				device_printf(bp->dev, "Cannot re-enable PCI device\n");
				rc = -ENODEV;
				goto fw_reset_abort;
			}
		}
		pci_set_master(bp->pdev);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
		fallthrough;
	case BNXT_FW_RESET_STATE_POLL_FW:
		/* Poll VER_GET with a short command timeout until firmware
		 * answers and reports ready. */
		bp->hwrm_cmd_timeo = SHORT_HWRM_CMD_TIMEOUT;
		rc = bnxt_hwrm_poll(bp);
		if (rc) {
			if (bnxt_fw_reset_timeout(bp)) {
				device_printf(bp->dev, "Firmware reset aborted\n");
				goto fw_reset_abort_status;
			}
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}
		bp->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
		fallthrough;
	case BNXT_FW_RESET_STATE_OPENING:
		rc = bnxt_open(bp);
		if (rc) {
			device_printf(bp->dev, "bnxt_open() failed during FW reset\n");
			bnxt_fw_reset_abort(bp, rc);
			/* NOTE(review): rtnl_unlock() without a matching
			 * rtnl_lock() visible in this function — presumably
			 * held by an earlier path; confirm lock balance. */
			rtnl_unlock();
			return;
		}

		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
		    bp->fw_health->enabled) {
			/* Re-baseline the reset counter after recovery. */
			bp->fw_health->last_fw_reset_cnt =
			    bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
		}
		bp->fw_reset_state = 0;
		/* Make preceding state updates visible before clearing
		 * IN_FW_RESET. */
		smp_mb__before_atomic();
		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_ulp_start(bp, 0);
		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
		set_bit(BNXT_STATE_OPEN, &bp->state);
#ifdef PCI_IOV
		bnxt_reenable_sriov(bp);
#endif
		rtnl_unlock();
	}
	return;

fw_reset_abort_status:
	if (bp->fw_health->status_reliable ||
	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);

		device_printf(bp->dev, "fw_health_status 0x%x\n", sts);
	}
fw_reset_abort:
	rtnl_lock();
	bnxt_fw_reset_abort(bp, rc);
	rtnl_unlock();
}
2350
bnxt_force_fw_reset(struct bnxt_softc * bp)2351 static void bnxt_force_fw_reset(struct bnxt_softc *bp)
2352 {
2353 struct bnxt_fw_health *fw_health = bp->fw_health;
2354 u32 wait_dsecs;
2355
2356 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
2357 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
2358 return;
2359 bnxt_fw_reset_close(bp);
2360 wait_dsecs = fw_health->master_func_wait_dsecs;
2361 if (fw_health->primary) {
2362 if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
2363 wait_dsecs = 0;
2364 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
2365 } else {
2366 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
2367 wait_dsecs = fw_health->normal_func_wait_dsecs;
2368 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
2369 }
2370
2371 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
2372 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
2373 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
2374 }
2375
/*
 * Handle a firmware fatal exception: latch the fatal-condition state
 * bit and force an immediate firmware reset under rtnl.
 */
static void bnxt_fw_exception(struct bnxt_softc *bp)
{
	device_printf(bp->dev, "Detected firmware fatal condition, initiating reset\n");
	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
	bnxt_rtnl_lock_sp(bp);
	bnxt_force_fw_reset(bp);
	bnxt_rtnl_unlock_sp(bp);
}
2384
__bnxt_fw_recover(struct bnxt_softc * bp)2385 static void __bnxt_fw_recover(struct bnxt_softc *bp)
2386 {
2387 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
2388 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
2389 bnxt_fw_reset(bp);
2390 else
2391 bnxt_fw_exception(bp);
2392 }
2393
bnxt_devlink_health_fw_report(struct bnxt_softc * bp)2394 static void bnxt_devlink_health_fw_report(struct bnxt_softc *bp)
2395 {
2396 struct bnxt_fw_health *fw_health = bp->fw_health;
2397
2398 if (!fw_health)
2399 return;
2400
2401 if (!fw_health->fw_reporter) {
2402 __bnxt_fw_recover(bp);
2403 return;
2404 }
2405 }
2406
/*
 * Slow-path work handler: services deferred events latched in
 * bp->sp_event (firmware reset notifications, firmware exceptions,
 * and with SR-IOV, forwarded VF HWRM requests).  Brackets the work
 * with BNXT_STATE_IN_SP_TASK (see bnxt_rtnl_lock_sp()) and bails out
 * early if the device is not OPEN.
 */
static void bnxt_sp_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, sp_task);

	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	/* Make IN_SP_TASK visible before testing OPEN below. */
	smp_mb__after_atomic();
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
		return;
	}

#ifdef PCI_IOV
	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
		bnxt_hwrm_exec_fwd_req(bp);
#endif

	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
		/* Fatal/non-fatal conditions go through the health
		 * reporter; otherwise start a plain firmware reset. */
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
		    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
			bnxt_devlink_health_fw_report(bp);
		else
			bnxt_fw_reset(bp);
	}

	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
		/* Only intervene if firmware did not self-recover. */
		if (!is_bnxt_fw_ok(bp))
			bnxt_devlink_health_fw_report(bp);
	}
	/* Order the event processing above before clearing IN_SP_TASK. */
	smp_mb__before_atomic();
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
2438
/*
 * Reserve hardware rings with firmware, dispatching to the PF or VF
 * variant of the HWRM call as appropriate for this function.
 */
int
bnxt_hwrm_reserve_rings(struct bnxt_softc *softc)
{
	if (BNXT_PF(softc))
		return (bnxt_hwrm_reserve_pf_rings(softc));
	return (bnxt_hwrm_reserve_vf_rings(softc));
}
2448
2449 /* Device setup and teardown */
2450 static int
bnxt_attach_pre(if_ctx_t ctx)2451 bnxt_attach_pre(if_ctx_t ctx)
2452 {
2453 struct bnxt_softc *softc = iflib_get_softc(ctx);
2454 if_softc_ctx_t scctx;
2455 int rc = 0;
2456
2457 softc->ctx = ctx;
2458 softc->dev = iflib_get_dev(ctx);
2459 softc->media = iflib_get_media(ctx);
2460 softc->scctx = iflib_get_softc_ctx(ctx);
2461 softc->sctx = iflib_get_sctx(ctx);
2462 scctx = softc->scctx;
2463 softc->domain = pci_get_domain(softc->dev);
2464 softc->bus = pci_get_bus(softc->dev);
2465 softc->slot = pci_get_slot(softc->dev);
2466 softc->function = pci_get_function(softc->dev);
2467 softc->dev_fn = PCI_DEVFN(softc->slot, softc->function);
2468
2469 bnxt_set_flags_by_devid(softc);
2470
2471 if (bnxt_num_pfs == 0)
2472 SLIST_INIT(&pf_list);
2473 bnxt_num_pfs++;
2474 softc->list.softc = softc;
2475 SLIST_INSERT_HEAD(&pf_list, &softc->list, next);
2476
2477 pci_enable_busmaster(softc->dev);
2478
2479 if (bnxt_pci_mapping(softc)) {
2480 device_printf(softc->dev, "PCI mapping failed\n");
2481 rc = ENXIO;
2482 goto pci_map_fail;
2483 }
2484
2485 softc->pdev = kzalloc(sizeof(*softc->pdev), GFP_KERNEL);
2486 if (!softc->pdev) {
2487 device_printf(softc->dev, "pdev alloc failed\n");
2488 rc = -ENOMEM;
2489 goto free_pci_map;
2490 }
2491
2492 rc = linux_pci_attach_device(softc->dev, NULL, NULL, softc->pdev);
2493 if (rc) {
2494 device_printf(softc->dev, "Failed to attach Linux PCI device 0x%x\n", rc);
2495 goto pci_attach_fail;
2496 }
2497
2498 /* HWRM setup/init */
2499 BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev));
2500 rc = bnxt_alloc_hwrm_dma_mem(softc);
2501 if (rc)
2502 goto dma_fail;
2503
2504 /* Get firmware version and compare with driver */
2505 softc->ver_info = malloc(sizeof(struct bnxt_ver_info),
2506 M_DEVBUF, M_NOWAIT | M_ZERO);
2507 if (softc->ver_info == NULL) {
2508 rc = ENOMEM;
2509 device_printf(softc->dev,
2510 "Unable to allocate space for version info\n");
2511 goto ver_alloc_fail;
2512 }
2513 /* Default minimum required HWRM version */
2514 softc->ver_info->hwrm_min_major = HWRM_VERSION_MAJOR;
2515 softc->ver_info->hwrm_min_minor = HWRM_VERSION_MINOR;
2516 softc->ver_info->hwrm_min_update = HWRM_VERSION_UPDATE;
2517
2518 rc = bnxt_hwrm_ver_get(softc);
2519 if (rc) {
2520 device_printf(softc->dev, "attach: hwrm ver get failed\n");
2521 goto ver_fail;
2522 }
2523
2524 if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
2525 softc->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
2526 rc = bnxt_alloc_hwrm_short_cmd_req(softc);
2527 if (rc)
2528 goto hwrm_short_cmd_alloc_fail;
2529 }
2530
2531 /* Now perform a function reset */
2532 rc = bnxt_hwrm_func_reset(softc);
2533
2534 if ((softc->ver_info->chip_num == BCM57508) ||
2535 (softc->ver_info->chip_num == BCM57504) ||
2536 (softc->ver_info->chip_num == BCM57504_NPAR) ||
2537 (softc->ver_info->chip_num == BCM57502) ||
2538 (softc->ver_info->chip_num == BCM57601) ||
2539 (softc->ver_info->chip_num == BCM57602) ||
2540 (softc->ver_info->chip_num == BCM57604))
2541 softc->flags |= BNXT_FLAG_CHIP_P5;
2542
2543 if (softc->ver_info->chip_num == BCM57608)
2544 softc->flags |= BNXT_FLAG_CHIP_P7;
2545
2546 softc->flags |= BNXT_FLAG_TPA;
2547
2548 if (BNXT_CHIP_P5_PLUS(softc) && (!softc->ver_info->chip_rev) &&
2549 (!softc->ver_info->chip_metal))
2550 softc->flags &= ~BNXT_FLAG_TPA;
2551
2552 if (BNXT_CHIP_P5_PLUS(softc))
2553 softc->flags &= ~BNXT_FLAG_TPA;
2554
2555 /* Get NVRAM info */
2556 if (BNXT_PF(softc)) {
2557 if (!bnxt_pf_wq) {
2558 bnxt_pf_wq =
2559 create_singlethread_workqueue("bnxt_pf_wq");
2560 if (!bnxt_pf_wq) {
2561 device_printf(softc->dev, "Unable to create workqueue.\n");
2562 rc = -ENOMEM;
2563 goto nvm_alloc_fail;
2564 }
2565 }
2566
2567 softc->nvm_info = malloc(sizeof(struct bnxt_nvram_info),
2568 M_DEVBUF, M_NOWAIT | M_ZERO);
2569 if (softc->nvm_info == NULL) {
2570 rc = ENOMEM;
2571 device_printf(softc->dev,
2572 "Unable to allocate space for NVRAM info\n");
2573 goto nvm_alloc_fail;
2574 }
2575
2576 rc = bnxt_hwrm_nvm_get_dev_info(softc, &softc->nvm_info->mfg_id,
2577 &softc->nvm_info->device_id, &softc->nvm_info->sector_size,
2578 &softc->nvm_info->size, &softc->nvm_info->reserved_size,
2579 &softc->nvm_info->available_size);
2580 }
2581
2582 if (BNXT_CHIP_P5(softc)) {
2583 softc->db_ops.bnxt_db_tx = bnxt_thor_db_tx;
2584 softc->db_ops.bnxt_db_rx = bnxt_thor_db_rx;
2585 softc->db_ops.bnxt_db_rx_cq = bnxt_thor_db_rx_cq;
2586 softc->db_ops.bnxt_db_tx_cq = bnxt_thor_db_tx_cq;
2587 softc->db_ops.bnxt_db_nq = bnxt_thor_db_nq;
2588 } else if (BNXT_CHIP_P7(softc)) {
2589 softc->db_ops.bnxt_db_tx = bnxt_thor2_db_tx;
2590 softc->db_ops.bnxt_db_rx = bnxt_thor2_db_rx;
2591 softc->db_ops.bnxt_db_rx_cq = bnxt_thor2_db_rx_cq;
2592 softc->db_ops.bnxt_db_tx_cq = bnxt_thor2_db_tx_cq;
2593 softc->db_ops.bnxt_db_nq = bnxt_thor2_db_nq;
2594 } else {
2595 softc->db_ops.bnxt_db_tx = bnxt_cuw_db_tx;
2596 softc->db_ops.bnxt_db_rx = bnxt_cuw_db_rx;
2597 softc->db_ops.bnxt_db_rx_cq = bnxt_cuw_db_cq;
2598 softc->db_ops.bnxt_db_tx_cq = bnxt_cuw_db_cq;
2599 }
2600
2601
2602 /* Get the queue config */
2603 rc = bnxt_hwrm_queue_qportcfg(softc, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
2604 if (rc) {
2605 device_printf(softc->dev, "attach: hwrm qportcfg (tx) failed\n");
2606 goto failed;
2607 }
2608 if (softc->is_asym_q) {
2609 rc = bnxt_hwrm_queue_qportcfg(softc,
2610 HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
2611 if (rc) {
2612 device_printf(softc->dev, "attach: hwrm qportcfg (rx) failed\n");
2613 return rc;
2614 }
2615 bnxt_verify_asym_queues(softc);
2616 } else {
2617 softc->rx_max_q = softc->tx_max_q;
2618 memcpy(softc->rx_q_info, softc->tx_q_info, sizeof(softc->rx_q_info));
2619 memcpy(softc->rx_q_ids, softc->tx_q_ids, sizeof(softc->rx_q_ids));
2620 }
2621
2622 /* Get the HW capabilities */
2623 rc = bnxt_hwrm_func_qcaps(softc);
2624 if (rc)
2625 goto failed;
2626
2627 /* Inform PF to approve MAC as default VF MAC. */
2628 if (BNXT_VF(softc)) {
2629 rc = bnxt_approve_mac(softc);
2630 if (rc) {
2631 device_printf(softc->dev, "attach: bnxt_approve_mac failed\n");
2632 goto failed;
2633 }
2634 }
2635
2636 /*
2637 * Register the driver with the FW
2638 * Register the async events with the FW
2639 */
2640 rc = bnxt_drv_rgtr(softc);
2641 if (rc)
2642 goto failed;
2643
2644 if (softc->hwrm_spec_code >= 0x10803) {
2645 rc = bnxt_alloc_ctx_mem(softc);
2646 if (rc) {
2647 device_printf(softc->dev, "attach: alloc_ctx_mem failed\n");
2648 return rc;
2649 }
2650 rc = bnxt_hwrm_func_resc_qcaps(softc, true);
2651 if (!rc)
2652 softc->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
2653 }
2654
2655 /* Get the current configuration of this function */
2656 rc = bnxt_hwrm_func_qcfg(softc);
2657 if (rc) {
2658 device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
2659 goto failed;
2660 }
2661
2662 scctx->isc_txrx = &bnxt_txrx;
2663 scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP |
2664 CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO);
2665 scctx->isc_capabilities = scctx->isc_capenable =
2666 /* These are translated to hwassit bits */
2667 IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
2668 /* These are checked by iflib */
2669 IFCAP_LRO | IFCAP_VLAN_HWFILTER |
2670 /* These are part of the iflib mask */
2671 IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
2672 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
2673 /* These likely get lost... */
2674 IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;
2675
2676 if (bnxt_wol_supported(softc))
2677 scctx->isc_capabilities |= IFCAP_WOL_MAGIC;
2678 bnxt_get_wol_settings(softc);
2679 if (softc->wol)
2680 scctx->isc_capenable |= IFCAP_WOL_MAGIC;
2681
2682 /* Get the queue config */
2683 bnxt_get_wol_settings(softc);
2684
2685 if (BNXT_CHIP_P5_PLUS(softc))
2686 bnxt_hwrm_reserve_rings(softc);
2687
2688 rc = bnxt_hwrm_func_qcfg(softc);
2689 if (rc) {
2690 device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
2691 goto failed;
2692 }
2693
2694 bnxt_clear_ids(softc);
2695 if (rc)
2696 goto failed;
2697
2698 /* Now set up iflib sc */
2699 scctx->isc_tx_nsegments = 31,
2700 scctx->isc_tx_tso_segments_max = 31;
2701 scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE;
2702 scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE;
2703 scctx->isc_vectors = softc->func.max_cp_rings;
2704 scctx->isc_min_frame_size = BNXT_MIN_FRAME_SIZE;
2705 scctx->isc_txrx = &bnxt_txrx;
2706
2707 if (scctx->isc_nrxd[0] <
2708 ((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2]))
2709 device_printf(softc->dev,
2710 "WARNING: nrxd0 (%d) should be at least 4 * nrxd1 (%d) + nrxd2 (%d). Driver may be unstable\n",
2711 scctx->isc_nrxd[0], scctx->isc_nrxd[1], scctx->isc_nrxd[2]);
2712 if (scctx->isc_ntxd[0] < scctx->isc_ntxd[1] * 2)
2713 device_printf(softc->dev,
2714 "WARNING: ntxd0 (%d) should be at least 2 * ntxd1 (%d). Driver may be unstable\n",
2715 scctx->isc_ntxd[0], scctx->isc_ntxd[1]);
2716 scctx->isc_txqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_ntxd[0];
2717 scctx->isc_txqsizes[1] = sizeof(struct tx_bd_short) *
2718 scctx->isc_ntxd[1];
2719 scctx->isc_txqsizes[2] = sizeof(struct cmpl_base) * scctx->isc_ntxd[2];
2720 scctx->isc_rxqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_nrxd[0];
2721 scctx->isc_rxqsizes[1] = sizeof(struct rx_prod_pkt_bd) *
2722 scctx->isc_nrxd[1];
2723 scctx->isc_rxqsizes[2] = sizeof(struct rx_prod_pkt_bd) *
2724 scctx->isc_nrxd[2];
2725
2726 scctx->isc_nrxqsets_max = min(pci_msix_count(softc->dev)-1,
2727 softc->fn_qcfg.alloc_completion_rings - 1);
2728 scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
2729 softc->fn_qcfg.alloc_rx_rings);
2730 scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
2731 softc->fn_qcfg.alloc_vnics);
2732 scctx->isc_ntxqsets_max = min(softc->fn_qcfg.alloc_tx_rings,
2733 softc->fn_qcfg.alloc_completion_rings - scctx->isc_nrxqsets_max - 1);
2734
2735 scctx->isc_rss_table_size = HW_HASH_INDEX_SIZE;
2736 scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
2737
2738 /* iflib will map and release this bar */
2739 scctx->isc_msix_bar = pci_msix_table_bar(softc->dev);
2740
2741 /*
2742 * Default settings for HW LRO (TPA):
2743 * Disable HW LRO by default
2744 * Can be enabled after taking care of 'packet forwarding'
2745 */
2746 if (softc->flags & BNXT_FLAG_TPA) {
2747 softc->hw_lro.enable = 0;
2748 softc->hw_lro.is_mode_gro = 0;
2749 softc->hw_lro.max_agg_segs = 5; /* 2^5 = 32 segs */
2750 softc->hw_lro.max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX;
2751 softc->hw_lro.min_agg_len = 512;
2752 }
2753
2754 /* Allocate the default completion ring */
2755 softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
2756 softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
2757 softc->def_cp_ring.ring.softc = softc;
2758 softc->def_cp_ring.ring.id = 0;
2759 softc->def_cp_ring.ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
2760 softc->legacy_db_size : softc->def_cp_ring.ring.id * 0x80;
2761 softc->def_cp_ring.ring.ring_size = PAGE_SIZE /
2762 sizeof(struct cmpl_base);
2763 softc->def_cp_ring.ring.db_ring_mask = softc->def_cp_ring.ring.ring_size -1 ;
2764 rc = iflib_dma_alloc(ctx,
2765 sizeof(struct cmpl_base) * softc->def_cp_ring.ring.ring_size,
2766 &softc->def_cp_ring_mem, 0);
2767 softc->def_cp_ring.ring.vaddr = softc->def_cp_ring_mem.idi_vaddr;
2768 softc->def_cp_ring.ring.paddr = softc->def_cp_ring_mem.idi_paddr;
2769 iflib_config_task_init(ctx, &softc->def_cp_task, bnxt_def_cp_task);
2770
2771 rc = bnxt_init_sysctl_ctx(softc);
2772 if (rc)
2773 goto init_sysctl_failed;
2774 if (BNXT_PF(softc)) {
2775 rc = bnxt_create_nvram_sysctls(softc->nvm_info);
2776 if (rc)
2777 goto failed;
2778 }
2779
2780 arc4rand(softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE, 0);
2781 softc->vnic_info.rss_hash_type =
2782 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
2783 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
2784 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
2785 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
2786 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 |
2787 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
2788 rc = bnxt_create_config_sysctls_pre(softc);
2789 if (rc)
2790 goto failed;
2791
2792 rc = bnxt_create_hw_lro_sysctls(softc);
2793 if (rc)
2794 goto failed;
2795
2796 rc = bnxt_create_pause_fc_sysctls(softc);
2797 if (rc)
2798 goto failed;
2799
2800 rc = bnxt_create_dcb_sysctls(softc);
2801 if (rc)
2802 goto failed;
2803
2804 set_bit(BNXT_STATE_OPEN, &softc->state);
2805 INIT_WORK(&softc->sp_task, bnxt_sp_task);
2806 INIT_DELAYED_WORK(&softc->fw_reset_task, bnxt_fw_reset_task);
2807
2808 /* Initialize the vlan list */
2809 SLIST_INIT(&softc->vnic_info.vlan_tags);
2810 softc->vnic_info.vlan_tag_list.idi_vaddr = NULL;
2811 softc->state_bv = bit_alloc(BNXT_STATE_MAX, M_DEVBUF,
2812 M_WAITOK|M_ZERO);
2813
2814 if (BNXT_PF(softc)) {
2815 const char *part_num;
2816
2817 if (pci_get_vpd_readonly(softc->dev, "PN", &part_num) == 0)
2818 snprintf(softc->board_partno, sizeof(softc->board_partno), "%s", part_num);
2819 }
2820
2821 return (rc);
2822
2823 failed:
2824 bnxt_free_sysctl_ctx(softc);
2825 init_sysctl_failed:
2826 bnxt_hwrm_func_drv_unrgtr(softc, false);
2827 if (BNXT_PF(softc))
2828 free(softc->nvm_info, M_DEVBUF);
2829 nvm_alloc_fail:
2830 bnxt_free_hwrm_short_cmd_req(softc);
2831 hwrm_short_cmd_alloc_fail:
2832 ver_fail:
2833 free(softc->ver_info, M_DEVBUF);
2834 ver_alloc_fail:
2835 bnxt_free_hwrm_dma_mem(softc);
2836 dma_fail:
2837 BNXT_HWRM_LOCK_DESTROY(softc);
2838 if (softc->pdev)
2839 linux_pci_detach_device(softc->pdev);
2840 pci_attach_fail:
2841 kfree(softc->pdev);
2842 softc->pdev = NULL;
2843 free_pci_map:
2844 bnxt_pci_mapping_free(softc);
2845 pci_map_fail:
2846 pci_disable_busmaster(softc->dev);
2847 return (rc);
2848 }
2849
/*
 * Second-stage attach, run by iflib after queues are set up.
 * Probes the PHY, builds the media list, and initializes DCB and the
 * RoCE auxiliary device.  Returns 0 on success or an errno from the
 * PHY probe; later steps are best-effort and do not fail the attach.
 */
static int
bnxt_attach_post(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	int rc;

	softc->ifp = ifp;
	bnxt_create_config_sysctls_post(softc);

	/* Update link state etc... */
	rc = bnxt_probe_phy(softc);
	if (rc)
		goto failed;

	/* Needs to be done after probing the phy */
	bnxt_create_ver_sysctls(softc);
	ifmedia_removeall(softc->media);
	bnxt_add_media_types(softc);
	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);

	/* Frame size = MTU + Ethernet header + CRC (no VLAN headroom). */
	softc->scctx->isc_max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN +
	    ETHER_CRC_LEN;

	/* RX buffers are capped at one page; larger frames use AG buffers. */
	softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
	bnxt_dcb_init(softc);
	bnxt_rdma_aux_device_init(softc);

#if PCI_IOV
	/* SR-IOV attach */
	if (BNXT_PF(softc) && BNXT_CHIP_P5_PLUS(softc))
		bnxt_sriov_attach(softc);
#endif

failed:
	return rc;
}
2887
/*
 * iflib detach: tear down everything bnxt_attach()/bnxt_init() created,
 * roughly in reverse order of creation — aux/RDMA device, deferred work,
 * DCB, firmware state, IRQs, DMA memory, and finally the PCI mapping.
 * Always returns 0.
 */
static int
bnxt_detach(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_vlan_tag *tag;
	struct bnxt_vlan_tag *tmp;
	int i;

	bnxt_rdma_aux_device_uninit(softc);
	/* Make sure no deferred work is still running before freeing state. */
	cancel_delayed_work_sync(&softc->fw_reset_task);
	cancel_work_sync(&softc->sp_task);
	bnxt_dcb_free(softc);
	SLIST_REMOVE(&pf_list, &softc->list, bnxt_softc_list, next);
	bnxt_num_pfs--;
	bnxt_wol_config(ctx);
	bnxt_do_disable_intr(&softc->def_cp_ring);
	bnxt_free_sysctl_ctx(softc);
	bnxt_hwrm_func_reset(softc);
	bnxt_free_ctx_mem(softc);
	bnxt_clear_ids(softc);
	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
	/* We need to free() these here... */
	for (i = softc->nrxqsets-1; i>=0; i--) {
		/* P5+ chips take their per-queue interrupts on NQ rings,
		 * older chips on the RX completion rings. */
		if (BNXT_CHIP_P5_PLUS(softc))
			iflib_irq_free(ctx, &softc->nq_rings[i].irq);
		else
			iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);

	}
	iflib_dma_free(&softc->vnic_info.mc_list);
	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
	iflib_dma_free(&softc->vnic_info.rss_grp_tbl);
	if (softc->vnic_info.vlan_tag_list.idi_vaddr)
		iflib_dma_free(&softc->vnic_info.vlan_tag_list);
	/* Drain the software VLAN tag list. */
	SLIST_FOREACH_SAFE(tag, &softc->vnic_info.vlan_tags, next, tmp)
		free(tag, M_DEVBUF);
	iflib_dma_free(&softc->def_cp_ring_mem);
	for (i = 0; i < softc->nrxqsets; i++)
		free(softc->rx_rings[i].tpa_start, M_DEVBUF);
	free(softc->ver_info, M_DEVBUF);
	/* nvm_info is only allocated for PFs (see attach). */
	if (BNXT_PF(softc))
		free(softc->nvm_info, M_DEVBUF);

	bnxt_hwrm_func_drv_unrgtr(softc, false);
	bnxt_free_hwrm_dma_mem(softc);
	bnxt_free_hwrm_short_cmd_req(softc);
	BNXT_HWRM_LOCK_DESTROY(softc);

	/* Last PF gone: release the shared PF workqueue. */
	if (!bnxt_num_pfs && bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);

	if (softc->pdev)
		linux_pci_detach_device(softc->pdev);
	free(softc->state_bv, M_DEVBUF);
	pci_disable_busmaster(softc->dev);
	bnxt_pci_mapping_free(softc);

	return 0;
}
2947
2948 static void
bnxt_hwrm_resource_free(struct bnxt_softc * softc)2949 bnxt_hwrm_resource_free(struct bnxt_softc *softc)
2950 {
2951 int i, rc = 0;
2952
2953 rc = bnxt_hwrm_ring_free(softc,
2954 HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
2955 &softc->def_cp_ring.ring,
2956 (uint16_t)HWRM_NA_SIGNATURE);
2957 if (rc)
2958 goto fail;
2959
2960 for (i = 0; i < softc->ntxqsets; i++) {
2961 rc = bnxt_hwrm_ring_free(softc,
2962 HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
2963 &softc->tx_rings[i],
2964 softc->tx_cp_rings[i].ring.phys_id);
2965 if (rc)
2966 goto fail;
2967
2968 rc = bnxt_hwrm_ring_free(softc,
2969 HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
2970 &softc->tx_cp_rings[i].ring,
2971 (uint16_t)HWRM_NA_SIGNATURE);
2972 if (rc)
2973 goto fail;
2974
2975 rc = bnxt_hwrm_stat_ctx_free(softc, &softc->tx_cp_rings[i]);
2976 if (rc)
2977 goto fail;
2978 }
2979 rc = bnxt_hwrm_free_filter(softc);
2980 if (rc)
2981 goto fail;
2982
2983 rc = bnxt_hwrm_vnic_free(softc, &softc->vnic_info);
2984 if (rc)
2985 goto fail;
2986
2987 rc = bnxt_hwrm_vnic_ctx_free(softc, softc->vnic_info.rss_id);
2988 if (rc)
2989 goto fail;
2990
2991 for (i = 0; i < softc->nrxqsets; i++) {
2992 rc = bnxt_hwrm_ring_grp_free(softc, &softc->grp_info[i]);
2993 if (rc)
2994 goto fail;
2995
2996 rc = bnxt_hwrm_ring_free(softc,
2997 HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
2998 &softc->ag_rings[i],
2999 (uint16_t)HWRM_NA_SIGNATURE);
3000 if (rc)
3001 goto fail;
3002
3003 rc = bnxt_hwrm_ring_free(softc,
3004 HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
3005 &softc->rx_rings[i],
3006 softc->rx_cp_rings[i].ring.phys_id);
3007 if (rc)
3008 goto fail;
3009
3010 rc = bnxt_hwrm_ring_free(softc,
3011 HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
3012 &softc->rx_cp_rings[i].ring,
3013 (uint16_t)HWRM_NA_SIGNATURE);
3014 if (rc)
3015 goto fail;
3016
3017 if (BNXT_CHIP_P5_PLUS(softc)) {
3018 rc = bnxt_hwrm_ring_free(softc,
3019 HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
3020 &softc->nq_rings[i].ring,
3021 (uint16_t)HWRM_NA_SIGNATURE);
3022 if (rc)
3023 goto fail;
3024 }
3025
3026 rc = bnxt_hwrm_stat_ctx_free(softc, &softc->rx_cp_rings[i]);
3027 if (rc)
3028 goto fail;
3029 }
3030
3031 fail:
3032 return;
3033 }
3034
3035
/*
 * Return the function's firmware state to a clean slate.  Pre-P5 chips
 * support a single firmware function-reset command; P5+ chips instead
 * require each HWRM resource to be freed explicitly.
 */
static void
bnxt_func_reset(struct bnxt_softc *softc)
{

	if (BNXT_CHIP_P5_PLUS(softc))
		bnxt_hwrm_resource_free(softc);
	else
		bnxt_hwrm_func_reset(softc);
}
3048
/*
 * Populate the RSS indirection table, distributing entries round-robin
 * across all RX queue sets.
 */
static void
bnxt_rss_grp_tbl_init(struct bnxt_softc *softc)
{
	uint16_t *rgt = (uint16_t *) softc->vnic_info.rss_grp_tbl.idi_vaddr;
	int i, j;

	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
		if (BNXT_CHIP_P5_PLUS(softc)) {
			/*
			 * P5+ entries are (RX ring id, completion ring id)
			 * pairs, consuming two table slots per iteration.
			 * NOTE(review): this assumes HW_HASH_INDEX_SIZE is
			 * even; an odd size would write one slot past the
			 * table on the final pair — confirm against the HSI
			 * definition.
			 */
			rgt[i++] = htole16(softc->rx_rings[j].phys_id);
			rgt[i] = htole16(softc->rx_cp_rings[j].ring.phys_id);
		} else {
			/* Older chips reference the ring group id directly. */
			rgt[i] = htole16(softc->grp_info[j].grp_id);
		}
		/* Wrap back to queue 0 once all queue sets are used. */
		if (++j == softc->nrxqsets)
			j = 0;
	}
}
3066
/*
 * Query the PHY and log warnings for unqualified or disabled SFP+
 * modules.  Purely informational: prints to the console, changes no
 * driver state.
 */
static void bnxt_get_port_module_status(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
	uint8_t module_status;

	/* Refresh link_info from firmware; bail out silently on failure. */
	if (bnxt_update_link(softc, false))
		return;

	module_status = link_info->module_status;
	switch (module_status) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX:
	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN:
	case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG:
		device_printf(softc->dev, "Unqualified SFP+ module detected on port %d\n",
			      softc->pf.port_id);
		/* The vendor part number is only reported by newer firmware. */
		if (softc->hwrm_spec_code >= 0x10201) {
			device_printf(softc->dev, "Module part number %s\n",
				      resp->phy_vendor_partnumber);
		}
		/* Additional detail for the two more severe states. */
		if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX)
			device_printf(softc->dev, "TX is disabled\n");
		if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN)
			device_printf(softc->dev, "SFP+ module is shutdown\n");
	}
}
3093
/* Free the RoCE auxiliary-device container and clear the stale pointer. */
static void bnxt_aux_dev_free(struct bnxt_softc *softc)
{
	kfree(softc->aux_dev);
	softc->aux_dev = NULL;
}
3099
/*
 * Allocate the auxiliary-device container used for the RoCE driver.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct bnxt_aux_dev *bnxt_aux_dev_init(struct bnxt_softc *softc)
{
	struct bnxt_aux_dev *bnxt_adev;

	/*
	 * NOTE(review): unconditional 2 s delay before allocating —
	 * presumably a settle-time workaround for aux bus / firmware
	 * ordering; confirm whether it is still required.
	 */
	msleep(1000 * 2);
	bnxt_adev = kzalloc(sizeof(*bnxt_adev), GFP_KERNEL);
	if (!bnxt_adev)
		return ERR_PTR(-ENOMEM);

	return bnxt_adev;
}
3111
/*
 * Undo bnxt_rdma_aux_device_init(): remove the auxiliary device from the
 * bus, release its ida id, and free the container.  Safe to call when
 * init failed part-way or was skipped.
 */
static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc)
{
	struct bnxt_aux_dev *bnxt_adev = softc->aux_dev;

	/* Skip if no auxiliary device init was done. */
	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
		return;

	if (IS_ERR_OR_NULL(bnxt_adev))
		return;

	bnxt_rdma_aux_device_del(softc);

	/* id < 0 means ida_alloc() never succeeded for this device. */
	if (bnxt_adev->id >= 0)
		ida_free(&bnxt_aux_dev_ids, bnxt_adev->id);

	bnxt_aux_dev_free(softc);
}
3130
bnxt_rdma_aux_device_init(struct bnxt_softc * softc)3131 static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc)
3132 {
3133 int rc;
3134
3135 if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
3136 return;
3137
3138 softc->aux_dev = bnxt_aux_dev_init(softc);
3139 if (IS_ERR_OR_NULL(softc->aux_dev)) {
3140 device_printf(softc->dev, "Failed to init auxiliary device for ROCE\n");
3141 goto skip_aux_init;
3142 }
3143
3144 softc->aux_dev->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
3145 if (softc->aux_dev->id < 0) {
3146 device_printf(softc->dev, "ida alloc failed for ROCE auxiliary device\n");
3147 bnxt_aux_dev_free(softc);
3148 goto skip_aux_init;
3149 }
3150
3151 msleep(1000 * 2);
3152 /* If aux bus init fails, continue with netdev init. */
3153 rc = bnxt_rdma_aux_device_add(softc);
3154 if (rc) {
3155 device_printf(softc->dev, "Failed to add auxiliary device for ROCE\n");
3156 msleep(1000 * 2);
3157 ida_free(&bnxt_aux_dev_ids, softc->aux_dev->id);
3158 }
3159 device_printf(softc->dev, "%s:%d Added auxiliary device (id %d) for ROCE \n",
3160 __func__, __LINE__, softc->aux_dev->id);
3161 skip_aux_init:
3162 return;
3163 }
3164
3165 /* Device configuration */
/*
 * iflib init: bring the device from reset to a fully configured state.
 * Firmware objects are allocated in dependency order: per-RX-queue
 * stats contexts, NQ (P5+) and completion rings, RX/AG rings and ring
 * groups; then the VNIC, RSS and TPA configuration; then the TX rings.
 * On any failure everything allocated so far is torn down via
 * bnxt_func_reset() and the ids are cleared.
 */
static void
bnxt_init(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifmediareq ifmr;
	int i;
	int rc;

	/* Older chips start from a firmware function reset; P5+ must be
	 * stopped explicitly if it was already initialized. */
	if (!BNXT_CHIP_P5_PLUS(softc)) {
		rc = bnxt_hwrm_func_reset(softc);
		if (rc)
			return;
	} else if (softc->is_dev_init) {
		bnxt_stop(ctx);
	}

	softc->is_dev_init = true;
	bnxt_clear_ids(softc);

	/* P5+ does not use a standalone default completion ring here. */
	if (BNXT_CHIP_P5_PLUS(softc))
		goto skip_def_cp_ring;
	/* Allocate the default completion ring */
	softc->def_cp_ring.cons = UINT32_MAX;
	softc->def_cp_ring.v_bit = 1;
	bnxt_mark_cpr_invalid(&softc->def_cp_ring);
	rc = bnxt_hwrm_ring_alloc(softc,
	    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
	    &softc->def_cp_ring.ring);
	bnxt_set_db_mask(softc, &softc->def_cp_ring.ring,
	    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
	if (rc)
		goto fail;
skip_def_cp_ring:
	for (i = 0; i < softc->nrxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->rx_cp_rings[i],
		    softc->rx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5_PLUS(softc)) {
			/* Allocate the NQ */
			softc->nq_rings[i].cons = 0;
			softc->nq_rings[i].raw_cons = 0;
			softc->nq_rings[i].v_bit = 1;
			softc->nq_rings[i].last_idx = UINT32_MAX;
			bnxt_mark_cpr_invalid(&softc->nq_rings[i]);
			rc = bnxt_hwrm_ring_alloc(softc,
			    HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
			    &softc->nq_rings[i].ring);
			bnxt_set_db_mask(softc, &softc->nq_rings[i].ring,
			    HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ);
			if (rc)
				goto fail;

			/* Arm the NQ doorbell. */
			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
		}
		/* Allocate the completion ring */
		softc->rx_cp_rings[i].cons = UINT32_MAX;
		softc->rx_cp_rings[i].raw_cons = UINT32_MAX;
		softc->rx_cp_rings[i].v_bit = 1;
		softc->rx_cp_rings[i].last_idx = UINT32_MAX;
		softc->rx_cp_rings[i].toggle = 0;
		bnxt_mark_cpr_invalid(&softc->rx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &softc->rx_cp_rings[i].ring);
		bnxt_set_db_mask(softc, &softc->rx_cp_rings[i].ring,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5_PLUS(softc))
			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);

		/* Allocate the RX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->rx_rings[i]);
		bnxt_set_db_mask(softc, &softc->rx_rings[i],
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->rx_rings[i], 0);

		/* Allocate the AG ring */
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
		    &softc->ag_rings[i]);
		bnxt_set_db_mask(softc, &softc->ag_rings[i],
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->ag_rings[i], 0);

		/* Allocate the ring group */
		softc->grp_info[i].stats_ctx =
		    softc->rx_cp_rings[i].stats_ctx_id;
		softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
		softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
		softc->grp_info[i].cp_ring_id =
		    softc->rx_cp_rings[i].ring.phys_id;
		rc = bnxt_hwrm_ring_grp_alloc(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;
	}

	/* Inform PF to approve MAC as default VF MAC. */
	if (BNXT_VF(softc))
		bnxt_update_vf_mac(softc);

	/* And now set the default CP / NQ ring for the async */
	rc = bnxt_cfg_async_cr(softc);
	if (rc)
		goto fail;

	/* Allocate the VNIC RSS context */
	rc = bnxt_hwrm_vnic_ctx_alloc(softc, &softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	/* Allocate the vnic */
	softc->vnic_info.def_ring_grp = softc->grp_info[0].grp_id;
	softc->vnic_info.mru = softc->scctx->isc_max_frame_size;
	rc = bnxt_hwrm_vnic_alloc(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_set_hds(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_set_filter(softc);
	if (rc)
		goto fail;

	/* Fill the RSS indirection table before configuring RSS. */
	bnxt_rss_grp_tbl_init(softc);

	rc = bnxt_hwrm_rss_cfg(softc, &softc->vnic_info,
	    softc->vnic_info.rss_hash_type);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_tpa_cfg(softc);
	if (rc)
		goto fail;

	for (i = 0; i < softc->ntxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->tx_cp_rings[i],
		    softc->tx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		/* Allocate the completion ring */
		softc->tx_cp_rings[i].cons = UINT32_MAX;
		softc->tx_cp_rings[i].raw_cons = UINT32_MAX;
		softc->tx_cp_rings[i].v_bit = 1;
		softc->tx_cp_rings[i].toggle = 0;
		bnxt_mark_cpr_invalid(&softc->tx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &softc->tx_cp_rings[i].ring);
		bnxt_set_db_mask(softc, &softc->tx_cp_rings[i].ring,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5_PLUS(softc))
			softc->db_ops.bnxt_db_tx_cq(&softc->tx_cp_rings[i], 1);

		/* Allocate the TX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
		    &softc->tx_rings[i]);
		bnxt_set_db_mask(softc, &softc->tx_rings[i],
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_TX);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_tx(&softc->tx_rings[i], 0);
	}

	bnxt_do_enable_intr(&softc->def_cp_ring);
	bnxt_get_port_module_status(softc);
	bnxt_media_status(softc->ctx, &ifmr);
	bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
	return;

fail:
	/* Undo all firmware-side allocations made above. */
	bnxt_func_reset(softc);
	bnxt_clear_ids(softc);
	return;
}
3359
/*
 * iflib stop: quiesce the device — disable the default ring interrupt
 * and release/reset all firmware-side resources.
 */
static void
bnxt_stop(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);

	softc->is_dev_init = false;
	bnxt_do_disable_intr(&softc->def_cp_ring);
	/* Frees (P5+) or firmware-resets (older chips) all HWRM state. */
	bnxt_func_reset(softc);
	bnxt_clear_ids(softc);
	return;
}
3371
3372 static u_int
bnxt_copy_maddr(void * arg,struct sockaddr_dl * sdl,u_int cnt)3373 bnxt_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
3374 {
3375 uint8_t *mta = arg;
3376
3377 if (cnt == BNXT_MAX_MC_ADDRS)
3378 return (1);
3379
3380 bcopy(LLADDR(sdl), &mta[cnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
3381
3382 return (1);
3383 }
3384
/*
 * Program the multicast filter from the interface's link-layer address
 * list.  If the list exceeds BNXT_MAX_MC_ADDRS the device falls back to
 * all-multicast mode instead of exact matching.
 */
static void
bnxt_multi_set(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	uint8_t *mta;
	int mcnt;

	mta = softc->vnic_info.mc_list.idi_vaddr;
	bzero(mta, softc->vnic_info.mc_list.idi_size);
	/* bnxt_copy_maddr() keeps counting past the table size, so mcnt
	 * can exceed BNXT_MAX_MC_ADDRS and thereby signal overflow. */
	mcnt = if_foreach_llmaddr(ifp, bnxt_copy_maddr, mta);

	if (mcnt > BNXT_MAX_MC_ADDRS) {
		softc->vnic_info.rx_mask |=
		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
		bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
	} else {
		softc->vnic_info.rx_mask &=
		    ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
		/* Flush the table to memory before firmware reads it. */
		bus_dmamap_sync(softc->vnic_info.mc_list.idi_tag,
		    softc->vnic_info.mc_list.idi_map, BUS_DMASYNC_PREWRITE);
		softc->vnic_info.mc_list_count = mcnt;
		softc->vnic_info.rx_mask |=
		    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		if (bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info))
			device_printf(softc->dev,
			    "set_multi: rx_mask set failed\n");
	}
}
3414
3415 static int
bnxt_mtu_set(if_ctx_t ctx,uint32_t mtu)3416 bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu)
3417 {
3418 struct bnxt_softc *softc = iflib_get_softc(ctx);
3419
3420 if (mtu > BNXT_MAX_MTU)
3421 return EINVAL;
3422
3423 softc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3424 softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
3425 return 0;
3426 }
3427
/*
 * iflib media-status callback: refresh link state from firmware and
 * translate it into ifmediareq flags — active media (matched by
 * baudrate), duplex, and RX/TX pause.
 */
static void
bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_link_info *link_info = &softc->link_info;
	struct ifmedia_entry *next;
	uint64_t target_baudrate = bnxt_get_baudrate(link_info);
	int active_media = IFM_UNKNOWN;

	/* Synchronously refresh link_info from the PHY. */
	bnxt_update_link(softc, true);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Link down: report valid-but-inactive and stop. */
	if (!link_info->link_up)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	/*
	 * Go through the list of supported media which got prepared
	 * as part of bnxt_add_media_types() using api ifmedia_add().
	 * The first entry whose baudrate equals the negotiated rate is
	 * reported as the active media.
	 */
	LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
		if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
			active_media = next->ifm_media;
			break;
		}
	}
	ifmr->ifm_active |= active_media;

	if (link_info->flow_ctrl.rx)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (link_info->flow_ctrl.tx)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	bnxt_report_link(softc);
	return;
}
3471
3472 static int
bnxt_media_change(if_ctx_t ctx)3473 bnxt_media_change(if_ctx_t ctx)
3474 {
3475 struct bnxt_softc *softc = iflib_get_softc(ctx);
3476 struct ifmedia *ifm = iflib_get_media(ctx);
3477 struct ifmediareq ifmr;
3478 int rc;
3479 struct bnxt_link_info *link_info = &softc->link_info;
3480
3481 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3482 return EINVAL;
3483
3484 switch (IFM_SUBTYPE(ifm->ifm_media)) {
3485 case IFM_100_T:
3486 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3487 link_info->req_link_speed =
3488 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB;
3489 break;
3490 case IFM_1000_KX:
3491 case IFM_1000_SGMII:
3492 case IFM_1000_CX:
3493 case IFM_1000_SX:
3494 case IFM_1000_LX:
3495
3496 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3497
3498 if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) {
3499 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB;
3500
3501 } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_1GB) {
3502 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_1GB;
3503 link_info->force_speed2_nrz = true;
3504 }
3505
3506 break;
3507
3508 case IFM_2500_KX:
3509 case IFM_2500_T:
3510 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3511 link_info->req_link_speed =
3512 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB;
3513 break;
3514 case IFM_10G_CR1:
3515 case IFM_10G_KR:
3516 case IFM_10G_LR:
3517 case IFM_10G_SR:
3518
3519 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3520
3521 if (link_info->support_speeds & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB) {
3522 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
3523
3524 } else if (link_info->support_speeds2 & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_10GB) {
3525 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_10GB;
3526 link_info->force_speed2_nrz = true;
3527 }
3528
3529 break;
3530 case IFM_20G_KR2:
3531 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3532 link_info->req_link_speed =
3533 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB;
3534 break;
3535 case IFM_25G_CR:
3536 case IFM_25G_KR:
3537 case IFM_25G_SR:
3538 case IFM_25G_LR:
3539
3540 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3541
3542 if (link_info->support_speeds & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB) {
3543 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;
3544
3545 } else if (link_info->support_speeds2 & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_25GB) {
3546 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_25GB;
3547 link_info->force_speed2_nrz = true;
3548 }
3549
3550 break;
3551
3552 case IFM_40G_CR4:
3553 case IFM_40G_KR4:
3554 case IFM_40G_LR4:
3555 case IFM_40G_SR4:
3556 case IFM_40G_XLAUI:
3557 case IFM_40G_XLAUI_AC:
3558
3559 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3560
3561 if (link_info->support_speeds & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB) {
3562 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
3563
3564 } else if (link_info->support_speeds2 & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_40GB) {
3565 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_40GB;
3566 link_info->force_speed2_nrz = true;
3567 }
3568
3569 break;
3570
3571 case IFM_50G_CR2:
3572 case IFM_50G_KR2:
3573 case IFM_50G_KR4:
3574 case IFM_50G_SR2:
3575 case IFM_50G_LR2:
3576
3577 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3578
3579 if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) {
3580 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
3581
3582 } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB) {
3583 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB;
3584 link_info->force_speed2_nrz = true;
3585 }
3586
3587 break;
3588
3589 case IFM_50G_CP:
3590 case IFM_50G_LR:
3591 case IFM_50G_SR:
3592 case IFM_50G_KR_PAM4:
3593
3594 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3595
3596 if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) {
3597 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB;
3598 link_info->force_pam4_speed = true;
3599
3600 } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB_PAM4_56) {
3601 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB_PAM4_56;
3602 link_info->force_pam4_56_speed2 = true;
3603 }
3604
3605 break;
3606
3607 case IFM_100G_CR4:
3608 case IFM_100G_KR4:
3609 case IFM_100G_LR4:
3610 case IFM_100G_SR4:
3611 case IFM_100G_AUI4:
3612
3613 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3614
3615 if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) {
3616 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
3617
3618 } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB) {
3619 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB;
3620 link_info->force_speed2_nrz = true;
3621 }
3622
3623 break;
3624
3625 case IFM_100G_CP2:
3626 case IFM_100G_SR2:
3627 case IFM_100G_KR2_PAM4:
3628 case IFM_100G_AUI2:
3629
3630 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3631
3632 if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) {
3633 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB;
3634 link_info->force_pam4_speed = true;
3635
3636 } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_56) {
3637 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_56;
3638 link_info->force_pam4_56_speed2 = true;
3639 }
3640
3641 break;
3642
3643 case IFM_100G_KR_PAM4:
3644 case IFM_100G_CR_PAM4:
3645 case IFM_100G_DR:
3646 case IFM_100G_AUI2_AC:
3647
3648 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3649
3650 if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_112) {
3651 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_112;
3652 link_info->force_pam4_112_speed2 = true;
3653 }
3654
3655 break;
3656
3657 case IFM_200G_SR4:
3658 case IFM_200G_FR4:
3659 case IFM_200G_LR4:
3660 case IFM_200G_DR4:
3661 case IFM_200G_CR4_PAM4:
3662 case IFM_200G_KR4_PAM4:
3663
3664 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3665
3666 if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) {
3667 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
3668 link_info->force_pam4_speed = true;
3669
3670 } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_56) {
3671 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_56;
3672 link_info->force_pam4_56_speed2 = true;
3673 }
3674
3675 break;
3676
3677 case IFM_200G_AUI4:
3678
3679 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3680
3681 if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_112) {
3682 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_112;
3683 link_info->force_pam4_112_speed2 = true;
3684 }
3685
3686 break;
3687
3688 case IFM_400G_FR8:
3689 case IFM_400G_LR8:
3690 case IFM_400G_AUI8:
3691 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3692
3693 if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_56) {
3694 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_56;
3695 link_info->force_pam4_56_speed2 = true;
3696 }
3697
3698 break;
3699
3700 case IFM_400G_AUI8_AC:
3701 case IFM_400G_DR4:
3702 link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
3703
3704 if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_112) {
3705 link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112;
3706 link_info->force_pam4_112_speed2 = true;
3707 }
3708
3709 break;
3710
3711 case IFM_1000_T:
3712 link_info->advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
3713 link_info->autoneg |= BNXT_AUTONEG_SPEED;
3714 break;
3715 case IFM_10G_T:
3716 link_info->advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
3717 link_info->autoneg |= BNXT_AUTONEG_SPEED;
3718 break;
3719 default:
3720 device_printf(softc->dev,
3721 "Unsupported media type! Using auto\n");
3722 /* Fall-through */
3723 case IFM_AUTO:
3724 // Auto
3725 link_info->autoneg |= BNXT_AUTONEG_SPEED;
3726 break;
3727 }
3728
3729 rc = bnxt_hwrm_set_link_setting(softc, true, true, true);
3730 bnxt_media_status(softc->ctx, &ifmr);
3731 return rc;
3732 }
3733
3734 static int
bnxt_promisc_set(if_ctx_t ctx,int flags)3735 bnxt_promisc_set(if_ctx_t ctx, int flags)
3736 {
3737 struct bnxt_softc *softc = iflib_get_softc(ctx);
3738 if_t ifp = iflib_get_ifp(ctx);
3739 int rc;
3740
3741 if (if_getflags(ifp) & IFF_ALLMULTI ||
3742 if_llmaddr_count(ifp) > BNXT_MAX_MC_ADDRS)
3743 softc->vnic_info.rx_mask |=
3744 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3745 else
3746 softc->vnic_info.rx_mask &=
3747 ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3748
3749 if ((if_getflags(ifp) & IFF_PROMISC) &&
3750 bnxt_promisc_ok(softc))
3751 softc->vnic_info.rx_mask |=
3752 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS |
3753 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
3754 else
3755 softc->vnic_info.rx_mask &=
3756 ~(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS |
3757 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
3758
3759 rc = bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
3760
3761 return rc;
3762 }
3763
3764 static uint64_t
bnxt_get_counter(if_ctx_t ctx,ift_counter cnt)3765 bnxt_get_counter(if_ctx_t ctx, ift_counter cnt)
3766 {
3767 if_t ifp = iflib_get_ifp(ctx);
3768
3769 if (cnt < IFCOUNTERS)
3770 return if_get_counter_default(ifp, cnt);
3771
3772 return 0;
3773 }
3774
/*
 * Deferred admin task (scheduled ~once per second by bnxt_if_timer):
 * pull port statistics from firmware and, on P5+, refresh the media
 * state after an async link-change notification.
 */
static void
bnxt_update_admin_status(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);

	/*
	 * When SR-IOV is enabled, avoid each VF sending this HWRM
	 * request every sec with which firmware timeouts can happen
	 */
	if (!BNXT_PF(softc))
		return;

	bnxt_hwrm_port_qstats(softc);

	/* Extended stats need both a P5+ chip and firmware support. */
	if (BNXT_CHIP_P5_PLUS(softc) &&
	    (softc->flags & BNXT_FLAG_FW_CAP_EXT_STATS))
		bnxt_hwrm_port_qstats_ext(softc);

	if (BNXT_CHIP_P5_PLUS(softc)) {
		struct ifmediareq ifmr;

		/* Consume a pending link-change event flag. */
		if (bit_test(softc->state_bv, BNXT_STATE_LINK_CHANGE)) {
			bit_clear(softc->state_bv, BNXT_STATE_LINK_CHANGE);
			bnxt_media_status(softc->ctx, &ifmr);
		}
	}

	return;
}
3804
3805 static void
bnxt_if_timer(if_ctx_t ctx,uint16_t qid)3806 bnxt_if_timer(if_ctx_t ctx, uint16_t qid)
3807 {
3808
3809 struct bnxt_softc *softc = iflib_get_softc(ctx);
3810 uint64_t ticks_now = ticks;
3811
3812 /* Schedule bnxt_update_admin_status() once per sec */
3813 if (ticks_now - softc->admin_ticks >= hz) {
3814 softc->admin_ticks = ticks_now;
3815 iflib_admin_intr_deferred(ctx);
3816 }
3817
3818 return;
3819 }
3820
3821 static void inline
bnxt_do_enable_intr(struct bnxt_cp_ring * cpr)3822 bnxt_do_enable_intr(struct bnxt_cp_ring *cpr)
3823 {
3824 struct bnxt_softc *softc = cpr->ring.softc;
3825
3826
3827 if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
3828 return;
3829
3830 if (BNXT_CHIP_P5_PLUS(softc))
3831 softc->db_ops.bnxt_db_nq(cpr, 1);
3832 else
3833 softc->db_ops.bnxt_db_rx_cq(cpr, 1);
3834 }
3835
3836 static void inline
bnxt_do_disable_intr(struct bnxt_cp_ring * cpr)3837 bnxt_do_disable_intr(struct bnxt_cp_ring *cpr)
3838 {
3839 struct bnxt_softc *softc = cpr->ring.softc;
3840
3841 if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
3842 return;
3843
3844 if (BNXT_CHIP_P5_PLUS(softc))
3845 softc->db_ops.bnxt_db_nq(cpr, 0);
3846 else
3847 softc->db_ops.bnxt_db_rx_cq(cpr, 0);
3848 }
3849
3850 /* Enable all interrupts */
3851 static void
bnxt_intr_enable(if_ctx_t ctx)3852 bnxt_intr_enable(if_ctx_t ctx)
3853 {
3854 struct bnxt_softc *softc = iflib_get_softc(ctx);
3855 int i;
3856
3857 bnxt_do_enable_intr(&softc->def_cp_ring);
3858 for (i = 0; i < softc->nrxqsets; i++)
3859 if (BNXT_CHIP_P5_PLUS(softc))
3860 softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
3861 else
3862 softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);
3863
3864 return;
3865 }
3866
3867 /* Enable interrupt for a single queue */
3868 static int
bnxt_tx_queue_intr_enable(if_ctx_t ctx,uint16_t qid)3869 bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
3870 {
3871 struct bnxt_softc *softc = iflib_get_softc(ctx);
3872
3873 if (BNXT_CHIP_P5_PLUS(softc))
3874 softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
3875 else
3876 softc->db_ops.bnxt_db_rx_cq(&softc->tx_cp_rings[qid], 1);
3877
3878 return 0;
3879 }
3880
3881 static void
bnxt_process_cmd_cmpl(struct bnxt_softc * softc,hwrm_cmpl_t * cmd_cmpl)3882 bnxt_process_cmd_cmpl(struct bnxt_softc *softc, hwrm_cmpl_t *cmd_cmpl)
3883 {
3884 device_printf(softc->dev, "cmd sequence number %d\n",
3885 cmd_cmpl->sequence_id);
3886 return;
3887 }
3888
/*
 * Dispatch one non-CQ notification entry from the NQ: HWRM command
 * completions, firmware async events, and (with PCI_IOV) HWRM requests
 * forwarded from VFs.
 */
static void
bnxt_process_async_msg(struct bnxt_cp_ring *cpr, tx_cmpl_t *cmpl)
{
	struct bnxt_softc *softc = cpr->ring.softc;
	uint16_t type = cmpl->flags_type & TX_CMPL_TYPE_MASK;
#ifdef PCI_IOV
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl;
	uint16_t vf_id;
#endif

	switch (type) {
	case HWRM_CMPL_TYPE_HWRM_DONE:
		bnxt_process_cmd_cmpl(softc, (hwrm_cmpl_t *)cmpl);
		break;
	case HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT:
		bnxt_handle_async_event(softc, (cmpl_base_t *) cmpl);
		break;
#ifdef PCI_IOV
	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		/* Ignore requests from function ids outside our VF range. */
		if ((vf_id < softc->pf.first_vf_id) ||
		    (vf_id >= softc->pf.first_vf_id + softc->pf.active_vfs))
			return;

		/* Mark the VF pending and defer the forward to the sp task. */
		set_bit(vf_id - softc->pf.first_vf_id, softc->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &softc->sp_event);
		bnxt_queue_sp_work(softc);
		break;
#endif
	default:
		device_printf(softc->dev, "%s:%d Unhandled async message %x\n",
		    __FUNCTION__, __LINE__, type);
		break;
	}
}
3925
/*
 * Drain the notification queue for queue set 'nqid'.  Async/HWRM entries
 * are dispatched immediately; CQ-notification entries only update the
 * per-CQ toggle value.  Consumer index, raw consumer count, and valid
 * bit are committed back to the ring state once at the end.
 */
void
process_nq(struct bnxt_softc *softc, uint16_t nqid)
{
	struct bnxt_cp_ring *cpr = &softc->nq_rings[nqid];
	nq_cn_t *cmp = (nq_cn_t *) cpr->ring.vaddr;
	struct bnxt_cp_ring *tx_cpr = &softc->tx_cp_rings[nqid];
	struct bnxt_cp_ring *rx_cpr = &softc->rx_cp_rings[nqid];
	bool v_bit = cpr->v_bit;
	uint32_t cons = cpr->cons;
	uint32_t raw_cons = cpr->raw_cons;
	uint16_t nq_type, nqe_cnt = 0;

	while (1) {
		/* Stop at the first entry hardware has not yet written. */
		if (!NQ_VALID(&cmp[cons], v_bit)) {
			goto done;
		}

		nq_type = NQ_CN_TYPE_MASK & cmp[cons].type;

		if (NQE_CN_TYPE(nq_type) != NQ_CN_TYPE_CQ_NOTIFICATION) {
			bnxt_process_async_msg(cpr, (tx_cmpl_t *)&cmp[cons]);
		} else {
			/*
			 * Record the entry's toggle value on both CQ rings;
			 * presumably consumed when ringing the CQ doorbells
			 * — confirm against db_ops implementation.
			 */
			tx_cpr->toggle = NQE_CN_TOGGLE(cmp[cons].type);
			rx_cpr->toggle = NQE_CN_TOGGLE(cmp[cons].type);
		}

		/* Advance the consumer, flipping v_bit on ring wrap. */
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		raw_cons++;
		nqe_cnt++;
	}
done:
	/* Publish updated consumer state only if entries were consumed. */
	if (nqe_cnt) {
		cpr->cons = cons;
		cpr->raw_cons = raw_cons;
		cpr->v_bit = v_bit;
	}
}
3963
3964 static int
bnxt_rx_queue_intr_enable(if_ctx_t ctx,uint16_t qid)3965 bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
3966 {
3967 struct bnxt_softc *softc = iflib_get_softc(ctx);
3968
3969 if (BNXT_CHIP_P5_PLUS(softc)) {
3970 process_nq(softc, qid);
3971 softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
3972 }
3973 softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[qid], 1);
3974 return 0;
3975 }
3976
3977 /* Disable all interrupts */
3978 static void
bnxt_disable_intr(if_ctx_t ctx)3979 bnxt_disable_intr(if_ctx_t ctx)
3980 {
3981 struct bnxt_softc *softc = iflib_get_softc(ctx);
3982 int i;
3983
3984 /*
3985 * NOTE: These TX interrupts should never get enabled, so don't
3986 * update the index
3987 */
3988 for (i = 0; i < softc->nrxqsets; i++)
3989 if (BNXT_CHIP_P5_PLUS(softc))
3990 softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 0);
3991 else
3992 softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 0);
3993
3994
3995 return;
3996 }
3997
3998 static int
bnxt_msix_intr_assign(if_ctx_t ctx,int msix)3999 bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
4000 {
4001 struct bnxt_softc *softc = iflib_get_softc(ctx);
4002 struct bnxt_cp_ring *ring;
4003 struct if_irq *irq;
4004 uint16_t id;
4005 int rc;
4006 int i;
4007 char irq_name[16];
4008
4009 if (BNXT_CHIP_P5_PLUS(softc))
4010 goto skip_default_cp;
4011
4012 rc = iflib_irq_alloc_generic(ctx, &softc->def_cp_ring.irq,
4013 softc->def_cp_ring.ring.id + 1, IFLIB_INTR_ADMIN,
4014 bnxt_handle_def_cp, softc, 0, "def_cp");
4015 if (rc) {
4016 device_printf(iflib_get_dev(ctx),
4017 "Failed to register default completion ring handler\n");
4018 return rc;
4019 }
4020
4021 skip_default_cp:
4022 for (i=0; i<softc->scctx->isc_nrxqsets; i++) {
4023 if (BNXT_CHIP_P5_PLUS(softc)) {
4024 irq = &softc->nq_rings[i].irq;
4025 id = softc->nq_rings[i].ring.id;
4026 ring = &softc->nq_rings[i];
4027 } else {
4028 irq = &softc->rx_cp_rings[i].irq;
4029 id = softc->rx_cp_rings[i].ring.id ;
4030 ring = &softc->rx_cp_rings[i];
4031 }
4032 snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
4033 rc = iflib_irq_alloc_generic(ctx, irq, id + 1, IFLIB_INTR_RX,
4034 bnxt_handle_isr, ring, i, irq_name);
4035 if (rc) {
4036 device_printf(iflib_get_dev(ctx),
4037 "Failed to register RX completion ring handler\n");
4038 i--;
4039 goto fail;
4040 }
4041 }
4042
4043 for (i=0; i<softc->scctx->isc_ntxqsets; i++)
4044 iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, "tx_cp");
4045
4046 return rc;
4047
4048 fail:
4049 for (; i>=0; i--)
4050 iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
4051 iflib_irq_free(ctx, &softc->def_cp_ring.irq);
4052 return rc;
4053 }
4054
4055 /*
4056 * We're explicitly allowing duplicates here. They will need to be
4057 * removed as many times as they are added.
4058 */
4059 static void
bnxt_vlan_register(if_ctx_t ctx,uint16_t vtag)4060 bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag)
4061 {
4062 struct bnxt_softc *softc = iflib_get_softc(ctx);
4063 struct bnxt_vlan_tag *new_tag;
4064
4065 new_tag = malloc(sizeof(struct bnxt_vlan_tag), M_DEVBUF, M_NOWAIT);
4066 if (new_tag == NULL)
4067 return;
4068 new_tag->tag = vtag;
4069 new_tag->filter_id = -1;
4070 SLIST_INSERT_HEAD(&softc->vnic_info.vlan_tags, new_tag, next);
4071 };
4072
4073 static void
bnxt_vlan_unregister(if_ctx_t ctx,uint16_t vtag)4074 bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
4075 {
4076 struct bnxt_softc *softc = iflib_get_softc(ctx);
4077 struct bnxt_vlan_tag *vlan_tag;
4078
4079 SLIST_FOREACH(vlan_tag, &softc->vnic_info.vlan_tags, next) {
4080 if (vlan_tag->tag == vtag) {
4081 SLIST_REMOVE(&softc->vnic_info.vlan_tags, vlan_tag,
4082 bnxt_vlan_tag, next);
4083 free(vlan_tag, M_DEVBUF);
4084 break;
4085 }
4086 }
4087 }
4088
4089 static int
bnxt_wol_config(if_ctx_t ctx)4090 bnxt_wol_config(if_ctx_t ctx)
4091 {
4092 struct bnxt_softc *softc = iflib_get_softc(ctx);
4093 if_t ifp = iflib_get_ifp(ctx);
4094
4095 if (!softc)
4096 return -EBUSY;
4097
4098 if (!bnxt_wol_supported(softc))
4099 return -ENOTSUP;
4100
4101 if (if_getcapenable(ifp) & IFCAP_WOL_MAGIC) {
4102 if (!softc->wol) {
4103 if (bnxt_hwrm_alloc_wol_fltr(softc))
4104 return -EBUSY;
4105 softc->wol = 1;
4106 }
4107 } else {
4108 if (softc->wol) {
4109 if (bnxt_hwrm_free_wol_fltr(softc))
4110 return -EBUSY;
4111 softc->wol = 0;
4112 }
4113 }
4114
4115 return 0;
4116 }
4117
4118 static bool
bnxt_if_needs_restart(if_ctx_t ctx __unused,enum iflib_restart_event event)4119 bnxt_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
4120 {
4121 switch (event) {
4122 case IFLIB_RESTART_VLAN_CONFIG:
4123 default:
4124 return (false);
4125 }
4126 }
4127
4128 static int
bnxt_shutdown(if_ctx_t ctx)4129 bnxt_shutdown(if_ctx_t ctx)
4130 {
4131 bnxt_wol_config(ctx);
4132 return 0;
4133 }
4134
4135 static int
bnxt_suspend(if_ctx_t ctx)4136 bnxt_suspend(if_ctx_t ctx)
4137 {
4138 bnxt_wol_config(ctx);
4139 return 0;
4140 }
4141
4142 static int
bnxt_resume(if_ctx_t ctx)4143 bnxt_resume(if_ctx_t ctx)
4144 {
4145 struct bnxt_softc *softc = iflib_get_softc(ctx);
4146
4147 bnxt_get_wol_settings(softc);
4148 return 0;
4149 }
4150
/*
 * Driver-private ioctl handler (SIOCGPRIVATE_0, privileged).  Copies a
 * struct bnxt_ioctl_data request in from user space, dispatches on
 * hdr.type to the matching HWRM NVM/firmware operation, and copies the
 * result back out.  Every case follows the same pattern: on HWRM failure
 * only hdr.rc is copied out; on success the whole request structure
 * (with output fields filled in) is copied back.
 */
static int
bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifreq *ifr = (struct ifreq *)data;
	struct bnxt_ioctl_header *ioh;
	size_t iol;
	int rc = ENOTSUP;
	struct bnxt_ioctl_data iod_storage, *iod = &iod_storage;

	switch (command) {
	case SIOCGPRIVATE_0:
		if ((rc = priv_check(curthread, PRIV_DRIVER)) != 0)
			goto exit;

		/* ioh is a user-space pointer; only access it via copyin/out. */
		ioh = ifr_buffer_get_buffer(ifr);
		iol = ifr_buffer_get_length(ifr);
		if (iol > sizeof(iod_storage))
			return (EINVAL);

		if ((rc = copyin(ioh, iod, iol)) != 0)
			goto exit;

		switch (iod->hdr.type) {
		case BNXT_HWRM_NVM_FIND_DIR_ENTRY:
		{
			struct bnxt_ioctl_hwrm_nvm_find_dir_entry *find =
			    &iod->find;

			rc = bnxt_hwrm_nvm_find_dir_entry(softc, find->type,
			    &find->ordinal, find->ext, &find->index,
			    find->use_index, find->search_opt,
			    &find->data_length, &find->item_length,
			    &find->fw_ver);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_READ:
		{
			struct bnxt_ioctl_hwrm_nvm_read *rd = &iod->read;
			struct iflib_dma_info dma_data;
			size_t offset;
			size_t remain;
			size_t csize;

			/*
			 * Some HWRM versions can't read more than 0x8000 bytes
			 */
			rc = iflib_dma_alloc(softc->ctx,
			    min(rd->length, 0x8000), &dma_data, BUS_DMA_NOWAIT);
			if (rc)
				break;
			/* Read the NVM item in chunks of at most 0x8000 bytes. */
			for (remain = rd->length, offset = 0;
			    remain && offset < rd->length; offset += 0x8000) {
				csize = min(remain, 0x8000);
				rc = bnxt_hwrm_nvm_read(softc, rd->index,
				    rd->offset + offset, csize, &dma_data);
				if (rc) {
					iod->hdr.rc = rc;
					rc = copyout(&iod->hdr.rc, &ioh->rc,
					    sizeof(ioh->rc));
					break;
				} else {
					/* Copy this chunk straight to the user buffer. */
					rc = copyout(dma_data.idi_vaddr,
					    rd->data + offset, csize);
					iod->hdr.rc = rc;
				}
				remain -= csize;
			}
			if (rc == 0)
				rc = copyout(iod, ioh, iol);

			iflib_dma_free(&dma_data);
			goto exit;
		}
		case BNXT_HWRM_FW_RESET:
		{
			struct bnxt_ioctl_hwrm_fw_reset *rst =
			    &iod->reset;

			rc = bnxt_hwrm_fw_reset(softc, rst->processor,
			    &rst->selfreset);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_FW_QSTATUS:
		{
			struct bnxt_ioctl_hwrm_fw_qstatus *qstat =
			    &iod->status;

			rc = bnxt_hwrm_fw_qstatus(softc, qstat->processor,
			    &qstat->selfreset);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_WRITE:
		{
			struct bnxt_ioctl_hwrm_nvm_write *wr =
			    &iod->write;

			rc = bnxt_hwrm_nvm_write(softc, wr->data, true,
			    wr->type, wr->ordinal, wr->ext, wr->attr,
			    wr->option, wr->data_length, wr->keep,
			    &wr->item_length, &wr->index);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_ERASE_DIR_ENTRY:
		{
			struct bnxt_ioctl_hwrm_nvm_erase_dir_entry *erase =
			    &iod->erase;

			rc = bnxt_hwrm_nvm_erase_dir_entry(softc, erase->index);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_GET_DIR_INFO:
		{
			struct bnxt_ioctl_hwrm_nvm_get_dir_info *info =
			    &iod->dir_info;

			rc = bnxt_hwrm_nvm_get_dir_info(softc, &info->entries,
			    &info->entry_length);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_GET_DIR_ENTRIES:
		{
			struct bnxt_ioctl_hwrm_nvm_get_dir_entries *get =
			    &iod->dir_entries;
			struct iflib_dma_info dma_data;

			rc = iflib_dma_alloc(softc->ctx, get->max_size,
			    &dma_data, BUS_DMA_NOWAIT);
			if (rc)
				break;
			rc = bnxt_hwrm_nvm_get_dir_entries(softc, &get->entries,
			    &get->entry_length, &dma_data);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				/* Directory table goes to the user buffer, header separately. */
				rc = copyout(dma_data.idi_vaddr, get->data,
				    get->entry_length * get->entries);
				iod->hdr.rc = rc;
				if (rc == 0)
					rc = copyout(iod, ioh, iol);
			}
			iflib_dma_free(&dma_data);

			goto exit;
		}
		case BNXT_HWRM_NVM_VERIFY_UPDATE:
		{
			struct bnxt_ioctl_hwrm_nvm_verify_update *vrfy =
			    &iod->verify;

			rc = bnxt_hwrm_nvm_verify_update(softc, vrfy->type,
			    vrfy->ordinal, vrfy->ext);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_INSTALL_UPDATE:
		{
			struct bnxt_ioctl_hwrm_nvm_install_update *inst =
			    &iod->install;

			rc = bnxt_hwrm_nvm_install_update(softc,
			    inst->install_type, &inst->installed_items,
			    &inst->result, &inst->problem_item,
			    &inst->reset_required);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_MODIFY:
		{
			struct bnxt_ioctl_hwrm_nvm_modify *mod = &iod->modify;

			rc = bnxt_hwrm_nvm_modify(softc, mod->index,
			    mod->offset, mod->data, true, mod->length);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_FW_GET_TIME:
		{
			struct bnxt_ioctl_hwrm_fw_get_time *gtm =
			    &iod->get_time;

			rc = bnxt_hwrm_fw_get_time(softc, &gtm->year,
			    &gtm->month, &gtm->day, &gtm->hour, &gtm->minute,
			    &gtm->second, &gtm->millisecond, &gtm->zone);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_FW_SET_TIME:
		{
			struct bnxt_ioctl_hwrm_fw_set_time *stm =
			    &iod->set_time;

			rc = bnxt_hwrm_fw_set_time(softc, stm->year,
			    stm->month, stm->day, stm->hour, stm->minute,
			    stm->second, stm->millisecond, stm->zone);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		}
		break;
	}

exit:
	return rc;
}
4453
4454 static int
bnxt_i2c_req(if_ctx_t ctx,struct ifi2creq * i2c)4455 bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c)
4456 {
4457 struct bnxt_softc *softc = iflib_get_softc(ctx);
4458 uint8_t *data = i2c->data;
4459 int rc;
4460
4461 /* No point in going further if phy status indicates
4462 * module is not inserted or if it is powered down or
4463 * if it is of type 10GBase-T
4464 */
4465 if (softc->link_info.module_status >
4466 HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG)
4467 return -EOPNOTSUPP;
4468
4469 /* This feature is not supported in older firmware versions */
4470 if (!BNXT_CHIP_P5_PLUS(softc) ||
4471 (softc->hwrm_spec_code < 0x10202))
4472 return -EOPNOTSUPP;
4473
4474
4475 rc = bnxt_read_sfp_module_eeprom_info(softc, i2c->dev_addr, 0, 0, 0,
4476 i2c->offset, i2c->len, data);
4477
4478 return rc;
4479 }
4480
4481 /*
4482 * Support functions
4483 */
/*
 * Query PHY capabilities and current link state from firmware, then seed
 * the driver's requested-link settings (autoneg, duplex, speed) from
 * what the firmware/NVM reports.  Note the precedence at the end: PAM4
 * speeds, when present, override the NRZ request speed.
 */
static int
bnxt_probe_phy(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	int rc = 0;

	softc->phy_flags = 0;
	rc = bnxt_hwrm_phy_qcaps(softc);
	if (rc) {
		device_printf(softc->dev,
		    "Probe phy can't get phy capabilities (rc: %x)\n", rc);
		return rc;
	}

	rc = bnxt_update_link(softc, false);
	if (rc) {
		device_printf(softc->dev,
		    "Probe phy can't update link (rc: %x)\n", rc);
		return (rc);
	}

	bnxt_get_port_module_status(softc);

	/* Initialize the ethtool-style settings copy with NVM settings. */
	if (link_info->auto_mode != HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
		link_info->autoneg |= BNXT_AUTONEG_SPEED;

	link_info->req_duplex = link_info->duplex_setting;

	/* NRZ link speed */
	if (link_info->autoneg & BNXT_AUTONEG_SPEED)
		link_info->req_link_speed = link_info->auto_link_speeds;
	else
		link_info->req_link_speed = link_info->force_link_speed;

	/* PAM4 link speed (takes precedence over the NRZ value above) */
	if (link_info->auto_pam4_link_speeds)
		link_info->req_link_speed = link_info->auto_pam4_link_speeds;
	if (link_info->force_pam4_link_speed)
		link_info->req_link_speed = link_info->force_pam4_link_speed;

	return (rc);
}
4527
/*
 * Register ifmedia entries for every supported speed of the given media
 * (connector) type.  BNXT_IFMEDIA_ADD adds an entry only when the
 * corresponding speed bit is set in the supplied mask:
 * supported_NRZ_speeds / supported_pam4_speeds are the legacy masks,
 * supported_speeds2 is the Thor2 combined NRZ/PAM4-56/PAM4-112 mask.
 */
static void
add_media(struct bnxt_softc *softc, u8 media_type, u16 supported_NRZ_speeds,
    u16 supported_pam4_speeds, u16 supported_speeds2)
{

	switch (media_type) {
	case BNXT_MEDIA_CR:

		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_CP);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_CP2);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_CR4_PAM4);

		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_CR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_CR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_CR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_CR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_CR1);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_CX);
		/* thor2 nrz*/
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_CR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_CR2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_CR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_CR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_CR1);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_CX);
		/* thor2 PAM56 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_CP);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_CP2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_CR4_PAM4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_AUI8);
		/* thor2 PAM112 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_CR_PAM4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);

		break;

	case BNXT_MEDIA_LR:
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_LR);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_LR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_LR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_LR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_LR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_LR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_LR);
		/* thor2 nrz*/
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_LR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_LR2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_LR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_LR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_LR);
		/* thor2 PAM56 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_LR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_AUI2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_LR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_LR8);
		/* thor2 PAM112 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);

		break;

	case BNXT_MEDIA_SR:
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_SR);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_SR2);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_SR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_SR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_SR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_SR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_SR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_SR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_SX);
		/* thor2 nrz*/
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_SR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_SR2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_SR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_SR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_SR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_SX);
		/* thor2 PAM56 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_SR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_SR2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_SR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_AUI8);
		/* thor2 PAM112 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_DR4);
		break;

	case BNXT_MEDIA_ER:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_ER4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_AUI4);
		/* thor2 PAM56 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_LR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_AUI2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_LR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_FR8);
		/* thor2 PAM112 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);
		break;

	case BNXT_MEDIA_KR:
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_KR_PAM4);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_KR2_PAM4);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_KR4_PAM4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_KR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_KR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_KR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_KR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_KR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_20GB, IFM_20G_KR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_KR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_KX);
		break;

	case BNXT_MEDIA_AC:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_ACC);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_AOC);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_XLAUI);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_XLAUI_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_ACC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_AOC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_XLAUI);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_XLAUI_AC);
		break;

	case BNXT_MEDIA_BASECX:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_CX);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_CX);
		break;

	case BNXT_MEDIA_BASET:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_T);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_2_5GB, IFM_2500_T);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_T);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100MB, IFM_100_T);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10MB, IFM_10_T);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_T);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_T);
		break;

	case BNXT_MEDIA_BASEKX:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_KR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_2_5GB, IFM_2500_KX);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_KX);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_KR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_KX);
		break;

	case BNXT_MEDIA_BASESGMII:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_SGMII);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_SGMII);
		break;

	default:
		/* Unknown connector type: no speed-specific entries added. */
		break;

	}
	return;

}
4693
4694 static void
bnxt_add_media_types(struct bnxt_softc * softc)4695 bnxt_add_media_types(struct bnxt_softc *softc)
4696 {
4697 struct bnxt_link_info *link_info = &softc->link_info;
4698 uint16_t supported_NRZ_speeds = 0, supported_pam4_speeds = 0, supported_speeds2 = 0;
4699 uint8_t phy_type = get_phy_type(softc), media_type;
4700
4701 supported_NRZ_speeds = link_info->support_speeds;
4702 supported_speeds2 = link_info->support_speeds2;
4703 supported_pam4_speeds = link_info->support_pam4_speeds;
4704
4705 /* Auto is always supported */
4706 ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
4707
4708 if (softc->flags & BNXT_FLAG_NPAR)
4709 return;
4710
4711 switch (phy_type) {
4712 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
4713 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
4714 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
4715 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
4716 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
4717 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
4718
4719 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASECR:
4720 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR2:
4721 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR4:
4722 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASECR8:
4723
4724 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR:
4725 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR2:
4726 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASECR4:
4727
4728 media_type = BNXT_MEDIA_CR;
4729 break;
4730
4731 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
4732 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
4733 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
4734
4735 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASELR:
4736 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR2:
4737 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR4:
4738 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASELR8:
4739
4740 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR:
4741 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR2:
4742 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASELR4:
4743
4744 media_type = BNXT_MEDIA_LR;
4745 break;
4746
4747 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
4748 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
4749 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4:
4750 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
4751 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
4752 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
4753
4754 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASESR:
4755 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR2:
4756 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR4:
4757 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASESR8:
4758
4759 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR:
4760 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR2:
4761 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASESR4:
4762
4763 media_type = BNXT_MEDIA_SR;
4764 break;
4765
4766 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
4767 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
4768
4769 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASEER:
4770 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER2:
4771 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER4:
4772 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER8:
4773
4774 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER:
4775 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER2:
4776 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER4:
4777
4778 media_type = BNXT_MEDIA_ER;
4779 break;
4780
4781 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
4782 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
4783 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
4784 media_type = BNXT_MEDIA_KR;
4785 break;
4786
4787 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
4788 media_type = BNXT_MEDIA_AC;
4789 break;
4790
4791 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX:
4792 media_type = BNXT_MEDIA_BASECX;
4793 break;
4794
4795 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
4796 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
4797 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
4798 media_type = BNXT_MEDIA_BASET;
4799 break;
4800
4801 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
4802 media_type = BNXT_MEDIA_BASEKX;
4803 break;
4804
4805 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
4806 media_type = BNXT_MEDIA_BASESGMII;
4807 break;
4808
4809 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
4810 /* Only Autoneg is supported for TYPE_UNKNOWN */
4811 break;
4812
4813 default:
4814 /* Only Autoneg is supported for new phy type values */
4815 device_printf(softc->dev, "phy type %d not supported by driver\n", phy_type);
4816 break;
4817 }
4818
4819 switch (link_info->sig_mode) {
4820 case BNXT_SIG_MODE_NRZ:
4821 if (supported_NRZ_speeds != 0)
4822 add_media(softc, media_type, supported_NRZ_speeds, 0, 0);
4823 else
4824 add_media(softc, media_type, 0, 0, supported_speeds2);
4825 break;
4826 case BNXT_SIG_MODE_PAM4:
4827 if (supported_pam4_speeds != 0)
4828 add_media(softc, media_type, 0, supported_pam4_speeds, 0);
4829 else
4830 add_media(softc, media_type, 0, 0, supported_speeds2);
4831 break;
4832 case BNXT_SIG_MODE_PAM4_112:
4833 add_media(softc, media_type, 0, 0, supported_speeds2);
4834 break;
4835 }
4836
4837 return;
4838 }
4839
4840 static int
bnxt_map_bar(struct bnxt_softc * softc,struct bnxt_bar_info * bar,int bar_num,bool shareable)4841 bnxt_map_bar(struct bnxt_softc *softc, struct bnxt_bar_info *bar, int bar_num, bool shareable)
4842 {
4843 uint32_t flag;
4844
4845 if (bar->res != NULL) {
4846 device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
4847 return EDOOFUS;
4848 }
4849
4850 bar->rid = PCIR_BAR(bar_num);
4851 flag = RF_ACTIVE;
4852 if (shareable)
4853 flag |= RF_SHAREABLE;
4854
4855 if ((bar->res =
4856 bus_alloc_resource_any(softc->dev,
4857 SYS_RES_MEMORY,
4858 &bar->rid,
4859 flag)) == NULL) {
4860 device_printf(softc->dev,
4861 "PCI BAR%d mapping failure\n", bar_num);
4862 return (ENXIO);
4863 }
4864 bar->tag = rman_get_bustag(bar->res);
4865 bar->handle = rman_get_bushandle(bar->res);
4866 bar->size = rman_get_size(bar->res);
4867
4868 return 0;
4869 }
4870
4871 static int
bnxt_pci_mapping(struct bnxt_softc * softc)4872 bnxt_pci_mapping(struct bnxt_softc *softc)
4873 {
4874 int rc;
4875
4876 rc = bnxt_map_bar(softc, &softc->hwrm_bar, 0, true);
4877 if (rc)
4878 return rc;
4879
4880 rc = bnxt_map_bar(softc, &softc->doorbell_bar, 2, false);
4881
4882 return rc;
4883 }
4884
4885 static void
bnxt_pci_mapping_free(struct bnxt_softc * softc)4886 bnxt_pci_mapping_free(struct bnxt_softc *softc)
4887 {
4888 if (softc->hwrm_bar.res != NULL)
4889 bus_release_resource(softc->dev, SYS_RES_MEMORY,
4890 softc->hwrm_bar.rid, softc->hwrm_bar.res);
4891 softc->hwrm_bar.res = NULL;
4892
4893 if (softc->doorbell_bar.res != NULL)
4894 bus_release_resource(softc->dev, SYS_RES_MEMORY,
4895 softc->doorbell_bar.rid, softc->doorbell_bar.res);
4896 softc->doorbell_bar.res = NULL;
4897 }
4898
4899 static int
bnxt_update_link(struct bnxt_softc * softc,bool chng_link_state)4900 bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state)
4901 {
4902 struct bnxt_link_info *link_info = &softc->link_info;
4903 uint8_t link_up = link_info->link_up;
4904 int rc = 0;
4905
4906 rc = bnxt_hwrm_port_phy_qcfg(softc);
4907 if (rc)
4908 goto exit;
4909
4910 /* TODO: need to add more logic to report VF link */
4911 if (chng_link_state) {
4912 if (link_info->phy_link_status ==
4913 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
4914 link_info->link_up = 1;
4915 else
4916 link_info->link_up = 0;
4917 if (link_up != link_info->link_up)
4918 bnxt_report_link(softc);
4919 } else {
4920 /* always link down if not require to update link state */
4921 link_info->link_up = 0;
4922 }
4923
4924 exit:
4925 return rc;
4926 }
4927
4928 #define ETHTOOL_SPEED_1000 1000
4929 #define ETHTOOL_SPEED_10000 10000
4930 #define ETHTOOL_SPEED_20000 20000
4931 #define ETHTOOL_SPEED_25000 25000
4932 #define ETHTOOL_SPEED_40000 40000
4933 #define ETHTOOL_SPEED_50000 50000
4934 #define ETHTOOL_SPEED_100000 100000
4935 #define ETHTOOL_SPEED_200000 200000
4936 #define ETHTOOL_SPEED_UNKNOWN -1
4937
4938 static u32
bnxt_fw_to_ethtool_speed(u16 fw_link_speed)4939 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
4940 {
4941 switch (fw_link_speed) {
4942 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
4943 return ETHTOOL_SPEED_1000;
4944 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
4945 return ETHTOOL_SPEED_10000;
4946 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
4947 return ETHTOOL_SPEED_20000;
4948 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
4949 return ETHTOOL_SPEED_25000;
4950 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
4951 return ETHTOOL_SPEED_40000;
4952 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
4953 return ETHTOOL_SPEED_50000;
4954 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
4955 return ETHTOOL_SPEED_100000;
4956 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
4957 return ETHTOOL_SPEED_200000;
4958 default:
4959 return ETHTOOL_SPEED_UNKNOWN;
4960 }
4961 }
4962
4963 void
bnxt_report_link(struct bnxt_softc * softc)4964 bnxt_report_link(struct bnxt_softc *softc)
4965 {
4966 struct bnxt_link_info *link_info = &softc->link_info;
4967 const char *duplex = NULL, *flow_ctrl = NULL;
4968 const char *signal_mode = "";
4969
4970 if(softc->edev) {
4971 softc->edev->espeed =
4972 bnxt_fw_to_ethtool_speed(link_info->link_speed);
4973 softc->edev->lanes = link_info->active_lanes;
4974 }
4975
4976 if (link_info->link_up == link_info->last_link_up) {
4977 if (!link_info->link_up)
4978 return;
4979 if ((link_info->duplex == link_info->last_duplex) &&
4980 (link_info->phy_type == link_info->last_phy_type) &&
4981 (!(BNXT_IS_FLOW_CTRL_CHANGED(link_info))))
4982 return;
4983 }
4984
4985 if (link_info->link_up) {
4986 if (link_info->duplex ==
4987 HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
4988 duplex = "full duplex";
4989 else
4990 duplex = "half duplex";
4991 if (link_info->flow_ctrl.tx & link_info->flow_ctrl.rx)
4992 flow_ctrl = "FC - receive & transmit";
4993 else if (link_info->flow_ctrl.tx)
4994 flow_ctrl = "FC - transmit";
4995 else if (link_info->flow_ctrl.rx)
4996 flow_ctrl = "FC - receive";
4997 else
4998 flow_ctrl = "FC - none";
4999
5000 if (softc->link_info.phy_qcfg_resp.option_flags &
5001 HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
5002 uint8_t sig_mode = softc->link_info.active_fec_sig_mode &
5003 HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_MASK;
5004 switch (sig_mode) {
5005 case BNXT_SIG_MODE_NRZ:
5006 signal_mode = "(NRZ) ";
5007 break;
5008 case BNXT_SIG_MODE_PAM4:
5009 signal_mode = "(PAM4 56Gbps) ";
5010 break;
5011 case BNXT_SIG_MODE_PAM4_112:
5012 signal_mode = "(PAM4 112Gbps) ";
5013 break;
5014 default:
5015 break;
5016 }
5017 link_info->sig_mode = sig_mode;
5018 }
5019
5020 iflib_link_state_change(softc->ctx, LINK_STATE_UP,
5021 IF_Gbps(100));
5022 device_printf(softc->dev, "Link is UP %s %s, %s - %d Mbps \n", duplex, signal_mode,
5023 flow_ctrl, (link_info->link_speed * 100));
5024 } else {
5025 iflib_link_state_change(softc->ctx, LINK_STATE_DOWN,
5026 bnxt_get_baudrate(&softc->link_info));
5027 device_printf(softc->dev, "Link is Down\n");
5028 }
5029
5030 link_info->last_link_up = link_info->link_up;
5031 link_info->last_duplex = link_info->duplex;
5032 link_info->last_phy_type = link_info->phy_type;
5033 link_info->last_flow_ctrl.tx = link_info->flow_ctrl.tx;
5034 link_info->last_flow_ctrl.rx = link_info->flow_ctrl.rx;
5035 link_info->last_flow_ctrl.autoneg = link_info->flow_ctrl.autoneg;
5036 /* update media types */
5037 ifmedia_removeall(softc->media);
5038 bnxt_add_media_types(softc);
5039 ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
5040 }
5041
5042 static int
bnxt_handle_isr(void * arg)5043 bnxt_handle_isr(void *arg)
5044 {
5045 struct bnxt_cp_ring *cpr = arg;
5046 struct bnxt_softc *softc = cpr->ring.softc;
5047
5048 cpr->int_count++;
5049 /* Disable further interrupts for this queue */
5050 if (!BNXT_CHIP_P5_PLUS(softc))
5051 softc->db_ops.bnxt_db_rx_cq(cpr, 0);
5052
5053 return FILTER_SCHEDULE_THREAD;
5054 }
5055
5056 static int
bnxt_handle_def_cp(void * arg)5057 bnxt_handle_def_cp(void *arg)
5058 {
5059 struct bnxt_softc *softc = arg;
5060
5061 softc->db_ops.bnxt_db_rx_cq(&softc->def_cp_ring, 0);
5062 iflib_config_task_enqueue(softc->ctx, &softc->def_cp_task);
5063 return FILTER_HANDLED;
5064 }
5065
5066 static void
bnxt_clear_ids(struct bnxt_softc * softc)5067 bnxt_clear_ids(struct bnxt_softc *softc)
5068 {
5069 int i;
5070
5071 softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
5072 softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
5073 softc->def_nq_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
5074 softc->def_nq_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
5075 for (i = 0; i < softc->ntxqsets; i++) {
5076 softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
5077 softc->tx_cp_rings[i].ring.phys_id =
5078 (uint16_t)HWRM_NA_SIGNATURE;
5079 softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
5080
5081 if (!softc->nq_rings)
5082 continue;
5083 softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
5084 softc->nq_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
5085 }
5086 for (i = 0; i < softc->nrxqsets; i++) {
5087 softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
5088 softc->rx_cp_rings[i].ring.phys_id =
5089 (uint16_t)HWRM_NA_SIGNATURE;
5090 softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
5091 softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
5092 softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
5093 }
5094 softc->vnic_info.filter_id = -1;
5095 softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
5096 softc->vnic_info.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
5097 memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
5098 softc->vnic_info.rss_grp_tbl.idi_size);
5099 }
5100
5101 static void
bnxt_mark_cpr_invalid(struct bnxt_cp_ring * cpr)5102 bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
5103 {
5104 struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
5105 int i;
5106
5107 for (i = 0; i < cpr->ring.ring_size; i++)
5108 cmp[i].info3_v = !cpr->v_bit;
5109 }
5110
/*
 * Decode and log a firmware ERROR_REPORT async event.
 *
 * data1 carries the error type (and type-specific fields); data2 carries
 * additional detail such as the NVM address or thermal temperatures.
 * Unknown types are logged raw so nothing is silently dropped.
 */
static void bnxt_event_error_report(struct bnxt_softc *softc, u32 data1, u32 data2)
{
	u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);

	switch (err_type) {
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
		device_printf(softc->dev,
			      "1PPS: Received invalid signal on pin%u from the external source. Please fix the signal and reconfigure the pin\n",
			      BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
		device_printf(softc->dev,
			      "Pause Storm detected!\n");
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
		device_printf(softc->dev,
			      "One or more MMIO doorbells dropped by the device! epoch: 0x%x\n",
			      BNXT_EVENT_DBR_EPOCH(data1));
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM: {
		const char *nvm_err_str;

		if (EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1))
			nvm_err_str = "nvm write error";
		else if (EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1))
			nvm_err_str = "nvm erase error";
		else
			nvm_err_str = "unrecognized nvm error";

		device_printf(softc->dev,
			      "%s reported at address 0x%x\n", nvm_err_str,
			      (u32)EVENT_DATA2_NVM_ERR_ADDR(data2));
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
		/* const: these only ever point at string literals. */
		const char *threshold_type;
		const char *dir_str;

		switch (EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)) {
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
			threshold_type = "warning";
			break;
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
			threshold_type = "critical";
			break;
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
			threshold_type = "fatal";
			break;
		case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
			threshold_type = "shutdown";
			break;
		default:
			device_printf(softc->dev,
				      "Unknown Thermal threshold type event\n");
			return;
		}
		if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1))
			dir_str = "above";
		else
			dir_str = "below";
		device_printf(softc->dev,
			      "Chip temperature has gone %s the %s thermal threshold!\n",
			      dir_str, threshold_type);
		device_printf(softc->dev,
			      "Temperature (In Celsius), Current: %u, threshold: %u\n",
			      BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
			      BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
		device_printf(softc->dev,
			      "Speed change is not supported with dual rate transceivers on this board\n");
		break;

	default:
		device_printf(softc->dev,
			      "FW reported unknown error type: %u, data1: 0x%x data2: 0x%x\n",
			      err_type, data1, data2);
		break;
	}
}
5192
/*
 * Dispatch one firmware asynchronous event completion from the default
 * completion ring.
 *
 * Most recognized events fall through to bnxt_queue_sp_work() to run the
 * slow-path task; events fully handled inline jump to
 * async_event_process_exit to skip it.  The raw completion is always
 * forwarded to the ULP (RoCE) driver last.
 */
static void
bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
{
	struct hwrm_async_event_cmpl *ae = (void *)cmpl;
	uint16_t async_id = le16toh(ae->event_id);
	struct ifmediareq ifmr;
	char *type_str;
	char *status_desc;
	struct bnxt_fw_health *fw_health;
	u32 data1 = le32toh(ae->event_data1);
	u32 data2 = le32toh(ae->event_data2);

	switch (async_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		/*
		 * P5+ chips defer the link update to the state task;
		 * older chips refresh media status synchronously here.
		 */
		if (BNXT_CHIP_P5_PLUS(softc))
			bit_set(softc->state_bv, BNXT_STATE_LINK_CHANGE);
		else
			bnxt_media_status(softc->ctx, &ifmr);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
		/* Fully handled inline; no slow-path work needed. */
		bnxt_event_error_report(softc, data1, data2);
		goto async_event_process_exit;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE:
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
		/*
		 * Firmware is about to reset.  Record the advertised
		 * min/max wait windows (timestamps are in deciseconds,
		 * hence the *100 when printing milliseconds) and flag the
		 * reset kind for the slow-path task.
		 */
		type_str = "Solicited";

		if (!softc->fw_health)
			goto async_event_process_exit;

		softc->fw_reset_timestamp = jiffies;
		softc->fw_reset_min_dsecs = ae->timestamp_lo;
		if (!softc->fw_reset_min_dsecs)
			softc->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
		softc->fw_reset_max_dsecs = le16toh(ae->timestamp_hi);
		if (!softc->fw_reset_max_dsecs)
			softc->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &softc->state);
		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
			type_str = "Fatal";
			softc->fw_health->fatalities++;
			set_bit(BNXT_STATE_FW_FATAL_COND, &softc->state);
		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
			type_str = "Non-fatal";
			softc->fw_health->survivals++;
			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &softc->state);
		}
		device_printf(softc->dev,
			   "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
			   type_str, data1, data2,
			   softc->fw_reset_min_dsecs * 100,
			   softc->fw_reset_max_dsecs * 100);
		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &softc->sp_event);
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
		/*
		 * Error-recovery watchdog (re)configuration from firmware.
		 * Seeds the heartbeat/reset-count baselines before enabling
		 * so bnxt_health_check() never sees stale values.
		 */
		fw_health = softc->fw_health;
		status_desc = "healthy";
		u32 status;

		if (!fw_health)
			goto async_event_process_exit;

		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
			fw_health->enabled = false;
			device_printf(softc->dev, "Driver recovery watchdog is disabled\n");
			break;
		}
		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
		fw_health->tmr_multiplier =
			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
				     HZ * 10);
		fw_health->tmr_counter = fw_health->tmr_multiplier;
		if (!fw_health->enabled)
			fw_health->last_fw_heartbeat =
				bnxt_fw_health_readl(softc, BNXT_FW_HEARTBEAT_REG);
		fw_health->last_fw_reset_cnt =
			bnxt_fw_health_readl(softc, BNXT_FW_RESET_CNT_REG);
		status = bnxt_fw_health_readl(softc, BNXT_FW_HEALTH_REG);
		if (status != BNXT_FW_STATUS_HEALTHY)
			status_desc = "unhealthy";
		device_printf(softc->dev,
			   "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
			   fw_health->primary ? "primary" : "backup", status,
			   status_desc, fw_health->last_fw_reset_cnt);
		if (!fw_health->enabled) {
			/* Make sure tmr_counter is set and seen by
			 * bnxt_health_check() before setting enabled
			 */
			smp_mb();
			fw_health->enabled = true;
		}
		goto async_event_process_exit;
	}

	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
		/* Recognized but not acted upon by this driver. */
		device_printf(softc->dev,
		    "Unhandled async completion type %u\n", async_id);
		break;
	default:
		dev_dbg(softc->dev, "Unknown Async event completion type %u\n",
		    async_id);
		break;
	}
	bnxt_queue_sp_work(softc);

async_event_process_exit:
	bnxt_ulp_async_events(softc, ae);
}
5320
/*
 * Task-context handler for the default completion ring.
 *
 * Walks the ring consuming completions until an entry with the wrong
 * valid-bit polarity is found (i.e. not yet written by hardware), then
 * rolls the consumer index back to the last valid entry and re-arms the
 * ring's doorbell.
 */
static void
bnxt_def_cp_task(void *context, int pending)
{
	if_ctx_t ctx = context;
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_cp_ring *cpr = &softc->def_cp_ring;

	/* Handle completions on the default completion ring */
	struct cmpl_base *cmpl;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	bool last_v_bit;
	uint32_t last_cons;
	uint16_t type;

	for (;;) {
		/*
		 * Remember the last known-consumed position; if the next
		 * entry turns out invalid we roll back to it below.
		 */
		last_cons = cons;
		last_v_bit = v_bit;
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];

		if (!CMP_VALID(cmpl, v_bit))
			break;

		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			/* Firmware async notification: the only type this
			 * ring is expected to carry. */
			bnxt_handle_async_event(softc, cmpl);
			break;
		case CMPL_BASE_TYPE_TX_L2:
		case CMPL_BASE_TYPE_RX_L2:
		case CMPL_BASE_TYPE_RX_L2_V3:
		case CMPL_BASE_TYPE_RX_AGG:
		case CMPL_BASE_TYPE_RX_TPA_START:
		case CMPL_BASE_TYPE_RX_TPA_START_V3:
		case CMPL_BASE_TYPE_RX_TPA_END:
		case CMPL_BASE_TYPE_STAT_EJECT:
		case CMPL_BASE_TYPE_HWRM_DONE:
		case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		case CMPL_BASE_TYPE_HWRM_FWD_RESP:
		case CMPL_BASE_TYPE_CQ_NOTIFICATION:
		case CMPL_BASE_TYPE_SRQ_EVENT:
		case CMPL_BASE_TYPE_DBQ_EVENT:
		case CMPL_BASE_TYPE_QP_EVENT:
		case CMPL_BASE_TYPE_FUNC_EVENT:
			/* Known types that do not belong on this ring. */
			dev_dbg(softc->dev, "Unhandled Async event completion type %u\n",
			    type);
			break;
		default:
			dev_dbg(softc->dev, "Unknown Async event completion type %u\n",
			    type);
			break;
		}
	}

	/* Publish the final consumer position and re-enable interrupts. */
	cpr->cons = last_cons;
	cpr->v_bit = last_v_bit;
	softc->db_ops.bnxt_db_rx_cq(cpr, 1);
}
5380
5381 uint8_t
get_phy_type(struct bnxt_softc * softc)5382 get_phy_type(struct bnxt_softc *softc)
5383 {
5384 struct bnxt_link_info *link_info = &softc->link_info;
5385 uint8_t phy_type = link_info->phy_type;
5386 uint16_t supported;
5387
5388 if (phy_type != HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN)
5389 return phy_type;
5390
5391 /* Deduce the phy type from the media type and supported speeds */
5392 supported = link_info->support_speeds;
5393
5394 if (link_info->media_type ==
5395 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP)
5396 return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET;
5397 if (link_info->media_type ==
5398 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC) {
5399 if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
5400 return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX;
5401 if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
5402 return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR;
5403 return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR;
5404 }
5405 if (link_info->media_type ==
5406 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE)
5407 return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR;
5408
5409 return phy_type;
5410 }
5411
5412 bool
bnxt_check_hwrm_version(struct bnxt_softc * softc)5413 bnxt_check_hwrm_version(struct bnxt_softc *softc)
5414 {
5415 char buf[16];
5416
5417 sprintf(buf, "%hhu.%hhu.%hhu", softc->ver_info->hwrm_min_major,
5418 softc->ver_info->hwrm_min_minor, softc->ver_info->hwrm_min_update);
5419 if (softc->ver_info->hwrm_min_major > softc->ver_info->hwrm_if_major) {
5420 device_printf(softc->dev,
5421 "WARNING: HWRM version %s is too old (older than %s)\n",
5422 softc->ver_info->hwrm_if_ver, buf);
5423 return false;
5424 }
5425 else if(softc->ver_info->hwrm_min_major ==
5426 softc->ver_info->hwrm_if_major) {
5427 if (softc->ver_info->hwrm_min_minor >
5428 softc->ver_info->hwrm_if_minor) {
5429 device_printf(softc->dev,
5430 "WARNING: HWRM version %s is too old (older than %s)\n",
5431 softc->ver_info->hwrm_if_ver, buf);
5432 return false;
5433 }
5434 else if (softc->ver_info->hwrm_min_minor ==
5435 softc->ver_info->hwrm_if_minor) {
5436 if (softc->ver_info->hwrm_min_update >
5437 softc->ver_info->hwrm_if_update) {
5438 device_printf(softc->dev,
5439 "WARNING: HWRM version %s is too old (older than %s)\n",
5440 softc->ver_info->hwrm_if_ver, buf);
5441 return false;
5442 }
5443 }
5444 }
5445 return true;
5446 }
5447
5448 static uint64_t
bnxt_get_baudrate(struct bnxt_link_info * link)5449 bnxt_get_baudrate(struct bnxt_link_info *link)
5450 {
5451 switch (link->link_speed) {
5452 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
5453 return IF_Mbps(100);
5454 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
5455 return IF_Gbps(1);
5456 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
5457 return IF_Gbps(2);
5458 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
5459 return IF_Mbps(2500);
5460 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
5461 return IF_Gbps(10);
5462 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
5463 return IF_Gbps(20);
5464 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
5465 return IF_Gbps(25);
5466 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
5467 return IF_Gbps(40);
5468 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
5469 return IF_Gbps(50);
5470 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
5471 return IF_Gbps(100);
5472 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
5473 return IF_Mbps(10);
5474 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
5475 return IF_Gbps(200);
5476 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_400GB:
5477 return IF_Gbps(400);
5478 }
5479 return IF_Gbps(100);
5480 }
5481
5482 static void
bnxt_get_wol_settings(struct bnxt_softc * softc)5483 bnxt_get_wol_settings(struct bnxt_softc *softc)
5484 {
5485 uint16_t wol_handle = 0;
5486
5487 if (!bnxt_wol_supported(softc))
5488 return;
5489
5490 do {
5491 wol_handle = bnxt_hwrm_get_wol_fltrs(softc, wol_handle);
5492 } while (wol_handle && wol_handle != BNXT_NO_MORE_WOL_FILTERS);
5493 }
5494