xref: /freebsd/sys/dev/bxe/bxe.c (revision 1323ec571215a77ddd21294f0871979d5ad6b992)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #define BXE_DRIVER_VERSION "1.78.91"
33 
34 #include "bxe.h"
35 #include "ecore_sp.h"
36 #include "ecore_init.h"
37 #include "ecore_init_ops.h"
38 
39 #include "57710_int_offsets.h"
40 #include "57711_int_offsets.h"
41 #include "57712_int_offsets.h"
42 
43 /*
44  * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
45  * explicitly here for older kernels that don't include this changeset.
46  */
47 #ifndef CTLTYPE_U64
48 #define CTLTYPE_U64      CTLTYPE_QUAD
49 #define sysctl_handle_64 sysctl_handle_quad
50 #endif
51 
52 /*
53  * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
54  * here as zero (0) for older kernels that don't include this changeset,
55  * thereby masking the functionality.
56  */
57 #ifndef CSUM_TCP_IPV6
58 #define CSUM_TCP_IPV6 0
59 #define CSUM_UDP_IPV6 0
60 #endif
61 
62 #define BXE_DEF_SB_ATT_IDX 0x0001
63 #define BXE_DEF_SB_IDX     0x0002
64 
65 /*
66  * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
67  * function HW initialization.
68  */
69 #define FLR_WAIT_USEC     10000 /* 10 msecs */
70 #define FLR_WAIT_INTERVAL 50    /* usecs */
71 #define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
72 
73 struct pbf_pN_buf_regs {
74     int pN;
75     uint32_t init_crd;
76     uint32_t crd;
77     uint32_t crd_freed;
78 };
79 
80 struct pbf_pN_cmd_regs {
81     int pN;
82     uint32_t lines_occup;
83     uint32_t lines_freed;
84 };
85 
86 /*
87  * PCI Device ID Table used by bxe_probe().
88  */
89 #define BXE_DEVDESC_MAX 64
90 static struct bxe_device_type bxe_devs[] = {
91     {
92         BRCM_VENDORID,
93         CHIP_NUM_57710,
94         PCI_ANY_ID, PCI_ANY_ID,
95         "QLogic NetXtreme II BCM57710 10GbE"
96     },
97     {
98         BRCM_VENDORID,
99         CHIP_NUM_57711,
100         PCI_ANY_ID, PCI_ANY_ID,
101         "QLogic NetXtreme II BCM57711 10GbE"
102     },
103     {
104         BRCM_VENDORID,
105         CHIP_NUM_57711E,
106         PCI_ANY_ID, PCI_ANY_ID,
107         "QLogic NetXtreme II BCM57711E 10GbE"
108     },
109     {
110         BRCM_VENDORID,
111         CHIP_NUM_57712,
112         PCI_ANY_ID, PCI_ANY_ID,
113         "QLogic NetXtreme II BCM57712 10GbE"
114     },
115     {
116         BRCM_VENDORID,
117         CHIP_NUM_57712_MF,
118         PCI_ANY_ID, PCI_ANY_ID,
119         "QLogic NetXtreme II BCM57712 MF 10GbE"
120     },
121     {
122         BRCM_VENDORID,
123         CHIP_NUM_57800,
124         PCI_ANY_ID, PCI_ANY_ID,
125         "QLogic NetXtreme II BCM57800 10GbE"
126     },
127     {
128         BRCM_VENDORID,
129         CHIP_NUM_57800_MF,
130         PCI_ANY_ID, PCI_ANY_ID,
131         "QLogic NetXtreme II BCM57800 MF 10GbE"
132     },
133     {
134         BRCM_VENDORID,
135         CHIP_NUM_57810,
136         PCI_ANY_ID, PCI_ANY_ID,
137         "QLogic NetXtreme II BCM57810 10GbE"
138     },
139     {
140         BRCM_VENDORID,
141         CHIP_NUM_57810_MF,
142         PCI_ANY_ID, PCI_ANY_ID,
143         "QLogic NetXtreme II BCM57810 MF 10GbE"
144     },
145     {
146         BRCM_VENDORID,
147         CHIP_NUM_57811,
148         PCI_ANY_ID, PCI_ANY_ID,
149         "QLogic NetXtreme II BCM57811 10GbE"
150     },
151     {
152         BRCM_VENDORID,
153         CHIP_NUM_57811_MF,
154         PCI_ANY_ID, PCI_ANY_ID,
155         "QLogic NetXtreme II BCM57811 MF 10GbE"
156     },
157     {
158         BRCM_VENDORID,
159         CHIP_NUM_57840_4_10,
160         PCI_ANY_ID, PCI_ANY_ID,
161         "QLogic NetXtreme II BCM57840 4x10GbE"
162     },
163     {
164         QLOGIC_VENDORID,
165         CHIP_NUM_57840_4_10,
166         PCI_ANY_ID, PCI_ANY_ID,
167         "QLogic NetXtreme II BCM57840 4x10GbE"
168     },
169     {
170         BRCM_VENDORID,
171         CHIP_NUM_57840_2_20,
172         PCI_ANY_ID, PCI_ANY_ID,
173         "QLogic NetXtreme II BCM57840 2x20GbE"
174     },
175     {
176         BRCM_VENDORID,
177         CHIP_NUM_57840_MF,
178         PCI_ANY_ID, PCI_ANY_ID,
179         "QLogic NetXtreme II BCM57840 MF 10GbE"
180     },
181     {
182         0, 0, 0, 0, NULL
183     }
184 };
185 
186 MALLOC_DECLARE(M_BXE_ILT);
187 MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
188 
189 /*
190  * FreeBSD device entry points.
191  */
192 static int bxe_probe(device_t);
193 static int bxe_attach(device_t);
194 static int bxe_detach(device_t);
195 static int bxe_shutdown(device_t);
196 
197 
198 /*
199  * FreeBSD KLD module/device interface event handler method.
200  */
201 static device_method_t bxe_methods[] = {
202     /* Device interface (device_if.h) */
203     DEVMETHOD(device_probe,     bxe_probe),
204     DEVMETHOD(device_attach,    bxe_attach),
205     DEVMETHOD(device_detach,    bxe_detach),
206     DEVMETHOD(device_shutdown,  bxe_shutdown),
207     /* Bus interface (bus_if.h) */
208     DEVMETHOD(bus_print_child,  bus_generic_print_child),
209     DEVMETHOD(bus_driver_added, bus_generic_driver_added),
210     KOBJMETHOD_END
211 };
212 
213 /*
214  * FreeBSD KLD Module data declaration
215  */
216 static driver_t bxe_driver = {
217     "bxe",                   /* module name */
218     bxe_methods,             /* event handler */
219     sizeof(struct bxe_softc) /* extra data */
220 };
221 
222 /*
223  * FreeBSD dev class is needed to manage dev instances and
224  * to associate with a bus type
225  */
226 static devclass_t bxe_devclass;
227 
228 MODULE_DEPEND(bxe, pci, 1, 1, 1);
229 MODULE_DEPEND(bxe, ether, 1, 1, 1);
230 DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
231 
232 DEBUGNET_DEFINE(bxe);
233 
234 /* resources needed for unloading a previously loaded device */
235 
236 #define BXE_PREV_WAIT_NEEDED 1
237 struct mtx bxe_prev_mtx;
238 MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
239 struct bxe_prev_list_node {
240     LIST_ENTRY(bxe_prev_list_node) node;
241     uint8_t bus;
242     uint8_t slot;
243     uint8_t path;
244     uint8_t aer; /* XXX automatic error recovery */
245     uint8_t undi;
246 };
247 static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
248 
249 static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
250 
251 /* Tunable device values... */
252 
253 SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
254     "bxe driver parameters");
255 
256 /* Debug */
257 unsigned long bxe_debug = 0;
258 SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
259              &bxe_debug, 0, "Debug logging mode");
260 
261 /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
262 static int bxe_interrupt_mode = INTR_MODE_MSIX;
263 SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
264            &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
265 
266 /* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
267 static int bxe_queue_count = 4;
268 SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
269            &bxe_queue_count, 0, "Multi-Queue queue count");
270 
271 /* max number of buffers per queue (default RX_BD_USABLE) */
272 static int bxe_max_rx_bufs = 0;
273 SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
274            &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
275 
276 /* Host interrupt coalescing RX tick timer (usecs) */
277 static int bxe_hc_rx_ticks = 25;
278 SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
279            &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
280 
281 /* Host interrupt coalescing TX tick timer (usecs) */
282 static int bxe_hc_tx_ticks = 50;
283 SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
284            &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
285 
286 /* Maximum number of Rx packets to process at a time */
287 static int bxe_rx_budget = 0xffffffff;
288 SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
289            &bxe_rx_budget, 0, "Rx processing budget");
290 
291 /* Maximum LRO aggregation size */
292 static int bxe_max_aggregation_size = 0;
293 SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
294            &bxe_max_aggregation_size, 0, "max aggregation size");
295 
296 /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
297 static int bxe_mrrs = -1;
298 SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
299            &bxe_mrrs, 0, "PCIe maximum read request size");
300 
301 /* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
302 static int bxe_autogreeen = 0;
303 SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
304            &bxe_autogreeen, 0, "AutoGrEEEn support");
305 
306 /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
307 static int bxe_udp_rss = 0;
308 SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
309            &bxe_udp_rss, 0, "UDP RSS support");
310 
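/*
 * All of the hw.bxe.* values above are boot-time tunables (CTLFLAG_*TUN),
 * so they are normally set from /boot/loader.conf. A minimal illustrative
 * sketch (the values shown are simply the defaults documented above):
 *
 *     hw.bxe.debug=0
 *     hw.bxe.interrupt_mode=2
 *     hw.bxe.queue_count=4
 *     hw.bxe.hc_rx_ticks=25
 *     hw.bxe.hc_tx_ticks=50
 */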
311 
312 #define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
313 
314 #define STATS_OFFSET32(stat_name)                   \
315     (offsetof(struct bxe_eth_stats, stat_name) / 4)
316 
317 #define Q_STATS_OFFSET32(stat_name)                   \
318     (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
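/*
 * STATS_OFFSET32()/Q_STATS_OFFSET32() turn a field name into its 32-bit
 * word index within the statistics structure. 64-bit counters are kept as
 * _hi/_lo register pairs, which is why the tables below list a size of 8
 * for those entries and 4 for plain 32-bit counters.
 */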
319 
320 static const struct {
321     uint32_t offset;
322     uint32_t size;
323     uint32_t flags;
324 #define STATS_FLAGS_PORT  1
325 #define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
326 #define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
327     char string[STAT_NAME_LEN];
328 } bxe_eth_stats_arr[] = {
329     { STATS_OFFSET32(total_bytes_received_hi),
330                 8, STATS_FLAGS_BOTH, "rx_bytes" },
331     { STATS_OFFSET32(error_bytes_received_hi),
332                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
333     { STATS_OFFSET32(total_unicast_packets_received_hi),
334                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
335     { STATS_OFFSET32(total_multicast_packets_received_hi),
336                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
337     { STATS_OFFSET32(total_broadcast_packets_received_hi),
338                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
339     { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
340                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
341     { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
342                 8, STATS_FLAGS_PORT, "rx_align_errors" },
343     { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
344                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
345     { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
346                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
347     { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
348                 8, STATS_FLAGS_PORT, "rx_fragments" },
349     { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
350                 8, STATS_FLAGS_PORT, "rx_jabbers" },
351     { STATS_OFFSET32(no_buff_discard_hi),
352                 8, STATS_FLAGS_BOTH, "rx_discards" },
353     { STATS_OFFSET32(mac_filter_discard),
354                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
355     { STATS_OFFSET32(mf_tag_discard),
356                 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
357     { STATS_OFFSET32(pfc_frames_received_hi),
358                 8, STATS_FLAGS_PORT, "pfc_frames_received" },
359     { STATS_OFFSET32(pfc_frames_sent_hi),
360                 8, STATS_FLAGS_PORT, "pfc_frames_sent" },
361     { STATS_OFFSET32(brb_drop_hi),
362                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
363     { STATS_OFFSET32(brb_truncate_hi),
364                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
365     { STATS_OFFSET32(pause_frames_received_hi),
366                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
367     { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
368                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
369     { STATS_OFFSET32(nig_timer_max),
370                 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
371     { STATS_OFFSET32(total_bytes_transmitted_hi),
372                 8, STATS_FLAGS_BOTH, "tx_bytes" },
373     { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
374                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
375     { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
376                 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
377     { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
378                 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
379     { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
380                 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
381     { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
382                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
383     { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
384                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
385     { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
386                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
387     { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
388                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
389     { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
390                 8, STATS_FLAGS_PORT, "tx_deferred" },
391     { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
392                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
393     { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
394                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
395     { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
396                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
397     { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
398                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
399     { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
400                 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
401     { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
402                 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
403     { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
404                 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
405     { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
406                 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
407     { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
408                 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
409     { STATS_OFFSET32(etherstatspktsover1522octets_hi),
410                 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
411     { STATS_OFFSET32(pause_frames_sent_hi),
412                 8, STATS_FLAGS_PORT, "tx_pause_frames" },
413     { STATS_OFFSET32(total_tpa_aggregations_hi),
414                 8, STATS_FLAGS_FUNC, "tpa_aggregations" },
415     { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
416                 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
417     { STATS_OFFSET32(total_tpa_bytes_hi),
418                 8, STATS_FLAGS_FUNC, "tpa_bytes"},
419     { STATS_OFFSET32(eee_tx_lpi),
420                 4, STATS_FLAGS_PORT, "eee_tx_lpi"},
421     { STATS_OFFSET32(rx_calls),
422                 4, STATS_FLAGS_FUNC, "rx_calls"},
423     { STATS_OFFSET32(rx_pkts),
424                 4, STATS_FLAGS_FUNC, "rx_pkts"},
425     { STATS_OFFSET32(rx_tpa_pkts),
426                 4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
427     { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
428                 4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
429     { STATS_OFFSET32(rx_bxe_service_rxsgl),
430                 4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
431     { STATS_OFFSET32(rx_jumbo_sge_pkts),
432                 4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
433     { STATS_OFFSET32(rx_soft_errors),
434                 4, STATS_FLAGS_FUNC, "rx_soft_errors"},
435     { STATS_OFFSET32(rx_hw_csum_errors),
436                 4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
437     { STATS_OFFSET32(rx_ofld_frames_csum_ip),
438                 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
439     { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
440                 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
441     { STATS_OFFSET32(rx_budget_reached),
442                 4, STATS_FLAGS_FUNC, "rx_budget_reached"},
443     { STATS_OFFSET32(tx_pkts),
444                 4, STATS_FLAGS_FUNC, "tx_pkts"},
445     { STATS_OFFSET32(tx_soft_errors),
446                 4, STATS_FLAGS_FUNC, "tx_soft_errors"},
447     { STATS_OFFSET32(tx_ofld_frames_csum_ip),
448                 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
449     { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
450                 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
451     { STATS_OFFSET32(tx_ofld_frames_csum_udp),
452                 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
453     { STATS_OFFSET32(tx_ofld_frames_lso),
454                 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
455     { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
456                 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
457     { STATS_OFFSET32(tx_encap_failures),
458                 4, STATS_FLAGS_FUNC, "tx_encap_failures"},
459     { STATS_OFFSET32(tx_hw_queue_full),
460                 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
461     { STATS_OFFSET32(tx_hw_max_queue_depth),
462                 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
463     { STATS_OFFSET32(tx_dma_mapping_failure),
464                 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
465     { STATS_OFFSET32(tx_max_drbr_queue_depth),
466                 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
467     { STATS_OFFSET32(tx_window_violation_std),
468                 4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
469     { STATS_OFFSET32(tx_window_violation_tso),
470                 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
471     { STATS_OFFSET32(tx_chain_lost_mbuf),
472                 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
473     { STATS_OFFSET32(tx_frames_deferred),
474                 4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
475     { STATS_OFFSET32(tx_queue_xoff),
476                 4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
477     { STATS_OFFSET32(mbuf_defrag_attempts),
478                 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
479     { STATS_OFFSET32(mbuf_defrag_failures),
480                 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
481     { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
482                 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
483     { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
484                 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
485     { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
486                 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
487     { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
488                 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
489     { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
490                 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
491     { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
492                 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
493     { STATS_OFFSET32(mbuf_alloc_tx),
494                 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
495     { STATS_OFFSET32(mbuf_alloc_rx),
496                 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
497     { STATS_OFFSET32(mbuf_alloc_sge),
498                 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
499     { STATS_OFFSET32(mbuf_alloc_tpa),
500                 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
501     { STATS_OFFSET32(tx_queue_full_return),
502                 4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
503     { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
504                 4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
505     { STATS_OFFSET32(tx_request_link_down_failures),
506                 4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
507     { STATS_OFFSET32(bd_avail_too_less_failures),
508                 4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
509     { STATS_OFFSET32(tx_mq_not_empty),
510                 4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
511     { STATS_OFFSET32(nsegs_path1_errors),
512                 4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
513     { STATS_OFFSET32(nsegs_path2_errors),
514                 4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
515 
516 
517 };
518 
519 static const struct {
520     uint32_t offset;
521     uint32_t size;
522     char string[STAT_NAME_LEN];
523 } bxe_eth_q_stats_arr[] = {
524     { Q_STATS_OFFSET32(total_bytes_received_hi),
525                 8, "rx_bytes" },
526     { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
527                 8, "rx_ucast_packets" },
528     { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
529                 8, "rx_mcast_packets" },
530     { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
531                 8, "rx_bcast_packets" },
532     { Q_STATS_OFFSET32(no_buff_discard_hi),
533                 8, "rx_discards" },
534     { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
535                 8, "tx_bytes" },
536     { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
537                 8, "tx_ucast_packets" },
538     { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
539                 8, "tx_mcast_packets" },
540     { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
541                 8, "tx_bcast_packets" },
542     { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
543                 8, "tpa_aggregations" },
544     { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
545                 8, "tpa_aggregated_frames"},
546     { Q_STATS_OFFSET32(total_tpa_bytes_hi),
547                 8, "tpa_bytes"},
548     { Q_STATS_OFFSET32(rx_calls),
549                 4, "rx_calls"},
550     { Q_STATS_OFFSET32(rx_pkts),
551                 4, "rx_pkts"},
552     { Q_STATS_OFFSET32(rx_tpa_pkts),
553                 4, "rx_tpa_pkts"},
554     { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
555                 4, "rx_erroneous_jumbo_sge_pkts"},
556     { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
557                 4, "rx_bxe_service_rxsgl"},
558     { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
559                 4, "rx_jumbo_sge_pkts"},
560     { Q_STATS_OFFSET32(rx_soft_errors),
561                 4, "rx_soft_errors"},
562     { Q_STATS_OFFSET32(rx_hw_csum_errors),
563                 4, "rx_hw_csum_errors"},
564     { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
565                 4, "rx_ofld_frames_csum_ip"},
566     { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
567                 4, "rx_ofld_frames_csum_tcp_udp"},
568     { Q_STATS_OFFSET32(rx_budget_reached),
569                 4, "rx_budget_reached"},
570     { Q_STATS_OFFSET32(tx_pkts),
571                 4, "tx_pkts"},
572     { Q_STATS_OFFSET32(tx_soft_errors),
573                 4, "tx_soft_errors"},
574     { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
575                 4, "tx_ofld_frames_csum_ip"},
576     { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
577                 4, "tx_ofld_frames_csum_tcp"},
578     { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
579                 4, "tx_ofld_frames_csum_udp"},
580     { Q_STATS_OFFSET32(tx_ofld_frames_lso),
581                 4, "tx_ofld_frames_lso"},
582     { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
583                 4, "tx_ofld_frames_lso_hdr_splits"},
584     { Q_STATS_OFFSET32(tx_encap_failures),
585                 4, "tx_encap_failures"},
586     { Q_STATS_OFFSET32(tx_hw_queue_full),
587                 4, "tx_hw_queue_full"},
588     { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
589                 4, "tx_hw_max_queue_depth"},
590     { Q_STATS_OFFSET32(tx_dma_mapping_failure),
591                 4, "tx_dma_mapping_failure"},
592     { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
593                 4, "tx_max_drbr_queue_depth"},
594     { Q_STATS_OFFSET32(tx_window_violation_std),
595                 4, "tx_window_violation_std"},
596     { Q_STATS_OFFSET32(tx_window_violation_tso),
597                 4, "tx_window_violation_tso"},
598     { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
599                 4, "tx_chain_lost_mbuf"},
600     { Q_STATS_OFFSET32(tx_frames_deferred),
601                 4, "tx_frames_deferred"},
602     { Q_STATS_OFFSET32(tx_queue_xoff),
603                 4, "tx_queue_xoff"},
604     { Q_STATS_OFFSET32(mbuf_defrag_attempts),
605                 4, "mbuf_defrag_attempts"},
606     { Q_STATS_OFFSET32(mbuf_defrag_failures),
607                 4, "mbuf_defrag_failures"},
608     { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
609                 4, "mbuf_rx_bd_alloc_failed"},
610     { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
611                 4, "mbuf_rx_bd_mapping_failed"},
612     { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
613                 4, "mbuf_rx_tpa_alloc_failed"},
614     { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
615                 4, "mbuf_rx_tpa_mapping_failed"},
616     { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
617                 4, "mbuf_rx_sge_alloc_failed"},
618     { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
619                 4, "mbuf_rx_sge_mapping_failed"},
620     { Q_STATS_OFFSET32(mbuf_alloc_tx),
621                 4, "mbuf_alloc_tx"},
622     { Q_STATS_OFFSET32(mbuf_alloc_rx),
623                 4, "mbuf_alloc_rx"},
624     { Q_STATS_OFFSET32(mbuf_alloc_sge),
625                 4, "mbuf_alloc_sge"},
626     { Q_STATS_OFFSET32(mbuf_alloc_tpa),
627                 4, "mbuf_alloc_tpa"},
628     { Q_STATS_OFFSET32(tx_queue_full_return),
629                 4, "tx_queue_full_return"},
630     { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
631                 4, "bxe_tx_mq_sc_state_failures"},
632     { Q_STATS_OFFSET32(tx_request_link_down_failures),
633                 4, "tx_request_link_down_failures"},
634     { Q_STATS_OFFSET32(bd_avail_too_less_failures),
635                 4, "bd_avail_too_less_failures"},
636     { Q_STATS_OFFSET32(tx_mq_not_empty),
637                 4, "tx_mq_not_empty"},
638     { Q_STATS_OFFSET32(nsegs_path1_errors),
639                 4, "nsegs_path1_errors"},
640     { Q_STATS_OFFSET32(nsegs_path2_errors),
641                 4, "nsegs_path2_errors"}
642 
643 
644 };
645 
646 #define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
647 #define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
648 
649 
650 static void    bxe_cmng_fns_init(struct bxe_softc *sc,
651                                  uint8_t          read_cfg,
652                                  uint8_t          cmng_type);
653 static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
654 static void    storm_memset_cmng(struct bxe_softc *sc,
655                                  struct cmng_init *cmng,
656                                  uint8_t          port);
657 static void    bxe_set_reset_global(struct bxe_softc *sc);
658 static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
659 static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
660                                  int              engine);
661 static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
662 static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
663                                    uint8_t          *global,
664                                    uint8_t          print);
665 static void    bxe_int_disable(struct bxe_softc *sc);
666 static int     bxe_release_leader_lock(struct bxe_softc *sc);
667 static void    bxe_pf_disable(struct bxe_softc *sc);
668 static void    bxe_free_fp_buffers(struct bxe_softc *sc);
669 static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
670                                       struct bxe_fastpath *fp,
671                                       uint16_t            rx_bd_prod,
672                                       uint16_t            rx_cq_prod,
673                                       uint16_t            rx_sge_prod);
674 static void    bxe_link_report_locked(struct bxe_softc *sc);
675 static void    bxe_link_report(struct bxe_softc *sc);
676 static void    bxe_link_status_update(struct bxe_softc *sc);
677 static void    bxe_periodic_callout_func(void *xsc);
678 static void    bxe_periodic_start(struct bxe_softc *sc);
679 static void    bxe_periodic_stop(struct bxe_softc *sc);
680 static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
681                                     uint16_t prev_index,
682                                     uint16_t index);
683 static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
684                                      int                 queue);
685 static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
686                                      uint16_t            index);
687 static uint8_t bxe_txeof(struct bxe_softc *sc,
688                          struct bxe_fastpath *fp);
689 static void    bxe_task_fp(struct bxe_fastpath *fp);
690 static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
691                                      struct mbuf      *m,
692                                      uint8_t          contents);
693 static int     bxe_alloc_mem(struct bxe_softc *sc);
694 static void    bxe_free_mem(struct bxe_softc *sc);
695 static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
696 static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
697 static int     bxe_interrupt_attach(struct bxe_softc *sc);
698 static void    bxe_interrupt_detach(struct bxe_softc *sc);
699 static void    bxe_set_rx_mode(struct bxe_softc *sc);
700 static int     bxe_init_locked(struct bxe_softc *sc);
701 static int     bxe_stop_locked(struct bxe_softc *sc);
702 static void    bxe_sp_err_timeout_task(void *arg, int pending);
703 void           bxe_parity_recover(struct bxe_softc *sc);
704 void           bxe_handle_error(struct bxe_softc *sc);
705 static __noinline int bxe_nic_load(struct bxe_softc *sc,
706                                    int              load_mode);
707 static __noinline int bxe_nic_unload(struct bxe_softc *sc,
708                                      uint32_t         unload_mode,
709                                      uint8_t          keep_link);
710 
711 static void bxe_handle_sp_tq(void *context, int pending);
712 static void bxe_handle_fp_tq(void *context, int pending);
713 
714 static int bxe_add_cdev(struct bxe_softc *sc);
715 static void bxe_del_cdev(struct bxe_softc *sc);
716 int bxe_grc_dump(struct bxe_softc *sc);
717 static int bxe_alloc_buf_rings(struct bxe_softc *sc);
718 static void bxe_free_buf_rings(struct bxe_softc *sc);
719 
720 /* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
721 uint32_t
722 calc_crc32(uint8_t  *crc32_packet,
723            uint32_t crc32_length,
724            uint32_t crc32_seed,
725            uint8_t  complement)
726 {
727    uint32_t byte         = 0;
728    uint32_t bit          = 0;
729    uint8_t  msb          = 0;
730    uint32_t temp         = 0;
731    uint32_t shft         = 0;
732    uint8_t  current_byte = 0;
733    uint32_t crc32_result = crc32_seed;
734    const uint32_t CRC32_POLY = 0x1edc6f41;
735 
736    if ((crc32_packet == NULL) ||
737        (crc32_length == 0) ||
738        ((crc32_length % 8) != 0))
739     {
740         return (crc32_result);
741     }
742 
743     for (byte = 0; byte < crc32_length; byte = byte + 1)
744     {
745         current_byte = crc32_packet[byte];
746         for (bit = 0; bit < 8; bit = bit + 1)
747         {
748             /* msb = crc32_result[31]; */
749             msb = (uint8_t)(crc32_result >> 31);
750 
751             crc32_result = crc32_result << 1;
752 
753             /* if (msb != current_byte[bit]) */
754             if (msb != (0x1 & (current_byte >> bit)))
755             {
756                 crc32_result = crc32_result ^ CRC32_POLY;
757                 /* crc32_result[0] = 1 */
758                 crc32_result |= 1;
759             }
760         }
761     }
762 
763     /* Last step is to:
764      * 1. "mirror" every bit
765      * 2. swap the 4 bytes
766      * 3. complement each bit
767      */
768 
769     /* Mirror */
770     temp = crc32_result;
771     shft = sizeof(crc32_result) * 8 - 1;
772 
773     for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
774     {
775         temp <<= 1;
776         temp |= crc32_result & 1;
777         shft--;
778     }
779 
780     /* temp[31-bit] = crc32_result[bit] */
781     temp <<= shft;
782 
783     /* Swap */
784     /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
785     {
786         uint32_t t0, t1, t2, t3;
787         t0 = (0x000000ff & (temp >> 24));
788         t1 = (0x0000ff00 & (temp >> 8));
789         t2 = (0x00ff0000 & (temp << 8));
790         t3 = (0xff000000 & (temp << 24));
791         crc32_result = t0 | t1 | t2 | t3;
792     }
793 
794     /* Complement */
795     if (complement)
796     {
797         crc32_result = ~crc32_result;
798     }
799 
800     return (crc32_result);
801 }
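/*
 * A minimal, hypothetical usage sketch of calc_crc32() above: hash a
 * buffer whose length is a multiple of 8 bytes with the conventional
 * all-ones seed and a final complement (0x1edc6f41 is the CRC-32C
 * polynomial).
 *
 *     uint8_t buf[16] = { 0 };
 *     uint32_t crc = calc_crc32(buf, sizeof(buf), 0xffffffff, 1);
 */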
802 
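/*
 * Linux-style atomic bit operations built on FreeBSD's atomic(9)
 * primitives. They operate on a single unsigned long, so 'nr' must be
 * smaller than the number of bits in a long. The test_and_* variants loop
 * on atomic_cmpset until the update wins and return the previous state of
 * the bit.
 */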
803 int
804 bxe_test_bit(int                    nr,
805              volatile unsigned long *addr)
806 {
807     return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
808 }
809 
810 void
811 bxe_set_bit(unsigned int           nr,
812             volatile unsigned long *addr)
813 {
814     atomic_set_acq_long(addr, (1 << nr));
815 }
816 
817 void
818 bxe_clear_bit(int                    nr,
819               volatile unsigned long *addr)
820 {
821     atomic_clear_acq_long(addr, (1 << nr));
822 }
823 
824 int
825 bxe_test_and_set_bit(int                    nr,
826                        volatile unsigned long *addr)
827 {
828     unsigned long x;
829     nr = (1 << nr);
830     do {
831         x = *addr;
832     } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
833     // if (x & nr) bit_was_set; else bit_was_not_set;
834     return (x & nr);
835 }
836 
837 int
838 bxe_test_and_clear_bit(int                    nr,
839                        volatile unsigned long *addr)
840 {
841     unsigned long x;
842     nr = (1 << nr);
843     do {
844         x = *addr;
845     } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
846     // if (x & nr) bit_was_set; else bit_was_not_set;
847     return (x & nr);
848 }
849 
850 int
851 bxe_cmpxchg(volatile int *addr,
852             int          old,
853             int          new)
854 {
855     int x;
856     do {
857         x = *addr;
858     } while (atomic_cmpset_acq_int(addr, old, new) == 0);
859     return (x);
860 }
861 
862 /*
863  * Get DMA memory from the OS.
864  *
865  * Validates that the OS has provided DMA buffers in response to a
866  * bus_dmamap_load call and saves the physical address of those buffers.
867  * When the callback is used the OS will return 0 for the mapping function
868  * (bus_dmamap_load), so any failure is reported back to the caller here
869  * by zeroing dma->paddr and dma->nseg.
870  *
871  * Returns:
872  *   Nothing.
873  */
874 static void
875 bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
876 {
877     struct bxe_dma *dma = arg;
878 
879     if (error) {
880         dma->paddr = 0;
881         dma->nseg  = 0;
882         BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
883     } else {
884         dma->paddr = segs->ds_addr;
885         dma->nseg  = nseg;
886     }
887 }
888 
889 /*
890  * Allocate a block of memory and map it for DMA. No partial completions
891  * allowed and release any resources acquired if we can't acquire all
892  * resources.
893  *
894  * Returns:
895  *   0 = Success, !0 = Failure
896  */
897 int
898 bxe_dma_alloc(struct bxe_softc *sc,
899               bus_size_t       size,
900               struct bxe_dma   *dma,
901               const char       *msg)
902 {
903     int rc;
904 
905     if (dma->size > 0) {
906         BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
907               (unsigned long)dma->size);
908         return (1);
909     }
910 
911     memset(dma, 0, sizeof(*dma)); /* sanity */
912     dma->sc   = sc;
913     dma->size = size;
914     snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
915 
916     rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
917                             BCM_PAGE_SIZE,      /* alignment */
918                             0,                  /* boundary limit */
919                             BUS_SPACE_MAXADDR,  /* restricted low */
920                             BUS_SPACE_MAXADDR,  /* restricted hi */
921                             NULL,               /* addr filter() */
922                             NULL,               /* addr filter() arg */
923                             size,               /* max map size */
924                             1,                  /* num discontinuous */
925                             size,               /* max seg size */
926                             BUS_DMA_ALLOCNOW,   /* flags */
927                             NULL,               /* lock() */
928                             NULL,               /* lock() arg */
929                             &dma->tag);         /* returned dma tag */
930     if (rc != 0) {
931         BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
932         memset(dma, 0, sizeof(*dma));
933         return (1);
934     }
935 
936     rc = bus_dmamem_alloc(dma->tag,
937                           (void **)&dma->vaddr,
938                           (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
939                           &dma->map);
940     if (rc != 0) {
941         BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
942         bus_dma_tag_destroy(dma->tag);
943         memset(dma, 0, sizeof(*dma));
944         return (1);
945     }
946 
947     rc = bus_dmamap_load(dma->tag,
948                          dma->map,
949                          dma->vaddr,
950                          size,
951                          bxe_dma_map_addr, /* BLOGD in here */
952                          dma,
953                          BUS_DMA_NOWAIT);
954     if (rc != 0) {
955         BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
956         bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
957         bus_dma_tag_destroy(dma->tag);
958         memset(dma, 0, sizeof(*dma));
959         return (1);
960     }
961 
962     return (0);
963 }
964 
965 void
966 bxe_dma_free(struct bxe_softc *sc,
967              struct bxe_dma   *dma)
968 {
969     if (dma->size > 0) {
970         DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
971 
972         bus_dmamap_sync(dma->tag, dma->map,
973                         (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
974         bus_dmamap_unload(dma->tag, dma->map);
975         bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
976         bus_dma_tag_destroy(dma->tag);
977     }
978 
979     memset(dma, 0, sizeof(*dma));
980 }
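/*
 * A minimal, hypothetical usage sketch of the two DMA helpers above:
 * allocate one page of bus-coherent memory at attach time and release it
 * again on detach. The bxe_dma descriptor must start out zeroed.
 *
 *     struct bxe_dma dma;
 *     memset(&dma, 0, sizeof(dma));
 *     if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "example block") != 0)
 *         return (ENOMEM);
 *     ...
 *     bxe_dma_free(sc, &dma);
 */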
981 
982 /*
983  * These indirect read and write routines are only used during init.
984  * The locking is handled by the MCP.
985  */
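/*
 * Both routines window a GRC register through PCI config space: the target
 * GRC address is written to PCICFG_GRC_ADDRESS, the data moves through
 * PCICFG_GRC_DATA, and the window is cleared again afterwards.
 */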
986 
987 void
988 bxe_reg_wr_ind(struct bxe_softc *sc,
989                uint32_t         addr,
990                uint32_t         val)
991 {
992     pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
993     pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
994     pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
995 }
996 
997 uint32_t
998 bxe_reg_rd_ind(struct bxe_softc *sc,
999                uint32_t         addr)
1000 {
1001     uint32_t val;
1002 
1003     pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
1004     val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
1005     pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
1006 
1007     return (val);
1008 }
1009 
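/*
 * Acquire/release one of the shared HW resource locks. Each function's
 * lock state lives in a MISC_REG_DRIVER_CONTROL_* register (8-byte
 * stride); a lock request is made by writing the resource bit to the
 * control register + 4, and acquisition is polled every 5ms for up to
 * 5 seconds.
 */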
1010 static int
1011 bxe_acquire_hw_lock(struct bxe_softc *sc,
1012                     uint32_t         resource)
1013 {
1014     uint32_t lock_status;
1015     uint32_t resource_bit = (1 << resource);
1016     int func = SC_FUNC(sc);
1017     uint32_t hw_lock_control_reg;
1018     int cnt;
1019 
1020     /* validate the resource is within range */
1021     if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1022         BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1023             " resource_bit 0x%x\n", resource, resource_bit);
1024         return (-1);
1025     }
1026 
1027     if (func <= 5) {
1028         hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1029     } else {
1030         hw_lock_control_reg =
1031                 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1032     }
1033 
1034     /* validate the resource is not already taken */
1035     lock_status = REG_RD(sc, hw_lock_control_reg);
1036     if (lock_status & resource_bit) {
1037         BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1038               resource, lock_status, resource_bit);
1039         return (-1);
1040     }
1041 
1042     /* try every 5ms for 5 seconds */
1043     for (cnt = 0; cnt < 1000; cnt++) {
1044         REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1045         lock_status = REG_RD(sc, hw_lock_control_reg);
1046         if (lock_status & resource_bit) {
1047             return (0);
1048         }
1049         DELAY(5000);
1050     }
1051 
1052     BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1053         resource, resource_bit);
1054     return (-1);
1055 }
1056 
1057 static int
1058 bxe_release_hw_lock(struct bxe_softc *sc,
1059                     uint32_t         resource)
1060 {
1061     uint32_t lock_status;
1062     uint32_t resource_bit = (1 << resource);
1063     int func = SC_FUNC(sc);
1064     uint32_t hw_lock_control_reg;
1065 
1066     /* validate the resource is within range */
1067     if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1068         BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1069             " resource_bit 0x%x\n", resource, resource_bit);
1070         return (-1);
1071     }
1072 
1073     if (func <= 5) {
1074         hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1075     } else {
1076         hw_lock_control_reg =
1077                 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1078     }
1079 
1080     /* validate the resource is currently taken */
1081     lock_status = REG_RD(sc, hw_lock_control_reg);
1082     if (!(lock_status & resource_bit)) {
1083         BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1084               resource, lock_status, resource_bit);
1085         return (-1);
1086     }
1087 
1088     REG_WR(sc, hw_lock_control_reg, resource_bit);
1089     return (0);
1090 }
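/*
 * The PHY lock nests the driver's PHY mutex around the shared HW MDIO
 * lock so that only one context per function touches the PHY at a time;
 * it is released in the reverse order.
 */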
1091 static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1092 {
1093 	BXE_PHY_LOCK(sc);
1094 	bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1095 }
1096 
1097 static void bxe_release_phy_lock(struct bxe_softc *sc)
1098 {
1099 	bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1100 	BXE_PHY_UNLOCK(sc);
1101 }
1102 /*
1103  * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1104  * had we done things the other way around and two pfs from the same port
1105  * attempted to access nvram at the same time, we could run into a
1106  * scenario such as:
1107  * pf A takes the port lock.
1108  * pf B succeeds in taking the same lock since they are from the same port.
1109  * pf A takes the per pf misc lock. Performs eeprom access.
1110  * pf A finishes. Unlocks the per pf misc lock.
1111  * pf B takes the lock and proceeds to perform its own access.
1112  * pf A unlocks the per port lock, while pf B is still working (!).
1113  * mcp takes the per port lock and corrupts pf B's access (and/or has its own
1114  * access corrupted by pf B).
1115  */
1116 static int
1117 bxe_acquire_nvram_lock(struct bxe_softc *sc)
1118 {
1119     int port = SC_PORT(sc);
1120     int count, i;
1121     uint32_t val = 0;
1122 
1123     /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1124     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1125 
1126     /* adjust timeout for emulation/FPGA */
1127     count = NVRAM_TIMEOUT_COUNT;
1128     if (CHIP_REV_IS_SLOW(sc)) {
1129         count *= 100;
1130     }
1131 
1132     /* request access to nvram interface */
1133     REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1134            (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1135 
1136     for (i = 0; i < count*10; i++) {
1137         val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1138         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1139             break;
1140         }
1141 
1142         DELAY(5);
1143     }
1144 
1145     if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1146         BLOGE(sc, "Cannot get access to nvram interface "
1147             "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1148             port, val);
1149         return (-1);
1150     }
1151 
1152     return (0);
1153 }
1154 
1155 static int
1156 bxe_release_nvram_lock(struct bxe_softc *sc)
1157 {
1158     int port = SC_PORT(sc);
1159     int count, i;
1160     uint32_t val = 0;
1161 
1162     /* adjust timeout for emulation/FPGA */
1163     count = NVRAM_TIMEOUT_COUNT;
1164     if (CHIP_REV_IS_SLOW(sc)) {
1165         count *= 100;
1166     }
1167 
1168     /* relinquish nvram interface */
1169     REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1170            (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1171 
1172     for (i = 0; i < count*10; i++) {
1173         val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1174         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1175             break;
1176         }
1177 
1178         DELAY(5);
1179     }
1180 
1181     if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1182         BLOGE(sc, "Cannot free access to nvram interface "
1183             "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1184             port, val);
1185         return (-1);
1186     }
1187 
1188     /* release HW lock: protect against other PFs in PF Direct Assignment */
1189     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1190 
1191     return (0);
1192 }
1193 
1194 static void
1195 bxe_enable_nvram_access(struct bxe_softc *sc)
1196 {
1197     uint32_t val;
1198 
1199     val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1200 
1201     /* enable both bits, even on read */
1202     REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1203            (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1204 }
1205 
1206 static void
1207 bxe_disable_nvram_access(struct bxe_softc *sc)
1208 {
1209     uint32_t val;
1210 
1211     val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1212 
1213     /* disable both bits, even after read */
1214     REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1215            (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1216                     MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1217 }
1218 
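/*
 * Read a single 32-bit word from NVRAM: clear the DONE bit, program the
 * byte offset, issue the DOIT command and poll for DONE before reading
 * the data register. cmd_flags carries the FIRST/LAST markers when a
 * sequence of dwords is being read.
 */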
1219 static int
1220 bxe_nvram_read_dword(struct bxe_softc *sc,
1221                      uint32_t         offset,
1222                      uint32_t         *ret_val,
1223                      uint32_t         cmd_flags)
1224 {
1225     int count, i, rc;
1226     uint32_t val;
1227 
1228     /* build the command word */
1229     cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1230 
1231     /* need to clear DONE bit separately */
1232     REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1233 
1234     /* address of the NVRAM to read from */
1235     REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1236            (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1237 
1238     /* issue a read command */
1239     REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1240 
1241     /* adjust timeout for emulation/FPGA */
1242     count = NVRAM_TIMEOUT_COUNT;
1243     if (CHIP_REV_IS_SLOW(sc)) {
1244         count *= 100;
1245     }
1246 
1247     /* wait for completion */
1248     *ret_val = 0;
1249     rc = -1;
1250     for (i = 0; i < count; i++) {
1251         DELAY(5);
1252         val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1253 
1254         if (val & MCPR_NVM_COMMAND_DONE) {
1255             val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1256             /* we read nvram data in cpu order,
1257              * but the caller sees it as an array of bytes;
1258              * converting to big-endian will do the work
1259              */
1260             *ret_val = htobe32(val);
1261             rc = 0;
1262             break;
1263         }
1264     }
1265 
1266     if (rc == -1) {
1267         BLOGE(sc, "nvram read timeout expired "
1268             "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1269             offset, cmd_flags, val);
1270     }
1271 
1272     return (rc);
1273 }
1274 
1275 static int
1276 bxe_nvram_read(struct bxe_softc *sc,
1277                uint32_t         offset,
1278                uint8_t          *ret_buf,
1279                int              buf_size)
1280 {
1281     uint32_t cmd_flags;
1282     uint32_t val;
1283     int rc;
1284 
1285     if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1286         BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1287               offset, buf_size);
1288         return (-1);
1289     }
1290 
1291     if ((offset + buf_size) > sc->devinfo.flash_size) {
1292         BLOGE(sc, "Invalid parameter, "
1293                   "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1294               offset, buf_size, sc->devinfo.flash_size);
1295         return (-1);
1296     }
1297 
1298     /* request access to nvram interface */
1299     rc = bxe_acquire_nvram_lock(sc);
1300     if (rc) {
1301         return (rc);
1302     }
1303 
1304     /* enable access to nvram interface */
1305     bxe_enable_nvram_access(sc);
1306 
1307     /* read the first word(s) */
1308     cmd_flags = MCPR_NVM_COMMAND_FIRST;
1309     while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1310         rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1311         memcpy(ret_buf, &val, 4);
1312 
1313         /* advance to the next dword */
1314         offset += sizeof(uint32_t);
1315         ret_buf += sizeof(uint32_t);
1316         buf_size -= sizeof(uint32_t);
1317         cmd_flags = 0;
1318     }
1319 
1320     if (rc == 0) {
1321         cmd_flags |= MCPR_NVM_COMMAND_LAST;
1322         rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1323         memcpy(ret_buf, &val, 4);
1324     }
1325 
1326     /* disable access to nvram interface */
1327     bxe_disable_nvram_access(sc);
1328     bxe_release_nvram_lock(sc);
1329 
1330     return (rc);
1331 }
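/*
 * A minimal, hypothetical usage sketch of bxe_nvram_read() above: both the
 * offset and the length must be dword aligned and lie within the flash.
 *
 *     uint8_t buf[64];
 *     if (bxe_nvram_read(sc, 0x0, buf, sizeof(buf)) != 0)
 *         BLOGE(sc, "nvram read failed\n");
 */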
1332 
1333 static int
1334 bxe_nvram_write_dword(struct bxe_softc *sc,
1335                       uint32_t         offset,
1336                       uint32_t         val,
1337                       uint32_t         cmd_flags)
1338 {
1339     int count, i, rc;
1340 
1341     /* build the command word */
1342     cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1343 
1344     /* need to clear DONE bit separately */
1345     REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1346 
1347     /* write the data */
1348     REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1349 
1350     /* address of the NVRAM to write to */
1351     REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1352            (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1353 
1354     /* issue the write command */
1355     REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1356 
1357     /* adjust timeout for emulation/FPGA */
1358     count = NVRAM_TIMEOUT_COUNT;
1359     if (CHIP_REV_IS_SLOW(sc)) {
1360         count *= 100;
1361     }
1362 
1363     /* wait for completion */
1364     rc = -1;
1365     for (i = 0; i < count; i++) {
1366         DELAY(5);
1367         val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1368         if (val & MCPR_NVM_COMMAND_DONE) {
1369             rc = 0;
1370             break;
1371         }
1372     }
1373 
1374     if (rc == -1) {
1375         BLOGE(sc, "nvram write timeout expired "
1376             "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1377             offset, cmd_flags, val);
1378     }
1379 
1380     return (rc);
1381 }
1382 
1383 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
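/*
 * BYTE_OFFSET() converts a byte position within a dword (0-3) into the
 * bit shift (0, 8, 16 or 24) used by bxe_nvram_write1() to splice a single
 * byte into its containing dword.
 */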
1384 
1385 static int
1386 bxe_nvram_write1(struct bxe_softc *sc,
1387                  uint32_t         offset,
1388                  uint8_t          *data_buf,
1389                  int              buf_size)
1390 {
1391     uint32_t cmd_flags;
1392     uint32_t align_offset;
1393     uint32_t val;
1394     int rc;
1395 
1396     if ((offset + buf_size) > sc->devinfo.flash_size) {
1397         BLOGE(sc, "Invalid parameter, "
1398                   "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1399               offset, buf_size, sc->devinfo.flash_size);
1400         return (-1);
1401     }
1402 
1403     /* request access to nvram interface */
1404     rc = bxe_acquire_nvram_lock(sc);
1405     if (rc) {
1406         return (rc);
1407     }
1408 
1409     /* enable access to nvram interface */
1410     bxe_enable_nvram_access(sc);
1411 
1412     cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1413     align_offset = (offset & ~0x03);
1414     rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1415 
1416     if (rc == 0) {
1417         val &= ~(0xff << BYTE_OFFSET(offset));
1418         val |= (*data_buf << BYTE_OFFSET(offset));
1419 
1420         /* nvram data is returned as an array of bytes
1421          * convert it back to cpu order
1422          */
1423         val = be32toh(val);
1424 
1425         rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1426     }
1427 
1428     /* disable access to nvram interface */
1429     bxe_disable_nvram_access(sc);
1430     bxe_release_nvram_lock(sc);
1431 
1432     return (rc);
1433 }
1434 
1435 static int
1436 bxe_nvram_write(struct bxe_softc *sc,
1437                 uint32_t         offset,
1438                 uint8_t          *data_buf,
1439                 int              buf_size)
1440 {
1441     uint32_t cmd_flags;
1442     uint32_t val;
1443     uint32_t written_so_far;
1444     int rc;
1445 
1446     if (buf_size == 1) {
1447         return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1448     }
1449 
1450     if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1451         BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1452               offset, buf_size);
1453         return (-1);
1454     }
1455 
1456     if (buf_size == 0) {
1457         return (0); /* nothing to do */
1458     }
1459 
1460     if ((offset + buf_size) > sc->devinfo.flash_size) {
1461         BLOGE(sc, "Invalid parameter, "
1462                   "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1463               offset, buf_size, sc->devinfo.flash_size);
1464         return (-1);
1465     }
1466 
1467     /* request access to nvram interface */
1468     rc = bxe_acquire_nvram_lock(sc);
1469     if (rc) {
1470         return (rc);
1471     }
1472 
1473     /* enable access to nvram interface */
1474     bxe_enable_nvram_access(sc);
1475 
1476     written_so_far = 0;
1477     cmd_flags = MCPR_NVM_COMMAND_FIRST;
1478     while ((written_so_far < buf_size) && (rc == 0)) {
1479         if (written_so_far == (buf_size - sizeof(uint32_t))) {
1480             cmd_flags |= MCPR_NVM_COMMAND_LAST;
1481         } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1482             cmd_flags |= MCPR_NVM_COMMAND_LAST;
1483         } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1484             cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1485         }
1486 
1487         memcpy(&val, data_buf, 4);
1488 
1489         rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1490 
1491         /* advance to the next dword */
1492         offset += sizeof(uint32_t);
1493         data_buf += sizeof(uint32_t);
1494         written_so_far += sizeof(uint32_t);
1495         cmd_flags = 0;
1496     }
1497 
1498     /* disable access to nvram interface */
1499     bxe_disable_nvram_access(sc);
1500     bxe_release_nvram_lock(sc);
1501 
1502     return (rc);
1503 }
1504 
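/*
 * Minimal usage sketch (illustrative only, not part of the driver): a
 * dword-aligned buffer whose size is a multiple of four goes through the
 * paged loop in bxe_nvram_write(), while a single-byte write at any offset
 * is routed to the read-modify-write path in bxe_nvram_write1().
 *
 *     int rc;
 *     uint8_t buf[8] = { 0 };
 *     rc = bxe_nvram_write(sc, 0x100, buf, sizeof(buf)); // aligned dwords
 *     rc = bxe_nvram_write(sc, 0x105, buf, 1);           // single byte
 */
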
1505 /* copy command into DMAE command memory and set DMAE command Go */
1506 void
1507 bxe_post_dmae(struct bxe_softc    *sc,
1508               struct dmae_cmd *dmae,
1509               int                 idx)
1510 {
1511     uint32_t cmd_offset;
1512     int i;
1513 
1514     cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1515     for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1516         REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1517     }
1518 
1519     REG_WR(sc, dmae_reg_go_c[idx], 1);
1520 }
1521 
1522 uint32_t
1523 bxe_dmae_opcode_add_comp(uint32_t opcode,
1524                          uint8_t  comp_type)
1525 {
1526     return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1527                       DMAE_CMD_C_TYPE_ENABLE));
1528 }
1529 
1530 uint32_t
1531 bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1532 {
1533     return (opcode & ~DMAE_CMD_SRC_RESET);
1534 }
1535 
1536 uint32_t
1537 bxe_dmae_opcode(struct bxe_softc *sc,
1538                 uint8_t          src_type,
1539                 uint8_t          dst_type,
1540                 uint8_t          with_comp,
1541                 uint8_t          comp_type)
1542 {
1543     uint32_t opcode = 0;
1544 
1545     opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1546                (dst_type << DMAE_CMD_DST_SHIFT));
1547 
1548     opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1549 
1550     opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1551 
1552     opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1553                (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1554 
1555     opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1556 
1557 #ifdef __BIG_ENDIAN
1558     opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1559 #else
1560     opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1561 #endif
1562 
1563     if (with_comp) {
1564         opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1565     }
1566 
1567     return (opcode);
1568 }
1569 
1570 static void
1571 bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1572                         struct dmae_cmd *dmae,
1573                         uint8_t             src_type,
1574                         uint8_t             dst_type)
1575 {
1576     memset(dmae, 0, sizeof(struct dmae_cmd));
1577 
1578     /* set the opcode */
1579     dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1580                                    TRUE, DMAE_COMP_PCI);
1581 
1582     /* fill in the completion parameters */
1583     dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1584     dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1585     dmae->comp_val     = DMAE_COMP_VAL;
1586 }
1587 
1588 /* issue a DMAE command over the init channel and wait for completion */
1589 static int
1590 bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1591                          struct dmae_cmd *dmae)
1592 {
1593     uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1594     int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1595 
1596     BXE_DMAE_LOCK(sc);
1597 
1598     /* reset completion */
1599     *wb_comp = 0;
1600 
1601     /* post the command on the channel used for initializations */
1602     bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1603 
1604     /* wait for completion */
1605     DELAY(5);
1606 
1607     while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1608         if (!timeout ||
1609             (sc->recovery_state != BXE_RECOVERY_DONE &&
1610              sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1611             BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1612                 *wb_comp, sc->recovery_state);
1613             BXE_DMAE_UNLOCK(sc);
1614             return (DMAE_TIMEOUT);
1615         }
1616 
1617         timeout--;
1618         DELAY(50);
1619     }
1620 
1621     if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1622         BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1623                 *wb_comp, sc->recovery_state);
1624         BXE_DMAE_UNLOCK(sc);
1625         return (DMAE_PCI_ERROR);
1626     }
1627 
1628     BXE_DMAE_UNLOCK(sc);
1629     return (0);
1630 }
1631 
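/*
 * Rough wait budget for the completion polling in bxe_issue_dmae_with_comp()
 * (illustrative arithmetic only): after the initial 5 usec delay, up to 4000
 * iterations of 50 usec each are allowed on production silicon, i.e. roughly
 * 200 msec before DMAE_TIMEOUT is returned; slow (emulation) chips get
 * 400000 iterations, roughly 20 seconds.
 */
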
1632 void
1633 bxe_read_dmae(struct bxe_softc *sc,
1634               uint32_t         src_addr,
1635               uint32_t         len32)
1636 {
1637     struct dmae_cmd dmae;
1638     uint32_t *data;
1639     int i, rc;
1640 
1641     DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1642 
1643     if (!sc->dmae_ready) {
1644         data = BXE_SP(sc, wb_data[0]);
1645 
1646         for (i = 0; i < len32; i++) {
1647             data[i] = (CHIP_IS_E1(sc)) ?
1648                           bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1649                           REG_RD(sc, (src_addr + (i * 4)));
1650         }
1651 
1652         return;
1653     }
1654 
1655     /* set opcode and fixed command fields */
1656     bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1657 
1658     /* fill in addresses and len */
1659     dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1660     dmae.src_addr_hi = 0;
1661     dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1662     dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1663     dmae.len         = len32;
1664 
1665     /* issue the command and wait for completion */
1666     if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1667         bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1668     }
1669 }
1670 
1671 void
1672 bxe_write_dmae(struct bxe_softc *sc,
1673                bus_addr_t       dma_addr,
1674                uint32_t         dst_addr,
1675                uint32_t         len32)
1676 {
1677     struct dmae_cmd dmae;
1678     int rc;
1679 
1680     if (!sc->dmae_ready) {
1681         DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1682 
1683         if (CHIP_IS_E1(sc)) {
1684             ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1685         } else {
1686             ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1687         }
1688 
1689         return;
1690     }
1691 
1692     /* set opcode and fixed command fields */
1693     bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1694 
1695     /* fill in addresses and len */
1696     dmae.src_addr_lo = U64_LO(dma_addr);
1697     dmae.src_addr_hi = U64_HI(dma_addr);
1698     dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1699     dmae.dst_addr_hi = 0;
1700     dmae.len         = len32;
1701 
1702     /* issue the command and wait for completion */
1703     if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1704         bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1705     }
1706 }
1707 
1708 void
1709 bxe_write_dmae_phys_len(struct bxe_softc *sc,
1710                         bus_addr_t       phys_addr,
1711                         uint32_t         addr,
1712                         uint32_t         len)
1713 {
1714     int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1715     int offset = 0;
1716 
1717     while (len > dmae_wr_max) {
1718         bxe_write_dmae(sc,
1719                        (phys_addr + offset), /* src DMA address */
1720                        (addr + offset),      /* dst GRC address */
1721                        dmae_wr_max);
1722         offset += (dmae_wr_max * 4);
1723         len -= dmae_wr_max;
1724     }
1725 
1726     bxe_write_dmae(sc,
1727                    (phys_addr + offset), /* src DMA address */
1728                    (addr + offset),      /* dst GRC address */
1729                    len);
1730 }
1731 
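/*
 * Illustrative arithmetic only: lengths here are in dwords while the DMA and
 * GRC offsets advance in bytes, hence the "dmae_wr_max * 4" step above.
 * Assuming a per-command limit of 0x400 dwords, a 0x900-dword write is split
 * into two 0x400-dword commands (each advancing the offsets by 0x1000 bytes)
 * followed by a final 0x100-dword command.
 */
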
1732 void
1733 bxe_set_ctx_validation(struct bxe_softc   *sc,
1734                        struct eth_context *cxt,
1735                        uint32_t           cid)
1736 {
1737     /* ustorm cxt validation */
1738     cxt->ustorm_ag_context.cdu_usage =
1739         CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1740             CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1741     /* xcontext validation */
1742     cxt->xstorm_ag_context.cdu_reserved =
1743         CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1744             CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1745 }
1746 
1747 static void
1748 bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1749                             uint8_t          port,
1750                             uint8_t          fw_sb_id,
1751                             uint8_t          sb_index,
1752                             uint8_t          ticks)
1753 {
1754     uint32_t addr =
1755         (BAR_CSTRORM_INTMEM +
1756          CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1757 
1758     REG_WR8(sc, addr, ticks);
1759 
1760     BLOGD(sc, DBG_LOAD,
1761           "port %d fw_sb_id %d sb_index %d ticks %d\n",
1762           port, fw_sb_id, sb_index, ticks);
1763 }
1764 
1765 static void
1766 bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1767                             uint8_t          port,
1768                             uint16_t         fw_sb_id,
1769                             uint8_t          sb_index,
1770                             uint8_t          disable)
1771 {
1772     uint32_t enable_flag =
1773         (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1774     uint32_t addr =
1775         (BAR_CSTRORM_INTMEM +
1776          CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1777     uint8_t flags;
1778 
1779     /* clear and set */
1780     flags = REG_RD8(sc, addr);
1781     flags &= ~HC_INDEX_DATA_HC_ENABLED;
1782     flags |= enable_flag;
1783     REG_WR8(sc, addr, flags);
1784 
1785     BLOGD(sc, DBG_LOAD,
1786           "port %d fw_sb_id %d sb_index %d disable %d\n",
1787           port, fw_sb_id, sb_index, disable);
1788 }
1789 
1790 void
1791 bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1792                              uint8_t          fw_sb_id,
1793                              uint8_t          sb_index,
1794                              uint8_t          disable,
1795                              uint16_t         usec)
1796 {
1797     int port = SC_PORT(sc);
1798     uint8_t ticks = (usec / 4); /* XXX ??? */
1799 
1800     bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1801 
1802     disable = (disable) ? 1 : ((usec) ? 0 : 1);
1803     bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1804 }
1805 
1806 void
1807 elink_cb_udelay(struct bxe_softc *sc,
1808                 uint32_t         usecs)
1809 {
1810     DELAY(usecs);
1811 }
1812 
1813 uint32_t
1814 elink_cb_reg_read(struct bxe_softc *sc,
1815                   uint32_t         reg_addr)
1816 {
1817     return (REG_RD(sc, reg_addr));
1818 }
1819 
1820 void
1821 elink_cb_reg_write(struct bxe_softc *sc,
1822                    uint32_t         reg_addr,
1823                    uint32_t         val)
1824 {
1825     REG_WR(sc, reg_addr, val);
1826 }
1827 
1828 void
1829 elink_cb_reg_wb_write(struct bxe_softc *sc,
1830                       uint32_t         offset,
1831                       uint32_t         *wb_write,
1832                       uint16_t         len)
1833 {
1834     REG_WR_DMAE(sc, offset, wb_write, len);
1835 }
1836 
1837 void
1838 elink_cb_reg_wb_read(struct bxe_softc *sc,
1839                      uint32_t         offset,
1840                      uint32_t         *wb_write,
1841                      uint16_t         len)
1842 {
1843     REG_RD_DMAE(sc, offset, wb_write, len);
1844 }
1845 
1846 uint8_t
1847 elink_cb_path_id(struct bxe_softc *sc)
1848 {
1849     return (SC_PATH(sc));
1850 }
1851 
1852 void
1853 elink_cb_event_log(struct bxe_softc     *sc,
1854                    const elink_log_id_t elink_log_id,
1855                    ...)
1856 {
1857     /* XXX */
1858     BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1859 }
1860 
1861 static int
1862 bxe_set_spio(struct bxe_softc *sc,
1863              int              spio,
1864              uint32_t         mode)
1865 {
1866     uint32_t spio_reg;
1867 
1868     /* Only 2 SPIOs are configurable */
1869     if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1870         BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1871         return (-1);
1872     }
1873 
1874     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1875 
1876     /* read SPIO and keep only the FLOAT bits */
1877     spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1878 
1879     switch (mode) {
1880     case MISC_SPIO_OUTPUT_LOW:
1881         BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1882         /* clear FLOAT and set CLR */
1883         spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1884         spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1885         break;
1886 
1887     case MISC_SPIO_OUTPUT_HIGH:
1888         BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1889         /* clear FLOAT and set SET */
1890         spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1891         spio_reg |=  (spio << MISC_SPIO_SET_POS);
1892         break;
1893 
1894     case MISC_SPIO_INPUT_HI_Z:
1895         BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1896         /* set FLOAT */
1897         spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1898         break;
1899 
1900     default:
1901         break;
1902     }
1903 
1904     REG_WR(sc, MISC_REG_SPIO, spio_reg);
1905     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1906 
1907     return (0);
1908 }
1909 
1910 static int
1911 bxe_gpio_read(struct bxe_softc *sc,
1912               int              gpio_num,
1913               uint8_t          port)
1914 {
1915     /* The GPIO should be swapped if swap register is set and active */
1916     int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1917                       REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1918     int gpio_shift = (gpio_num +
1919                       (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1920     uint32_t gpio_mask = (1 << gpio_shift);
1921     uint32_t gpio_reg;
1922 
1923     if (gpio_num > MISC_REGISTERS_GPIO_3) {
1924         BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1925             " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1926             gpio_mask);
1927         return (-1);
1928     }
1929 
1930     /* read GPIO value */
1931     gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1932 
1933     /* get the requested pin value */
1934     return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1935 }
1936 
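/*
 * Illustrative arithmetic only: when both NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE are set, gpio_port is the complement of the
 * caller's port. Assuming MISC_REGISTERS_GPIO_PORT_SHIFT is 4, reading
 * GPIO 1 for gpio_port 1 uses gpio_shift = 1 + 4 = 5 and
 * gpio_mask = (1 << 5) = 0x20 against the value read from MISC_REG_GPIO.
 */
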
1937 static int
1938 bxe_gpio_write(struct bxe_softc *sc,
1939                int              gpio_num,
1940                uint32_t         mode,
1941                uint8_t          port)
1942 {
1943     /* The GPIO should be swapped if swap register is set and active */
1944     int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1945                       REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1946     int gpio_shift = (gpio_num +
1947                       (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1948     uint32_t gpio_mask = (1 << gpio_shift);
1949     uint32_t gpio_reg;
1950 
1951     if (gpio_num > MISC_REGISTERS_GPIO_3) {
1952         BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1953             " gpio_shift %d gpio_mask 0x%x\n",
1954             gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1955         return (-1);
1956     }
1957 
1958     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1959 
1960     /* read GPIO and keep only the FLOAT bits */
1961     gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1962 
1963     switch (mode) {
1964     case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1965         BLOGD(sc, DBG_PHY,
1966               "Set GPIO %d (shift %d) -> output low\n",
1967               gpio_num, gpio_shift);
1968         /* clear FLOAT and set CLR */
1969         gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1970         gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1971         break;
1972 
1973     case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1974         BLOGD(sc, DBG_PHY,
1975               "Set GPIO %d (shift %d) -> output high\n",
1976               gpio_num, gpio_shift);
1977         /* clear FLOAT and set SET */
1978         gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1979         gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1980         break;
1981 
1982     case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1983         BLOGD(sc, DBG_PHY,
1984               "Set GPIO %d (shift %d) -> input\n",
1985               gpio_num, gpio_shift);
1986         /* set FLOAT */
1987         gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1988         break;
1989 
1990     default:
1991         break;
1992     }
1993 
1994     REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1995     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1996 
1997     return (0);
1998 }
1999 
2000 static int
2001 bxe_gpio_mult_write(struct bxe_softc *sc,
2002                     uint8_t          pins,
2003                     uint32_t         mode)
2004 {
2005     uint32_t gpio_reg;
2006 
2007     /* any port swapping should be handled by caller */
2008 
2009     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2010 
2011     /* read GPIO and clear the FLOAT, CLR and SET bits for the given pins */
2012     gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2013     gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2014     gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2015     gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2016 
2017     switch (mode) {
2018     case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2019         BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2020         /* set CLR */
2021         gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2022         break;
2023 
2024     case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2025         BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2026         /* set SET */
2027         gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2028         break;
2029 
2030     case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2031         BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2032         /* set FLOAT */
2033         gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2034         break;
2035 
2036     default:
2037         BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2038             " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2039         bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2040         return (-1);
2041     }
2042 
2043     REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2044     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2045 
2046     return (0);
2047 }
2048 
2049 static int
2050 bxe_gpio_int_write(struct bxe_softc *sc,
2051                    int              gpio_num,
2052                    uint32_t         mode,
2053                    uint8_t          port)
2054 {
2055     /* The GPIO should be swapped if swap register is set and active */
2056     int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2057                       REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2058     int gpio_shift = (gpio_num +
2059                       (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2060     uint32_t gpio_mask = (1 << gpio_shift);
2061     uint32_t gpio_reg;
2062 
2063     if (gpio_num > MISC_REGISTERS_GPIO_3) {
2064         BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2065             " gpio_shift %d gpio_mask 0x%x\n",
2066             gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2067         return (-1);
2068     }
2069 
2070     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2071 
2072     /* read GPIO int */
2073     gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2074 
2075     switch (mode) {
2076     case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2077         BLOGD(sc, DBG_PHY,
2078               "Clear GPIO INT %d (shift %d) -> output low\n",
2079               gpio_num, gpio_shift);
2080         /* clear SET and set CLR */
2081         gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2082         gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2083         break;
2084 
2085     case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2086         BLOGD(sc, DBG_PHY,
2087               "Set GPIO INT %d (shift %d) -> output high\n",
2088               gpio_num, gpio_shift);
2089         /* clear CLR and set SET */
2090         gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2091         gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2092         break;
2093 
2094     default:
2095         break;
2096     }
2097 
2098     REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2099     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2100 
2101     return (0);
2102 }
2103 
2104 uint32_t
2105 elink_cb_gpio_read(struct bxe_softc *sc,
2106                    uint16_t         gpio_num,
2107                    uint8_t          port)
2108 {
2109     return (bxe_gpio_read(sc, gpio_num, port));
2110 }
2111 
2112 uint8_t
2113 elink_cb_gpio_write(struct bxe_softc *sc,
2114                     uint16_t         gpio_num,
2115                     uint8_t          mode, /* 0=low 1=high */
2116                     uint8_t          port)
2117 {
2118     return (bxe_gpio_write(sc, gpio_num, mode, port));
2119 }
2120 
2121 uint8_t
2122 elink_cb_gpio_mult_write(struct bxe_softc *sc,
2123                          uint8_t          pins,
2124                          uint8_t          mode) /* 0=low 1=high */
2125 {
2126     return (bxe_gpio_mult_write(sc, pins, mode));
2127 }
2128 
2129 uint8_t
2130 elink_cb_gpio_int_write(struct bxe_softc *sc,
2131                         uint16_t         gpio_num,
2132                         uint8_t          mode, /* 0=low 1=high */
2133                         uint8_t          port)
2134 {
2135     return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2136 }
2137 
2138 void
2139 elink_cb_notify_link_changed(struct bxe_softc *sc)
2140 {
2141     REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2142                 (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2143 }
2144 
2145 /* send the MCP a request, block until there is a reply */
2146 uint32_t
2147 elink_cb_fw_command(struct bxe_softc *sc,
2148                     uint32_t         command,
2149                     uint32_t         param)
2150 {
2151     int mb_idx = SC_FW_MB_IDX(sc);
2152     uint32_t seq;
2153     uint32_t rc = 0;
2154     uint32_t cnt = 1;
2155     uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2156 
2157     BXE_FWMB_LOCK(sc);
2158 
2159     seq = ++sc->fw_seq;
2160     SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2161     SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2162 
2163     BLOGD(sc, DBG_PHY,
2164           "wrote command 0x%08x to FW MB param 0x%08x\n",
2165           (command | seq), param);
2166 
2167     /* Let the FW do its magic. Give it up to 5 seconds... */
2168     do {
2169         DELAY(delay * 1000);
2170         rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2171     } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2172 
2173     BLOGD(sc, DBG_PHY,
2174           "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2175           cnt*delay, rc, seq);
2176 
2177     /* is this a reply to our command? */
2178     if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2179         rc &= FW_MSG_CODE_MASK;
2180     } else {
2181         /* Ruh-roh! */
2182         BLOGE(sc, "FW failed to respond!\n");
2183         // XXX bxe_fw_dump(sc);
2184         rc = 0;
2185     }
2186 
2187     BXE_FWMB_UNLOCK(sc);
2188     return (rc);
2189 }
2190 
2191 static uint32_t
2192 bxe_fw_command(struct bxe_softc *sc,
2193                uint32_t         command,
2194                uint32_t         param)
2195 {
2196     return (elink_cb_fw_command(sc, command, param));
2197 }
2198 
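/*
 * Usage sketch (illustrative only, the command code below is a placeholder):
 * callers hand the mailbox a command plus parameter and get back the masked
 * FW response code, with 0 meaning the FW never echoed the sequence number.
 * With the 10 msec poll interval and up to 500 polls on production silicon,
 * this is the "up to 5 seconds" budget mentioned above.
 *
 *     uint32_t fw_resp;
 *     fw_resp = bxe_fw_command(sc, some_drv_msg_code, 0);
 *     if (fw_resp == 0) {
 *         // FW did not respond; the caller must handle the failure
 *     }
 */
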
2199 static void
2200 __storm_memset_dma_mapping(struct bxe_softc *sc,
2201                            uint32_t         addr,
2202                            bus_addr_t       mapping)
2203 {
2204     REG_WR(sc, addr, U64_LO(mapping));
2205     REG_WR(sc, (addr + 4), U64_HI(mapping));
2206 }
2207 
2208 static void
2209 storm_memset_spq_addr(struct bxe_softc *sc,
2210                       bus_addr_t       mapping,
2211                       uint16_t         abs_fid)
2212 {
2213     uint32_t addr = (XSEM_REG_FAST_MEMORY +
2214                      XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2215     __storm_memset_dma_mapping(sc, addr, mapping);
2216 }
2217 
2218 static void
2219 storm_memset_vf_to_pf(struct bxe_softc *sc,
2220                       uint16_t         abs_fid,
2221                       uint16_t         pf_id)
2222 {
2223     REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2224     REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2225     REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2226     REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2227 }
2228 
2229 static void
2230 storm_memset_func_en(struct bxe_softc *sc,
2231                      uint16_t         abs_fid,
2232                      uint8_t          enable)
2233 {
2234     REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2235     REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2236     REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2237     REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2238 }
2239 
2240 static void
2241 storm_memset_eq_data(struct bxe_softc       *sc,
2242                      struct event_ring_data *eq_data,
2243                      uint16_t               pfid)
2244 {
2245     uint32_t addr;
2246     size_t size;
2247 
2248     addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2249     size = sizeof(struct event_ring_data);
2250     ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2251 }
2252 
2253 static void
2254 storm_memset_eq_prod(struct bxe_softc *sc,
2255                      uint16_t         eq_prod,
2256                      uint16_t         pfid)
2257 {
2258     uint32_t addr = (BAR_CSTRORM_INTMEM +
2259                      CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2260     REG_WR16(sc, addr, eq_prod);
2261 }
2262 
2263 /*
2264  * Post a slowpath command.
2265  *
2266  * A slowpath command is used to propagate a configuration change through
2267  * the controller in a controlled manner, allowing each STORM processor and
2268  * other H/W blocks to phase in the change.  The commands sent on the
2269  * slowpath are referred to as ramrods.  Depending on the ramrod used the
2270  * completion of the ramrod will occur in different ways.  Here's a
2271  * breakdown of ramrods and how they complete:
2272  *
2273  * RAMROD_CMD_ID_ETH_PORT_SETUP
2274  *   Used to setup the leading connection on a port.  Completes on the
2275  *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2276  *
2277  * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2278  *   Used to setup an additional connection on a port.  Completes on the
2279  *   RCQ of the multi-queue/RSS connection being initialized.
2280  *
2281  * RAMROD_CMD_ID_ETH_STAT_QUERY
2282  *   Used to force the storm processors to update the statistics database
2283  *   in host memory.  This ramrod is sent on the leading connection CID and
2284  *   completes as an index increment of the CSTORM on the default status
2285  *   block.
2286  *
2287  * RAMROD_CMD_ID_ETH_UPDATE
2288  *   Used to update the state of the leading connection, usually to update
2289  *   the RSS indirection table.  Completes on the RCQ of the leading
2290  *   connection. (Not currently used under FreeBSD until OS support becomes
2291  *   available.)
2292  *
2293  * RAMROD_CMD_ID_ETH_HALT
2294  *   Used when tearing down a connection prior to driver unload.  Completes
2295  *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2296  *   use this on the leading connection.
2297  *
2298  * RAMROD_CMD_ID_ETH_SET_MAC
2299  *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2300  *   the RCQ of the leading connection.
2301  *
2302  * RAMROD_CMD_ID_ETH_CFC_DEL
2303  *   Used when tearing down a connection prior to driver unload.  Completes
2304  *   on the RCQ of the leading connection (since the current connection
2305  *   has been completely removed from controller memory).
2306  *
2307  * RAMROD_CMD_ID_ETH_PORT_DEL
2308  *   Used to tear down the leading connection prior to driver unload,
2309  *   typically fp[0].  Completes as an index increment of the CSTORM on the
2310  *   default status block.
2311  *
2312  * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2313  *   Used for connection offload.  Completes on the RCQ of the multi-queue
2314  *   RSS connection that is being offloaded.  (Not currently used under
2315  *   FreeBSD.)
2316  *
2317  * There can only be one command pending per function.
2318  *
2319  * Returns:
2320  *   0 = Success, !0 = Failure.
2321  */
2322 
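/*
 * Posting sketch (illustrative only, all parameter values are placeholders):
 * a ramrod is queued by handing bxe_sp_post() the command, the software CID
 * it applies to, the bus address of its private data split into high/low
 * 32-bit halves, and the connection type that decides whether it completes
 * on the EQ or on an RCQ.
 *
 *     int rc;
 *     bus_addr_t data_phys = BXE_SP_MAPPING(sc, func_rdata);
 *     rc = bxe_sp_post(sc,
 *                      RAMROD_CMD_ID_ETH_HALT,   // command
 *                      fp->index,                // SW CID
 *                      U64_HI(data_phys),
 *                      U64_LO(data_phys),
 *                      ETH_CONNECTION_TYPE);
 *     if (rc != 0) {
 *         // the SPQ/EQ ring was full and nothing was posted
 *     }
 */
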
2323 /* must be called under the spq lock */
2324 static inline
2325 struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2326 {
2327     struct eth_spe *next_spe = sc->spq_prod_bd;
2328 
2329     if (sc->spq_prod_bd == sc->spq_last_bd) {
2330         /* wrap back to the first SPQ element */
2331         sc->spq_prod_bd = sc->spq;
2332         sc->spq_prod_idx = 0;
2333     } else {
2334         sc->spq_prod_bd++;
2335         sc->spq_prod_idx++;
2336     }
2337 
2338     return (next_spe);
2339 }
2340 
2341 /* must be called under the spq lock */
2342 static inline
2343 void bxe_sp_prod_update(struct bxe_softc *sc)
2344 {
2345     int func = SC_FUNC(sc);
2346 
2347     /*
2348      * Make sure that BD data is updated before writing the producer.
2349      * BD data is written to the memory, the producer is read from the
2350      * memory, thus we need a full memory barrier to ensure the ordering.
2351      */
2352     mb();
2353 
2354     REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2355              sc->spq_prod_idx);
2356 
2357     bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2358                       BUS_SPACE_BARRIER_WRITE);
2359 }
2360 
2361 /**
2362  * bxe_is_contextless_ramrod - check if the current command ends on EQ
2363  *
2364  * @cmd:      command to check
2365  * @cmd_type: command type
2366  */
2367 static inline
2368 int bxe_is_contextless_ramrod(int cmd,
2369                               int cmd_type)
2370 {
2371     if ((cmd_type == NONE_CONNECTION_TYPE) ||
2372         (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2373         (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2374         (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2375         (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2376         (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2377         (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2378         return (TRUE);
2379     } else {
2380         return (FALSE);
2381     }
2382 }
2383 
2384 /**
2385  * bxe_sp_post - place a single command on an SP ring
2386  *
2387  * @sc:         driver handle
2388  * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2389  * @cid:        SW CID the command is related to
2390  * @data_hi:    command private data address (high 32 bits)
2391  * @data_lo:    command private data address (low 32 bits)
2392  * @cmd_type:   command type (e.g. NONE, ETH)
2393  *
2394  * SP data is handled as if it's always an address pair, thus data fields are
2395  * not swapped to little endian in upper functions. Instead this function swaps
2396  * data as if it's two uint32 fields.
2397  */
2398 int
2399 bxe_sp_post(struct bxe_softc *sc,
2400             int              command,
2401             int              cid,
2402             uint32_t         data_hi,
2403             uint32_t         data_lo,
2404             int              cmd_type)
2405 {
2406     struct eth_spe *spe;
2407     uint16_t type;
2408     int common;
2409 
2410     common = bxe_is_contextless_ramrod(command, cmd_type);
2411 
2412     BXE_SP_LOCK(sc);
2413 
2414     if (common) {
2415         if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2416             BLOGE(sc, "EQ ring is full!\n");
2417             BXE_SP_UNLOCK(sc);
2418             return (-1);
2419         }
2420     } else {
2421         if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2422             BLOGE(sc, "SPQ ring is full!\n");
2423             BXE_SP_UNLOCK(sc);
2424             return (-1);
2425         }
2426     }
2427 
2428     spe = bxe_sp_get_next(sc);
2429 
2430     /* the CID needs the port number to be encoded in it */
2431     spe->hdr.conn_and_cmd_data =
2432         htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2433 
2434     type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2435 
2436     /* TBD: Check if it works for VFs */
2437     type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2438              SPE_HDR_T_FUNCTION_ID);
2439 
2440     spe->hdr.type = htole16(type);
2441 
2442     spe->data.update_data_addr.hi = htole32(data_hi);
2443     spe->data.update_data_addr.lo = htole32(data_lo);
2444 
2445     /*
2446      * It's ok if the actual decrement is issued towards the memory
2447      * somewhere between the lock and unlock. Thus no further explicit
2448      * memory barrier is needed.
2449      */
2450     if (common) {
2451         atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2452     } else {
2453         atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2454     }
2455 
2456     BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2457     BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2458           BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2459     BLOGD(sc, DBG_SP,
2460           "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2461           sc->spq_prod_idx,
2462           (uint32_t)U64_HI(sc->spq_dma.paddr),
2463           (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2464           command,
2465           common,
2466           HW_CID(sc, cid),
2467           data_hi,
2468           data_lo,
2469           type,
2470           atomic_load_acq_long(&sc->cq_spq_left),
2471           atomic_load_acq_long(&sc->eq_spq_left));
2472 
2473     bxe_sp_prod_update(sc);
2474 
2475     BXE_SP_UNLOCK(sc);
2476     return (0);
2477 }
2478 
2479 /**
2480  * bxe_debug_print_ind_table - prints the indirection table configuration.
2481  *
2482  * @sc: driver handle
2483  * @p:  pointer to rss configuration
2484  */
2485 
2486 /*
2487  * FreeBSD Device probe function.
2488  *
2489  * Compares the device found to the driver's list of supported devices and
2490  * reports back to the kernel whether this is the right driver for the device.
2491  * This is the driver entry point invoked when the module is loaded via "kldload".
2492  *
2493  * Returns:
2494  *   BUS_PROBE_DEFAULT on success, positive value on failure.
2495  */
2496 static int
2497 bxe_probe(device_t dev)
2498 {
2499     struct bxe_device_type *t;
2500     char *descbuf;
2501     uint16_t did, sdid, svid, vid;
2502 
2503     /* Find our device structure */
2504     t = bxe_devs;
2505 
2506     /* Get the data for the device to be probed. */
2507     vid  = pci_get_vendor(dev);
2508     did  = pci_get_device(dev);
2509     svid = pci_get_subvendor(dev);
2510     sdid = pci_get_subdevice(dev);
2511 
2512     /* Look through the list of known devices for a match. */
2513     while (t->bxe_name != NULL) {
2514         if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2515             ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2516             ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2517             descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2518             if (descbuf == NULL)
2519                 return (ENOMEM);
2520 
2521             /* Print out the device identity. */
2522             snprintf(descbuf, BXE_DEVDESC_MAX,
2523                      "%s (%c%d) BXE v:%s", t->bxe_name,
2524                      (((pci_read_config(dev, PCIR_REVID, 4) &
2525                         0xf0) >> 4) + 'A'),
2526                      (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2527                      BXE_DRIVER_VERSION);
2528 
2529             device_set_desc_copy(dev, descbuf);
2530             free(descbuf, M_TEMP);
2531             return (BUS_PROBE_DEFAULT);
2532         }
2533         t++;
2534     }
2535 
2536     return (ENXIO);
2537 }
2538 
2539 static void
2540 bxe_init_mutexes(struct bxe_softc *sc)
2541 {
2542 #ifdef BXE_CORE_LOCK_SX
2543     snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2544              "bxe%d_core_lock", sc->unit);
2545     sx_init(&sc->core_sx, sc->core_sx_name);
2546 #else
2547     snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2548              "bxe%d_core_lock", sc->unit);
2549     mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2550 #endif
2551 
2552     snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2553              "bxe%d_sp_lock", sc->unit);
2554     mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2555 
2556     snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2557              "bxe%d_dmae_lock", sc->unit);
2558     mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2559 
2560     snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2561              "bxe%d_phy_lock", sc->unit);
2562     mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2563 
2564     snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2565              "bxe%d_fwmb_lock", sc->unit);
2566     mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2567 
2568     snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2569              "bxe%d_print_lock", sc->unit);
2570     mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2571 
2572     snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2573              "bxe%d_stats_lock", sc->unit);
2574     mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2575 
2576     snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2577              "bxe%d_mcast_lock", sc->unit);
2578     mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2579 }
2580 
2581 static void
2582 bxe_release_mutexes(struct bxe_softc *sc)
2583 {
2584 #ifdef BXE_CORE_LOCK_SX
2585     sx_destroy(&sc->core_sx);
2586 #else
2587     if (mtx_initialized(&sc->core_mtx)) {
2588         mtx_destroy(&sc->core_mtx);
2589     }
2590 #endif
2591 
2592     if (mtx_initialized(&sc->sp_mtx)) {
2593         mtx_destroy(&sc->sp_mtx);
2594     }
2595 
2596     if (mtx_initialized(&sc->dmae_mtx)) {
2597         mtx_destroy(&sc->dmae_mtx);
2598     }
2599 
2600     if (mtx_initialized(&sc->port.phy_mtx)) {
2601         mtx_destroy(&sc->port.phy_mtx);
2602     }
2603 
2604     if (mtx_initialized(&sc->fwmb_mtx)) {
2605         mtx_destroy(&sc->fwmb_mtx);
2606     }
2607 
2608     if (mtx_initialized(&sc->print_mtx)) {
2609         mtx_destroy(&sc->print_mtx);
2610     }
2611 
2612     if (mtx_initialized(&sc->stats_mtx)) {
2613         mtx_destroy(&sc->stats_mtx);
2614     }
2615 
2616     if (mtx_initialized(&sc->mcast_mtx)) {
2617         mtx_destroy(&sc->mcast_mtx);
2618     }
2619 }
2620 
2621 static void
2622 bxe_tx_disable(struct bxe_softc* sc)
2623 {
2624     if_t ifp = sc->ifp;
2625 
2626     /* tell the stack the driver is stopped and TX queue is full */
2627     if (ifp !=  NULL) {
2628         if_setdrvflags(ifp, 0);
2629     }
2630 }
2631 
2632 static void
2633 bxe_drv_pulse(struct bxe_softc *sc)
2634 {
2635     SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2636              sc->fw_drv_pulse_wr_seq);
2637 }
2638 
2639 static inline uint16_t
2640 bxe_tx_avail(struct bxe_softc *sc,
2641              struct bxe_fastpath *fp)
2642 {
2643     int16_t  used;
2644     uint16_t prod;
2645     uint16_t cons;
2646 
2647     prod = fp->tx_bd_prod;
2648     cons = fp->tx_bd_cons;
2649 
2650     used = SUB_S16(prod, cons);
2651 
2652     return ((int16_t)(sc->tx_ring_size) - used);
2653 }
2654 
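/*
 * Illustrative arithmetic only, assuming SUB_S16() is a plain signed 16-bit
 * subtraction: if the producer has wrapped, e.g. prod = 0x0005 and
 * cons = 0xFFFB, then used = (int16_t)(0x0005 - 0xFFFB) = 10 and
 * bxe_tx_avail() still reports tx_ring_size - 10 free descriptors.
 */
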
2655 static inline int
2656 bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2657 {
2658     uint16_t hw_cons;
2659 
2660     mb(); /* status block fields can change */
2661     hw_cons = le16toh(*fp->tx_cons_sb);
2662     return (hw_cons != fp->tx_pkt_cons);
2663 }
2664 
2665 static inline uint8_t
2666 bxe_has_tx_work(struct bxe_fastpath *fp)
2667 {
2668     /* expand this for multi-cos if ever supported */
2669     return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2670 }
2671 
2672 static inline int
2673 bxe_has_rx_work(struct bxe_fastpath *fp)
2674 {
2675     uint16_t rx_cq_cons_sb;
2676 
2677     mb(); /* status block fields can change */
2678     rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2679     if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2680         rx_cq_cons_sb++;
2681     return (fp->rx_cq_cons != rx_cq_cons_sb);
2682 }
2683 
2684 static void
2685 bxe_sp_event(struct bxe_softc    *sc,
2686              struct bxe_fastpath *fp,
2687              union eth_rx_cqe    *rr_cqe)
2688 {
2689     int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2690     int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2691     enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2692     struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2693 
2694     BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2695           fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2696 
2697     switch (command) {
2698     case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2699         BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2700         drv_cmd = ECORE_Q_CMD_UPDATE;
2701         break;
2702 
2703     case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2704         BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2705         drv_cmd = ECORE_Q_CMD_SETUP;
2706         break;
2707 
2708     case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2709         BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2710         drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2711         break;
2712 
2713     case (RAMROD_CMD_ID_ETH_HALT):
2714         BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2715         drv_cmd = ECORE_Q_CMD_HALT;
2716         break;
2717 
2718     case (RAMROD_CMD_ID_ETH_TERMINATE):
2719         BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2720         drv_cmd = ECORE_Q_CMD_TERMINATE;
2721         break;
2722 
2723     case (RAMROD_CMD_ID_ETH_EMPTY):
2724         BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2725         drv_cmd = ECORE_Q_CMD_EMPTY;
2726         break;
2727 
2728     default:
2729         BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2730               command, fp->index);
2731         return;
2732     }
2733 
2734     if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2735         q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2736         /*
2737          * q_obj->complete_cmd() failure means that this was
2738          * an unexpected completion.
2739          *
2740          * In this case we don't want to increase the sc->spq_left
2741          * because apparently we haven't sent this command in the first
2742          * place.
2743          */
2744         // bxe_panic(sc, ("Unexpected SP completion\n"));
2745         return;
2746     }
2747 
2748     atomic_add_acq_long(&sc->cq_spq_left, 1);
2749 
2750     BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2751           atomic_load_acq_long(&sc->cq_spq_left));
2752 }
2753 
2754 /*
2755  * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2756  * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2757  * the current aggregation queue as in-progress.
2758  */
2759 static void
2760 bxe_tpa_start(struct bxe_softc            *sc,
2761               struct bxe_fastpath         *fp,
2762               uint16_t                    queue,
2763               uint16_t                    cons,
2764               uint16_t                    prod,
2765               struct eth_fast_path_rx_cqe *cqe)
2766 {
2767     struct bxe_sw_rx_bd tmp_bd;
2768     struct bxe_sw_rx_bd *rx_buf;
2769     struct eth_rx_bd *rx_bd;
2770     int max_agg_queues __diagused;
2771     struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2772     uint16_t index;
2773 
2774     BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2775                        "cons=%d prod=%d\n",
2776           fp->index, queue, cons, prod);
2777 
2778     max_agg_queues = MAX_AGG_QS(sc);
2779 
2780     KASSERT((queue < max_agg_queues),
2781             ("fp[%02d] invalid aggr queue (%d >= %d)!",
2782              fp->index, queue, max_agg_queues));
2783 
2784     KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2785             ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2786              fp->index, queue));
2787 
2788     /* copy the existing mbuf and mapping from the TPA pool */
2789     tmp_bd = tpa_info->bd;
2790 
2791     if (tmp_bd.m == NULL) {
2792         uint32_t *tmp;
2793 
2794         tmp = (uint32_t *)cqe;
2795 
2796         BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
2797               fp->index, queue, cons, prod);
2798         BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2799             *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2800 
2801         /* XXX Error handling? */
2802         return;
2803     }
2804 
2805     /* change the TPA queue to the start state */
2806     tpa_info->state            = BXE_TPA_STATE_START;
2807     tpa_info->placement_offset = cqe->placement_offset;
2808     tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2809     tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2810     tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2811 
2812     fp->rx_tpa_queue_used |= (1 << queue);
2813 
2814     /*
2815      * If all the buffer descriptors are filled with mbufs then fill in
2816      * the current consumer index with a new BD. Else if a maximum Rx
2817      * buffer limit is imposed then fill in the next producer index.
2818      */
2819     index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2820                 prod : cons;
2821 
2822     /* move the received mbuf and mapping to TPA pool */
2823     tpa_info->bd = fp->rx_mbuf_chain[cons];
2824 
2825     /* release any existing RX BD mbuf mappings */
2826     if (cons != index) {
2827         rx_buf = &fp->rx_mbuf_chain[cons];
2828 
2829         if (rx_buf->m_map != NULL) {
2830             bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2831                             BUS_DMASYNC_POSTREAD);
2832             bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2833         }
2834 
2835         /*
2836          * We get here when the maximum number of rx buffers is less than
2837          * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2838          * it out here without concern of a memory leak.
2839          */
2840         fp->rx_mbuf_chain[cons].m = NULL;
2841     }
2842 
2843     /* update the Rx SW BD with the mbuf info from the TPA pool */
2844     fp->rx_mbuf_chain[index] = tmp_bd;
2845 
2846     /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2847     rx_bd = &fp->rx_chain[index];
2848     rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2849     rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2850 }
2851 
2852 /*
2853  * When a TPA aggregation is completed, loop through the individual mbufs
2854  * of the aggregation, combining them into a single mbuf which will be sent
2855  * up the stack. Refill all freed SGEs with mbufs as we go along.
2856  */
2857 static int
2858 bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2859                    struct bxe_fastpath       *fp,
2860                    struct bxe_sw_tpa_info    *tpa_info,
2861                    uint16_t                  queue,
2862                    uint16_t                  pages,
2863                    struct mbuf               *m,
2864                    struct eth_end_agg_rx_cqe *cqe,
2865                    uint16_t                  cqe_idx)
2866 {
2867     struct mbuf *m_frag;
2868     uint32_t frag_len, frag_size, i;
2869     uint16_t sge_idx;
2870     int rc = 0;
2871     int j;
2872 
2873     frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2874 
2875     BLOGD(sc, DBG_LRO,
2876           "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2877           fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2878 
2879     /* make sure the aggregated frame is not too big to handle */
2880     if (pages > 8 * PAGES_PER_SGE) {
2881 
2882         uint32_t *tmp = (uint32_t *)cqe;
2883 
2884         BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2885                   "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2886               fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2887               tpa_info->len_on_bd, frag_size);
2888 
2889         BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2890             *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2891 
2892         bxe_panic(sc, ("sge page count error\n"));
2893         return (EINVAL);
2894     }
2895 
2896     /*
2897      * Scan through the scatter gather list pulling individual mbufs into a
2898      * single mbuf for the host stack.
2899      */
2900     for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2901         sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2902 
2903         /*
2904          * Firmware gives the indices of the SGE as if the ring is an array
2905          * (meaning that the "next" element will consume 2 indices).
2906          */
2907         frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2908 
2909         BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2910                            "sge_idx=%d frag_size=%d frag_len=%d\n",
2911               fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2912 
2913         m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2914 
2915         /* allocate a new mbuf for the SGE */
2916         rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2917         if (rc) {
2918             /* Leave all remaining SGEs in the ring! */
2919             return (rc);
2920         }
2921 
2922         /* update the fragment length */
2923         m_frag->m_len = frag_len;
2924 
2925         /* concatenate the fragment to the head mbuf */
2926         m_cat(m, m_frag);
2927         fp->eth_q_stats.mbuf_alloc_sge--;
2928 
2929         /* update the TPA mbuf size and remaining fragment size */
2930         m->m_pkthdr.len += frag_len;
2931         frag_size -= frag_len;
2932     }
2933 
2934     BLOGD(sc, DBG_LRO,
2935           "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2936           fp->index, queue, frag_size);
2937 
2938     return (rc);
2939 }
2940 
2941 static inline void
2942 bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2943 {
2944     int i, j;
2945 
2946     for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2947         int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2948 
2949         for (j = 0; j < 2; j++) {
2950             BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2951             idx--;
2952         }
2953     }
2954 }
2955 
2956 static inline void
2957 bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2958 {
2959     /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2960     memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2961 
2962     /*
2963      * Clear the two last indices in the page to 1. These are the indices that
2964      * correspond to the "next" element, hence will never be indicated and
2965      * should be removed from the calculations.
2966      */
2967     bxe_clear_sge_mask_next_elems(fp);
2968 }
2969 
2970 static inline void
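/*
 * Illustrative layout only, assuming a 4KB SGE page holding 512 8-byte
 * entries: the last two entries of every page are the "next page" pointer
 * BDs, so bxe_clear_sge_mask_next_elems() clears bits 510/511, 1022/1023,
 * and so on. Those slots never receive completions and must not hold up the
 * producer sweep in bxe_update_sge_prod().
 */
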
2971 bxe_update_last_max_sge(struct bxe_fastpath *fp,
2972                         uint16_t            idx)
2973 {
2974     uint16_t last_max = fp->last_max_sge;
2975 
2976     if (SUB_S16(idx, last_max) > 0) {
2977         fp->last_max_sge = idx;
2978     }
2979 }
2980 
2981 static inline void
2982 bxe_update_sge_prod(struct bxe_softc          *sc,
2983                     struct bxe_fastpath       *fp,
2984                     uint16_t                  sge_len,
2985                     union eth_sgl_or_raw_data *cqe)
2986 {
2987     uint16_t last_max, last_elem, first_elem;
2988     uint16_t delta = 0;
2989     uint16_t i;
2990 
2991     if (!sge_len) {
2992         return;
2993     }
2994 
2995     /* first mark all used pages */
2996     for (i = 0; i < sge_len; i++) {
2997         BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2998                             RX_SGE(le16toh(cqe->sgl[i])));
2999     }
3000 
3001     BLOGD(sc, DBG_LRO,
3002           "fp[%02d] fp_cqe->sgl[%d] = %d\n",
3003           fp->index, sge_len - 1,
3004           le16toh(cqe->sgl[sge_len - 1]));
3005 
3006     /* assume that the last SGE index is the biggest */
3007     bxe_update_last_max_sge(fp,
3008                             le16toh(cqe->sgl[sge_len - 1]));
3009 
3010     last_max = RX_SGE(fp->last_max_sge);
3011     last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3012     first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3013 
3014     /* if ring is not full */
3015     if (last_elem + 1 != first_elem) {
3016         last_elem++;
3017     }
3018 
3019     /* now update the prod */
3020     for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3021         if (__predict_true(fp->sge_mask[i])) {
3022             break;
3023         }
3024 
3025         fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3026         delta += BIT_VEC64_ELEM_SZ;
3027     }
3028 
3029     if (delta > 0) {
3030         fp->rx_sge_prod += delta;
3031         /* clear page-end entries */
3032         bxe_clear_sge_mask_next_elems(fp);
3033     }
3034 
3035     BLOGD(sc, DBG_LRO,
3036           "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3037           fp->index, fp->last_max_sge, fp->rx_sge_prod);
3038 }
3039 
3040 /*
3041  * The aggregation on the current TPA queue has completed. Pull the individual
3042  * mbuf fragments together into a single mbuf, perform all necessary checksum
3043  * calculations, and send the resulting mbuf to the stack.
3044  */
3045 static void
3046 bxe_tpa_stop(struct bxe_softc          *sc,
3047              struct bxe_fastpath       *fp,
3048              struct bxe_sw_tpa_info    *tpa_info,
3049              uint16_t                  queue,
3050              uint16_t                  pages,
3051              struct eth_end_agg_rx_cqe *cqe,
3052              uint16_t                  cqe_idx)
3053 {
3054     if_t ifp = sc->ifp;
3055     struct mbuf *m;
3056     int rc = 0;
3057 
3058     BLOGD(sc, DBG_LRO,
3059           "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3060           fp->index, queue, tpa_info->placement_offset,
3061           le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3062 
3063     m = tpa_info->bd.m;
3064 
3065     /* allocate a replacement before modifying existing mbuf */
3066     rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3067     if (rc) {
3068         /* drop the frame and log an error */
3069         fp->eth_q_stats.rx_soft_errors++;
3070         goto bxe_tpa_stop_exit;
3071     }
3072 
3073     /* we have a replacement, fixup the current mbuf */
3074     m_adj(m, tpa_info->placement_offset);
3075     m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3076 
3077     /* mark the checksums valid (taken care of by the firmware) */
3078     fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3079     fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3080     m->m_pkthdr.csum_data = 0xffff;
3081     m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3082                                CSUM_IP_VALID   |
3083                                CSUM_DATA_VALID |
3084                                CSUM_PSEUDO_HDR);
3085 
3086     /* aggregate all of the SGEs into a single mbuf */
3087     rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3088     if (rc) {
3089         /* drop the packet and log an error */
3090         fp->eth_q_stats.rx_soft_errors++;
3091         m_freem(m);
3092     } else {
3093         if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3094             m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3095             m->m_flags |= M_VLANTAG;
3096         }
3097 
3098         /* assign the packet to this interface */
3099         if_setrcvif(m, ifp);
3100 
3101         /* specify what RSS queue was used for this flow */
3102         m->m_pkthdr.flowid = fp->index;
3103         BXE_SET_FLOWID(m);
3104 
3105         if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3106         fp->eth_q_stats.rx_tpa_pkts++;
3107 
3108         /* pass the frame to the stack */
3109         if_input(ifp, m);
3110     }
3111 
3112     /* we passed an mbuf up the stack or dropped the frame */
3113     fp->eth_q_stats.mbuf_alloc_tpa--;
3114 
3115 bxe_tpa_stop_exit:
3116 
3117     fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3118     fp->rx_tpa_queue_used &= ~(1 << queue);
3119 }
3120 
3121 static uint8_t
3122 bxe_service_rxsgl(
3123                  struct bxe_fastpath *fp,
3124                  uint16_t len,
3125                  uint16_t lenonbd,
3126                  struct mbuf *m,
3127                  struct eth_fast_path_rx_cqe *cqe_fp)
3128 {
3129     struct mbuf *m_frag;
3130     uint16_t frags, frag_len;
3131     uint16_t sge_idx = 0;
3132     uint16_t j;
3133     uint8_t i, rc = 0;
3134     uint32_t frag_size;
3135 
3136     /* adjust the mbuf */
3137     m->m_len = lenonbd;
3138 
3139     frag_size =  len - lenonbd;
3140     frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3141 
3142     for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
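    /*
     * For example (hypothetical numbers): with len = 9000, lenonbd = 1500 and
     * 4KB SGE pages, frag_size is 7500 and frags is 2, so two SGE pages are
     * pulled from the ring and chained onto the head mbuf below.
     */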
3143         sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3144 
3145         m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3146         frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3147         m_frag->m_len = frag_len;
3148 
3149         /* allocate a new mbuf for the SGE */
3150         rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3151         if (rc) {
3152             /* Leave all remaining SGEs in the ring! */
3153             return (rc);
3154         }
3155         fp->eth_q_stats.mbuf_alloc_sge--;
3156 
3157         /* concatenate the fragment to the head mbuf */
3158         m_cat(m, m_frag);
3159 
3160         frag_size -= frag_len;
3161     }
3162 
3163     bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3164 
3165     return rc;
3166 }
3167 
3168 static uint8_t
3169 bxe_rxeof(struct bxe_softc    *sc,
3170           struct bxe_fastpath *fp)
3171 {
3172     if_t ifp = sc->ifp;
3173     uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3174     uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3175     int rx_pkts = 0;
3176     int rc = 0;
3177 
3178     BXE_FP_RX_LOCK(fp);
3179 
3180     /* CQ "next element" is of the size of the regular element */
3181     hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
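    /*
     * The last slot of each RCQ page holds a "next page" element rather than
     * a real completion; if the hardware index lands on that boundary, step
     * past it so the comparison below is against a usable entry.
     */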
3182     if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3183         hw_cq_cons++;
3184     }
3185 
3186     bd_cons = fp->rx_bd_cons;
3187     bd_prod = fp->rx_bd_prod;
3188     bd_prod_fw = bd_prod;
3189     sw_cq_cons = fp->rx_cq_cons;
3190     sw_cq_prod = fp->rx_cq_prod;
3191 
3192     /*
3193      * Memory barrier necessary as speculative reads of the rx
3194      * buffer can be ahead of the index in the status block
3195      */
3196     rmb();
3197 
3198     BLOGD(sc, DBG_RX,
3199           "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3200           fp->index, hw_cq_cons, sw_cq_cons);
3201 
3202     while (sw_cq_cons != hw_cq_cons) {
3203         struct bxe_sw_rx_bd *rx_buf = NULL;
3204         union eth_rx_cqe *cqe;
3205         struct eth_fast_path_rx_cqe *cqe_fp;
3206         uint8_t cqe_fp_flags;
3207         enum eth_rx_cqe_type cqe_fp_type;
3208         uint16_t len, lenonbd,  pad;
3209         struct mbuf *m = NULL;
3210 
3211         comp_ring_cons = RCQ(sw_cq_cons);
3212         bd_prod = RX_BD(bd_prod);
3213         bd_cons = RX_BD(bd_cons);
3214 
3215         cqe          = &fp->rcq_chain[comp_ring_cons];
3216         cqe_fp       = &cqe->fast_path_cqe;
3217         cqe_fp_flags = cqe_fp->type_error_flags;
3218         cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3219 
3220         BLOGD(sc, DBG_RX,
3221               "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3222               "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3223               "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3224               fp->index,
3225               hw_cq_cons,
3226               sw_cq_cons,
3227               bd_prod,
3228               bd_cons,
3229               CQE_TYPE(cqe_fp_flags),
3230               cqe_fp_flags,
3231               cqe_fp->status_flags,
3232               le32toh(cqe_fp->rss_hash_result),
3233               le16toh(cqe_fp->vlan_tag),
3234               le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3235               le16toh(cqe_fp->len_on_bd));
3236 
3237         /* is this a slowpath msg? */
3238         if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3239             bxe_sp_event(sc, fp, cqe);
3240             goto next_cqe;
3241         }
3242 
3243         rx_buf = &fp->rx_mbuf_chain[bd_cons];
3244 
3245         if (!CQE_TYPE_FAST(cqe_fp_type)) {
3246             struct bxe_sw_tpa_info *tpa_info;
3247             uint16_t frag_size, pages;
3248             uint8_t queue;
3249 
3250             if (CQE_TYPE_START(cqe_fp_type)) {
3251                 bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3252                               bd_cons, bd_prod, cqe_fp);
3253                 m = NULL; /* packet not ready yet */
3254                 goto next_rx;
3255             }
3256 
3257             KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3258                     ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3259 
3260             queue = cqe->end_agg_cqe.queue_index;
3261             tpa_info = &fp->rx_tpa_info[queue];
3262 
3263             BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3264                   fp->index, queue);
3265 
3266             frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3267                          tpa_info->len_on_bd);
3268             pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3269 
3270             bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3271                          &cqe->end_agg_cqe, comp_ring_cons);
3272 
3273             bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3274 
3275             goto next_cqe;
3276         }
3277 
3278         /* non TPA */
3279 
3280         /* is this an error packet? */
3281         if (__predict_false(cqe_fp_flags &
3282                             ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3283             BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3284             fp->eth_q_stats.rx_soft_errors++;
3285             goto next_rx;
3286         }
3287 
3288         len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3289         lenonbd = le16toh(cqe_fp->len_on_bd);
3290         pad = cqe_fp->placement_offset;
3291 
3292         m = rx_buf->m;
3293 
3294         if (__predict_false(m == NULL)) {
3295             BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3296                   bd_cons, fp->index);
3297             goto next_rx;
3298         }
3299 
3300         /* XXX double copy if packet length under a threshold */
3301 
3302         /*
3303          * If all the buffer descriptors are filled with mbufs then fill in
3304          * the current consumer index with a new BD. Else if a maximum Rx
3305          * buffer limit is imposed then fill in the next producer index.
3306          */
3307         rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3308                                   (sc->max_rx_bufs != RX_BD_USABLE) ?
3309                                       bd_prod : bd_cons);
3310         if (rc != 0) {
3311 
3312             /* we simply reuse the received mbuf and don't post it to the stack */
3313             m = NULL;
3314 
3315             BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3316                   fp->index, rc);
3317             fp->eth_q_stats.rx_soft_errors++;
3318 
3319             if (sc->max_rx_bufs != RX_BD_USABLE) {
3320                 /* copy this consumer index to the producer index */
3321                 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3322                        sizeof(struct bxe_sw_rx_bd));
3323                 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3324             }
3325 
3326             goto next_rx;
3327         }
3328 
3329         /* current mbuf was detached from the bd */
3330         fp->eth_q_stats.mbuf_alloc_rx--;
3331 
3332         /* we allocated a replacement mbuf, fixup the current one */
3333         m_adj(m, pad);
3334         m->m_pkthdr.len = m->m_len = len;
3335 
3336         if ((len > 60) && (len > lenonbd)) {
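        /*
         * If the frame is longer than what fit on the BD (len > lenonbd),
         * the remainder was placed in SGE pages; chain those fragments onto
         * this mbuf before handing it up.
         */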
3337             fp->eth_q_stats.rx_bxe_service_rxsgl++;
3338             rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3339             if (rc)
3340                 break;
3341             fp->eth_q_stats.rx_jumbo_sge_pkts++;
3342         } else if (lenonbd < len) {
3343             fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3344         }
3345 
3346         /* assign the packet to this interface */
3347 	if_setrcvif(m, ifp);
3348 
3349         /* assume no hardware checksum has been completed */
3350         m->m_pkthdr.csum_flags = 0;
3351 
3352         /* validate checksum if offload enabled */
3353         if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3354             /* check for a valid IP frame */
3355             if (!(cqe->fast_path_cqe.status_flags &
3356                   ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3357                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3358                 if (__predict_false(cqe_fp_flags &
3359                                     ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3360                     fp->eth_q_stats.rx_hw_csum_errors++;
3361                 } else {
3362                     fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3363                     m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3364                 }
3365             }
3366 
3367             /* check for a valid TCP/UDP frame */
3368             if (!(cqe->fast_path_cqe.status_flags &
3369                   ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3370                 if (__predict_false(cqe_fp_flags &
3371                                     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3372                     fp->eth_q_stats.rx_hw_csum_errors++;
3373                 } else {
3374                     fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3375                     m->m_pkthdr.csum_data = 0xFFFF;
3376                     m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3377                                                CSUM_PSEUDO_HDR);
3378                 }
3379             }
3380         }
3381 
3382         /* if there is a VLAN tag then flag that info */
3383         if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3384             m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3385             m->m_flags |= M_VLANTAG;
3386         }
3387 
3388         /* specify what RSS queue was used for this flow */
3389         m->m_pkthdr.flowid = fp->index;
3390         BXE_SET_FLOWID(m);
3391 
3392 next_rx:
3393 
3394         bd_cons    = RX_BD_NEXT(bd_cons);
3395         bd_prod    = RX_BD_NEXT(bd_prod);
3396         bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3397 
3398         /* pass the frame to the stack */
3399         if (__predict_true(m != NULL)) {
3400             if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3401             rx_pkts++;
3402             if_input(ifp, m);
3403         }
3404 
3405 next_cqe:
3406 
3407         sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3408         sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3409 
3410         /* limit spinning on the queue */
3411         if (rc != 0)
3412             break;
3413 
3414         if (rx_pkts == sc->rx_budget) {
3415             fp->eth_q_stats.rx_budget_reached++;
3416             break;
3417         }
3418     } /* while work to do */
3419 
3420     fp->rx_bd_cons = bd_cons;
3421     fp->rx_bd_prod = bd_prod_fw;
3422     fp->rx_cq_cons = sw_cq_cons;
3423     fp->rx_cq_prod = sw_cq_prod;
3424 
3425     /* Update producers */
3426     bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3427 
3428     fp->eth_q_stats.rx_pkts += rx_pkts;
3429     fp->eth_q_stats.rx_calls++;
3430 
3431     BXE_FP_RX_UNLOCK(fp);
3432 
3433     return (sw_cq_cons != hw_cq_cons);
3434 }
3435 
3436 static uint16_t
3437 bxe_free_tx_pkt(struct bxe_softc    *sc,
3438                 struct bxe_fastpath *fp,
3439                 uint16_t            idx)
3440 {
3441     struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3442     struct eth_tx_start_bd *tx_start_bd;
3443     uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3444     uint16_t new_cons;
3445     int nbd;
3446 
3447     /* unmap the mbuf from non-paged memory */
3448     bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3449 
3450     tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3451     nbd = le16toh(tx_start_bd->nbd) - 1;
3452 
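    /*
     * The start BD's 'nbd' field holds the number of BDs the packet used;
     * the new chain consumer is the packet's first BD index plus (nbd - 1).
     */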
3453     new_cons = (tx_buf->first_bd + nbd);
3454 
3455     /* free the mbuf */
3456     if (__predict_true(tx_buf->m != NULL)) {
3457         m_freem(tx_buf->m);
3458         fp->eth_q_stats.mbuf_alloc_tx--;
3459     } else {
3460         fp->eth_q_stats.tx_chain_lost_mbuf++;
3461     }
3462 
3463     tx_buf->m = NULL;
3464     tx_buf->first_bd = 0;
3465 
3466     return (new_cons);
3467 }
3468 
3469 /* transmit timeout watchdog */
3470 static int
3471 bxe_watchdog(struct bxe_softc    *sc,
3472              struct bxe_fastpath *fp)
3473 {
3474     BXE_FP_TX_LOCK(fp);
3475 
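    /*
     * A watchdog_timer of zero means the watchdog is disarmed; otherwise it
     * is decremented on each call and the timeout only fires once it
     * reaches zero.
     */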
3476     if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3477         BXE_FP_TX_UNLOCK(fp);
3478         return (0);
3479     }
3480 
3481     BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3482 
3483     BXE_FP_TX_UNLOCK(fp);
3484     BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK);
3485     taskqueue_enqueue_timeout(taskqueue_thread,
3486         &sc->sp_err_timeout_task, hz/10);
3487 
3488     return (-1);
3489 }
3490 
3491 /* processes transmit completions */
3492 static uint8_t
3493 bxe_txeof(struct bxe_softc    *sc,
3494           struct bxe_fastpath *fp)
3495 {
3496     if_t ifp = sc->ifp;
3497     uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3498     uint16_t tx_bd_avail;
3499 
3500     BXE_FP_TX_LOCK_ASSERT(fp);
3501 
3502     bd_cons = fp->tx_bd_cons;
3503     hw_cons = le16toh(*fp->tx_cons_sb);
3504     sw_cons = fp->tx_pkt_cons;
3505 
3506     while (sw_cons != hw_cons) {
3507         pkt_cons = TX_BD(sw_cons);
3508 
3509         BLOGD(sc, DBG_TX,
3510               "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3511               fp->index, hw_cons, sw_cons, pkt_cons);
3512 
3513         bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3514 
3515         sw_cons++;
3516     }
3517 
3518     fp->tx_pkt_cons = sw_cons;
3519     fp->tx_bd_cons  = bd_cons;
3520 
3521     BLOGD(sc, DBG_TX,
3522           "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3523           fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3524 
3525     mb();
3526 
3527     tx_bd_avail = bxe_tx_avail(sc, fp);
3528 
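    /*
     * If the number of free TX BDs is still below the cleanup threshold,
     * keep IFF_DRV_OACTIVE set so the stack holds off new transmits;
     * otherwise clear it to resume transmission.
     */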
3529     if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3530         if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3531     } else {
3532         if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3533     }
3534 
3535     if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3536         /* reset the watchdog timer if there are pending transmits */
3537         fp->watchdog_timer = BXE_TX_TIMEOUT;
3538         return (TRUE);
3539     } else {
3540         /* clear watchdog when there are no pending transmits */
3541         fp->watchdog_timer = 0;
3542         return (FALSE);
3543     }
3544 }
3545 
3546 static void
3547 bxe_drain_tx_queues(struct bxe_softc *sc)
3548 {
3549     struct bxe_fastpath *fp;
3550     int i, count;
3551 
3552     /* wait until all TX fastpath tasks have completed */
3553     for (i = 0; i < sc->num_queues; i++) {
3554         fp = &sc->fp[i];
3555 
3556         count = 1000;
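        /* allow up to ~1 second (1000 x 1 ms) for this queue to drain */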
3557 
3558         while (bxe_has_tx_work(fp)) {
3559 
3560             BXE_FP_TX_LOCK(fp);
3561             bxe_txeof(sc, fp);
3562             BXE_FP_TX_UNLOCK(fp);
3563 
3564             if (count == 0) {
3565                 BLOGE(sc, "Timeout waiting for fp[%d] "
3566                           "transmits to complete!\n", i);
3567                 bxe_panic(sc, ("tx drain failure\n"));
3568                 return;
3569             }
3570 
3571             count--;
3572             DELAY(1000);
3573             rmb();
3574         }
3575     }
3576 
3577     return;
3578 }
3579 
3580 static int
3581 bxe_del_all_macs(struct bxe_softc          *sc,
3582                  struct ecore_vlan_mac_obj *mac_obj,
3583                  int                       mac_type,
3584                  uint8_t                   wait_for_comp)
3585 {
3586     unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3587     int rc;
3588 
3589     /* wait for completion of the requested command */
3590     if (wait_for_comp) {
3591         bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3592     }
3593 
3594     /* Set the mac type of addresses we want to clear */
3595     bxe_set_bit(mac_type, &vlan_mac_flags);
3596 
3597     rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3598     if (rc < 0) {
3599         BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3600             rc, mac_type, wait_for_comp);
3601     }
3602 
3603     return (rc);
3604 }
3605 
3606 static int
3607 bxe_fill_accept_flags(struct bxe_softc *sc,
3608                       uint32_t         rx_mode,
3609                       unsigned long    *rx_accept_flags,
3610                       unsigned long    *tx_accept_flags)
3611 {
3612     /* Clear the flags first */
3613     *rx_accept_flags = 0;
3614     *tx_accept_flags = 0;
3615 
3616     switch (rx_mode) {
3617     case BXE_RX_MODE_NONE:
3618         /*
3619          * 'drop all' supersedes any accept flags that may have been
3620          * passed to the function.
3621          */
3622         break;
3623 
3624     case BXE_RX_MODE_NORMAL:
3625         bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3626         bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3627         bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3628 
3629         /* internal switching mode */
3630         bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3631         bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3632         bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3633 
3634         break;
3635 
3636     case BXE_RX_MODE_ALLMULTI:
3637         bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3638         bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3639         bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3640 
3641         /* internal switching mode */
3642         bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3643         bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3644         bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3645 
3646         break;
3647 
3648     case BXE_RX_MODE_PROMISC:
3649         /*
3650          * According to the definition of SI mode, an interface in promisc
3651          * mode should receive both matched and unmatched (with respect to the port)
3652          * unicast packets.
3653          */
3654         bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3655         bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3656         bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3657         bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3658 
3659         /* internal switching mode */
3660         bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3661         bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3662 
3663         if (IS_MF_SI(sc)) {
3664             bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3665         } else {
3666             bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3667         }
3668 
3669         break;
3670 
3671     default:
3672         BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3673         return (-1);
3674     }
3675 
3676     /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3677     if (rx_mode != BXE_RX_MODE_NONE) {
3678         bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3679         bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3680     }
3681 
3682     return (0);
3683 }
3684 
3685 static int
3686 bxe_set_q_rx_mode(struct bxe_softc *sc,
3687                   uint8_t          cl_id,
3688                   unsigned long    rx_mode_flags,
3689                   unsigned long    rx_accept_flags,
3690                   unsigned long    tx_accept_flags,
3691                   unsigned long    ramrod_flags)
3692 {
3693     struct ecore_rx_mode_ramrod_params ramrod_param;
3694     int rc;
3695 
3696     memset(&ramrod_param, 0, sizeof(ramrod_param));
3697 
3698     /* Prepare ramrod parameters */
3699     ramrod_param.cid = 0;
3700     ramrod_param.cl_id = cl_id;
3701     ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3702     ramrod_param.func_id = SC_FUNC(sc);
3703 
3704     ramrod_param.pstate = &sc->sp_state;
3705     ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3706 
3707     ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3708     ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3709 
3710     bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3711 
3712     ramrod_param.ramrod_flags = ramrod_flags;
3713     ramrod_param.rx_mode_flags = rx_mode_flags;
3714 
3715     ramrod_param.rx_accept_flags = rx_accept_flags;
3716     ramrod_param.tx_accept_flags = tx_accept_flags;
3717 
3718     rc = ecore_config_rx_mode(sc, &ramrod_param);
3719     if (rc < 0) {
3720         BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3721             "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3722             "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3723             (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3724             (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3725         return (rc);
3726     }
3727 
3728     return (0);
3729 }
3730 
3731 static int
3732 bxe_set_storm_rx_mode(struct bxe_softc *sc)
3733 {
3734     unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3735     unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3736     int rc;
3737 
3738     rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3739                                &tx_accept_flags);
3740     if (rc) {
3741         return (rc);
3742     }
3743 
3744     bxe_set_bit(RAMROD_RX, &ramrod_flags);
3745     bxe_set_bit(RAMROD_TX, &ramrod_flags);
3746 
3747     /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3748     return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3749                               rx_accept_flags, tx_accept_flags,
3750                               ramrod_flags));
3751 }
3752 
3753 /* returns the "mcp load_code" according to global load_count array */
3754 static int
3755 bxe_nic_load_no_mcp(struct bxe_softc *sc)
3756 {
3757     int path = SC_PATH(sc);
3758     int port = SC_PORT(sc);
3759 
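    /*
     * load_count[path][0] counts every function loaded on this path and
     * load_count[path][1 + port] counts the functions loaded per port. The
     * first function on the path performs COMMON init, the first on a port
     * performs PORT init, and all others perform FUNCTION init only.
     */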
3760     BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3761           path, load_count[path][0], load_count[path][1],
3762           load_count[path][2]);
3763     load_count[path][0]++;
3764     load_count[path][1 + port]++;
3765     BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3766           path, load_count[path][0], load_count[path][1],
3767           load_count[path][2]);
3768     if (load_count[path][0] == 1) {
3769         return (FW_MSG_CODE_DRV_LOAD_COMMON);
3770     } else if (load_count[path][1 + port] == 1) {
3771         return (FW_MSG_CODE_DRV_LOAD_PORT);
3772     } else {
3773         return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3774     }
3775 }
3776 
3777 /* returns the "mcp load_code" according to global load_count array */
3778 static int
3779 bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3780 {
3781     int port = SC_PORT(sc);
3782     int path = SC_PATH(sc);
3783 
3784     BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3785           path, load_count[path][0], load_count[path][1],
3786           load_count[path][2]);
3787     load_count[path][0]--;
3788     load_count[path][1 + port]--;
3789     BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3790           path, load_count[path][0], load_count[path][1],
3791           load_count[path][2]);
3792     if (load_count[path][0] == 0) {
3793         return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3794     } else if (load_count[path][1 + port] == 0) {
3795         return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3796     } else {
3797         return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3798     }
3799 }
3800 
3801 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3802 static uint32_t
3803 bxe_send_unload_req(struct bxe_softc *sc,
3804                     int              unload_mode)
3805 {
3806     uint32_t reset_code = 0;
3807 
3808     /* Select the UNLOAD request mode */
3809     if (unload_mode == UNLOAD_NORMAL) {
3810         reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3811     } else {
3812         reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3813     }
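    /*
     * Note: both branches currently request the WOL-disabled unload code,
     * so a WOL-preserving unload does not appear to be used here.
     */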
3814 
3815     /* Send the request to the MCP */
3816     if (!BXE_NOMCP(sc)) {
3817         reset_code = bxe_fw_command(sc, reset_code, 0);
3818     } else {
3819         reset_code = bxe_nic_unload_no_mcp(sc);
3820     }
3821 
3822     return (reset_code);
3823 }
3824 
3825 /* send UNLOAD_DONE command to the MCP */
3826 static void
3827 bxe_send_unload_done(struct bxe_softc *sc,
3828                      uint8_t          keep_link)
3829 {
3830     uint32_t reset_param =
3831         keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3832 
3833     /* Report UNLOAD_DONE to MCP */
3834     if (!BXE_NOMCP(sc)) {
3835         bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3836     }
3837 }
3838 
3839 static int
3840 bxe_func_wait_started(struct bxe_softc *sc)
3841 {
3842     int tout = 50;
3843 
3844     if (!sc->port.pmf) {
3845         return (0);
3846     }
3847 
3848     /*
3849      * (assumption: No Attention from MCP at this stage)
3850      * PMF probably in the middle of TX disable/enable transaction
3851      * 1. Sync ISR for default SB
3852      * 2. Sync SP queue - this guarantees us that attention handling started
3853      * 3. Wait until the TX disable/enable transaction completes
3854      *
3855      * 1+2 guarantee that if DCBX attention was scheduled it already changed
3856      * pending bit of transaction from STARTED-->TX_STOPPED, if we already
3857      * received completion for the transaction the state is TX_STOPPED.
3858      * State will return to STARTED after completion of TX_STOPPED-->STARTED
3859      * transaction.
3860      */
3861 
3862     /* XXX make sure default SB ISR is done */
3863     /* need a way to synchronize an irq (intr_mtx?) */
3864 
3865     /* XXX flush any work queues */
3866 
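    /* poll the function state for up to ~1 second (50 x 20 ms) */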
3867     while (ecore_func_get_state(sc, &sc->func_obj) !=
3868            ECORE_F_STATE_STARTED && tout--) {
3869         DELAY(20000);
3870     }
3871 
3872     if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3873         /*
3874          * Failed to complete the transaction in a "good way"
3875          * Force both transactions with CLR bit.
3876          */
3877         struct ecore_func_state_params func_params = { NULL };
3878 
3879         BLOGE(sc, "Unexpected function state! "
3880                   "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3881 
3882         func_params.f_obj = &sc->func_obj;
3883         bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3884 
3885         /* STARTED-->TX_STOPPED */
3886         func_params.cmd = ECORE_F_CMD_TX_STOP;
3887         ecore_func_state_change(sc, &func_params);
3888 
3889         /* TX_STOPPED-->STARTED */
3890         func_params.cmd = ECORE_F_CMD_TX_START;
3891         return (ecore_func_state_change(sc, &func_params));
3892     }
3893 
3894     return (0);
3895 }
3896 
3897 static int
3898 bxe_stop_queue(struct bxe_softc *sc,
3899                int              index)
3900 {
3901     struct bxe_fastpath *fp = &sc->fp[index];
3902     struct ecore_queue_state_params q_params = { NULL };
3903     int rc;
3904 
3905     BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3906 
3907     q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3908     /* We want to wait for completion in this context */
3909     bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3910 
3911     /* Stop the primary connection: */
3912 
3913     /* ...halt the connection */
3914     q_params.cmd = ECORE_Q_CMD_HALT;
3915     rc = ecore_queue_state_change(sc, &q_params);
3916     if (rc) {
3917         return (rc);
3918     }
3919 
3920     /* ...terminate the connection */
3921     q_params.cmd = ECORE_Q_CMD_TERMINATE;
3922     memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3923     q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3924     rc = ecore_queue_state_change(sc, &q_params);
3925     if (rc) {
3926         return (rc);
3927     }
3928 
3929     /* ...delete cfc entry */
3930     q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3931     memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3932     q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3933     return (ecore_queue_state_change(sc, &q_params));
3934 }
3935 
3936 /* wait for the outstanding SP commands */
3937 static inline uint8_t
3938 bxe_wait_sp_comp(struct bxe_softc *sc,
3939                  unsigned long    mask)
3940 {
3941     unsigned long tmp;
3942     int tout = 5000; /* wait for 5 secs tops */
3943 
3944     while (tout--) {
3945         mb();
3946         if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3947             return (TRUE);
3948         }
3949 
3950         DELAY(1000);
3951     }
3952 
3953     mb();
3954 
3955     tmp = atomic_load_acq_long(&sc->sp_state);
3956     if (tmp & mask) {
3957         BLOGE(sc, "Filtering completion timed out: "
3958                   "sp_state 0x%lx, mask 0x%lx\n",
3959               tmp, mask);
3960         return (FALSE);
3961     }
3962 
3963     return (FALSE);
3964 }
3965 
3966 static int
3967 bxe_func_stop(struct bxe_softc *sc)
3968 {
3969     struct ecore_func_state_params func_params = { NULL };
3970     int rc;
3971 
3972     /* prepare parameters for function state transitions */
3973     bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3974     func_params.f_obj = &sc->func_obj;
3975     func_params.cmd = ECORE_F_CMD_STOP;
3976 
3977     /*
3978      * Try to stop the function the 'good way'. If it fails (in case
3979      * of a parity error during bxe_chip_cleanup()) and we are
3980      * not in a debug mode, perform a state transaction in order to
3981      * enable further HW_RESET transaction.
3982      */
3983     rc = ecore_func_state_change(sc, &func_params);
3984     if (rc) {
3985         BLOGE(sc, "FUNC_STOP ramrod failed. "
3986                   "Running a dry transaction (%d)\n", rc);
3987         bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3988         return (ecore_func_state_change(sc, &func_params));
3989     }
3990 
3991     return (0);
3992 }
3993 
3994 static int
3995 bxe_reset_hw(struct bxe_softc *sc,
3996              uint32_t         load_code)
3997 {
3998     struct ecore_func_state_params func_params = { NULL };
3999 
4000     /* Prepare parameters for function state transitions */
4001     bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4002 
4003     func_params.f_obj = &sc->func_obj;
4004     func_params.cmd = ECORE_F_CMD_HW_RESET;
4005 
4006     func_params.params.hw_init.load_phase = load_code;
4007 
4008     return (ecore_func_state_change(sc, &func_params));
4009 }
4010 
4011 static void
4012 bxe_int_disable_sync(struct bxe_softc *sc,
4013                      int              disable_hw)
4014 {
4015     if (disable_hw) {
4016         /* prevent the HW from sending interrupts */
4017         bxe_int_disable(sc);
4018     }
4019 
4020     /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4021     /* make sure all ISRs are done */
4022 
4023     /* XXX make sure sp_task is not running */
4024     /* cancel and flush work queues */
4025 }
4026 
4027 static void
4028 bxe_chip_cleanup(struct bxe_softc *sc,
4029                  uint32_t         unload_mode,
4030                  uint8_t          keep_link)
4031 {
4032     int port = SC_PORT(sc);
4033     struct ecore_mcast_ramrod_params rparam = { NULL };
4034     uint32_t reset_code;
4035     int i, rc = 0;
4036 
4037     bxe_drain_tx_queues(sc);
4038 
4039     /* give HW time to discard old tx messages */
4040     DELAY(1000);
4041 
4042     /* Clean all ETH MACs */
4043     rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4044     if (rc < 0) {
4045         BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4046     }
4047 
4048     /* Clean up UC list  */
4049     rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4050     if (rc < 0) {
4051         BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4052     }
4053 
4054     /* Disable LLH */
4055     if (!CHIP_IS_E1(sc)) {
4056         REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4057     }
4058 
4059     /* Set "drop all" to stop Rx */
4060 
4061     /*
4062      * We need to take the BXE_MCAST_LOCK() here in order to prevent
4063      * a race between the completion code and this code.
4064      */
4065     BXE_MCAST_LOCK(sc);
4066 
4067     if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4068         bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4069     } else {
4070         bxe_set_storm_rx_mode(sc);
4071     }
4072 
4073     /* Clean up multicast configuration */
4074     rparam.mcast_obj = &sc->mcast_obj;
4075     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4076     if (rc < 0) {
4077         BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4078     }
4079 
4080     BXE_MCAST_UNLOCK(sc);
4081 
4082     // XXX bxe_iov_chip_cleanup(sc);
4083 
4084     /*
4085      * Send the UNLOAD_REQUEST to the MCP. The reply indicates whether
4086      * this function should perform a FUNCTION, PORT, or COMMON HW
4087      * reset.
4088      */
4089     reset_code = bxe_send_unload_req(sc, unload_mode);
4090 
4091     /*
4092      * (assumption: No Attention from MCP at this stage)
4093      * PMF probably in the middle of TX disable/enable transaction
4094      */
4095     rc = bxe_func_wait_started(sc);
4096     if (rc) {
4097         BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4098     }
4099 
4100     /*
4101      * Close multi and leading connections
4102      * Completions for ramrods are collected in a synchronous way
4103      */
4104     for (i = 0; i < sc->num_queues; i++) {
4105         if (bxe_stop_queue(sc, i)) {
4106             goto unload_error;
4107         }
4108     }
4109 
4110     /*
4111      * If the SP settings have not completed by now, something
4112      * has gone very wrong.
4113      */
4114     if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4115         BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4116     }
4117 
4118 unload_error:
4119 
4120     rc = bxe_func_stop(sc);
4121     if (rc) {
4122         BLOGE(sc, "Function stop failed!(%d)\n", rc);
4123     }
4124 
4125     /* disable HW interrupts */
4126     bxe_int_disable_sync(sc, TRUE);
4127 
4128     /* detach interrupts */
4129     bxe_interrupt_detach(sc);
4130 
4131     /* Reset the chip */
4132     rc = bxe_reset_hw(sc, reset_code);
4133     if (rc) {
4134         BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4135     }
4136 
4137     /* Report UNLOAD_DONE to MCP */
4138     bxe_send_unload_done(sc, keep_link);
4139 }
4140 
4141 static void
4142 bxe_disable_close_the_gate(struct bxe_softc *sc)
4143 {
4144     uint32_t val;
4145     int port = SC_PORT(sc);
4146 
4147     BLOGD(sc, DBG_LOAD,
4148           "Disabling 'close the gates'\n");
4149 
4150     if (CHIP_IS_E1(sc)) {
4151         uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4152                                MISC_REG_AEU_MASK_ATTN_FUNC_0;
4153         val = REG_RD(sc, addr);
4154         val &= ~(0x300);
4155         REG_WR(sc, addr, val);
4156     } else {
4157         val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4158         val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4159                  MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4160         REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4161     }
4162 }
4163 
4164 /*
4165  * Cleans the objects that have internal lists without sending
4166  * ramrods. Should be run when interrupts are disabled.
4167  */
4168 static void
4169 bxe_squeeze_objects(struct bxe_softc *sc)
4170 {
4171     unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4172     struct ecore_mcast_ramrod_params rparam = { NULL };
4173     struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4174     int rc;
4175 
4176     /* Cleanup MACs' object first... */
4177 
4178     /* Wait for completion of requested */
4179     bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4180     /* Perform a dry cleanup */
4181     bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4182 
4183     /* Clean ETH primary MAC */
4184     bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4185     rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4186                              &ramrod_flags);
4187     if (rc != 0) {
4188         BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4189     }
4190 
4191     /* Cleanup UC list */
4192     vlan_mac_flags = 0;
4193     bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4194     rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4195                              &ramrod_flags);
4196     if (rc != 0) {
4197         BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4198     }
4199 
4200     /* Now clean mcast object... */
4201 
4202     rparam.mcast_obj = &sc->mcast_obj;
4203     bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4204 
4205     /* Add a DEL command... */
4206     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4207     if (rc < 0) {
4208         BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4209     }
4210 
4211     /* now wait until all pending commands are cleared */
4212 
4213     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4214     while (rc != 0) {
4215         if (rc < 0) {
4216             BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4217             return;
4218         }
4219 
4220         rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4221     }
4222 }
4223 
4224 /* stop the controller */
4225 static __noinline int
4226 bxe_nic_unload(struct bxe_softc *sc,
4227                uint32_t         unload_mode,
4228                uint8_t          keep_link)
4229 {
4230     uint8_t global = FALSE;
4231     uint32_t val;
4232     int i;
4233 
4234     BXE_CORE_LOCK_ASSERT(sc);
4235 
4236     if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4237 
4238     for (i = 0; i < sc->num_queues; i++) {
4239         struct bxe_fastpath *fp;
4240 
4241         fp = &sc->fp[i];
4242 	fp->watchdog_timer = 0;
4243         BXE_FP_TX_LOCK(fp);
4244         BXE_FP_TX_UNLOCK(fp);
4245     }
4246 
4247     BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4248 
4249     /* mark driver as unloaded in shmem2 */
4250     if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4251         val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4252         SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4253                   val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4254     }
4255 
4256     if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4257         (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4258 
4259 	if(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
4260             /*
4261              * We can get here if the driver has been unloaded
4262              * during parity error recovery and is either waiting for a
4263              * leader to complete or for other functions to unload and
4264              * then ifconfig down has been issued. In this case we want to
4265              * unload and let other functions complete the recovery
4266              * process.
4267              */
4268             sc->recovery_state = BXE_RECOVERY_DONE;
4269             sc->is_leader = 0;
4270             bxe_release_leader_lock(sc);
4271             mb();
4272             BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4273 	}
4274         BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4275             " state = 0x%x\n", sc->recovery_state, sc->state);
4276         return (-1);
4277     }
4278 
4279     /*
4280      * Nothing to do during unload if previous bxe_nic_load()
4281      * did not complete successfully - all resources are released.
4282      */
4283     if ((sc->state == BXE_STATE_CLOSED) ||
4284         (sc->state == BXE_STATE_ERROR)) {
4285         return (0);
4286     }
4287 
4288     sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4289     mb();
4290 
4291     /* stop tx */
4292     bxe_tx_disable(sc);
4293 
4294     sc->rx_mode = BXE_RX_MODE_NONE;
4295     /* XXX set rx mode ??? */
4296 
4297     if (IS_PF(sc) && !sc->grcdump_done) {
4298         /* set ALWAYS_ALIVE bit in shmem */
4299         sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4300 
4301         bxe_drv_pulse(sc);
4302 
4303         bxe_stats_handle(sc, STATS_EVENT_STOP);
4304         bxe_save_statistics(sc);
4305     }
4306 
4307     /* wait till consumers catch up with producers in all queues */
4308     bxe_drain_tx_queues(sc);
4309 
4310     /* If VF, indicate to the PF that this function is going down (the PF
4311      * will delete the sp elements and clear the initializations).
4312      */
4313     if (IS_VF(sc)) {
4314         ; /* bxe_vfpf_close_vf(sc); */
4315     } else if (unload_mode != UNLOAD_RECOVERY) {
4316         /* if this is a normal/close unload need to clean up chip */
4317         if (!sc->grcdump_done)
4318             bxe_chip_cleanup(sc, unload_mode, keep_link);
4319     } else {
4320         /* Send the UNLOAD_REQUEST to the MCP */
4321         bxe_send_unload_req(sc, unload_mode);
4322 
4323         /*
4324          * Prevent transactions to the host from the functions on the
4325          * engine that doesn't reset global blocks in case of a global
4326          * attention, once global blocks are reset and gates are opened
4327          * (the engine whose leader will perform the recovery
4328          * last).
4329          */
4330         if (!CHIP_IS_E1x(sc)) {
4331             bxe_pf_disable(sc);
4332         }
4333 
4334         /* disable HW interrupts */
4335         bxe_int_disable_sync(sc, TRUE);
4336 
4337         /* detach interrupts */
4338         bxe_interrupt_detach(sc);
4339 
4340         /* Report UNLOAD_DONE to MCP */
4341         bxe_send_unload_done(sc, FALSE);
4342     }
4343 
4344     /*
4345      * At this stage no more interrupts will arrive so we may safely clean
4346      * the queue'able objects here in case they failed to get cleaned so far.
4347      */
4348     if (IS_PF(sc)) {
4349         bxe_squeeze_objects(sc);
4350     }
4351 
4352     /* There should be no more pending SP commands at this stage */
4353     sc->sp_state = 0;
4354 
4355     sc->port.pmf = 0;
4356 
4357     bxe_free_fp_buffers(sc);
4358 
4359     if (IS_PF(sc)) {
4360         bxe_free_mem(sc);
4361     }
4362 
4363     bxe_free_fw_stats_mem(sc);
4364 
4365     sc->state = BXE_STATE_CLOSED;
4366 
4367     /*
4368      * Check if there are pending parity attentions. If there are - set
4369      * RECOVERY_IN_PROGRESS.
4370      */
4371     if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4372         bxe_set_reset_in_progress(sc);
4373 
4374         /* Set RESET_IS_GLOBAL if needed */
4375         if (global) {
4376             bxe_set_reset_global(sc);
4377         }
4378     }
4379 
4380     /*
4381      * The last driver must disable a "close the gate" if there is no
4382      * parity attention or "process kill" pending.
4383      */
4384     if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4385         bxe_reset_is_done(sc, SC_PATH(sc))) {
4386         bxe_disable_close_the_gate(sc);
4387     }
4388 
4389     BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4390 
4391     bxe_link_report(sc);
4392 
4393     return (0);
4394 }
4395 
4396 /*
4397  * Called by the OS to set various media options (i.e. link, speed, etc.) when
4398  * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4399  */
4400 static int
4401 bxe_ifmedia_update(struct ifnet  *ifp)
4402 {
4403     struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4404     struct ifmedia *ifm;
4405 
4406     ifm = &sc->ifmedia;
4407 
4408     /* We only support Ethernet media type. */
4409     if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4410         return (EINVAL);
4411     }
4412 
4413     switch (IFM_SUBTYPE(ifm->ifm_media)) {
4414     case IFM_AUTO:
4415          break;
4416     case IFM_10G_CX4:
4417     case IFM_10G_SR:
4418     case IFM_10G_T:
4419     case IFM_10G_TWINAX:
4420     default:
4421         /* We don't support changing the media type. */
4422         BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4423               IFM_SUBTYPE(ifm->ifm_media));
4424         return (EINVAL);
4425     }
4426 
4427     return (0);
4428 }
4429 
4430 /*
4431  * Called by the OS to get the current media status (i.e. link, speed, etc.).
4432  */
4433 static void
4434 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4435 {
4436     struct bxe_softc *sc = if_getsoftc(ifp);
4437 
4438     /* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..."
4439        line if the IFM_AVALID flag is *NOT* set. So we need to set this
4440        flag unconditionally (irrespective of the administrative
4441        'up/down' state of the interface) to ensure that the line is always
4442        displayed.
4443     */
4444     ifmr->ifm_status = IFM_AVALID;
4445 
4446     /* Setup the default interface info. */
4447     ifmr->ifm_active = IFM_ETHER;
4448 
4449     /* Report link down if the driver isn't running. */
4450     if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4451         ifmr->ifm_active |= IFM_NONE;
4452         BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__);
4453         BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n",
4454                 __func__, sc->link_vars.link_up);
4455         return;
4456     }
4457 
4458 
4459     if (sc->link_vars.link_up) {
4460         ifmr->ifm_status |= IFM_ACTIVE;
4461         ifmr->ifm_active |= IFM_FDX;
4462     } else {
4463         ifmr->ifm_active |= IFM_NONE;
4464         BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n",
4465                 __func__);
4466         return;
4467     }
4468 
4469     ifmr->ifm_active |= sc->media;
4470     return;
4471 }
4472 
4473 static void
4474 bxe_handle_chip_tq(void *context,
4475                    int  pending)
4476 {
4477     struct bxe_softc *sc = (struct bxe_softc *)context;
4478     long work = atomic_load_acq_long(&sc->chip_tq_flags);
4479 
4480     switch (work)
4481     {
4482 
4483     case CHIP_TQ_REINIT:
4484         if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4485             /* restart the interface */
4486             BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4487             bxe_periodic_stop(sc);
4488             BXE_CORE_LOCK(sc);
4489             bxe_stop_locked(sc);
4490             bxe_init_locked(sc);
4491             BXE_CORE_UNLOCK(sc);
4492         }
4493         break;
4494 
4495     default:
4496         break;
4497     }
4498 }
4499 
4500 /*
4501  * Handles any IOCTL calls from the operating system.
4502  *
4503  * Returns:
4504  *   0 = Success, >0 Failure
4505  */
4506 static int
4507 bxe_ioctl(if_t ifp,
4508           u_long       command,
4509           caddr_t      data)
4510 {
4511     struct bxe_softc *sc = if_getsoftc(ifp);
4512     struct ifreq *ifr = (struct ifreq *)data;
4513     int mask = 0;
4514     int reinit = 0;
4515     int error = 0;
4516 
4517     int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4518     int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4519 
4520     switch (command)
4521     {
4522     case SIOCSIFMTU:
4523         BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4524               ifr->ifr_mtu);
4525 
4526         if (sc->mtu == ifr->ifr_mtu) {
4527             /* nothing to change */
4528             break;
4529         }
4530 
4531         if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4532             BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4533                   ifr->ifr_mtu, mtu_min, mtu_max);
4534             error = EINVAL;
4535             break;
4536         }
4537 
4538         atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4539                              (unsigned long)ifr->ifr_mtu);
4540 	/*
4541         atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4542                               (unsigned long)ifr->ifr_mtu);
4543 	XXX - Not sure why it needs to be atomic
4544 	*/
4545 	if_setmtu(ifp, ifr->ifr_mtu);
4546         reinit = 1;
4547         break;
4548 
4549     case SIOCSIFFLAGS:
4550         /* toggle the interface state up or down */
4551         BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4552 
4553 	BXE_CORE_LOCK(sc);
4554         /* check if the interface is up */
4555         if (if_getflags(ifp) & IFF_UP) {
4556             if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4557                 /* set the receive mode flags */
4558                 bxe_set_rx_mode(sc);
4559             } else if(sc->state != BXE_STATE_DISABLED) {
4560 		bxe_init_locked(sc);
4561             }
4562         } else {
4563             if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4564 		bxe_periodic_stop(sc);
4565 		bxe_stop_locked(sc);
4566             }
4567         }
4568 	BXE_CORE_UNLOCK(sc);
4569 
4570         break;
4571 
4572     case SIOCADDMULTI:
4573     case SIOCDELMULTI:
4574         /* add/delete multicast addresses */
4575         BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4576 
4577         /* check if the interface is up */
4578         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4579             /* set the receive mode flags */
4580 	    BXE_CORE_LOCK(sc);
4581             bxe_set_rx_mode(sc);
4582 	    BXE_CORE_UNLOCK(sc);
4583         }
4584 
4585         break;
4586 
4587     case SIOCSIFCAP:
4588         /* find out which capabilities have changed */
4589         mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4590 
4591         BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4592               mask);
4593 
4594         /* toggle the LRO capabilities enable flag */
4595         if (mask & IFCAP_LRO) {
4596 	    if_togglecapenable(ifp, IFCAP_LRO);
4597             BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4598                   (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4599             reinit = 1;
4600         }
4601 
4602         /* toggle the TXCSUM checksum capabilities enable flag */
4603         if (mask & IFCAP_TXCSUM) {
4604 	    if_togglecapenable(ifp, IFCAP_TXCSUM);
4605             BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4606                   (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4607             if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4608                 if_sethwassistbits(ifp, (CSUM_IP      |
4609                                     CSUM_TCP      |
4610                                     CSUM_UDP      |
4611                                     CSUM_TSO      |
4612                                     CSUM_TCP_IPV6 |
4613                                     CSUM_UDP_IPV6), 0);
4614             } else {
4615 		if_clearhwassist(ifp); /* XXX */
4616             }
4617         }
4618 
4619         /* toggle the RXCSUM checksum capabilities enable flag */
4620         if (mask & IFCAP_RXCSUM) {
4621 	    if_togglecapenable(ifp, IFCAP_RXCSUM);
4622             BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4623                   (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4624             if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4625                 if_sethwassistbits(ifp, (CSUM_IP      |
4626                                     CSUM_TCP      |
4627                                     CSUM_UDP      |
4628                                     CSUM_TSO      |
4629                                     CSUM_TCP_IPV6 |
4630                                     CSUM_UDP_IPV6), 0);
4631             } else {
4632 		if_clearhwassist(ifp); /* XXX */
4633             }
4634         }
4635 
4636         /* toggle TSO4 capabilities enabled flag */
4637         if (mask & IFCAP_TSO4) {
4638             if_togglecapenable(ifp, IFCAP_TSO4);
4639             BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4640                   (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4641         }
4642 
4643         /* toggle TSO6 capabilities enabled flag */
4644         if (mask & IFCAP_TSO6) {
4645 	    if_togglecapenable(ifp, IFCAP_TSO6);
4646             BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4647                   (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4648         }
4649 
4650         /* toggle VLAN_HWTSO capabilities enabled flag */
4651         if (mask & IFCAP_VLAN_HWTSO) {
4652 
4653 	    if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4654             BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4655                   (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4656         }
4657 
4658         /* toggle VLAN_HWCSUM capabilities enabled flag */
4659         if (mask & IFCAP_VLAN_HWCSUM) {
4660             /* XXX investigate this... */
4661             BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4662             error = EINVAL;
4663         }
4664 
4665         /* toggle VLAN_MTU capabilities enable flag */
4666         if (mask & IFCAP_VLAN_MTU) {
4667             /* XXX investigate this... */
4668             BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4669             error = EINVAL;
4670         }
4671 
4672         /* toggle VLAN_HWTAGGING capabilities enabled flag */
4673         if (mask & IFCAP_VLAN_HWTAGGING) {
4674             /* XXX investigate this... */
4675             BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4676             error = EINVAL;
4677         }
4678 
4679         /* toggle VLAN_HWFILTER capabilities enabled flag */
4680         if (mask & IFCAP_VLAN_HWFILTER) {
4681             /* XXX investigate this... */
4682             BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4683             error = EINVAL;
4684         }
4685 
4686         /* XXX not yet...
4687          * IFCAP_WOL_MAGIC
4688          */
4689 
4690         break;
4691 
4692     case SIOCSIFMEDIA:
4693     case SIOCGIFMEDIA:
4694         /* set/get interface media */
4695         BLOGD(sc, DBG_IOCTL,
4696               "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4697               (command & 0xff));
4698         error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4699         break;
4700 
4701     default:
4702         BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4703               (command & 0xff));
4704         error = ether_ioctl(ifp, command, data);
4705         break;
4706     }
4707 
4708     if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4709         BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4710               "Re-initializing hardware from IOCTL change\n");
4711         bxe_periodic_stop(sc);
4712         BXE_CORE_LOCK(sc);
4713         bxe_stop_locked(sc);
4714         bxe_init_locked(sc);
4715         BXE_CORE_UNLOCK(sc);
4716     }
4717 
4718     return (error);
4719 }
4720 
4721 static __noinline void
4722 bxe_dump_mbuf(struct bxe_softc *sc,
4723               struct mbuf      *m,
4724               uint8_t          contents)
4725 {
4726     char *type;
4727     int i = 0;
4728 
4729     if (!(sc->debug & DBG_MBUF)) {
4730         return;
4731     }
4732 
4733     if (m == NULL) {
4734         BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4735         return;
4736     }
4737 
4738     while (m) {
4739 
4740         BLOGD(sc, DBG_MBUF,
4741               "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4742               i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4743 
4744         if (m->m_flags & M_PKTHDR) {
4745              BLOGD(sc, DBG_MBUF,
4746                    "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4747                    i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4748                    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4749         }
4750 
4751         if (m->m_flags & M_EXT) {
4752             switch (m->m_ext.ext_type) {
4753             case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4754             case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4755             case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4756             case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4757             case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4758             case EXT_PACKET:     type = "EXT_PACKET";     break;
4759             case EXT_MBUF:       type = "EXT_MBUF";       break;
4760             case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4761             case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4762             case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4763             case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4764             default:             type = "UNKNOWN";        break;
4765             }
4766 
4767             BLOGD(sc, DBG_MBUF,
4768                   "%02d: - m_ext: %p ext_size=%d type=%s\n",
4769                   i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4770         }
4771 
4772         if (contents) {
4773             bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4774         }
4775 
4776         m = m->m_next;
4777         i++;
4778     }
4779 }
4780 
4781 /*
4782  * Checks that the 13 BD sliding window is >= MSS for TSO.
4783  * Check that the (13 total BDs - 3 BDs) = 10 BD data window is >= MSS.
4784  * The window excludes 3 BDs: 1 for the headers BD plus 2 for the parse BD
4785  * and the last BD. The headers come in a separate BD in FreeBSD, so 13 - 3 = 10.
4786  * Returns: 0 if OK to send, 1 if the packet needs further defragmentation.
4787  */
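/*
 * Worked example (illustrative values only): with an MSS of 1400 bytes and
 * data segments of 200 bytes each, every 10-segment window sums to 2000
 * bytes >= MSS, so the frame may be sent as-is. If any 10-segment window
 * summed to less than the MSS, the function would return 1 and the caller
 * would defragment the mbuf chain before retrying.
 */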
4788 static int
4789 bxe_chktso_window(struct bxe_softc  *sc,
4790                   int               nsegs,
4791                   bus_dma_segment_t *segs,
4792                   struct mbuf       *m)
4793 {
4794     uint32_t num_wnds, wnd_size, wnd_sum;
4795     int32_t frag_idx, wnd_idx;
4796     unsigned short lso_mss;
4797 
4798     wnd_sum = 0;
4799     wnd_size = 10;
4800     num_wnds = nsegs - wnd_size;
4801     lso_mss = htole16(m->m_pkthdr.tso_segsz);
4802 
4803     /*
4804      * The total Eth+IP+TCP header length is in the first FreeBSD mbuf, so
4805      * calculate the sum of the first window of data segments while skipping
4806      * the first segment, which is assumed to hold the headers.
4807      */
4808     for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4809         wnd_sum += htole16(segs[frag_idx].ds_len);
4810     }
4811 
4812     /* check the first 10 bd window size */
4813     if (wnd_sum < lso_mss) {
4814         return (1);
4815     }
4816 
4817     /* run through the windows */
4818     for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4819         /* subtract the first mbuf->m_len of the last wndw(-header) */
4820         wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4821         /* add the next mbuf len to the len of our new window */
4822         wnd_sum += htole16(segs[frag_idx].ds_len);
4823         if (wnd_sum < lso_mss) {
4824             return (1);
4825         }
4826     }
4827 
4828     return (0);
4829 }
4830 
4831 static uint8_t
4832 bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4833                     struct mbuf         *m,
4834                     uint32_t            *parsing_data)
4835 {
4836     struct ether_vlan_header *eh = NULL;
4837     struct ip *ip4 = NULL;
4838     struct ip6_hdr *ip6 = NULL;
4839     caddr_t ip = NULL;
4840     struct tcphdr *th = NULL;
4841     int e_hlen, ip_hlen, l4_off;
4842     uint16_t proto;
4843 
4844     if (m->m_pkthdr.csum_flags == CSUM_IP) {
4845         /* no L4 checksum offload needed */
4846         return (0);
4847     }
4848 
4849     /* get the Ethernet header */
4850     eh = mtod(m, struct ether_vlan_header *);
4851 
4852     /* handle VLAN encapsulation if present */
4853     if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4854         e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4855         proto  = ntohs(eh->evl_proto);
4856     } else {
4857         e_hlen = ETHER_HDR_LEN;
4858         proto  = ntohs(eh->evl_encap_proto);
4859     }
4860 
4861     switch (proto) {
4862     case ETHERTYPE_IP:
4863         /* get the IP header, if mbuf len < 20 then header in next mbuf */
4864         ip4 = (m->m_len < sizeof(struct ip)) ?
4865                   (struct ip *)m->m_next->m_data :
4866                   (struct ip *)(m->m_data + e_hlen);
4867         /* ip_hl is in 32-bit words; convert to bytes */
4868         ip_hlen = (ip4->ip_hl << 2);
4869         ip = (caddr_t)ip4;
4870         break;
4871     case ETHERTYPE_IPV6:
4872         /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4873         ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4874                   (struct ip6_hdr *)m->m_next->m_data :
4875                   (struct ip6_hdr *)(m->m_data + e_hlen);
4876         /* XXX cannot support offload with IPv6 extensions */
4877         ip_hlen = sizeof(struct ip6_hdr);
4878         ip = (caddr_t)ip6;
4879         break;
4880     default:
4881         /* We can't offload in this case... */
4882         /* XXX error stat ??? */
4883         return (0);
4884     }
4885 
4886     /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4887     l4_off = (e_hlen + ip_hlen);
4888 
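    /*
     * The parse BD expects the L4 header start offset in 16-bit words (note
     * the "_W" suffix on the field), hence the ">> 1" byte-to-word
     * conversion below. The TCP header length field, by contrast, is in
     * 32-bit dwords ("_DW"), which matches th_off directly.
     */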
4889     *parsing_data |=
4890         (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4891          ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4892 
4893     if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4894                                   CSUM_TSO |
4895                                   CSUM_TCP_IPV6)) {
4896         fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4897         th = (struct tcphdr *)(ip + ip_hlen);
4898         /* th_off is number of 32-bit words */
4899         *parsing_data |= ((th->th_off <<
4900                            ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4901                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4902         return (l4_off + (th->th_off << 2)); /* entire header length */
4903     } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4904                                          CSUM_UDP_IPV6)) {
4905         fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4906         return (l4_off + sizeof(struct udphdr)); /* entire header length */
4907     } else {
4908         /* XXX error stat ??? */
4909         return (0);
4910     }
4911 }
4912 
4913 static uint8_t
4914 bxe_set_pbd_csum(struct bxe_fastpath        *fp,
4915                  struct mbuf                *m,
4916                  struct eth_tx_parse_bd_e1x *pbd)
4917 {
4918     struct ether_vlan_header *eh = NULL;
4919     struct ip *ip4 = NULL;
4920     struct ip6_hdr *ip6 = NULL;
4921     caddr_t ip = NULL;
4922     struct tcphdr *th = NULL;
4923     struct udphdr *uh = NULL;
4924     int e_hlen, ip_hlen;
4925     uint16_t proto;
4926     uint8_t hlen;
4927     uint16_t tmp_csum;
4928     uint32_t *tmp_uh;
4929 
4930     /* get the Ethernet header */
4931     eh = mtod(m, struct ether_vlan_header *);
4932 
4933     /* handle VLAN encapsulation if present */
4934     if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4935         e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4936         proto  = ntohs(eh->evl_proto);
4937     } else {
4938         e_hlen = ETHER_HDR_LEN;
4939         proto  = ntohs(eh->evl_encap_proto);
4940     }
4941 
4942     switch (proto) {
4943     case ETHERTYPE_IP:
4944         /* get the IP header, if mbuf len < 20 then header in next mbuf */
4945         ip4 = (m->m_len < sizeof(struct ip)) ?
4946                   (struct ip *)m->m_next->m_data :
4947                   (struct ip *)(m->m_data + e_hlen);
4948         /* ip_hl is in 32-bit words; store the length in 16-bit words */
4949         ip_hlen = (ip4->ip_hl << 1);
4950         ip = (caddr_t)ip4;
4951         break;
4952     case ETHERTYPE_IPV6:
4953         /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4954         ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4955                   (struct ip6_hdr *)m->m_next->m_data :
4956                   (struct ip6_hdr *)(m->m_data + e_hlen);
4957         /* XXX cannot support offload with IPv6 extensions */
4958         ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4959         ip = (caddr_t)ip6;
4960         break;
4961     default:
4962         /* We can't offload in this case... */
4963         /* XXX error stat ??? */
4964         return (0);
4965     }
4966 
4967     hlen = (e_hlen >> 1);
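    /* hlen is accumulated in 16-bit words from here on; e_hlen is in bytes */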
4968 
4969     /* note that the rest of global_data is indirectly zeroed here */
4970     if (m->m_flags & M_VLANTAG) {
4971         pbd->global_data =
4972             htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4973     } else {
4974         pbd->global_data = htole16(hlen);
4975     }
4976 
4977     pbd->ip_hlen_w = ip_hlen;
4978 
4979     hlen += pbd->ip_hlen_w;
4980 
4981     /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4982 
4983     if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4984                                   CSUM_TSO |
4985                                   CSUM_TCP_IPV6)) {
4986         th = (struct tcphdr *)(ip + (ip_hlen << 1));
4987         /* th_off is number of 32-bit words */
4988         hlen += (uint16_t)(th->th_off << 1);
4989     } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4990                                          CSUM_UDP_IPV6)) {
4991         uh = (struct udphdr *)(ip + (ip_hlen << 1));
4992         hlen += (sizeof(struct udphdr) / 2);
4993     } else {
4994         /* valid case as only CSUM_IP was set */
4995         return (0);
4996     }
4997 
4998     pbd->total_hlen_w = htole16(hlen);
4999 
5000     if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5001                                   CSUM_TSO |
5002                                   CSUM_TCP_IPV6)) {
5003         fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5004         pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5005     } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5006                                          CSUM_UDP_IPV6)) {
5007         fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5008 
5009         /*
5010          * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5011          * checksums and does not know anything about the UDP header and where
5012          * the checksum field is located. It only knows about TCP. Therefore
5013          * we "lie" to the hardware for outgoing UDP packets w/ checksum
5014          * offload. Since the checksum field offset for TCP is 16 bytes and
5015          * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5016          * bytes less than the start of the UDP header. This allows the
5017          * hardware to write the checksum in the correct spot. But the
5018          * hardware will compute a checksum which includes the last 10 bytes
5019          * of the IP header. To correct this we tweak the stack computed
5020          * pseudo checksum by folding in the calculation of the inverse
5021          * checksum for those final 10 bytes of the IP header. This allows
5022          * the correct checksum to be computed by the hardware.
5023          */
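        /*
         * Offset arithmetic behind the "lie": the checksum field sits at
         * byte offset 16 in a TCP header but at byte offset 6 in a UDP
         * header, so pointing the hardware 16 - 6 = 10 bytes before the UDP
         * header lands its TCP-style checksum write exactly on uh_sum.
         */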
5024 
5025         /* set pointer 10 bytes before UDP header */
5026         tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5027 
5028         /* calculate a pseudo header checksum over the first 10 bytes */
5029         tmp_csum = in_pseudo(*tmp_uh,
5030                              *(tmp_uh + 1),
5031                              *(uint16_t *)(tmp_uh + 2));
5032 
5033         pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5034     }
5035 
5036     return (hlen * 2); /* entire header length, number of bytes */
5037 }
5038 
5039 static void
5040 bxe_set_pbd_lso_e2(struct mbuf *m,
5041                    uint32_t    *parsing_data)
5042 {
5043     *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5044                        ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5045                       ETH_TX_PARSE_BD_E2_LSO_MSS);
5046 
5047     /* XXX test for IPv6 with extension header... */
5048 }
5049 
5050 static void
5051 bxe_set_pbd_lso(struct mbuf                *m,
5052                 struct eth_tx_parse_bd_e1x *pbd)
5053 {
5054     struct ether_vlan_header *eh = NULL;
5055     struct ip *ip = NULL;
5056     struct tcphdr *th = NULL;
5057     int e_hlen;
5058 
5059     /* get the Ethernet header */
5060     eh = mtod(m, struct ether_vlan_header *);
5061 
5062     /* handle VLAN encapsulation if present */
5063     e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5064                  (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5065 
5066     /* get the IP and TCP header, with LSO entire header in first mbuf */
5067     /* XXX assuming IPv4 */
5068     ip = (struct ip *)(m->m_data + e_hlen);
5069     th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5070 
5071     pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5072     pbd->tcp_send_seq = ntohl(th->th_seq);
5073     pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5074 
5075 #if 1
5076         /* XXX IPv4 */
5077         pbd->ip_id = ntohs(ip->ip_id);
5078         pbd->tcp_pseudo_csum =
5079             ntohs(in_pseudo(ip->ip_src.s_addr,
5080                             ip->ip_dst.s_addr,
5081                             htons(IPPROTO_TCP)));
5082 #else
5083         /* XXX IPv6 */
5084         pbd->tcp_pseudo_csum =
5085             ntohs(in_pseudo(&ip6->ip6_src,
5086                             &ip6->ip6_dst,
5087                             htons(IPPROTO_TCP)));
5088 #endif
5089 
5090     pbd->global_data |=
5091         htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5092 }
5093 
5094 /*
5095  * Encapsulates an mbuf cluster into the tx bd chain and makes the memory
5096  * visible to the controller.
5097  *
5098  * If an mbuf is submitted to this routine and cannot be given to the
5099  * controller (e.g. it has too many fragments) then the function may free
5100  * the mbuf and return to the caller.
5101  *
5102  * Returns:
5103  *   0 = Success, !0 = Failure
5104  *   Note the side effect that an mbuf may be freed if it causes a problem.
5105  */
5106 static int
5107 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5108 {
5109     bus_dma_segment_t segs[32];
5110     struct mbuf *m0;
5111     struct bxe_sw_tx_bd *tx_buf;
5112     struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5113     struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5114     /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5115     struct eth_tx_bd *tx_data_bd;
5116     struct eth_tx_bd *tx_total_pkt_size_bd;
5117     struct eth_tx_start_bd *tx_start_bd;
5118     uint16_t bd_prod, pkt_prod, total_pkt_size;
5119     uint8_t mac_type;
5120     int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5121     struct bxe_softc *sc;
5122     uint16_t tx_bd_avail;
5123     struct ether_vlan_header *eh;
5124     uint32_t pbd_e2_parsing_data = 0;
5125     uint8_t hlen = 0;
5126     int tmp_bd;
5127     int i;
5128 
5129     sc = fp->sc;
5130 
5131     M_ASSERTPKTHDR(*m_head);
5132 
5133     m0 = *m_head;
5134     rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5135     tx_start_bd = NULL;
5136     tx_data_bd = NULL;
5137     tx_total_pkt_size_bd = NULL;
5138 
5139     /* get the H/W pointer for packets and BDs */
5140     pkt_prod = fp->tx_pkt_prod;
5141     bd_prod = fp->tx_bd_prod;
5142 
5143     mac_type = UNICAST_ADDRESS;
5144 
5145     /* map the mbuf into the next open DMAable memory */
5146     tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5147     error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5148                                     tx_buf->m_map, m0,
5149                                     segs, &nsegs, BUS_DMA_NOWAIT);
5150 
5151     /* mapping errors */
5152     if (__predict_false(error != 0)) {
5153         fp->eth_q_stats.tx_dma_mapping_failure++;
5154         if (error == ENOMEM) {
5155             /* resource issue, try again later */
5156             rc = ENOMEM;
5157         } else if (error == EFBIG) {
5158             /* possibly recoverable with defragmentation */
5159             fp->eth_q_stats.mbuf_defrag_attempts++;
5160             m0 = m_defrag(*m_head, M_NOWAIT);
5161             if (m0 == NULL) {
5162                 fp->eth_q_stats.mbuf_defrag_failures++;
5163                 rc = ENOBUFS;
5164             } else {
5165                 /* defrag successful, try mapping again */
5166                 *m_head = m0;
5167                 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5168                                                 tx_buf->m_map, m0,
5169                                                 segs, &nsegs, BUS_DMA_NOWAIT);
5170                 if (error) {
5171                     fp->eth_q_stats.tx_dma_mapping_failure++;
5172                     rc = error;
5173                 }
5174             }
5175         } else {
5176             /* unknown, unrecoverable mapping error */
5177             BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5178             bxe_dump_mbuf(sc, m0, FALSE);
5179             rc = error;
5180         }
5181 
5182         goto bxe_tx_encap_continue;
5183     }
5184 
5185     tx_bd_avail = bxe_tx_avail(sc, fp);
5186 
5187     /* make sure there is enough room in the send queue */
5188     if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5189         /* Recoverable, try again later. */
5190         fp->eth_q_stats.tx_hw_queue_full++;
5191         bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5192         rc = ENOMEM;
5193         goto bxe_tx_encap_continue;
5194     }
5195 
5196     /* capture the current H/W TX chain high watermark */
5197     if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5198                         (TX_BD_USABLE - tx_bd_avail))) {
5199         fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5200     }
5201 
5202     /* make sure it fits in the packet window */
5203     if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5204         /*
5205          * The mbuf may be too big for the controller to handle. If the frame
5206          * is a TSO frame we'll need to do an additional check.
5207          */
5208         if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5209             if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5210                 goto bxe_tx_encap_continue; /* OK to send */
5211             } else {
5212                 fp->eth_q_stats.tx_window_violation_tso++;
5213             }
5214         } else {
5215             fp->eth_q_stats.tx_window_violation_std++;
5216         }
5217 
5218         /* let's try to defragment this mbuf and remap it */
5219         fp->eth_q_stats.mbuf_defrag_attempts++;
5220         bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5221 
5222         m0 = m_defrag(*m_head, M_NOWAIT);
5223         if (m0 == NULL) {
5224             fp->eth_q_stats.mbuf_defrag_failures++;
5225             /* Ugh, just drop the frame... :( */
5226             rc = ENOBUFS;
5227         } else {
5228             /* defrag successful, try mapping again */
5229             *m_head = m0;
5230             error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5231                                             tx_buf->m_map, m0,
5232                                             segs, &nsegs, BUS_DMA_NOWAIT);
5233             if (error) {
5234                 fp->eth_q_stats.tx_dma_mapping_failure++;
5235                 /* No sense in trying to defrag/copy chain, drop it. :( */
5236                 rc = error;
5237             } else {
5238                 /* if the chain is still too long then drop it */
5239                 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5240                     /*
5241                      * in case TSO is enabled nsegs should be checked against
5242                      * BXE_TSO_MAX_SEGMENTS
5243                      */
5244                     if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
5245                         bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5246                         fp->eth_q_stats.nsegs_path1_errors++;
5247                         rc = ENODEV;
5248                     }
5249                 } else {
5250                     if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5251                         bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5252                         fp->eth_q_stats.nsegs_path2_errors++;
5253                         rc = ENODEV;
5254                     }
5255                 }
5256             }
5257         }
5258     }
5259 
5260 bxe_tx_encap_continue:
5261 
5262     /* Check for errors */
5263     if (rc) {
5264         if (rc == ENOMEM) {
5265             /* recoverable, try again later */
5266         } else {
5267             fp->eth_q_stats.tx_soft_errors++;
5268             fp->eth_q_stats.mbuf_alloc_tx--;
5269             m_freem(*m_head);
5270             *m_head = NULL;
5271         }
5272 
5273         return (rc);
5274     }
5275 
5276     /* set flag according to packet type (UNICAST_ADDRESS is default) */
5277     if (m0->m_flags & M_BCAST) {
5278         mac_type = BROADCAST_ADDRESS;
5279     } else if (m0->m_flags & M_MCAST) {
5280         mac_type = MULTICAST_ADDRESS;
5281     }
5282 
5283     /* store the mbuf into the mbuf ring */
5284     tx_buf->m        = m0;
5285     tx_buf->first_bd = fp->tx_bd_prod;
5286     tx_buf->flags    = 0;
5287 
5288     /* prepare the first transmit (start) BD for the mbuf */
5289     tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5290 
5291     BLOGD(sc, DBG_TX,
5292           "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5293           pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5294 
5295     tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5296     tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5297     tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5298     total_pkt_size += tx_start_bd->nbytes;
5299     tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5300 
5301     tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
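    /* HDR_NBDS = 1: the packet headers are carried in the start BD */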
5302 
5303     /* all frames have at least Start BD + Parsing BD */
5304     nbds = nsegs + 1;
5305     tx_start_bd->nbd = htole16(nbds);
5306 
5307     if (m0->m_flags & M_VLANTAG) {
5308         tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5309         tx_start_bd->bd_flags.as_bitfield |=
5310             (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5311     } else {
5312         /* vf tx, start bd must hold the ethertype for fw to enforce it */
5313         if (IS_VF(sc)) {
5314             /* map ethernet header to find type and header length */
5315             eh = mtod(m0, struct ether_vlan_header *);
5316             tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5317         } else {
5318             /* used by FW for packet accounting */
5319             tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5320         }
5321     }
5322 
5323     /*
5324      * add a parsing BD from the chain. The parsing BD is always added
5325      * though it is only used for TSO and checksum offload
5326      */
5327     bd_prod = TX_BD_NEXT(bd_prod);
5328 
5329     if (m0->m_pkthdr.csum_flags) {
5330         if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5331             fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5332             tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5333         }
5334 
5335         if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5336             tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5337                                                   ETH_TX_BD_FLAGS_L4_CSUM);
5338         } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5339             tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5340                                                   ETH_TX_BD_FLAGS_IS_UDP |
5341                                                   ETH_TX_BD_FLAGS_L4_CSUM);
5342         } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5343                    (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5344             tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5345         } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5346             tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5347                                                   ETH_TX_BD_FLAGS_IS_UDP);
5348         }
5349     }
5350 
5351     if (!CHIP_IS_E1x(sc)) {
5352         pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5353         memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5354 
5355         if (m0->m_pkthdr.csum_flags) {
5356             hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5357         }
5358 
5359         SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5360                  mac_type);
5361     } else {
5362         uint16_t global_data = 0;
5363 
5364         pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5365         memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5366 
5367         if (m0->m_pkthdr.csum_flags) {
5368             hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5369         }
5370 
5371         SET_FLAG(global_data,
5372                  ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5373         pbd_e1x->global_data |= htole16(global_data);
5374     }
5375 
5376     /* setup the parsing BD with TSO specific info */
5377     if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5378         fp->eth_q_stats.tx_ofld_frames_lso++;
5379         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5380 
5381         if (__predict_false(tx_start_bd->nbytes > hlen)) {
5382             fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5383 
5384             /* split the first BD into header/data making the fw job easy */
5385             nbds++;
5386             tx_start_bd->nbd = htole16(nbds);
5387             tx_start_bd->nbytes = htole16(hlen);
5388 
5389             bd_prod = TX_BD_NEXT(bd_prod);
5390 
5391             /* new transmit BD after the tx_parse_bd */
5392             tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5393             tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5394             tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5395             tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5396             if (tx_total_pkt_size_bd == NULL) {
5397                 tx_total_pkt_size_bd = tx_data_bd;
5398             }
5399 
5400             BLOGD(sc, DBG_TX,
5401                   "TSO split header size is %d (%x:%x) nbds %d\n",
5402                   le16toh(tx_start_bd->nbytes),
5403                   le32toh(tx_start_bd->addr_hi),
5404                   le32toh(tx_start_bd->addr_lo),
5405                   nbds);
5406         }
5407 
5408         if (!CHIP_IS_E1x(sc)) {
5409             bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5410         } else {
5411             bxe_set_pbd_lso(m0, pbd_e1x);
5412         }
5413     }
5414 
5415     if (pbd_e2_parsing_data) {
5416         pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5417     }
5418 
5419     /* prepare remaining BDs, start tx bd contains first seg/frag */
5420     for (i = 1; i < nsegs ; i++) {
5421         bd_prod = TX_BD_NEXT(bd_prod);
5422         tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5423         tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5424         tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5425         tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5426         if (tx_total_pkt_size_bd == NULL) {
5427             tx_total_pkt_size_bd = tx_data_bd;
5428         }
5429         total_pkt_size += tx_data_bd->nbytes;
5430     }
5431 
5432     BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5433 
5434     if (tx_total_pkt_size_bd != NULL) {
5435         tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5436     }
5437 
5438     if (__predict_false(sc->debug & DBG_TX)) {
5439         tmp_bd = tx_buf->first_bd;
5440         for (i = 0; i < nbds; i++)
5441         {
5442             if (i == 0) {
5443                 BLOGD(sc, DBG_TX,
5444                       "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5445                       "bd_flags=0x%x hdr_nbds=%d\n",
5446                       tx_start_bd,
5447                       tmp_bd,
5448                       le16toh(tx_start_bd->nbd),
5449                       le16toh(tx_start_bd->vlan_or_ethertype),
5450                       tx_start_bd->bd_flags.as_bitfield,
5451                       (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5452             } else if (i == 1) {
5453                 if (pbd_e1x) {
5454                     BLOGD(sc, DBG_TX,
5455                           "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5456                           "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5457                           "tcp_seq=%u total_hlen_w=%u\n",
5458                           pbd_e1x,
5459                           tmp_bd,
5460                           pbd_e1x->global_data,
5461                           pbd_e1x->ip_hlen_w,
5462                           pbd_e1x->ip_id,
5463                           pbd_e1x->lso_mss,
5464                           pbd_e1x->tcp_flags,
5465                           pbd_e1x->tcp_pseudo_csum,
5466                           pbd_e1x->tcp_send_seq,
5467                           le16toh(pbd_e1x->total_hlen_w));
5468                 } else { /* if (pbd_e2) */
5469                     BLOGD(sc, DBG_TX,
5470                           "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5471                           "src=%02x:%02x:%02x parsing_data=0x%x\n",
5472                           pbd_e2,
5473                           tmp_bd,
5474                           pbd_e2->data.mac_addr.dst_hi,
5475                           pbd_e2->data.mac_addr.dst_mid,
5476                           pbd_e2->data.mac_addr.dst_lo,
5477                           pbd_e2->data.mac_addr.src_hi,
5478                           pbd_e2->data.mac_addr.src_mid,
5479                           pbd_e2->data.mac_addr.src_lo,
5480                           pbd_e2->parsing_data);
5481                 }
5482             }
5483 
5484             if (i != 1) { /* skip parse bd as it doesn't hold data */
5485                 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5486                 BLOGD(sc, DBG_TX,
5487                       "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5488                       tx_data_bd,
5489                       tmp_bd,
5490                       le16toh(tx_data_bd->nbytes),
5491                       le32toh(tx_data_bd->addr_hi),
5492                       le32toh(tx_data_bd->addr_lo));
5493             }
5494 
5495             tmp_bd = TX_BD_NEXT(tmp_bd);
5496         }
5497     }
5498 
5499     BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5500 
5501     /* update TX BD producer index value for next TX */
5502     bd_prod = TX_BD_NEXT(bd_prod);
5503 
5504     /*
5505      * If the chain of tx_bd's describing this frame is adjacent to or spans
5506      * an eth_tx_next_bd element then we need to increment the nbds value.
5507      */
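    /*
     * Each BD page ends in an eth_tx_next_bd link element that the hardware
     * traverses as well, so if the producer's index within its page
     * (TX_BD_IDX) is smaller than the number of BDs just written, the frame
     * crossed a page boundary and the link element must be counted too.
     */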
5508     if (TX_BD_IDX(bd_prod) < nbds) {
5509         nbds++;
5510     }
5511 
5512     /* don't allow reordering of writes for nbd and packets */
5513     mb();
5514 
5515     fp->tx_db.data.prod += nbds;
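    /*
     * tx_db is a union: data.prod holds the producer value while raw views
     * the same word whole, so the DOORBELL() below posts the updated
     * producer to the chip in a single write.
     */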
5516 
5517     /* producer points to the next free tx_bd at this point */
5518     fp->tx_pkt_prod++;
5519     fp->tx_bd_prod = bd_prod;
5520 
5521     DOORBELL(sc, fp->index, fp->tx_db.raw);
5522 
5523     fp->eth_q_stats.tx_pkts++;
5524 
5525     /* Prevent speculative reads from getting ahead of the status block. */
5526     bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5527                       0, 0, BUS_SPACE_BARRIER_READ);
5528 
5529     /* Prevent speculative reads from getting ahead of the doorbell. */
5530     bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5531                       0, 0, BUS_SPACE_BARRIER_READ);
5532 
5533     return (0);
5534 }
5535 
5536 static void
5537 bxe_tx_start_locked(struct bxe_softc *sc,
5538                     if_t ifp,
5539                     struct bxe_fastpath *fp)
5540 {
5541     struct mbuf *m = NULL;
5542     int tx_count = 0;
5543     uint16_t tx_bd_avail;
5544 
5545     BXE_FP_TX_LOCK_ASSERT(fp);
5546 
5547     /* keep adding entries while there are frames to send */
5548     while (!if_sendq_empty(ifp)) {
5549 
5550         /*
5551          * check for any frames to send
5552          * dequeue can still be NULL even if queue is not empty
5553          */
5554         m = if_dequeue(ifp);
5555         if (__predict_false(m == NULL)) {
5556             break;
5557         }
5558 
5559         /* the mbuf now belongs to us */
5560         fp->eth_q_stats.mbuf_alloc_tx++;
5561 
5562         /*
5563          * Put the frame into the transmit ring. If we don't have room,
5564          * place the mbuf back at the head of the TX queue, set the
5565          * OACTIVE flag, and wait for the NIC to drain the chain.
5566          */
5567         if (__predict_false(bxe_tx_encap(fp, &m))) {
5568             fp->eth_q_stats.tx_encap_failures++;
5569             if (m != NULL) {
5570                 /* mark the TX queue as full and return the frame */
5571                 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5572                 if_sendq_prepend(ifp, m);
5573                 fp->eth_q_stats.mbuf_alloc_tx--;
5574                 fp->eth_q_stats.tx_queue_xoff++;
5575             }
5576 
5577             /* stop looking for more work */
5578             break;
5579         }
5580 
5581         /* the frame was enqueued successfully */
5582         tx_count++;
5583 
5584         /* send a copy of the frame to any BPF listeners. */
5585         if_etherbpfmtap(ifp, m);
5586 
5587         tx_bd_avail = bxe_tx_avail(sc, fp);
5588 
5589         /* handle any completions if we're running low */
5590         if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5591             /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5592             bxe_txeof(sc, fp);
5593             if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5594                 break;
5595             }
5596         }
5597     }
5598 
5599     /* all TX packets were dequeued and/or the tx ring is full */
5600     if (tx_count > 0) {
5601         /* reset the TX watchdog timeout timer */
5602         fp->watchdog_timer = BXE_TX_TIMEOUT;
5603     }
5604 }
5605 
5606 /* Legacy (non-RSS) dispatch routine */
5607 static void
5608 bxe_tx_start(if_t ifp)
5609 {
5610     struct bxe_softc *sc;
5611     struct bxe_fastpath *fp;
5612 
5613     sc = if_getsoftc(ifp);
5614 
5615     if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5616         BLOGW(sc, "Interface not running, ignoring transmit request\n");
5617         return;
5618     }
5619 
5620     if (!sc->link_vars.link_up) {
5621         BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5622         return;
5623     }
5624 
5625     fp = &sc->fp[0];
5626 
5627     if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5628         fp->eth_q_stats.tx_queue_full_return++;
5629         return;
5630     }
5631 
5632     BXE_FP_TX_LOCK(fp);
5633     bxe_tx_start_locked(sc, ifp, fp);
5634     BXE_FP_TX_UNLOCK(fp);
5635 }
5636 
5637 static int
5638 bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5639                        if_t                ifp,
5640                        struct bxe_fastpath *fp,
5641                        struct mbuf         *m)
5642 {
5643     struct buf_ring *tx_br = fp->tx_br;
5644     struct mbuf *next;
5645     int depth, rc, tx_count;
5646     uint16_t tx_bd_avail;
5647 
5648     rc = tx_count = 0;
5649 
5650     BXE_FP_TX_LOCK_ASSERT(fp);
5651 
5652     if (sc->state != BXE_STATE_OPEN) {
5653         fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5654         return (ENETDOWN);
5655     }
5656 
5657     if (!tx_br) {
5658         BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5659         return (EINVAL);
5660     }
5661 
5662     if (m != NULL) {
5663         rc = drbr_enqueue(ifp, tx_br, m);
5664         if (rc != 0) {
5665             fp->eth_q_stats.tx_soft_errors++;
5666             goto bxe_tx_mq_start_locked_exit;
5667         }
5668     }
5669 
5670     if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5671         fp->eth_q_stats.tx_request_link_down_failures++;
5672         goto bxe_tx_mq_start_locked_exit;
5673     }
5674 
5675     /* fetch the depth of the driver queue */
5676     depth = drbr_inuse(ifp, tx_br);
5677     if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5678         fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5679     }
5680 
5681     /* keep adding entries while there are frames to send */
5682     while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5683         /* handle any completions if we're running low */
5684         tx_bd_avail = bxe_tx_avail(sc, fp);
5685         if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5686             /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5687             bxe_txeof(sc, fp);
5688             tx_bd_avail = bxe_tx_avail(sc, fp);
5689             if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5690                 fp->eth_q_stats.bd_avail_too_less_failures++;
5691                 m_freem(next);
5692                 drbr_advance(ifp, tx_br);
5693                 rc = ENOBUFS;
5694                 break;
5695             }
5696         }
5697 
5698         /* the mbuf now belongs to us */
5699         fp->eth_q_stats.mbuf_alloc_tx++;
5700 
5701         /*
5702          * Put the frame into the transmit ring. If we don't have room,
5703          * place the mbuf back at the head of the TX queue, set the
5704          * OACTIVE flag, and wait for the NIC to drain the chain.
5705          */
5706         rc = bxe_tx_encap(fp, &next);
5707         if (__predict_false(rc != 0)) {
5708             fp->eth_q_stats.tx_encap_failures++;
5709             if (next != NULL) {
5710                 /* mark the TX queue as full and save the frame */
5711                 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5712                 drbr_putback(ifp, tx_br, next);
5713                 fp->eth_q_stats.mbuf_alloc_tx--;
5714                 fp->eth_q_stats.tx_frames_deferred++;
5715             } else
5716                 drbr_advance(ifp, tx_br);
5717 
5718             /* stop looking for more work */
5719             break;
5720         }
5721 
5722         /* the transmit frame was enqueued successfully */
5723         tx_count++;
5724 
5725         /* send a copy of the frame to any BPF listeners */
5726         if_etherbpfmtap(ifp, next);
5727 
5728         drbr_advance(ifp, tx_br);
5729     }
5730 
5731     /* all TX packets were dequeued and/or the tx ring is full */
5732     if (tx_count > 0) {
5733         /* reset the TX watchdog timeout timer */
5734         fp->watchdog_timer = BXE_TX_TIMEOUT;
5735     }
5736 
5737 bxe_tx_mq_start_locked_exit:
5738     /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5739     if (!drbr_empty(ifp, tx_br)) {
5740         fp->eth_q_stats.tx_mq_not_empty++;
5741         taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5742     }
5743 
5744     return (rc);
5745 }
5746 
5747 static void
5748 bxe_tx_mq_start_deferred(void *arg,
5749                          int pending)
5750 {
5751     struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5752     struct bxe_softc *sc = fp->sc;
5753     if_t ifp = sc->ifp;
5754 
5755     BXE_FP_TX_LOCK(fp);
5756     bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5757     BXE_FP_TX_UNLOCK(fp);
5758 }
5759 
5760 /* Multiqueue (TSS) dispatch routine. */
5761 static int
5762 bxe_tx_mq_start(struct ifnet *ifp,
5763                 struct mbuf  *m)
5764 {
5765     struct bxe_softc *sc = if_getsoftc(ifp);
5766     struct bxe_fastpath *fp;
5767     int fp_index, rc;
5768 
5769     fp_index = 0; /* default is the first queue */
5770 
5771     /* check if flowid is set */
5772 
5773     if (BXE_VALID_FLOWID(m))
5774         fp_index = (m->m_pkthdr.flowid % sc->num_queues);
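    /*
     * The flowid is typically the RSS hash assigned on receive; taking it
     * modulo the number of queues keeps all packets of a flow on the same
     * TX queue.
     */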
5775 
5776     fp = &sc->fp[fp_index];
5777 
5778     if (sc->state != BXE_STATE_OPEN) {
5779         fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5780         return (ENETDOWN);
5781     }
5782 
5783     if (BXE_FP_TX_TRYLOCK(fp)) {
5784         rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5785         BXE_FP_TX_UNLOCK(fp);
5786     } else {
5787         rc = drbr_enqueue(ifp, fp->tx_br, m);
5788         taskqueue_enqueue(fp->tq, &fp->tx_task);
5789     }
5790 
5791     return (rc);
5792 }
5793 
5794 static void
5795 bxe_mq_flush(struct ifnet *ifp)
5796 {
5797     struct bxe_softc *sc = if_getsoftc(ifp);
5798     struct bxe_fastpath *fp;
5799     struct mbuf *m;
5800     int i;
5801 
5802     for (i = 0; i < sc->num_queues; i++) {
5803         fp = &sc->fp[i];
5804 
5805         if (fp->state != BXE_FP_STATE_IRQ) {
5806             BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5807                   fp->index, fp->state);
5808             continue;
5809         }
5810 
5811         if (fp->tx_br != NULL) {
5812             BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5813             BXE_FP_TX_LOCK(fp);
5814             while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5815                 m_freem(m);
5816             }
5817             BXE_FP_TX_UNLOCK(fp);
5818         }
5819     }
5820 
5821     if_qflush(ifp);
5822 }
5823 
5824 static uint16_t
5825 bxe_cid_ilt_lines(struct bxe_softc *sc)
5826 {
5827     if (IS_SRIOV(sc)) {
5828         return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5829     }
5830     return (L2_ILT_LINES(sc));
5831 }
5832 
5833 static void
5834 bxe_ilt_set_info(struct bxe_softc *sc)
5835 {
5836     struct ilt_client_info *ilt_client;
5837     struct ecore_ilt *ilt = sc->ilt;
5838     uint16_t line = 0;
5839 
5840     ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5841     BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5842 
5843     /* CDU */
5844     ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5845     ilt_client->client_num = ILT_CLIENT_CDU;
5846     ilt_client->page_size = CDU_ILT_PAGE_SZ;
5847     ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5848     ilt_client->start = line;
5849     line += bxe_cid_ilt_lines(sc);
5850 
5851     if (CNIC_SUPPORT(sc)) {
5852         line += CNIC_ILT_LINES;
5853     }
5854 
5855     ilt_client->end = (line - 1);
5856 
5857     BLOGD(sc, DBG_LOAD,
5858           "ilt client[CDU]: start %d, end %d, "
5859           "psz 0x%x, flags 0x%x, hw psz %d\n",
5860           ilt_client->start, ilt_client->end,
5861           ilt_client->page_size,
5862           ilt_client->flags,
5863           ilog2(ilt_client->page_size >> 12));
5864 
5865     /* QM */
5866     if (QM_INIT(sc->qm_cid_count)) {
5867         ilt_client = &ilt->clients[ILT_CLIENT_QM];
5868         ilt_client->client_num = ILT_CLIENT_QM;
5869         ilt_client->page_size = QM_ILT_PAGE_SZ;
5870         ilt_client->flags = 0;
5871         ilt_client->start = line;
5872 
5873         /* 4 bytes for each cid */
5874         line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5875                              QM_ILT_PAGE_SZ);
5876 
5877         ilt_client->end = (line - 1);
5878 
5879         BLOGD(sc, DBG_LOAD,
5880               "ilt client[QM]: start %d, end %d, "
5881               "psz 0x%x, flags 0x%x, hw psz %d\n",
5882               ilt_client->start, ilt_client->end,
5883               ilt_client->page_size, ilt_client->flags,
5884               ilog2(ilt_client->page_size >> 12));
5885     }
5886 
5887     if (CNIC_SUPPORT(sc)) {
5888         /* SRC */
5889         ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5890         ilt_client->client_num = ILT_CLIENT_SRC;
5891         ilt_client->page_size = SRC_ILT_PAGE_SZ;
5892         ilt_client->flags = 0;
5893         ilt_client->start = line;
5894         line += SRC_ILT_LINES;
5895         ilt_client->end = (line - 1);
5896 
5897         BLOGD(sc, DBG_LOAD,
5898               "ilt client[SRC]: start %d, end %d, "
5899               "psz 0x%x, flags 0x%x, hw psz %d\n",
5900               ilt_client->start, ilt_client->end,
5901               ilt_client->page_size, ilt_client->flags,
5902               ilog2(ilt_client->page_size >> 12));
5903 
5904         /* TM */
5905         ilt_client = &ilt->clients[ILT_CLIENT_TM];
5906         ilt_client->client_num = ILT_CLIENT_TM;
5907         ilt_client->page_size = TM_ILT_PAGE_SZ;
5908         ilt_client->flags = 0;
5909         ilt_client->start = line;
5910         line += TM_ILT_LINES;
5911         ilt_client->end = (line - 1);
5912 
5913         BLOGD(sc, DBG_LOAD,
5914               "ilt client[TM]: start %d, end %d, "
5915               "psz 0x%x, flags 0x%x, hw psz %d\n",
5916               ilt_client->start, ilt_client->end,
5917               ilt_client->page_size, ilt_client->flags,
5918               ilog2(ilt_client->page_size >> 12));
5919     }
5920 
5921     KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5922 }
5923 
5924 static void
5925 bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5926 {
5927     int i;
5928     uint32_t rx_buf_size;
5929 
5930     rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5931 
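    /*
     * Pick the smallest standard mbuf size that can hold the whole frame;
     * when the frame does not fit even a single jumbo page, cap the
     * per-mbuf allocation at a standard size rather than requesting a
     * larger contiguous buffer.
     */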
5932     for (i = 0; i < sc->num_queues; i++) {
5933         if (rx_buf_size <= MCLBYTES) {
5934             sc->fp[i].rx_buf_size = rx_buf_size;
5935             sc->fp[i].mbuf_alloc_size = MCLBYTES;
5936         } else if (rx_buf_size <= MJUMPAGESIZE) {
5937             sc->fp[i].rx_buf_size = rx_buf_size;
5938             sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5939         } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
5940             sc->fp[i].rx_buf_size = MCLBYTES;
5941             sc->fp[i].mbuf_alloc_size = MCLBYTES;
5942         } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
5943             sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5944             sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5945         } else {
5946             sc->fp[i].rx_buf_size = MCLBYTES;
5947             sc->fp[i].mbuf_alloc_size = MCLBYTES;
5948         }
5949     }
5950 }
5951 
5952 static int
5953 bxe_alloc_ilt_mem(struct bxe_softc *sc)
5954 {
5955     int rc = 0;
5956 
5957     if ((sc->ilt =
5958          (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5959                                     M_BXE_ILT,
5960                                     (M_NOWAIT | M_ZERO))) == NULL) {
5961         rc = 1;
5962     }
5963 
5964     return (rc);
5965 }
5966 
5967 static int
5968 bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5969 {
5970     int rc = 0;
5971 
5972     if ((sc->ilt->lines =
5973          (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5974                                     M_BXE_ILT,
5975                                     (M_NOWAIT | M_ZERO))) == NULL) {
5976         rc = 1;
5977     }
5978 
5979     return (rc);
5980 }
5981 
5982 static void
5983 bxe_free_ilt_mem(struct bxe_softc *sc)
5984 {
5985     if (sc->ilt != NULL) {
5986         free(sc->ilt, M_BXE_ILT);
5987         sc->ilt = NULL;
5988     }
5989 }
5990 
5991 static void
5992 bxe_free_ilt_lines_mem(struct bxe_softc *sc)
5993 {
5994     if (sc->ilt->lines != NULL) {
5995         free(sc->ilt->lines, M_BXE_ILT);
5996         sc->ilt->lines = NULL;
5997     }
5998 }
5999 
6000 static void
6001 bxe_free_mem(struct bxe_softc *sc)
6002 {
6003     int i;
6004 
6005     for (i = 0; i < L2_ILT_LINES(sc); i++) {
6006         bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6007         sc->context[i].vcxt = NULL;
6008         sc->context[i].size = 0;
6009     }
6010 
6011     ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6012 
6013     bxe_free_ilt_lines_mem(sc);
6014 
6015 }
6016 
6017 static int
6018 bxe_alloc_mem(struct bxe_softc *sc)
6019 {
6020 
6021     int context_size;
6022     int allocated;
6023     int i;
6024 
6025     /*
6026      * Allocate memory for CDU context:
6027      * This memory is allocated separately and not in the generic ILT
6028      * functions because CDU differs in few aspects:
6029      * 1. There can be multiple entities allocating memory for context -
6030      * regular L2, CNIC, and SRIOV drivers. Each separately controls
6031      * its own ILT lines.
6032      * 2. Since CDU page-size is not a single 4KB page (which is the case
6033      * for the other ILT clients), to be efficient we want to support
6034      * allocation of sub-page-size in the last entry.
6035      * 3. Context pointers are used by the driver to pass to FW / update
6036      * the context (for the other ILT clients the pointers are used just to
6037      * free the memory during unload).
6038      */
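    /*
     * Illustrative example: if the CDU context area works out to 3.5
     * CDU_ILT_PAGE_SZ pages, the loop below allocates three full-page DMA
     * buffers plus one final half-page buffer.
     */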
6039     context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6040     for (i = 0, allocated = 0; allocated < context_size; i++) {
6041         sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6042                                   (context_size - allocated));
6043 
6044         if (bxe_dma_alloc(sc, sc->context[i].size,
6045                           &sc->context[i].vcxt_dma,
6046                           "cdu context") != 0) {
6047             bxe_free_mem(sc);
6048             return (-1);
6049         }
6050 
6051         sc->context[i].vcxt =
6052             (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6053 
6054         allocated += sc->context[i].size;
6055     }
6056 
6057     bxe_alloc_ilt_lines_mem(sc);
6058 
6059     BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6060           sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6061     {
6062         for (i = 0; i < 4; i++) {
6063             BLOGD(sc, DBG_LOAD,
6064                   "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6065                   i,
6066                   sc->ilt->clients[i].page_size,
6067                   sc->ilt->clients[i].start,
6068                   sc->ilt->clients[i].end,
6069                   sc->ilt->clients[i].client_num,
6070                   sc->ilt->clients[i].flags);
6071         }
6072     }
6073     if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6074         BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6075         bxe_free_mem(sc);
6076         return (-1);
6077     }
6078 
6079     return (0);
6080 }
6081 
6082 static void
6083 bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6084 {
6085     int i;
6086 
6087     if (fp->rx_mbuf_tag == NULL) {
6088         return;
6089     }
6090 
6091     /* free all mbufs and unload all maps */
6092     for (i = 0; i < RX_BD_TOTAL; i++) {
6093         if (fp->rx_mbuf_chain[i].m_map != NULL) {
6094             bus_dmamap_sync(fp->rx_mbuf_tag,
6095                             fp->rx_mbuf_chain[i].m_map,
6096                             BUS_DMASYNC_POSTREAD);
6097             bus_dmamap_unload(fp->rx_mbuf_tag,
6098                               fp->rx_mbuf_chain[i].m_map);
6099         }
6100 
6101         if (fp->rx_mbuf_chain[i].m != NULL) {
6102             m_freem(fp->rx_mbuf_chain[i].m);
6103             fp->rx_mbuf_chain[i].m = NULL;
6104             fp->eth_q_stats.mbuf_alloc_rx--;
6105         }
6106     }
6107 }
6108 
6109 static void
6110 bxe_free_tpa_pool(struct bxe_fastpath *fp)
6111 {
6112     struct bxe_softc *sc;
6113     int i, max_agg_queues;
6114 
6115     sc = fp->sc;
6116 
6117     if (fp->rx_mbuf_tag == NULL) {
6118         return;
6119     }
6120 
6121     max_agg_queues = MAX_AGG_QS(sc);
6122 
6123     /* release all mbufs and unload all DMA maps in the TPA pool */
6124     for (i = 0; i < max_agg_queues; i++) {
6125         if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6126             bus_dmamap_sync(fp->rx_mbuf_tag,
6127                             fp->rx_tpa_info[i].bd.m_map,
6128                             BUS_DMASYNC_POSTREAD);
6129             bus_dmamap_unload(fp->rx_mbuf_tag,
6130                               fp->rx_tpa_info[i].bd.m_map);
6131         }
6132 
6133         if (fp->rx_tpa_info[i].bd.m != NULL) {
6134             m_freem(fp->rx_tpa_info[i].bd.m);
6135             fp->rx_tpa_info[i].bd.m = NULL;
6136             fp->eth_q_stats.mbuf_alloc_tpa--;
6137         }
6138     }
6139 }
6140 
6141 static void
6142 bxe_free_sge_chain(struct bxe_fastpath *fp)
6143 {
6144     int i;
6145 
6146     if (fp->rx_sge_mbuf_tag == NULL) {
6147         return;
6148     }
6149 
6150     /* free all mbufs and unload all maps */
6151     for (i = 0; i < RX_SGE_TOTAL; i++) {
6152         if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6153             bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6154                             fp->rx_sge_mbuf_chain[i].m_map,
6155                             BUS_DMASYNC_POSTREAD);
6156             bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6157                               fp->rx_sge_mbuf_chain[i].m_map);
6158         }
6159 
6160         if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6161             m_freem(fp->rx_sge_mbuf_chain[i].m);
6162             fp->rx_sge_mbuf_chain[i].m = NULL;
6163             fp->eth_q_stats.mbuf_alloc_sge--;
6164         }
6165     }
6166 }
6167 
6168 static void
6169 bxe_free_fp_buffers(struct bxe_softc *sc)
6170 {
6171     struct bxe_fastpath *fp;
6172     int i;
6173 
6174     for (i = 0; i < sc->num_queues; i++) {
6175         fp = &sc->fp[i];
6176 
6177         if (fp->tx_br != NULL) {
6178             /* just in case bxe_mq_flush() wasn't called */
6179             if (mtx_initialized(&fp->tx_mtx)) {
6180                 struct mbuf *m;
6181 
6182                 BXE_FP_TX_LOCK(fp);
6183                 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6184                     m_freem(m);
6185                 BXE_FP_TX_UNLOCK(fp);
6186             }
6187         }
6188 
6189         /* free all RX buffers */
6190         bxe_free_rx_bd_chain(fp);
6191         bxe_free_tpa_pool(fp);
6192         bxe_free_sge_chain(fp);
6193 
6194         if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6195             BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6196                   fp->eth_q_stats.mbuf_alloc_rx);
6197         }
6198 
6199         if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6200             BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6201                   fp->eth_q_stats.mbuf_alloc_sge);
6202         }
6203 
6204         if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6205             BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n",
6206                   fp->eth_q_stats.mbuf_alloc_tpa);
6207         }
6208 
6209         if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6210             BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6211                   fp->eth_q_stats.mbuf_alloc_tx);
6212         }
6213 
6214         /* XXX verify all mbufs were reclaimed */
6215     }
6216 }
6217 
6218 static int
6219 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6220                      uint16_t            prev_index,
6221                      uint16_t            index)
6222 {
6223     struct bxe_sw_rx_bd *rx_buf;
6224     struct eth_rx_bd *rx_bd;
6225     bus_dma_segment_t segs[1];
6226     bus_dmamap_t map;
6227     struct mbuf *m;
6228     int nsegs, rc;
6229 
6230     rc = 0;
6231 
6232     /* allocate the new RX BD mbuf */
6233     m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6234     if (__predict_false(m == NULL)) {
6235         fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6236         return (ENOBUFS);
6237     }
6238 
6239     fp->eth_q_stats.mbuf_alloc_rx++;
6240 
6241     /* initialize the mbuf buffer length */
6242     m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6243 
6244     /* map the mbuf into non-paged pool */
6245     rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6246                                  fp->rx_mbuf_spare_map,
6247                                  m, segs, &nsegs, BUS_DMA_NOWAIT);
6248     if (__predict_false(rc != 0)) {
6249         fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6250         m_freem(m);
6251         fp->eth_q_stats.mbuf_alloc_rx--;
6252         return (rc);
6253     }
6254 
6255     /* all mbufs must map to a single segment */
6256     KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6257 
6258     /* release any existing RX BD mbuf mappings */
6259 
6260     if (prev_index != index) {
6261         rx_buf = &fp->rx_mbuf_chain[prev_index];
6262 
6263         if (rx_buf->m_map != NULL) {
6264             bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6265                             BUS_DMASYNC_POSTREAD);
6266             bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6267         }
6268 
6269         /*
6270          * We only get here from bxe_rxeof() when the maximum number
6271          * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6272          * holds the mbuf in the prev_index so it's OK to NULL it out
6273          * here without concern of a memory leak.
6274          */
6275         fp->rx_mbuf_chain[prev_index].m = NULL;
6276     }
6277 
6278     rx_buf = &fp->rx_mbuf_chain[index];
6279 
6280     if (rx_buf->m_map != NULL) {
6281         bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6282                         BUS_DMASYNC_POSTREAD);
6283         bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6284     }
6285 
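    /*
     * Note (descriptive): the spare map was loaded with the new mbuf above.
     * Below it becomes this ring slot's map, and the map that was just
     * unloaded becomes the new spare, so every allocation reuses an idle
     * DMA map instead of creating a fresh one.
     */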
6286     /* save the mbuf and mapping info for a future packet */
6287     map = (prev_index != index) ?
6288               fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6289     rx_buf->m_map = fp->rx_mbuf_spare_map;
6290     fp->rx_mbuf_spare_map = map;
6291     bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6292                     BUS_DMASYNC_PREREAD);
6293     rx_buf->m = m;
6294 
6295     rx_bd = &fp->rx_chain[index];
6296     rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6297     rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6298 
6299     return (rc);
6300 }
6301 
6302 static int
6303 bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6304                       int                 queue)
6305 {
6306     struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6307     bus_dma_segment_t segs[1];
6308     bus_dmamap_t map;
6309     struct mbuf *m;
6310     int nsegs;
6311     int rc = 0;
6312 
6313     /* allocate the new TPA mbuf */
6314     m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6315     if (__predict_false(m == NULL)) {
6316         fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6317         return (ENOBUFS);
6318     }
6319 
6320     fp->eth_q_stats.mbuf_alloc_tpa++;
6321 
6322     /* initialize the mbuf buffer length */
6323     m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6324 
6325     /* map the mbuf into non-paged pool */
6326     rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6327                                  fp->rx_tpa_info_mbuf_spare_map,
6328                                  m, segs, &nsegs, BUS_DMA_NOWAIT);
6329     if (__predict_false(rc != 0)) {
6330         fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6331         m_free(m);
6332         fp->eth_q_stats.mbuf_alloc_tpa--;
6333         return (rc);
6334     }
6335 
6336     /* all mbufs must map to a single segment */
6337     KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6338 
6339     /* release any existing TPA mbuf mapping */
6340     if (tpa_info->bd.m_map != NULL) {
6341         bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6342                         BUS_DMASYNC_POSTREAD);
6343         bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6344     }
6345 
6346     /* save the mbuf and mapping info for the TPA mbuf */
6347     map = tpa_info->bd.m_map;
6348     tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6349     fp->rx_tpa_info_mbuf_spare_map = map;
6350     bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6351                     BUS_DMASYNC_PREREAD);
6352     tpa_info->bd.m = m;
6353     tpa_info->seg = segs[0];
6354 
6355     return (rc);
6356 }
6357 
6358 /*
6359  * Allocate an mbuf and assign it to the receive scatter gather chain. The
6360  * caller must take care to save a copy of the existing mbuf in the SG mbuf
6361  * chain.
6362  */
6363 static int
6364 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6365                       uint16_t            index)
6366 {
6367     struct bxe_sw_rx_bd *sge_buf;
6368     struct eth_rx_sge *sge;
6369     bus_dma_segment_t segs[1];
6370     bus_dmamap_t map;
6371     struct mbuf *m;
6372     int nsegs;
6373     int rc = 0;
6374 
6375     /* allocate a new SGE mbuf */
6376     m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6377     if (__predict_false(m == NULL)) {
6378         fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6379         return (ENOMEM);
6380     }
6381 
6382     fp->eth_q_stats.mbuf_alloc_sge++;
6383 
6384     /* initialize the mbuf buffer length */
6385     m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6386 
6387     /* map the SGE mbuf into non-paged pool */
6388     rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6389                                  fp->rx_sge_mbuf_spare_map,
6390                                  m, segs, &nsegs, BUS_DMA_NOWAIT);
6391     if (__predict_false(rc != 0)) {
6392         fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6393         m_freem(m);
6394         fp->eth_q_stats.mbuf_alloc_sge--;
6395         return (rc);
6396     }
6397 
6398     /* all mbufs must map to a single segment */
6399     KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6400 
6401     sge_buf = &fp->rx_sge_mbuf_chain[index];
6402 
6403     /* release any existing SGE mbuf mapping */
6404     if (sge_buf->m_map != NULL) {
6405         bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6406                         BUS_DMASYNC_POSTREAD);
6407         bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6408     }
6409 
6410     /* save the mbuf and mapping info for a future packet */
6411     map = sge_buf->m_map;
6412     sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6413     fp->rx_sge_mbuf_spare_map = map;
6414     bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6415                     BUS_DMASYNC_PREREAD);
6416     sge_buf->m = m;
6417 
6418     sge = &fp->rx_sge_chain[index];
6419     sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6420     sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6421 
6422     return (rc);
6423 }
6424 
6425 static __noinline int
6426 bxe_alloc_fp_buffers(struct bxe_softc *sc)
6427 {
6428     struct bxe_fastpath *fp;
6429     int i, j, rc = 0;
6430     int ring_prod, cqe_ring_prod;
6431     int max_agg_queues;
6432 
6433     for (i = 0; i < sc->num_queues; i++) {
6434         fp = &sc->fp[i];
6435 
6436         ring_prod = cqe_ring_prod = 0;
6437         fp->rx_bd_cons = 0;
6438         fp->rx_cq_cons = 0;
6439 
6440         /* allocate buffers for the RX BDs in RX BD chain */
6441         for (j = 0; j < sc->max_rx_bufs; j++) {
6442             rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6443             if (rc != 0) {
6444                 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6445                       i, rc);
6446                 goto bxe_alloc_fp_buffers_error;
6447             }
6448 
6449             ring_prod     = RX_BD_NEXT(ring_prod);
6450             cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6451         }
6452 
6453         fp->rx_bd_prod = ring_prod;
6454         fp->rx_cq_prod = cqe_ring_prod;
6455         fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6456 
6457         max_agg_queues = MAX_AGG_QS(sc);
6458 
6459         fp->tpa_enable = TRUE;
6460 
6461         /* fill the TPA pool */
6462         for (j = 0; j < max_agg_queues; j++) {
6463             rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6464             if (rc != 0) {
6465                 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6466                           i, j);
6467                 fp->tpa_enable = FALSE;
6468                 goto bxe_alloc_fp_buffers_error;
6469             }
6470 
6471             fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6472         }
6473 
6474         if (fp->tpa_enable) {
6475             /* fill the RX SGE chain */
6476             ring_prod = 0;
6477             for (j = 0; j < RX_SGE_USABLE; j++) {
6478                 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6479                 if (rc != 0) {
6480                     BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6481                               i, ring_prod);
6482                     fp->tpa_enable = FALSE;
6483                     ring_prod = 0;
6484                     goto bxe_alloc_fp_buffers_error;
6485                 }
6486 
6487                 ring_prod = RX_SGE_NEXT(ring_prod);
6488             }
6489 
6490             fp->rx_sge_prod = ring_prod;
6491         }
6492     }
6493 
6494     return (0);
6495 
6496 bxe_alloc_fp_buffers_error:
6497 
6498     /* unwind what was already allocated */
6499     bxe_free_rx_bd_chain(fp);
6500     bxe_free_tpa_pool(fp);
6501     bxe_free_sge_chain(fp);
6502 
6503     return (ENOBUFS);
6504 }
6505 
6506 static void
6507 bxe_free_fw_stats_mem(struct bxe_softc *sc)
6508 {
6509     bxe_dma_free(sc, &sc->fw_stats_dma);
6510 
6511     sc->fw_stats_num = 0;
6512 
6513     sc->fw_stats_req_size = 0;
6514     sc->fw_stats_req = NULL;
6515     sc->fw_stats_req_mapping = 0;
6516 
6517     sc->fw_stats_data_size = 0;
6518     sc->fw_stats_data = NULL;
6519     sc->fw_stats_data_mapping = 0;
6520 }
6521 
6522 static int
6523 bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6524 {
6525     uint8_t num_queue_stats;
6526     int num_groups;
6527 
6528     /* number of queues for statistics is number of eth queues */
6529     num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6530 
6531     /*
6532      * Total number of FW statistics requests =
6533      *   1 for port stats + 1 for PF stats + num of queues
6534      */
6535     sc->fw_stats_num = (2 + num_queue_stats);
6536 
6537     /*
6538      * Request is built from stats_query_header and an array of
6539      * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6540      * rules. The real number or requests is configured in the
6541      * stats_query_header.
6542      */
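    /*
     * Illustrative example (values assumed): with 4 ETH queues fw_stats_num
     * is 6 (port + PF + 4 queues); if STATS_QUERY_CMD_COUNT were 16, this
     * works out to a single command group.
     */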
6543     num_groups =
6544         ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6545          ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6546 
6547     BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6548           sc->fw_stats_num, num_groups);
6549 
6550     sc->fw_stats_req_size =
6551         (sizeof(struct stats_query_header) +
6552          (num_groups * sizeof(struct stats_query_cmd_group)));
6553 
6554     /*
6555      * Data for statistics requests + stats_counter.
6556      * stats_counter holds per-STORM counters that are incremented when
6557      * STORM has finished with the current request. Memory for FCoE
6558      * offloaded statistics are counted anyway, even if they will not be sent.
6559      * VF stats are not accounted for here as the data of VF stats is stored
6560      * in memory allocated by the VF, not here.
6561      */
6562     sc->fw_stats_data_size =
6563         (sizeof(struct stats_counter) +
6564          sizeof(struct per_port_stats) +
6565          sizeof(struct per_pf_stats) +
6566          /* sizeof(struct fcoe_statistics_params) + */
6567          (sizeof(struct per_queue_stats) * num_queue_stats));
6568 
6569     if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6570                       &sc->fw_stats_dma, "fw stats") != 0) {
6571         bxe_free_fw_stats_mem(sc);
6572         return (-1);
6573     }
6574 
6575     /* set up the shortcuts */
6576 
6577     sc->fw_stats_req =
6578         (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6579     sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6580 
6581     sc->fw_stats_data =
6582         (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6583                                      sc->fw_stats_req_size);
6584     sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6585                                  sc->fw_stats_req_size);
6586 
6587     BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6588           (uintmax_t)sc->fw_stats_req_mapping);
6589 
6590     BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6591           (uintmax_t)sc->fw_stats_data_mapping);
6592 
6593     return (0);
6594 }
6595 
6596 /*
6597  * Bits map:
6598  * 0-7  - Engine0 load counter.
6599  * 8-15 - Engine1 load counter.
6600  * 16   - Engine0 RESET_IN_PROGRESS bit.
6601  * 17   - Engine1 RESET_IN_PROGRESS bit.
6602  * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6603  *        function on the engine
6604  * 19   - Engine1 ONE_IS_LOADED.
6605  * 20   - Chip reset flow bit. When set none-leader must wait for both engines
6606  *        leader to complete (check for both RESET_IN_PROGRESS bits and not
6607  *        for just the one belonging to its engine).
6608  */
6609 #define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6610 #define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6611 #define BXE_PATH0_LOAD_CNT_SHIFT  0
6612 #define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6613 #define BXE_PATH1_LOAD_CNT_SHIFT  8
6614 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6615 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6616 #define BXE_GLOBAL_RESET_BIT      0x00040000
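
/*
 * Illustrative use of the fields above: the engine 1 load mask is recovered
 * as ((val & BXE_PATH1_LOAD_CNT_MASK) >> BXE_PATH1_LOAD_CNT_SHIFT), which is
 * exactly what bxe_get_load_status() below does for engine 1.
 */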
6617 
6618 /* set the GLOBAL_RESET bit, should be run under rtnl lock */
6619 static void
6620 bxe_set_reset_global(struct bxe_softc *sc)
6621 {
6622     uint32_t val;
6623     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6624     val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6625     REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6626     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6627 }
6628 
6629 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6630 static void
6631 bxe_clear_reset_global(struct bxe_softc *sc)
6632 {
6633     uint32_t val;
6634     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6635     val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6636     REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6637     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6638 }
6639 
6640 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6641 static uint8_t
6642 bxe_reset_is_global(struct bxe_softc *sc)
6643 {
6644     uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6645     BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6646     return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6647 }
6648 
6649 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6650 static void
6651 bxe_set_reset_done(struct bxe_softc *sc)
6652 {
6653     uint32_t val;
6654     uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6655                                  BXE_PATH0_RST_IN_PROG_BIT;
6656 
6657     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6658 
6659     val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6660     /* Clear the bit */
6661     val &= ~bit;
6662     REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6663 
6664     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6665 }
6666 
6667 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6668 static void
6669 bxe_set_reset_in_progress(struct bxe_softc *sc)
6670 {
6671     uint32_t val;
6672     uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6673                                  BXE_PATH0_RST_IN_PROG_BIT;
6674 
6675     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6676 
6677     val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6678     /* Set the bit */
6679     val |= bit;
6680     REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6681 
6682     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6683 }
6684 
6685 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6686 static uint8_t
6687 bxe_reset_is_done(struct bxe_softc *sc,
6688                   int              engine)
6689 {
6690     uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6691     uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6692                             BXE_PATH0_RST_IN_PROG_BIT;
6693 
6694     /* return false if bit is set */
6695     return (val & bit) ? FALSE : TRUE;
6696 }
6697 
6698 /* get the load status for an engine, should be run under rtnl lock */
6699 static uint8_t
6700 bxe_get_load_status(struct bxe_softc *sc,
6701                     int              engine)
6702 {
6703     uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6704                              BXE_PATH0_LOAD_CNT_MASK;
6705     uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6706                               BXE_PATH0_LOAD_CNT_SHIFT;
6707     uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6708 
6709     BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6710 
6711     val = ((val & mask) >> shift);
6712 
6713     BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6714 
6715     return (val != 0);
6716 }
6717 
6718 /* set pf load mark */
6719 /* XXX needs to be under rtnl lock */
6720 static void
6721 bxe_set_pf_load(struct bxe_softc *sc)
6722 {
6723     uint32_t val;
6724     uint32_t val1;
6725     uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6726                                   BXE_PATH0_LOAD_CNT_MASK;
6727     uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6728                                    BXE_PATH0_LOAD_CNT_SHIFT;
6729 
6730     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6731 
6732     val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6733     BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6734 
6735     /* get the current counter value */
6736     val1 = ((val & mask) >> shift);
6737 
6738     /* set bit of this PF */
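    /*
     * Illustrative example: if abs functions 0 and 6 are already marked
     * (val1 == 0x41) and this is abs function 2, val1 becomes 0x45.
     */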
6739     val1 |= (1 << SC_ABS_FUNC(sc));
6740 
6741     /* clear the old value */
6742     val &= ~mask;
6743 
6744     /* set the new one */
6745     val |= ((val1 << shift) & mask);
6746 
6747     REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6748 
6749     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6750 }
6751 
6752 /* clear pf load mark */
6753 /* XXX needs to be under rtnl lock */
6754 static uint8_t
6755 bxe_clear_pf_load(struct bxe_softc *sc)
6756 {
6757     uint32_t val1, val;
6758     uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6759                                   BXE_PATH0_LOAD_CNT_MASK;
6760     uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6761                                    BXE_PATH0_LOAD_CNT_SHIFT;
6762 
6763     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6764     val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6765     BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6766 
6767     /* get the current counter value */
6768     val1 = (val & mask) >> shift;
6769 
6770     /* clear bit of that PF */
6771     val1 &= ~(1 << SC_ABS_FUNC(sc));
6772 
6773     /* clear the old value */
6774     val &= ~mask;
6775 
6776     /* set the new one */
6777     val |= ((val1 << shift) & mask);
6778 
6779     REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6780     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6781     return (val1 != 0);
6782 }
6783 
6784 /* send load request to the MCP and analyze the response */
6785 static int
6786 bxe_nic_load_request(struct bxe_softc *sc,
6787                      uint32_t         *load_code)
6788 {
6789     /* init fw_seq */
6790     sc->fw_seq =
6791         (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6792          DRV_MSG_SEQ_NUMBER_MASK);
6793 
6794     BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6795 
6796     /* get the current FW pulse sequence */
6797     sc->fw_drv_pulse_wr_seq =
6798         (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6799          DRV_PULSE_SEQ_MASK);
6800 
6801     BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6802           sc->fw_drv_pulse_wr_seq);
6803 
6804     /* load request */
6805     (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6806                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6807 
6808     /* if the MCP fails to respond we must abort */
6809     if (!(*load_code)) {
6810         BLOGE(sc, "MCP response failure!\n");
6811         return (-1);
6812     }
6813 
6814     /* if MCP refused then must abort */
6815     if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6816         BLOGE(sc, "MCP refused load request\n");
6817         return (-1);
6818     }
6819 
6820     return (0);
6821 }
6822 
6823 /*
6824  * Check whether another PF has already loaded FW to chip. In virtualized
6825  * environments a PF from another VM may have already initialized the
6826  * device, including loading the FW.
6827  */
6828 static int
6829 bxe_nic_load_analyze_req(struct bxe_softc *sc,
6830                          uint32_t         load_code)
6831 {
6832     uint32_t my_fw, loaded_fw;
6833 
6834     /* is another pf loaded on this engine? */
6835     if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6836         (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6837         /* build my FW version dword */
6838         my_fw = (BCM_5710_FW_MAJOR_VERSION +
6839                  (BCM_5710_FW_MINOR_VERSION << 8 ) +
6840                  (BCM_5710_FW_REVISION_VERSION << 16) +
6841                  (BCM_5710_FW_ENGINEERING_VERSION << 24));
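        /*
         * Purely illustrative: a hypothetical 7.13.1.0 firmware would pack
         * as 0x00010d07 (engineering/revision/minor/major, MSB to LSB).
         */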
6842 
6843         /* read loaded FW from chip */
6844         loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6845         BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6846               loaded_fw, my_fw);
6847 
6848         /* abort nic load if version mismatch */
6849         if (my_fw != loaded_fw) {
6850             BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6851                   loaded_fw, my_fw);
6852             return (-1);
6853         }
6854     }
6855 
6856     return (0);
6857 }
6858 
6859 /* mark PMF if applicable */
6860 static void
6861 bxe_nic_load_pmf(struct bxe_softc *sc,
6862                  uint32_t         load_code)
6863 {
6864     uint32_t ncsi_oem_data_addr;
6865 
6866     if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6867         (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6868         (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6869         /*
6870          * Barrier for ordering between writing sc->port.pmf here and
6871          * reading it from the periodic task.
6872          */
6873         sc->port.pmf = 1;
6874         mb();
6875     } else {
6876         sc->port.pmf = 0;
6877     }
6878 
6879     BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6880 
6881     /* XXX needed? */
6882     if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6883         if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6884             ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6885             if (ncsi_oem_data_addr) {
6886                 REG_WR(sc,
6887                        (ncsi_oem_data_addr +
6888                         offsetof(struct glob_ncsi_oem_data, driver_version)),
6889                        0);
6890             }
6891         }
6892     }
6893 }
6894 
6895 static void
6896 bxe_read_mf_cfg(struct bxe_softc *sc)
6897 {
6898     int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6899     int abs_func;
6900     int vn;
6901 
6902     if (BXE_NOMCP(sc)) {
6903         return; /* what should be the default value in this case */
6904     }
6905 
6906     /*
6907      * The formula for computing the absolute function number is...
6908      * For 2 port configuration (4 functions per port):
6909      *   abs_func = 2 * vn + SC_PORT + SC_PATH
6910      * For 4 port configuration (2 functions per port):
6911      *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6912      */
6913     for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6914         abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6915         if (abs_func >= E1H_FUNC_MAX) {
6916             break;
6917         }
6918         sc->devinfo.mf_info.mf_config[vn] =
6919             MFCFG_RD(sc, func_mf_config[abs_func].config);
6920     }
6921 
6922     if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6923         FUNC_MF_CFG_FUNC_DISABLED) {
6924         BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6925         sc->flags |= BXE_MF_FUNC_DIS;
6926     } else {
6927         BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6928         sc->flags &= ~BXE_MF_FUNC_DIS;
6929     }
6930 }
6931 
6932 /* acquire split MCP access lock register */
6933 static int bxe_acquire_alr(struct bxe_softc *sc)
6934 {
6935     uint32_t j, val;
6936 
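    /*
     * Poll for the lock bit (bit 31) to latch: up to 1000 attempts with a
     * 5 ms delay between them, i.e. roughly a 5 second worst-case timeout.
     */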
6937     for (j = 0; j < 1000; j++) {
6938         val = (1UL << 31);
6939         REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6940         val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6941         if (val & (1L << 31))
6942             break;
6943 
6944         DELAY(5000);
6945     }
6946 
6947     if (!(val & (1L << 31))) {
6948         BLOGE(sc, "Cannot acquire MCP access lock register\n");
6949         return (-1);
6950     }
6951 
6952     return (0);
6953 }
6954 
6955 /* release split MCP access lock register */
6956 static void bxe_release_alr(struct bxe_softc *sc)
6957 {
6958     REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6959 }
6960 
6961 static void
6962 bxe_fan_failure(struct bxe_softc *sc)
6963 {
6964     int port = SC_PORT(sc);
6965     uint32_t ext_phy_config;
6966 
6967     /* mark the failure */
6968     ext_phy_config =
6969         SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
6970 
6971     ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6972     ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
6973     SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
6974              ext_phy_config);
6975 
6976     /* log the failure */
6977     BLOGW(sc, "Fan Failure has caused the driver to shutdown "
6978               "the card to prevent permanent damage. "
6979               "Please contact OEM Support for assistance\n");
6980 
6981     /* XXX */
6982 #if 1
6983     bxe_panic(sc, ("Schedule task to handle fan failure\n"));
6984 #else
6985     /*
6986      * Schedule device reset (unload)
6987      * Some boards consume enough power while the driver is up to
6988      * overheat if the fan fails.
6989      */
6990     bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
6991     schedule_delayed_work(&sc->sp_rtnl_task, 0);
6992 #endif
6993 }
6994 
6995 /* this function is called upon a link interrupt */
6996 static void
6997 bxe_link_attn(struct bxe_softc *sc)
6998 {
6999     uint32_t pause_enabled = 0;
7000     struct host_port_stats *pstats;
7001     int cmng_fns;
7002     struct bxe_fastpath *fp;
7003     int i;
7004 
7005     /* Make sure that we are synced with the current statistics */
7006     bxe_stats_handle(sc, STATS_EVENT_STOP);
7007     BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
7008     elink_link_update(&sc->link_params, &sc->link_vars);
7009 
7010     if (sc->link_vars.link_up) {
7011 
7012         /* dropless flow control */
7013         if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7014             pause_enabled = 0;
7015 
7016             if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7017                 pause_enabled = 1;
7018             }
7019 
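            /*
             * Report to the USTORM firmware whether TX pause is currently
             * negotiated on this port (used for dropless flow control).
             */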
7020             REG_WR(sc,
7021                    (BAR_USTRORM_INTMEM +
7022                     USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7023                    pause_enabled);
7024         }
7025 
7026         if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7027             pstats = BXE_SP(sc, port_stats);
7028             /* reset old mac stats */
7029             memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7030         }
7031 
7032         if (sc->state == BXE_STATE_OPEN) {
7033             bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7034 	    /* Restart tx when the link comes back. */
7035 	    FOR_EACH_ETH_QUEUE(sc, i) {
7036 		fp = &sc->fp[i];
7037 		taskqueue_enqueue(fp->tq, &fp->tx_task);
7038 	    }
7039         }
7040 
7041     }
7042 
7043     if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7044         cmng_fns = bxe_get_cmng_fns_mode(sc);
7045 
7046         if (cmng_fns != CMNG_FNS_NONE) {
7047             bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7048             storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7049         } else {
7050             /* rate shaping and fairness are disabled */
7051             BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7052         }
7053     }
7054 
7055     bxe_link_report_locked(sc);
7056 
7057     if (IS_MF(sc)) {
7058         ; // XXX bxe_link_sync_notify(sc);
7059     }
7060 }
7061 
7062 static void
7063 bxe_attn_int_asserted(struct bxe_softc *sc,
7064                       uint32_t         asserted)
7065 {
7066     int port = SC_PORT(sc);
7067     uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7068                                MISC_REG_AEU_MASK_ATTN_FUNC_0;
7069     uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7070                                         NIG_REG_MASK_INTERRUPT_PORT0;
7071     uint32_t aeu_mask;
7072     uint32_t nig_mask = 0;
7073     uint32_t reg_addr;
7074     uint32_t igu_acked;
7075     uint32_t cnt;
7076 
7077     if (sc->attn_state & asserted) {
7078         BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7079     }
7080 
7081     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7082 
7083     aeu_mask = REG_RD(sc, aeu_addr);
7084 
7085     BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7086           aeu_mask, asserted);
7087 
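    /* drop the newly asserted bits (lower 10 attention lines) from the mask */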
7088     aeu_mask &= ~(asserted & 0x3ff);
7089 
7090     BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7091 
7092     REG_WR(sc, aeu_addr, aeu_mask);
7093 
7094     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7095 
7096     BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7097     sc->attn_state |= asserted;
7098     BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7099 
7100     if (asserted & ATTN_HARD_WIRED_MASK) {
7101         if (asserted & ATTN_NIG_FOR_FUNC) {
7102 
7103 	    bxe_acquire_phy_lock(sc);
7104             /* save nig interrupt mask */
7105             nig_mask = REG_RD(sc, nig_int_mask_addr);
7106 
7107             /* If nig_mask is not set, no need to call the update function */
7108             if (nig_mask) {
7109                 REG_WR(sc, nig_int_mask_addr, 0);
7110 
7111                 bxe_link_attn(sc);
7112             }
7113 
7114             /* handle unicore attn? */
7115         }
7116 
7117         if (asserted & ATTN_SW_TIMER_4_FUNC) {
7118             BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7119         }
7120 
7121         if (asserted & GPIO_2_FUNC) {
7122             BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7123         }
7124 
7125         if (asserted & GPIO_3_FUNC) {
7126             BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7127         }
7128 
7129         if (asserted & GPIO_4_FUNC) {
7130             BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7131         }
7132 
7133         if (port == 0) {
7134             if (asserted & ATTN_GENERAL_ATTN_1) {
7135                 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7136                 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7137             }
7138             if (asserted & ATTN_GENERAL_ATTN_2) {
7139                 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7140                 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7141             }
7142             if (asserted & ATTN_GENERAL_ATTN_3) {
7143                 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7144                 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7145             }
7146         } else {
7147             if (asserted & ATTN_GENERAL_ATTN_4) {
7148                 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7149                 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7150             }
7151             if (asserted & ATTN_GENERAL_ATTN_5) {
7152                 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7153                 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7154             }
7155             if (asserted & ATTN_GENERAL_ATTN_6) {
7156                 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7157                 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7158             }
7159         }
7160     } /* hardwired */
7161 
7162     if (sc->devinfo.int_block == INT_BLOCK_HC) {
7163         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7164     } else {
7165         reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7166     }
7167 
7168     BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7169           asserted,
7170           (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7171     REG_WR(sc, reg_addr, asserted);
7172 
7173     /* now set back the mask */
7174     if (asserted & ATTN_NIG_FOR_FUNC) {
7175         /*
7176          * Verify that IGU ack through BAR was written before restoring
7177          * NIG mask. This loop should exit after 2-3 iterations max.
7178          */
7179         if (sc->devinfo.int_block != INT_BLOCK_HC) {
7180             cnt = 0;
7181 
7182             do {
7183                 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7184             } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7185                      (++cnt < MAX_IGU_ATTN_ACK_TO));
7186 
7187             if (!igu_acked) {
7188                 BLOGE(sc, "Failed to verify IGU ack on time\n");
7189             }
7190 
7191             mb();
7192         }
7193 
7194         REG_WR(sc, nig_int_mask_addr, nig_mask);
7195 
7196 	bxe_release_phy_lock(sc);
7197     }
7198 }
7199 
7200 static void
7201 bxe_print_next_block(struct bxe_softc *sc,
7202                      int              idx,
7203                      const char       *blk)
7204 {
7205     BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7206 }
7207 
7208 static int
7209 bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7210                               uint32_t         sig,
7211                               int              par_num,
7212                               uint8_t          print)
7213 {
7214     uint32_t cur_bit = 0;
7215     int i = 0;
7216 
7217     for (i = 0; sig; i++) {
7218         cur_bit = ((uint32_t)0x1 << i);
7219         if (sig & cur_bit) {
7220             switch (cur_bit) {
7221             case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7222                 if (print)
7223                     bxe_print_next_block(sc, par_num++, "BRB");
7224                 break;
7225             case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7226                 if (print)
7227                     bxe_print_next_block(sc, par_num++, "PARSER");
7228                 break;
7229             case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7230                 if (print)
7231                     bxe_print_next_block(sc, par_num++, "TSDM");
7232                 break;
7233             case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7234                 if (print)
7235                     bxe_print_next_block(sc, par_num++, "SEARCHER");
7236                 break;
7237             case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7238                 if (print)
7239                     bxe_print_next_block(sc, par_num++, "TCM");
7240                 break;
7241             case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7242                 if (print)
7243                     bxe_print_next_block(sc, par_num++, "TSEMI");
7244                 break;
7245             case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7246                 if (print)
7247                     bxe_print_next_block(sc, par_num++, "XPB");
7248                 break;
7249             }
7250 
7251             /* Clear the bit */
7252             sig &= ~cur_bit;
7253         }
7254     }
7255 
7256     return (par_num);
7257 }
7258 
7259 static int
7260 bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7261                               uint32_t         sig,
7262                               int              par_num,
7263                               uint8_t          *global,
7264                               uint8_t          print)
7265 {
7266     int i = 0;
7267     uint32_t cur_bit = 0;
7268     for (i = 0; sig; i++) {
7269         cur_bit = ((uint32_t)0x1 << i);
7270         if (sig & cur_bit) {
7271             switch (cur_bit) {
7272             case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7273                 if (print)
7274                     bxe_print_next_block(sc, par_num++, "PBF");
7275                 break;
7276             case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7277                 if (print)
7278                     bxe_print_next_block(sc, par_num++, "QM");
7279                 break;
7280             case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7281                 if (print)
7282                     bxe_print_next_block(sc, par_num++, "TM");
7283                 break;
7284             case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7285                 if (print)
7286                     bxe_print_next_block(sc, par_num++, "XSDM");
7287                 break;
7288             case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7289                 if (print)
7290                     bxe_print_next_block(sc, par_num++, "XCM");
7291                 break;
7292             case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7293                 if (print)
7294                     bxe_print_next_block(sc, par_num++, "XSEMI");
7295                 break;
7296             case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7297                 if (print)
7298                     bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7299                 break;
7300             case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7301                 if (print)
7302                     bxe_print_next_block(sc, par_num++, "NIG");
7303                 break;
7304             case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7305                 if (print)
7306                     bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7307                 *global = TRUE;
7308                 break;
7309             case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7310                 if (print)
7311                     bxe_print_next_block(sc, par_num++, "DEBUG");
7312                 break;
7313             case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7314                 if (print)
7315                     bxe_print_next_block(sc, par_num++, "USDM");
7316                 break;
7317             case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7318                 if (print)
7319                     bxe_print_next_block(sc, par_num++, "UCM");
7320                 break;
7321             case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7322                 if (print)
7323                     bxe_print_next_block(sc, par_num++, "USEMI");
7324                 break;
7325             case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7326                 if (print)
7327                     bxe_print_next_block(sc, par_num++, "UPB");
7328                 break;
7329             case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7330                 if (print)
7331                     bxe_print_next_block(sc, par_num++, "CSDM");
7332                 break;
7333             case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7334                 if (print)
7335                     bxe_print_next_block(sc, par_num++, "CCM");
7336                 break;
7337             }
7338 
7339             /* Clear the bit */
7340             sig &= ~cur_bit;
7341         }
7342     }
7343 
7344     return (par_num);
7345 }
7346 
7347 static int
7348 bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7349                               uint32_t         sig,
7350                               int              par_num,
7351                               uint8_t          print)
7352 {
7353     uint32_t cur_bit = 0;
7354     int i = 0;
7355 
7356     for (i = 0; sig; i++) {
7357         cur_bit = ((uint32_t)0x1 << i);
7358         if (sig & cur_bit) {
7359             switch (cur_bit) {
7360             case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7361                 if (print)
7362                     bxe_print_next_block(sc, par_num++, "CSEMI");
7363                 break;
7364             case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7365                 if (print)
7366                     bxe_print_next_block(sc, par_num++, "PXP");
7367                 break;
7368             case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7369                 if (print)
7370                     bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7371                 break;
7372             case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7373                 if (print)
7374                     bxe_print_next_block(sc, par_num++, "CFC");
7375                 break;
7376             case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7377                 if (print)
7378                     bxe_print_next_block(sc, par_num++, "CDU");
7379                 break;
7380             case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7381                 if (print)
7382                     bxe_print_next_block(sc, par_num++, "DMAE");
7383                 break;
7384             case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7385                 if (print)
7386                     bxe_print_next_block(sc, par_num++, "IGU");
7387                 break;
7388             case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7389                 if (print)
7390                     bxe_print_next_block(sc, par_num++, "MISC");
7391                 break;
7392             }
7393 
7394             /* Clear the bit */
7395             sig &= ~cur_bit;
7396         }
7397     }
7398 
7399     return (par_num);
7400 }
7401 
7402 static int
7403 bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7404                               uint32_t         sig,
7405                               int              par_num,
7406                               uint8_t          *global,
7407                               uint8_t          print)
7408 {
7409     uint32_t cur_bit = 0;
7410     int i = 0;
7411 
7412     for (i = 0; sig; i++) {
7413         cur_bit = ((uint32_t)0x1 << i);
7414         if (sig & cur_bit) {
7415             switch (cur_bit) {
7416             case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7417                 if (print)
7418                     bxe_print_next_block(sc, par_num++, "MCP ROM");
7419                 *global = TRUE;
7420                 break;
7421             case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7422                 if (print)
7423                     bxe_print_next_block(sc, par_num++,
7424                               "MCP UMP RX");
7425                 *global = TRUE;
7426                 break;
7427             case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7428                 if (print)
7429                     bxe_print_next_block(sc, par_num++,
7430                               "MCP UMP TX");
7431                 *global = TRUE;
7432                 break;
7433             case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7434                 if (print)
7435                     bxe_print_next_block(sc, par_num++,
7436                               "MCP SCPAD");
7437                 *global = TRUE;
7438                 break;
7439             }
7440 
7441             /* Clear the bit */
7442             sig &= ~cur_bit;
7443         }
7444     }
7445 
7446     return (par_num);
7447 }
7448 
7449 static int
7450 bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7451                               uint32_t         sig,
7452                               int              par_num,
7453                               uint8_t          print)
7454 {
7455     uint32_t cur_bit = 0;
7456     int i = 0;
7457 
7458     for (i = 0; sig; i++) {
7459         cur_bit = ((uint32_t)0x1 << i);
7460         if (sig & cur_bit) {
7461             switch (cur_bit) {
7462             case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7463                 if (print)
7464                     bxe_print_next_block(sc, par_num++, "PGLUE_B");
7465                 break;
7466             case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7467                 if (print)
7468                     bxe_print_next_block(sc, par_num++, "ATC");
7469                 break;
7470             }
7471 
7472             /* Clear the bit */
7473             sig &= ~cur_bit;
7474         }
7475     }
7476 
7477     return (par_num);
7478 }
7479 
7480 static uint8_t
7481 bxe_parity_attn(struct bxe_softc *sc,
7482                 uint8_t          *global,
7483                 uint8_t          print,
7484                 uint32_t         *sig)
7485 {
7486     int par_num = 0;
7487 
7488     if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7489         (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7490         (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7491         (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7492         (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7493         BLOGE(sc, "Parity error: HW block parity attention:\n"
7494                   "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7495               (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7496               (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7497               (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7498               (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7499               (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7500 
7501         if (print)
7502             BLOGI(sc, "Parity errors detected in blocks: ");
7503 
7504         par_num =
7505             bxe_check_blocks_with_parity0(sc, sig[0] &
7506                                           HW_PRTY_ASSERT_SET_0,
7507                                           par_num, print);
7508         par_num =
7509             bxe_check_blocks_with_parity1(sc, sig[1] &
7510                                           HW_PRTY_ASSERT_SET_1,
7511                                           par_num, global, print);
7512         par_num =
7513             bxe_check_blocks_with_parity2(sc, sig[2] &
7514                                           HW_PRTY_ASSERT_SET_2,
7515                                           par_num, print);
7516         par_num =
7517             bxe_check_blocks_with_parity3(sc, sig[3] &
7518                                           HW_PRTY_ASSERT_SET_3,
7519                                           par_num, global, print);
7520         par_num =
7521             bxe_check_blocks_with_parity4(sc, sig[4] &
7522                                           HW_PRTY_ASSERT_SET_4,
7523                                           par_num, print);
7524 
7525         if (print)
7526             BLOGI(sc, "\n");
7527 
7528         if (*global == TRUE) {
7529             BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
7530         }
7531 
7532         return (TRUE);
7533     }
7534 
7535     return (FALSE);
7536 }
7537 
7538 static uint8_t
7539 bxe_chk_parity_attn(struct bxe_softc *sc,
7540                     uint8_t          *global,
7541                     uint8_t          print)
7542 {
7543     struct attn_route attn = { {0} };
7544     int port = SC_PORT(sc);
7545 
7546     if(sc->state != BXE_STATE_OPEN)
7547         return FALSE;
7548 
7549     attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7550     attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7551     attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7552     attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7553 
7554     /*
7555      * Since MCP attentions can't be disabled inside the block, we need to
7556      * read AEU registers to see whether they're currently disabled
7557      */
7558     attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7559                                       : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7560                          MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7561                         ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7562 
7563 
7564     if (!CHIP_IS_E1x(sc))
7565         attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7566 
7567     return (bxe_parity_attn(sc, global, print, attn.sig));
7568 }
7569 
7570 static void
7571 bxe_attn_int_deasserted4(struct bxe_softc *sc,
7572                          uint32_t         attn)
7573 {
7574     uint32_t val;
7575     boolean_t err_flg = FALSE;
7576 
7577     if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7578         val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7579         BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7580         err_flg = TRUE;
7581         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7582             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7583         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7584             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7585         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7586             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7587         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7588             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7589         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7590             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7591         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7592             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7593         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7594             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7595         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7596             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7597         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7598             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7599     }
7600 
7601     if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7602         val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7603         BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7604 	err_flg = TRUE;
7605         if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7606             BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7607         if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7608             BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7609         if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7610             BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7611         if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7612             BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7613         if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7614             BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7615         if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7616             BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7617     }
7618 
7619     if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7620                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7621         BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7622               (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7623                                  AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7624 	err_flg = TRUE;
7625     }
7626     if (err_flg) {
7627 	BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
7628 	taskqueue_enqueue_timeout(taskqueue_thread,
7629 	    &sc->sp_err_timeout_task, hz/10);
7630     }
7631 
7632 }
7633 
7634 static void
7635 bxe_e1h_disable(struct bxe_softc *sc)
7636 {
7637     int port = SC_PORT(sc);
7638 
7639     bxe_tx_disable(sc);
7640 
7641     REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7642 }
7643 
7644 static void
7645 bxe_e1h_enable(struct bxe_softc *sc)
7646 {
7647     int port = SC_PORT(sc);
7648 
7649     REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7650 
7651     // XXX bxe_tx_enable(sc);
7652 }
7653 
7654 /*
7655  * called due to MCP event (on pmf):
7656  *   reread new bandwidth configuration
7657  *   configure FW
7658  *   notify others function about the change
7659  */
7660 static void
7661 bxe_config_mf_bw(struct bxe_softc *sc)
7662 {
7663     if (sc->link_vars.link_up) {
7664         bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7665         // XXX bxe_link_sync_notify(sc);
7666     }
7667 
7668     storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7669 }
7670 
7671 static void
7672 bxe_set_mf_bw(struct bxe_softc *sc)
7673 {
7674     bxe_config_mf_bw(sc);
7675     bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7676 }
7677 
7678 static void
7679 bxe_handle_eee_event(struct bxe_softc *sc)
7680 {
7681     BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7682     bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7683 }
7684 
7685 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7686 
7687 static void
7688 bxe_drv_info_ether_stat(struct bxe_softc *sc)
7689 {
7690     struct eth_stats_info *ether_stat =
7691         &sc->sp->drv_info_to_mcp.ether_stat;
7692 
7693     strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7694             ETH_STAT_INFO_VERSION_LEN);
7695 
7696     /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7697     sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7698                                           DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7699                                           ether_stat->mac_local + MAC_PAD,
7700                                           MAC_PAD, ETH_ALEN);
7701 
7702     ether_stat->mtu_size = sc->mtu;
7703 
7704     ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7705     if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7706         ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7707     }
7708 
7709     // XXX ether_stat->feature_flags |= ???;
7710 
7711     ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7712 
7713     ether_stat->txq_size = sc->tx_ring_size;
7714     ether_stat->rxq_size = sc->rx_ring_size;
7715 }
7716 
7717 static void
7718 bxe_handle_drv_info_req(struct bxe_softc *sc)
7719 {
7720     enum drv_info_opcode op_code;
7721     uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7722 
7723     /* if drv_info version supported by MFW doesn't match - send NACK */
7724     if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7725         bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7726         return;
7727     }
7728 
7729     op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7730                DRV_INFO_CONTROL_OP_CODE_SHIFT);
7731 
7732     memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7733 
7734     switch (op_code) {
7735     case ETH_STATS_OPCODE:
7736         bxe_drv_info_ether_stat(sc);
7737         break;
7738     case FCOE_STATS_OPCODE:
7739     case ISCSI_STATS_OPCODE:
7740     default:
7741         /* if op code isn't supported - send NACK */
7742         bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7743         return;
7744     }
7745 
7746     /*
7747      * If we got drv_info attn from MFW then these fields are defined in
7748      * shmem2 for sure
7749      */
7750     SHMEM2_WR(sc, drv_info_host_addr_lo,
7751               U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7752     SHMEM2_WR(sc, drv_info_host_addr_hi,
7753               U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7754 
7755     bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7756 }
7757 
7758 static void
7759 bxe_dcc_event(struct bxe_softc *sc,
7760               uint32_t         dcc_event)
7761 {
7762     BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7763 
7764     if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7765         /*
7766          * This is the only place besides the function initialization
7767          * where the sc->flags can change so it is done without any
7768          * locks
7769          */
7770         if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7771             BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7772             sc->flags |= BXE_MF_FUNC_DIS;
7773             bxe_e1h_disable(sc);
7774         } else {
7775             BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7776             sc->flags &= ~BXE_MF_FUNC_DIS;
7777             bxe_e1h_enable(sc);
7778         }
7779         dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7780     }
7781 
7782     if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7783         bxe_config_mf_bw(sc);
7784         dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7785     }
7786 
7787     /* Report results to MCP */
7788     if (dcc_event)
7789         bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7790     else
7791         bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7792 }
7793 
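/*
 * Called when the MCP signals (via DRV_STATUS_PMF) that this function has
 * become the PMF, i.e. the port management function for this port: mark
 * ourselves PMF and enable NIG attention through the leading/trailing edge
 * registers. As a rough example of the mask computed below, with
 * SC_VN(sc) == 0 the value written is (0xff0f | (1 << 4)) == 0xff1f.
 */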
7794 static void
7795 bxe_pmf_update(struct bxe_softc *sc)
7796 {
7797     int port = SC_PORT(sc);
7798     uint32_t val;
7799 
7800     sc->port.pmf = 1;
7801     BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7802 
7803     /*
7804      * We need the mb() to ensure the ordering between the writing to
7805      * sc->port.pmf here and reading it from the bxe_periodic_task().
7806      */
7807     mb();
7808 
7809     /* queue a periodic task */
7810     // XXX schedule task...
7811 
7812     // XXX bxe_dcbx_pmf_update(sc);
7813 
7814     /* enable nig attention */
7815     val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7816     if (sc->devinfo.int_block == INT_BLOCK_HC) {
7817         REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7818         REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7819     } else if (!CHIP_IS_E1x(sc)) {
7820         REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7821         REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7822     }
7823 
7824     bxe_stats_handle(sc, STATS_EVENT_PMF);
7825 }
7826 
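/*
 * Dump any firmware assert entries found in the X/T/C/U storm internal
 * memories. Each storm keeps an array of up to STORM_ASSERT_ARRAY_SIZE
 * entries of four 32-bit words (read below at offsets 0, 4, 8 and 12);
 * the walk stops at the first entry whose first word still holds
 * COMMON_ASM_INVALID_ASSERT_OPCODE. Returns the number of asserts found.
 */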
7827 static int
7828 bxe_mc_assert(struct bxe_softc *sc)
7829 {
7830     char last_idx;
7831     int i, rc = 0;
7832     uint32_t row0, row1, row2, row3;
7833 
7834     /* XSTORM */
7835     last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7836     if (last_idx)
7837         BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7838 
7839     /* print the asserts */
7840     for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7841 
7842         row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7843         row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7844         row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7845         row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7846 
7847         if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7848             BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7849                   i, row3, row2, row1, row0);
7850             rc++;
7851         } else {
7852             break;
7853         }
7854     }
7855 
7856     /* TSTORM */
7857     last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7858     if (last_idx) {
7859         BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7860     }
7861 
7862     /* print the asserts */
7863     for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7864 
7865         row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7866         row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7867         row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7868         row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7869 
7870         if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7871             BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7872                   i, row3, row2, row1, row0);
7873             rc++;
7874         } else {
7875             break;
7876         }
7877     }
7878 
7879     /* CSTORM */
7880     last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7881     if (last_idx) {
7882         BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7883     }
7884 
7885     /* print the asserts */
7886     for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7887 
7888         row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7889         row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7890         row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7891         row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7892 
7893         if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7894             BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7895                   i, row3, row2, row1, row0);
7896             rc++;
7897         } else {
7898             break;
7899         }
7900     }
7901 
7902     /* USTORM */
7903     last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7904     if (last_idx) {
7905         BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7906     }
7907 
7908     /* print the asserts */
7909     for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7910 
7911         row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7912         row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7913         row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7914         row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7915 
7916         if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7917             BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7918                   i, row3, row2, row1, row0);
7919             rc++;
7920         } else {
7921             break;
7922         }
7923     }
7924 
7925     return (rc);
7926 }
7927 
7928 static void
7929 bxe_attn_int_deasserted3(struct bxe_softc *sc,
7930                          uint32_t         attn)
7931 {
7932     int func = SC_FUNC(sc);
7933     uint32_t val;
7934 
7935     if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7936 
7937         if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7938 
7939             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7940             bxe_read_mf_cfg(sc);
7941             sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7942                 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7943             val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7944 
7945             if (val & DRV_STATUS_DCC_EVENT_MASK)
7946                 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7947 
7948             if (val & DRV_STATUS_SET_MF_BW)
7949                 bxe_set_mf_bw(sc);
7950 
7951             if (val & DRV_STATUS_DRV_INFO_REQ)
7952                 bxe_handle_drv_info_req(sc);
7953 
7954             if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7955                 bxe_pmf_update(sc);
7956 
7957             if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7958                 bxe_handle_eee_event(sc);
7959 
7960             if (sc->link_vars.periodic_flags &
7961                 ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7962                 /* sync with link */
7963 		bxe_acquire_phy_lock(sc);
7964                 sc->link_vars.periodic_flags &=
7965                     ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7966 		bxe_release_phy_lock(sc);
7967                 if (IS_MF(sc))
7968                     ; // XXX bxe_link_sync_notify(sc);
7969                 bxe_link_report(sc);
7970             }
7971 
7972             /*
7973              * Always call it here: bxe_link_report() will
7974              * prevent the link indication duplication.
7975              */
7976             bxe_link_status_update(sc);
7977 
7978         } else if (attn & BXE_MC_ASSERT_BITS) {
7979 
7980             BLOGE(sc, "MC assert!\n");
7981             bxe_mc_assert(sc);
7982             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
7983             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
7984             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
7985             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
7986             bxe_int_disable(sc);
7987             BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT);
7988             taskqueue_enqueue_timeout(taskqueue_thread,
7989                 &sc->sp_err_timeout_task, hz/10);
7990 
7991         } else if (attn & BXE_MCP_ASSERT) {
7992 
7993             BLOGE(sc, "MCP assert!\n");
7994             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
7995             BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT);
7996             taskqueue_enqueue_timeout(taskqueue_thread,
7997                 &sc->sp_err_timeout_task, hz/10);
7998             bxe_int_disable(sc);  /* avoid repetitive assert alerts */
7999 
8000 
8001         } else {
8002             BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8003         }
8004     }
8005 
8006     if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8007         BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8008         if (attn & BXE_GRC_TIMEOUT) {
8009             val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8010             BLOGE(sc, "GRC time-out 0x%08x\n", val);
8011         }
8012         if (attn & BXE_GRC_RSV) {
8013             val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8014             BLOGE(sc, "GRC reserved 0x%08x\n", val);
8015         }
8016         REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8017     }
8018 }
8019 
8020 static void
8021 bxe_attn_int_deasserted2(struct bxe_softc *sc,
8022                          uint32_t         attn)
8023 {
8024     int port = SC_PORT(sc);
8025     int reg_offset;
8026     uint32_t val0, mask0, val1, mask1;
8027     uint32_t val;
8028     boolean_t err_flg = FALSE;
8029 
8030     if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8031         val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8032         BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8033         /* CFC error attention */
8034         if (val & 0x2) {
8035             BLOGE(sc, "FATAL error from CFC\n");
8036 	    err_flg = TRUE;
8037         }
8038     }
8039 
8040     if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8041         val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8042         BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8043         /* RQ_USDMDP_FIFO_OVERFLOW */
8044         if (val & 0x18000) {
8045             BLOGE(sc, "FATAL error from PXP\n");
8046 	    err_flg = TRUE;
8047         }
8048 
8049         if (!CHIP_IS_E1x(sc)) {
8050             val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8051             BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8052 	    err_flg = TRUE;
8053         }
8054     }
8055 
8056 #define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8057 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8058 
8059     if (attn & AEU_PXP2_HW_INT_BIT) {
8060         /*  CQ47854 workaround do not panic on
8061          *  PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8062          */
8063         if (!CHIP_IS_E1x(sc)) {
8064             mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8065             val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8066             mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8067             val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8068             /*
8069              * If the only PXP2_EOP_ERROR_BIT is set in
8070              * STS0 and STS1 - clear it
8071              *
8072              * probably we lose additional attentions between
8073              * STS0 and STS_CLR0, in this case user will not
8074              * be notified about them
8075              */
8076             if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8077                 !(val1 & mask1))
8078                 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8079 
8080             /* print the register, since no one can restore it */
8081             BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8082 
8083             /*
8084              * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8085              * then notify
8086              */
8087             if (val0 & PXP2_EOP_ERROR_BIT) {
8088                 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8089 		err_flg = TRUE;
8090 
8091                 /*
8092                  * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8093                  * set then clear attention from PXP2 block without panic
8094                  */
8095                 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8096                     ((val1 & mask1) == 0))
8097                     attn &= ~AEU_PXP2_HW_INT_BIT;
8098             }
8099         }
8100     }
8101 
8102     if (attn & HW_INTERRUT_ASSERT_SET_2) {
8103         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8104                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8105 
8106         val = REG_RD(sc, reg_offset);
8107         val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8108         REG_WR(sc, reg_offset, val);
8109 
8110         BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8111               (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8112 	err_flg = TRUE;
8113         bxe_panic(sc, ("HW block attention set2\n"));
8114     }
8115     if (err_flg) {
8116         BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
8117         taskqueue_enqueue_timeout(taskqueue_thread,
8118            &sc->sp_err_timeout_task, hz/10);
8119     }
8120 
8121 }
8122 
8123 static void
8124 bxe_attn_int_deasserted1(struct bxe_softc *sc,
8125                          uint32_t         attn)
8126 {
8127     int port = SC_PORT(sc);
8128     int reg_offset;
8129     uint32_t val;
8130     boolean_t err_flg = FALSE;
8131 
8132     if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8133         val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8134         BLOGE(sc, "DB hw attention 0x%08x\n", val);
8135         /* DORQ discard attention */
8136         if (val & 0x2) {
8137             BLOGE(sc, "FATAL error from DORQ\n");
8138 	    err_flg = TRUE;
8139         }
8140     }
8141 
8142     if (attn & HW_INTERRUT_ASSERT_SET_1) {
8143         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8144                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8145 
8146         val = REG_RD(sc, reg_offset);
8147         val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8148         REG_WR(sc, reg_offset, val);
8149 
8150         BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8151               (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8152         err_flg = TRUE;
8153         bxe_panic(sc, ("HW block attention set1\n"));
8154     }
8155     if (err_flg) {
8156         BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8157         taskqueue_enqueue_timeout(taskqueue_thread,
8158            &sc->sp_err_timeout_task, hz/10);
8159     }
8160 
8161 }
8162 
8163 static void
8164 bxe_attn_int_deasserted0(struct bxe_softc *sc,
8165                          uint32_t         attn)
8166 {
8167     int port = SC_PORT(sc);
8168     int reg_offset;
8169     uint32_t val;
8170 
8171     reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8172                           MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8173 
8174     if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8175         val = REG_RD(sc, reg_offset);
8176         val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8177         REG_WR(sc, reg_offset, val);
8178 
8179         BLOGW(sc, "SPIO5 hw attention\n");
8180 
8181         /* Fan failure attention */
8182         elink_hw_reset_phy(&sc->link_params);
8183         bxe_fan_failure(sc);
8184     }
8185 
8186     if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8187 	bxe_acquire_phy_lock(sc);
8188         elink_handle_module_detect_int(&sc->link_params);
8189 	bxe_release_phy_lock(sc);
8190     }
8191 
8192     if (attn & HW_INTERRUT_ASSERT_SET_0) {
8193         val = REG_RD(sc, reg_offset);
8194         val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8195         REG_WR(sc, reg_offset, val);
8196 
8197 
8198         BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8199         taskqueue_enqueue_timeout(taskqueue_thread,
8200            &sc->sp_err_timeout_task, hz/10);
8201 
8202         bxe_panic(sc, ("FATAL HW block attention set0 0x%08x\n",
8203                        (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_0)));
8204     }
8205 }
8206 
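/*
 * Handle the deasserted attention bits: take the ALR hardware lock since
 * the MCP or the other port may try to handle the same event, bail out
 * early if a parity error was detected, read the after-invert AEU
 * registers and hand each signal group to the per-register deasserted
 * handlers above, then clear the attention bits in the HC/IGU and re-open
 * the AEU mask for this port.
 */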
8207 static void
8208 bxe_attn_int_deasserted(struct bxe_softc *sc,
8209                         uint32_t         deasserted)
8210 {
8211     struct attn_route attn;
8212     struct attn_route *group_mask;
8213     int port = SC_PORT(sc);
8214     int index;
8215     uint32_t reg_addr;
8216     uint32_t val;
8217     uint32_t aeu_mask;
8218     uint8_t global = FALSE;
8219 
8220     /*
8221      * Need to take HW lock because MCP or other port might also
8222      * try to handle this event.
8223      */
8224     bxe_acquire_alr(sc);
8225 
8226     if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8227         /* XXX
8228          * In case of parity errors don't handle attentions so that
8229          * other function would "see" parity errors.
8230          */
8231         // XXX schedule a recovery task...
8232         /* disable HW interrupts */
8233         bxe_int_disable(sc);
8234         BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY);
8235         taskqueue_enqueue_timeout(taskqueue_thread,
8236            &sc->sp_err_timeout_task, hz/10);
8237         bxe_release_alr(sc);
8238         return;
8239     }
8240 
8241     attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8242     attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8243     attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8244     attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8245     if (!CHIP_IS_E1x(sc)) {
8246         attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8247     } else {
8248         attn.sig[4] = 0;
8249     }
8250 
8251     BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8252           attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8253 
8254     for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8255         if (deasserted & (1 << index)) {
8256             group_mask = &sc->attn_group[index];
8257 
8258             BLOGD(sc, DBG_INTR,
8259                   "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8260                   group_mask->sig[0], group_mask->sig[1],
8261                   group_mask->sig[2], group_mask->sig[3],
8262                   group_mask->sig[4]);
8263 
8264             bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8265             bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8266             bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8267             bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8268             bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8269         }
8270     }
8271 
8272     bxe_release_alr(sc);
8273 
8274     if (sc->devinfo.int_block == INT_BLOCK_HC) {
8275         reg_addr = (HC_REG_COMMAND_REG + port*32 +
8276                     COMMAND_REG_ATTN_BITS_CLR);
8277     } else {
8278         reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8279     }
8280 
8281     val = ~deasserted;
8282     BLOGD(sc, DBG_INTR,
8283           "about to mask 0x%08x at %s addr 0x%08x\n", val,
8284           (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8285     REG_WR(sc, reg_addr, val);
8286 
8287     if (~sc->attn_state & deasserted) {
8288         BLOGE(sc, "IGU error\n");
8289     }
8290 
8291     reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8292                       MISC_REG_AEU_MASK_ATTN_FUNC_0;
8293 
8294     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8295 
8296     aeu_mask = REG_RD(sc, reg_addr);
8297 
8298     BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8299           aeu_mask, deasserted);
8300     aeu_mask |= (deasserted & 0x3ff);
8301     BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8302 
8303     REG_WR(sc, reg_addr, aeu_mask);
8304     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8305 
8306     BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8307     sc->attn_state &= ~deasserted;
8308     BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8309 }
8310 
8311 static void
8312 bxe_attn_int(struct bxe_softc *sc)
8313 {
8314     /* read local copy of bits */
8315     uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8316     uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8317     uint32_t attn_state = sc->attn_state;
8318 
8319     /* look for changed bits */
8320     uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8321     uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
8322 
8323     BLOGD(sc, DBG_INTR,
8324           "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8325           attn_bits, attn_ack, asserted, deasserted);
8326 
8327     if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8328         BLOGE(sc, "BAD attention state\n");
8329     }
8330 
8331     /* handle bits that were raised */
8332     if (asserted) {
8333         bxe_attn_int_asserted(sc, asserted);
8334     }
8335 
8336     if (deasserted) {
8337         bxe_attn_int_deasserted(sc, deasserted);
8338     }
8339 }
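/*
 * The asserted/deasserted math above can be read per bit, assuming that
 * attn_state tracks the last state the driver acted on:
 *
 *   attn_bits  attn_ack  attn_state | asserted  deasserted
 *   --------------------------------+----------------------
 *       1          0          0     |    1          0       new attention
 *       0          1          1     |    0          1       attention cleared
 *
 * Every other combination contributes nothing to either result for that
 * bit, and the consistency check above flags bits where attn_bits and
 * attn_ack agree with each other but differ from the recorded attn_state.
 */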
8340 
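/*
 * Compare the cached default status block indices against what the chip
 * last wrote and return a bitmask (BXE_DEF_SB_ATT_IDX and/or
 * BXE_DEF_SB_IDX) telling the slowpath task which producers moved.
 */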
8341 static uint16_t
8342 bxe_update_dsb_idx(struct bxe_softc *sc)
8343 {
8344     struct host_sp_status_block *def_sb = sc->def_sb;
8345     uint16_t rc = 0;
8346 
8347     mb(); /* status block is written to by the chip */
8348 
8349     if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8350         sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8351         rc |= BXE_DEF_SB_ATT_IDX;
8352     }
8353 
8354     if (sc->def_idx != def_sb->sp_sb.running_index) {
8355         sc->def_idx = def_sb->sp_sb.running_index;
8356         rc |= BXE_DEF_SB_IDX;
8357     }
8358 
8359     mb();
8360 
8361     return (rc);
8362 }
8363 
8364 static inline struct ecore_queue_sp_obj *
8365 bxe_cid_to_q_obj(struct bxe_softc *sc,
8366                  uint32_t         cid)
8367 {
8368     BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8369     return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8370 }
8371 
8372 static void
8373 bxe_handle_mcast_eqe(struct bxe_softc *sc)
8374 {
8375     struct ecore_mcast_ramrod_params rparam;
8376     int rc;
8377 
8378     memset(&rparam, 0, sizeof(rparam));
8379 
8380     rparam.mcast_obj = &sc->mcast_obj;
8381 
8382     BXE_MCAST_LOCK(sc);
8383 
8384     /* clear pending state for the last command */
8385     sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8386 
8387     /* if there are pending mcast commands - send them */
8388     if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8389         rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8390         if (rc < 0) {
8391             BLOGD(sc, DBG_SP,
8392                 "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8393         }
8394     }
8395 
8396     BXE_MCAST_UNLOCK(sc);
8397 }
8398 
8399 static void
8400 bxe_handle_classification_eqe(struct bxe_softc      *sc,
8401                               union event_ring_elem *elem)
8402 {
8403     unsigned long ramrod_flags = 0;
8404     int rc = 0;
8405     uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8406     struct ecore_vlan_mac_obj *vlan_mac_obj;
8407 
8408     /* always push next commands out, don't wait here */
8409     bit_set(&ramrod_flags, RAMROD_CONT);
8410 
8411     switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8412     case ECORE_FILTER_MAC_PENDING:
8413         BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8414         vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8415         break;
8416 
8417     case ECORE_FILTER_MCAST_PENDING:
8418         BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8419         /*
8420          * This is only relevant for 57710 where multicast MACs are
8421          * configured as unicast MACs using the same ramrod.
8422          */
8423         bxe_handle_mcast_eqe(sc);
8424         return;
8425 
8426     default:
8427         BLOGE(sc, "Unsupported classification command: %d\n",
8428               elem->message.data.eth_event.echo);
8429         return;
8430     }
8431 
8432     rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8433 
8434     if (rc < 0) {
8435         BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8436     } else if (rc > 0) {
8437         BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8438     }
8439 }
8440 
8441 static void
8442 bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8443                        union event_ring_elem *elem)
8444 {
8445     bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8446 
8447     /* send rx_mode command again if was requested */
8448     if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8449                                &sc->sp_state)) {
8450         bxe_set_storm_rx_mode(sc);
8451     }
8452 }
8453 
8454 static void
8455 bxe_update_eq_prod(struct bxe_softc *sc,
8456                    uint16_t         prod)
8457 {
8458     storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8459     wmb(); /* keep prod updates ordered */
8460 }
8461 
8462 static void
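/*
 * Process the slowpath event queue (EQ). Walk the elements between the
 * software and hardware consumers, dispatching each ramrod completion
 * opcode to the matching queue/function/classification handler, then
 * return the consumed entries to eq_spq_left and publish the new
 * producer to the chip via bxe_update_eq_prod().
 */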
8463 bxe_eq_int(struct bxe_softc *sc)
8464 {
8465     uint16_t hw_cons, sw_cons, sw_prod;
8466     union event_ring_elem *elem;
8467     uint8_t echo;
8468     uint32_t cid;
8469     uint8_t opcode;
8470     int spqe_cnt = 0;
8471     struct ecore_queue_sp_obj *q_obj;
8472     struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8473     struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8474 
8475     hw_cons = le16toh(*sc->eq_cons_sb);
8476 
8477     /*
8478      * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256.
8479      * When hw_cons lands on the next-page element we need to bump it by one
8480      * so the loop condition below will be met; the next element is the size
8481      * of a regular element, hence the increment by 1.
8482      */
8483     if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8484         hw_cons++;
8485     }
8486 
8487     /*
8488      * This function never runs in parallel with itself for a specific sc,
8489      * so there is no need for a read memory barrier here.
8490      */
8491     sw_cons = sc->eq_cons;
8492     sw_prod = sc->eq_prod;
8493 
8494     BLOGD(sc, DBG_SP, "EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8495           hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8496 
8497     for (;
8498          sw_cons != hw_cons;
8499          sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8500 
8501         elem = &sc->eq[EQ_DESC(sw_cons)];
8502 
8503         /* elem CID originates from FW, actually LE */
8504         cid = SW_CID(elem->message.data.cfc_del_event.cid);
8505         opcode = elem->message.opcode;
8506 
8507         /* handle eq element */
8508         switch (opcode) {
8509 
8510         case EVENT_RING_OPCODE_STAT_QUERY:
8511             BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8512                   sc->stats_comp++);
8513             /* nothing to do with stats comp */
8514             goto next_spqe;
8515 
8516         case EVENT_RING_OPCODE_CFC_DEL:
8517             /* handle according to cid range */
8518             /* we may want to verify here that the sc state is HALTING */
8519             BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8520             q_obj = bxe_cid_to_q_obj(sc, cid);
8521             if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8522                 break;
8523             }
8524             goto next_spqe;
8525 
8526         case EVENT_RING_OPCODE_STOP_TRAFFIC:
8527             BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8528             if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8529                 break;
8530             }
8531             // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8532             goto next_spqe;
8533 
8534         case EVENT_RING_OPCODE_START_TRAFFIC:
8535             BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8536             if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8537                 break;
8538             }
8539             // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8540             goto next_spqe;
8541 
8542         case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8543             echo = elem->message.data.function_update_event.echo;
8544             if (echo == SWITCH_UPDATE) {
8545                 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8546                 if (f_obj->complete_cmd(sc, f_obj,
8547                                         ECORE_F_CMD_SWITCH_UPDATE)) {
8548                     break;
8549                 }
8550             }
8551             else {
8552                 BLOGD(sc, DBG_SP,
8553                       "AFEX: ramrod completed FUNCTION_UPDATE\n");
8554             }
8555             goto next_spqe;
8556 
8557         case EVENT_RING_OPCODE_FORWARD_SETUP:
8558             q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8559             if (q_obj->complete_cmd(sc, q_obj,
8560                                     ECORE_Q_CMD_SETUP_TX_ONLY)) {
8561                 break;
8562             }
8563             goto next_spqe;
8564 
8565         case EVENT_RING_OPCODE_FUNCTION_START:
8566             BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8567             if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8568                 break;
8569             }
8570             goto next_spqe;
8571 
8572         case EVENT_RING_OPCODE_FUNCTION_STOP:
8573             BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8574             if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8575                 break;
8576             }
8577             goto next_spqe;
8578         }
8579 
8580         switch (opcode | sc->state) {
8581         case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8582         case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8583             cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8584             BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8585             rss_raw->clear_pending(rss_raw);
8586             break;
8587 
8588         case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8589         case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8590         case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8591         case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8592         case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8593         case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8594             BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8595             bxe_handle_classification_eqe(sc, elem);
8596             break;
8597 
8598         case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8599         case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8600         case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8601             BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8602             bxe_handle_mcast_eqe(sc);
8603             break;
8604 
8605         case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8606         case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8607         case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8608             BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8609             bxe_handle_rx_mode_eqe(sc, elem);
8610             break;
8611 
8612         default:
8613             /* unknown event log error and continue */
8614             BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8615                   elem->message.opcode, sc->state);
8616         }
8617 
8618 next_spqe:
8619         spqe_cnt++;
8620     } /* for */
8621 
8622     mb();
8623     atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8624 
8625     sc->eq_cons = sw_cons;
8626     sc->eq_prod = sw_prod;
8627 
8628     /* make sure that above mem writes were issued towards the memory */
8629     wmb();
8630 
8631     /* update producer */
8632     bxe_update_eq_prod(sc, sc->eq_prod);
8633 }
8634 
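/*
 * Slowpath taskqueue handler. bxe_update_dsb_idx() reports which default
 * status block indices moved; attention work is handled by bxe_attn_int()
 * and EQ completions by bxe_eq_int(), after which the default status
 * block is acknowledged and interrupts are re-enabled via bxe_ack_sb().
 */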
8635 static void
8636 bxe_handle_sp_tq(void *context,
8637                  int  pending)
8638 {
8639     struct bxe_softc *sc = (struct bxe_softc *)context;
8640     uint16_t status;
8641 
8642     BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8643 
8644     /* what work needs to be performed? */
8645     status = bxe_update_dsb_idx(sc);
8646 
8647     BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8648 
8649     /* HW attentions */
8650     if (status & BXE_DEF_SB_ATT_IDX) {
8651         BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8652         bxe_attn_int(sc);
8653         status &= ~BXE_DEF_SB_ATT_IDX;
8654     }
8655 
8656     /* SP events: STAT_QUERY and others */
8657     if (status & BXE_DEF_SB_IDX) {
8658         /* handle EQ completions */
8659         BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8660         bxe_eq_int(sc);
8661         bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8662                    le16toh(sc->def_idx), IGU_INT_NOP, 1);
8663         status &= ~BXE_DEF_SB_IDX;
8664     }
8665 
8666     /* if status is non zero then something went wrong */
8667     if (__predict_false(status)) {
8668         BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8669     }
8670 
8671     /* ack status block only if something was actually handled */
8672     bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8673                le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8674 
8675     /*
8676      * Must be called after the EQ processing (since eq leads to sriov
8677      * ramrod completion flows).
8678      * This flow may have been scheduled by the arrival of a ramrod
8679      * completion, or by the sriov code rescheduling itself.
8680      */
8681     // XXX bxe_iov_sp_task(sc);
8682 
8683 }
8684 
8685 static void
8686 bxe_handle_fp_tq(void *context,
8687                  int  pending)
8688 {
8689     struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8690     struct bxe_softc *sc = fp->sc;
8691     /* uint8_t more_tx = FALSE; */
8692     uint8_t more_rx = FALSE;
8693 
8694     BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8695 
8696     /* XXX
8697      * IFF_DRV_RUNNING state can't be checked here since we process
8698      * slowpath events on a client queue during setup. Instead
8699      * we need to add a "process/continue" flag here that the driver
8700      * can use to tell the task here not to do anything.
8701      */
8702 #if 0
8703     if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8704         return;
8705     }
8706 #endif
8707 
8708     /* update the fastpath index */
8709     bxe_update_fp_sb_idx(fp);
8710 
8711     /* XXX add loop here if ever support multiple tx CoS */
8712     /* fp->txdata[cos] */
8713     if (bxe_has_tx_work(fp)) {
8714         BXE_FP_TX_LOCK(fp);
8715         /* more_tx = */ bxe_txeof(sc, fp);
8716         BXE_FP_TX_UNLOCK(fp);
8717     }
8718 
8719     if (bxe_has_rx_work(fp)) {
8720         more_rx = bxe_rxeof(sc, fp);
8721     }
8722 
8723     if (more_rx /*|| more_tx*/) {
8724         /* still more work to do */
8725         taskqueue_enqueue(fp->tq, &fp->tq_task);
8726         return;
8727     }
8728 
8729     bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8730                le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8731 }
8732 
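/*
 * Fastpath work run directly from interrupt context (see bxe_intr_fp()
 * and bxe_intr_legacy() below). This mirrors bxe_handle_fp_tq() above:
 * TX and RX completions are serviced once and, if more RX work remains,
 * the rest is deferred to the per-queue taskqueue instead of looping here.
 */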
8733 static void
8734 bxe_task_fp(struct bxe_fastpath *fp)
8735 {
8736     struct bxe_softc *sc = fp->sc;
8737     /* uint8_t more_tx = FALSE; */
8738     uint8_t more_rx = FALSE;
8739 
8740     BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8741 
8742     /* update the fastpath index */
8743     bxe_update_fp_sb_idx(fp);
8744 
8745     /* XXX add loop here if ever support multiple tx CoS */
8746     /* fp->txdata[cos] */
8747     if (bxe_has_tx_work(fp)) {
8748         BXE_FP_TX_LOCK(fp);
8749         /* more_tx = */ bxe_txeof(sc, fp);
8750         BXE_FP_TX_UNLOCK(fp);
8751     }
8752 
8753     if (bxe_has_rx_work(fp)) {
8754         more_rx = bxe_rxeof(sc, fp);
8755     }
8756 
8757     if (more_rx /*|| more_tx*/) {
8758         /* still more work to do, bail out of this ISR and process later */
8759         taskqueue_enqueue(fp->tq, &fp->tq_task);
8760         return;
8761     }
8762 
8763     /*
8764      * Here we write the fastpath index taken before doing any tx or rx work.
8765      * It is quite possible that other hw events occurred after that point and
8766      * were already processed above. Since we are writing an older fastpath
8767      * index, another interrupt will arrive in which we may end up doing no
8768      * work.
8769      */
8770     bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8771                le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8772 }
8773 
8774 /*
8775  * Legacy interrupt entry point.
8776  *
8777  * Verifies that the controller generated the interrupt and
8778  * then calls a separate routine to handle the various
8779  * interrupt causes: link, RX, and TX.
8780  */
8781 static void
8782 bxe_intr_legacy(void *xsc)
8783 {
8784     struct bxe_softc *sc = (struct bxe_softc *)xsc;
8785     struct bxe_fastpath *fp;
8786     uint16_t status, mask;
8787     int i;
8788 
8789     BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8790 
8791     /*
8792      * 0 for ustorm, 1 for cstorm
8793      * the bits returned from ack_int() are 0-15
8794      * bit 0 = attention status block
8795      * bit 1 = fast path status block
8796      * a mask of 0x2 or more = tx/rx event
8797      * a mask of 1 = slow path event
8798      */
8799 
8800     status = bxe_ack_int(sc);
8801 
8802     /* the interrupt is not for us */
8803     if (__predict_false(status == 0)) {
8804         BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8805         return;
8806     }
8807 
8808     BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8809 
8810     FOR_EACH_ETH_QUEUE(sc, i) {
8811         fp = &sc->fp[i];
8812         mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8813         if (status & mask) {
8814             /* acknowledge and disable further fastpath interrupts */
8815             bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8816             bxe_task_fp(fp);
8817             status &= ~mask;
8818         }
8819     }
8820 
8821     if (__predict_false(status & 0x1)) {
8822         /* acknowledge and disable further slowpath interrupts */
8823         bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8824 
8825         /* schedule slowpath handler */
8826         taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8827 
8828         status &= ~0x1;
8829     }
8830 
8831     if (__predict_false(status)) {
8832         BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8833     }
8834 }
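/*
 * To illustrate the status decoding above: assuming CNIC_SUPPORT(sc) is 0,
 * bit 0 (0x1) of the ack_int() status selects the slowpath/default status
 * block, queue 0 maps to mask 0x2, queue 1 to 0x4, and so on; each matched
 * fastpath bit is cleared after its queue is serviced.
 */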
8835 
8836 /* slowpath interrupt entry point */
8837 static void
8838 bxe_intr_sp(void *xsc)
8839 {
8840     struct bxe_softc *sc = (struct bxe_softc *)xsc;
8841 
8842     BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8843 
8844     /* acknowledge and disable further slowpath interrupts */
8845     bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8846 
8847     /* schedule slowpath handler */
8848     taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8849 }
8850 
8851 /* fastpath interrupt entry point */
8852 static void
8853 bxe_intr_fp(void *xfp)
8854 {
8855     struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8856     struct bxe_softc *sc = fp->sc;
8857 
8858     BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8859 
8860     BLOGD(sc, DBG_INTR,
8861           "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8862           curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8863 
8864     /* acknowledge and disable further fastpath interrupts */
8865     bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8866 
8867     bxe_task_fp(fp);
8868 }
8869 
8870 /* Release all interrupts allocated by the driver. */
8871 static void
8872 bxe_interrupt_free(struct bxe_softc *sc)
8873 {
8874     int i;
8875 
8876     switch (sc->interrupt_mode) {
8877     case INTR_MODE_INTX:
8878         BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8879         if (sc->intr[0].resource != NULL) {
8880             bus_release_resource(sc->dev,
8881                                  SYS_RES_IRQ,
8882                                  sc->intr[0].rid,
8883                                  sc->intr[0].resource);
8884         }
8885         break;
8886     case INTR_MODE_MSI:
8887         for (i = 0; i < sc->intr_count; i++) {
8888             BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8889             if (sc->intr[i].resource && sc->intr[i].rid) {
8890                 bus_release_resource(sc->dev,
8891                                      SYS_RES_IRQ,
8892                                      sc->intr[i].rid,
8893                                      sc->intr[i].resource);
8894             }
8895         }
8896         pci_release_msi(sc->dev);
8897         break;
8898     case INTR_MODE_MSIX:
8899         for (i = 0; i < sc->intr_count; i++) {
8900             BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8901             if (sc->intr[i].resource && sc->intr[i].rid) {
8902                 bus_release_resource(sc->dev,
8903                                      SYS_RES_IRQ,
8904                                      sc->intr[i].rid,
8905                                      sc->intr[i].resource);
8906             }
8907         }
8908         pci_release_msi(sc->dev);
8909         break;
8910     default:
8911         /* nothing to do as initial allocation failed */
8912         break;
8913     }
8914 }
8915 
8916 /*
8917  * This function determines and allocates the appropriate
8918  * interrupt based on system capabilities and user request.
8919  *
8920  * The user may force a particular interrupt mode, specify
8921  * the number of receive queues, specify the method for
8922  * distributing received frames to receive queues, or use
8923  * the default settings which will automatically select the
8924  * best supported combination.  In addition, the OS may or
8925  * may not support certain combinations of these settings.
8926  * This routine attempts to reconcile the settings requested
8927  * by the user with the capabilities available from the system
8928  * to select the optimal combination of features.
8929  *
8930  * Returns:
8931  *   0 = Success, !0 = Failure.
8932  */
8933 static int
8934 bxe_interrupt_alloc(struct bxe_softc *sc)
8935 {
8936     int msix_count = 0;
8937     int msi_count = 0;
8938     int num_requested = 0;
8939     int num_allocated = 0;
8940     int rid, i, j;
8941     int rc;
8942 
8943     /* get the number of available MSI/MSI-X interrupts from the OS */
8944     if (sc->interrupt_mode > 0) {
8945         if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8946             msix_count = pci_msix_count(sc->dev);
8947         }
8948 
8949         if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8950             msi_count = pci_msi_count(sc->dev);
8951         }
8952 
8953         BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8954               msi_count, msix_count);
8955     }
8956 
8957     do { /* try allocating MSI-X interrupt resources (at least 2) */
8958         if (sc->interrupt_mode != INTR_MODE_MSIX) {
8959             break;
8960         }
8961 
8962         if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8963             (msix_count < 2)) {
8964             sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8965             break;
8966         }
8967 
8968         /* ask for the necessary number of MSI-X vectors */
8969         num_requested = min((sc->num_queues + 1), msix_count);
8970 
8971         BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8972 
8973         num_allocated = num_requested;
8974         if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8975             BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8976             sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8977             break;
8978         }
8979 
8980         if (num_allocated < 2) { /* possible? */
8981             BLOGE(sc, "MSI-X allocation less than 2!\n");
8982             sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8983             pci_release_msi(sc->dev);
8984             break;
8985         }
8986 
8987         BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8988               num_requested, num_allocated);
8989 
8990         /* best effort so use the number of vectors allocated to us */
8991         sc->intr_count = num_allocated;
8992         sc->num_queues = num_allocated - 1;
8993 
8994         rid = 1; /* initial resource identifier */
8995 
8996         /* allocate the MSI-X vectors */
8997         for (i = 0; i < num_allocated; i++) {
8998             sc->intr[i].rid = (rid + i);
8999 
9000             if ((sc->intr[i].resource =
9001                  bus_alloc_resource_any(sc->dev,
9002                                         SYS_RES_IRQ,
9003                                         &sc->intr[i].rid,
9004                                         RF_ACTIVE)) == NULL) {
9005                 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
9006                       i, (rid + i));
9007 
9008                 for (j = (i - 1); j >= 0; j--) {
9009                     bus_release_resource(sc->dev,
9010                                          SYS_RES_IRQ,
9011                                          sc->intr[j].rid,
9012                                          sc->intr[j].resource);
9013                 }
9014 
9015                 sc->intr_count = 0;
9016                 sc->num_queues = 0;
9017                 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9018                 pci_release_msi(sc->dev);
9019                 break;
9020             }
9021 
9022             BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9023         }
9024     } while (0);
9025 
9026     do { /* try allocating MSI vector resources (at least 2) */
9027         if (sc->interrupt_mode != INTR_MODE_MSI) {
9028             break;
9029         }
9030 
9031         if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9032             (msi_count < 1)) {
9033             sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9034             break;
9035         }
9036 
9037         /* ask for a single MSI vector */
9038         num_requested = 1;
9039 
9040         BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9041 
9042         num_allocated = num_requested;
9043         if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9044             BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9045             sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9046             break;
9047         }
9048 
9049         if (num_allocated != 1) { /* possible? */
9050             BLOGE(sc, "MSI allocation is not 1!\n");
9051             sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9052             pci_release_msi(sc->dev);
9053             break;
9054         }
9055 
9056         BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9057               num_requested, num_allocated);
9058 
9059         /* best effort so use the number of vectors allocated to us */
9060         sc->intr_count = num_allocated;
9061         sc->num_queues = num_allocated;
9062 
9063         rid = 1; /* initial resource identifier */
9064 
9065         sc->intr[0].rid = rid;
9066 
9067         if ((sc->intr[0].resource =
9068              bus_alloc_resource_any(sc->dev,
9069                                     SYS_RES_IRQ,
9070                                     &sc->intr[0].rid,
9071                                     RF_ACTIVE)) == NULL) {
9072             BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9073             sc->intr_count = 0;
9074             sc->num_queues = 0;
9075             sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9076             pci_release_msi(sc->dev);
9077             break;
9078         }
9079 
9080         BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9081     } while (0);
9082 
9083     do { /* try allocating INTx vector resources */
9084         if (sc->interrupt_mode != INTR_MODE_INTX) {
9085             break;
9086         }
9087 
9088         BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9089 
9090         /* only one vector for INTx */
9091         sc->intr_count = 1;
9092         sc->num_queues = 1;
9093 
9094         rid = 0; /* initial resource identifier */
9095 
9096         sc->intr[0].rid = rid;
9097 
9098         if ((sc->intr[0].resource =
9099              bus_alloc_resource_any(sc->dev,
9100                                     SYS_RES_IRQ,
9101                                     &sc->intr[0].rid,
9102                                     (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9103             BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9104             sc->intr_count = 0;
9105             sc->num_queues = 0;
9106             sc->interrupt_mode = -1; /* Failed! */
9107             break;
9108         }
9109 
9110         BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9111     } while (0);
9112 
9113     if (sc->interrupt_mode == -1) {
9114         BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9115         rc = 1;
9116     } else {
9117         BLOGD(sc, DBG_LOAD,
9118               "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9119               sc->interrupt_mode, sc->num_queues);
9120         rc = 0;
9121     }
9122 
9123     return (rc);
9124 }
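/*
 * The allocation above falls back in the order MSI-X -> MSI -> INTx.
 * With MSI-X, the first vector is reserved for the slowpath so num_queues
 * is set to one less than the vectors actually granted; with MSI or INTx
 * a single vector (and a single queue) is used and the legacy handler
 * services both slowpath and fastpath completions.
 */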
9125 
9126 static void
9127 bxe_interrupt_detach(struct bxe_softc *sc)
9128 {
9129     struct bxe_fastpath *fp;
9130     int i;
9131 
9132     /* release interrupt resources */
9133     for (i = 0; i < sc->intr_count; i++) {
9134         if (sc->intr[i].resource && sc->intr[i].tag) {
9135             BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9136             bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9137         }
9138     }
9139 
9140     for (i = 0; i < sc->num_queues; i++) {
9141         fp = &sc->fp[i];
9142         if (fp->tq) {
9143             taskqueue_drain(fp->tq, &fp->tq_task);
9144             taskqueue_drain(fp->tq, &fp->tx_task);
9145             while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
9146                 NULL))
9147                 taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
9148         }
9149     }
9150 
9151     for (i = 0; i < sc->num_queues; i++) {
9152         fp = &sc->fp[i];
9153         if (fp->tq != NULL) {
9154             taskqueue_free(fp->tq);
9155             fp->tq = NULL;
9156         }
9157     }
9158 
9159     if (sc->sp_tq) {
9160         taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9161         taskqueue_free(sc->sp_tq);
9162         sc->sp_tq = NULL;
9163     }
9164 }
9165 
9166 /*
9167  * Enables interrupts and attach to the ISR.
9168  *
9169  * When using multiple MSI/MSI-X vectors the first vector
9170  * is used for slowpath operations while all remaining
9171  * vectors are used for fastpath operations.  If only a
9172  * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9173  * ISR must look for both slowpath and fastpath completions.
9174  */
9175 static int
9176 bxe_interrupt_attach(struct bxe_softc *sc)
9177 {
9178     struct bxe_fastpath *fp;
9179     int rc = 0;
9180     int i;
9181 
9182     snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9183              "bxe%d_sp_tq", sc->unit);
9184     TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9185     sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9186                                  taskqueue_thread_enqueue,
9187                                  &sc->sp_tq);
9188     taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9189                             "%s", sc->sp_tq_name);
9190 
9191 
9192     for (i = 0; i < sc->num_queues; i++) {
9193         fp = &sc->fp[i];
9194         snprintf(fp->tq_name, sizeof(fp->tq_name),
9195                  "bxe%d_fp%d_tq", sc->unit, i);
9196         NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9197         TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9198         fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9199                                   taskqueue_thread_enqueue,
9200                                   &fp->tq);
9201         TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9202                           bxe_tx_mq_start_deferred, fp);
9203         taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9204                                 "%s", fp->tq_name);
9205     }
9206 
9207     /* setup interrupt handlers */
9208     if (sc->interrupt_mode == INTR_MODE_MSIX) {
9209         BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9210 
9211         /*
9212          * Setup the interrupt handler. Note that we pass the driver instance
9213          * to the interrupt handler for the slowpath.
9214          */
9215         if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9216                                  (INTR_TYPE_NET | INTR_MPSAFE),
9217                                  NULL, bxe_intr_sp, sc,
9218                                  &sc->intr[0].tag)) != 0) {
9219             BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9220             goto bxe_interrupt_attach_exit;
9221         }
9222 
9223         bus_describe_intr(sc->dev, sc->intr[0].resource,
9224                           sc->intr[0].tag, "sp");
9225 
9226         /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9227 
9228         /* initialize the fastpath vectors (note the first was used for sp) */
9229         for (i = 0; i < sc->num_queues; i++) {
9230             fp = &sc->fp[i];
9231             BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9232 
9233             /*
9234              * Setup the interrupt handler. Note that we pass the
9235              * fastpath context to the interrupt handler in this
9236              * case.
9237              */
9238             if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9239                                      (INTR_TYPE_NET | INTR_MPSAFE),
9240                                      NULL, bxe_intr_fp, fp,
9241                                      &sc->intr[i + 1].tag)) != 0) {
9242                 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9243                       (i + 1), rc);
9244                 goto bxe_interrupt_attach_exit;
9245             }
9246 
9247             bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9248                               sc->intr[i + 1].tag, "fp%02d", i);
9249 
9250             /* bind the fastpath instance to a cpu */
9251             if (sc->num_queues > 1) {
9252                 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9253             }
9254 
9255             fp->state = BXE_FP_STATE_IRQ;
9256         }
9257     } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9258         BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9259 
9260         /*
9261          * Setup the interrupt handler. Note that we pass the
9262          * driver instance to the interrupt handler which
9263          * will handle both the slowpath and fastpath.
9264          */
9265         if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9266                                  (INTR_TYPE_NET | INTR_MPSAFE),
9267                                  NULL, bxe_intr_legacy, sc,
9268                                  &sc->intr[0].tag)) != 0) {
9269             BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9270             goto bxe_interrupt_attach_exit;
9271         }
9272 
9273     } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9274         BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9275 
9276         /*
9277          * Setup the interrupt handler. Note that we pass the
9278          * driver instance to the interrupt handler which
9279          * will handle both the slowpath and fastpath.
9280          */
9281         if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9282                                  (INTR_TYPE_NET | INTR_MPSAFE),
9283                                  NULL, bxe_intr_legacy, sc,
9284                                  &sc->intr[0].tag)) != 0) {
9285             BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9286             goto bxe_interrupt_attach_exit;
9287         }
9288     }
9289 
9290 bxe_interrupt_attach_exit:
9291 
9292     return (rc);
9293 }
9294 
9295 static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9296 static int  bxe_init_hw_common(struct bxe_softc *sc);
9297 static int  bxe_init_hw_port(struct bxe_softc *sc);
9298 static int  bxe_init_hw_func(struct bxe_softc *sc);
9299 static void bxe_reset_common(struct bxe_softc *sc);
9300 static void bxe_reset_port(struct bxe_softc *sc);
9301 static void bxe_reset_func(struct bxe_softc *sc);
9302 static int  bxe_gunzip_init(struct bxe_softc *sc);
9303 static void bxe_gunzip_end(struct bxe_softc *sc);
9304 static int  bxe_init_firmware(struct bxe_softc *sc);
9305 static void bxe_release_firmware(struct bxe_softc *sc);
9306 
9307 static struct
9308 ecore_func_sp_drv_ops bxe_func_sp_drv = {
9309     .init_hw_cmn_chip = bxe_init_hw_common_chip,
9310     .init_hw_cmn      = bxe_init_hw_common,
9311     .init_hw_port     = bxe_init_hw_port,
9312     .init_hw_func     = bxe_init_hw_func,
9313 
9314     .reset_hw_cmn     = bxe_reset_common,
9315     .reset_hw_port    = bxe_reset_port,
9316     .reset_hw_func    = bxe_reset_func,
9317 
9318     .gunzip_init      = bxe_gunzip_init,
9319     .gunzip_end       = bxe_gunzip_end,
9320 
9321     .init_fw          = bxe_init_firmware,
9322     .release_fw       = bxe_release_firmware,
9323 };
9324 
9325 static void
9326 bxe_init_func_obj(struct bxe_softc *sc)
9327 {
9328     sc->dmae_ready = 0;
9329 
9330     ecore_init_func_obj(sc,
9331                         &sc->func_obj,
9332                         BXE_SP(sc, func_rdata),
9333                         BXE_SP_MAPPING(sc, func_rdata),
9334                         BXE_SP(sc, func_afex_rdata),
9335                         BXE_SP_MAPPING(sc, func_afex_rdata),
9336                         &bxe_func_sp_drv);
9337 }
9338 
9339 static int
9340 bxe_init_hw(struct bxe_softc *sc,
9341             uint32_t         load_code)
9342 {
9343     struct ecore_func_state_params func_params = { NULL };
9344     int rc;
9345 
9346     /* prepare the parameters for function state transitions */
9347     bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9348 
9349     func_params.f_obj = &sc->func_obj;
9350     func_params.cmd = ECORE_F_CMD_HW_INIT;
9351 
9352     func_params.params.hw_init.load_phase = load_code;
9353 
9354     /*
9355      * Via a plethora of function pointers, we will eventually reach
9356      * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9357      */
9358     rc = ecore_func_state_change(sc, &func_params);
9359 
9360     return (rc);
9361 }
9362 
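/*
 * Fill 'len' bytes of device memory starting at BAR offset 'addr' with the
 * value 'fill', using 32-bit register writes when both the address and the
 * length are dword aligned and falling back to byte writes otherwise.
 */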
9363 static void
9364 bxe_fill(struct bxe_softc *sc,
9365          uint32_t         addr,
9366          int              fill,
9367          uint32_t         len)
9368 {
9369     uint32_t i;
9370 
9371     if (!(len % 4) && !(addr % 4)) {
9372         for (i = 0; i < len; i += 4) {
9373             REG_WR(sc, (addr + i), fill);
9374         }
9375     } else {
9376         for (i = 0; i < len; i++) {
9377             REG_WR8(sc, (addr + i), fill);
9378         }
9379     }
9380 }
9381 
9382 /* writes FP SP data to FW - data_size in dwords */
9383 static void
9384 bxe_wr_fp_sb_data(struct bxe_softc *sc,
9385                   int              fw_sb_id,
9386                   uint32_t         *sb_data_p,
9387                   uint32_t         data_size)
9388 {
9389     int index;
9390 
9391     for (index = 0; index < data_size; index++) {
9392         REG_WR(sc,
9393                (BAR_CSTRORM_INTMEM +
9394                 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9395                 (sizeof(uint32_t) * index)),
9396                *(sb_data_p + index));
9397     }
9398 }
9399 
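/*
 * Disable the fastpath status block 'fw_sb_id': write a zeroed, SB_DISABLED
 * data structure to the CSTORM and clear the status block and sync block
 * areas in internal memory.
 */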
9400 static void
9401 bxe_zero_fp_sb(struct bxe_softc *sc,
9402                int              fw_sb_id)
9403 {
9404     struct hc_status_block_data_e2 sb_data_e2;
9405     struct hc_status_block_data_e1x sb_data_e1x;
9406     uint32_t *sb_data_p;
9407     uint32_t data_size = 0;
9408 
9409     if (!CHIP_IS_E1x(sc)) {
9410         memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9411         sb_data_e2.common.state = SB_DISABLED;
9412         sb_data_e2.common.p_func.vf_valid = FALSE;
9413         sb_data_p = (uint32_t *)&sb_data_e2;
9414         data_size = (sizeof(struct hc_status_block_data_e2) /
9415                      sizeof(uint32_t));
9416     } else {
9417         memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9418         sb_data_e1x.common.state = SB_DISABLED;
9419         sb_data_e1x.common.p_func.vf_valid = FALSE;
9420         sb_data_p = (uint32_t *)&sb_data_e1x;
9421         data_size = (sizeof(struct hc_status_block_data_e1x) /
9422                      sizeof(uint32_t));
9423     }
9424 
9425     bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9426 
9427     bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9428              0, CSTORM_STATUS_BLOCK_SIZE);
9429     bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9430              0, CSTORM_SYNC_BLOCK_SIZE);
9431 }
9432 
9433 static void
9434 bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9435                   struct hc_sp_status_block_data *sp_sb_data)
9436 {
9437     int i;
9438 
9439     for (i = 0;
9440          i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9441          i++) {
9442         REG_WR(sc,
9443                (BAR_CSTRORM_INTMEM +
9444                 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9445                 (i * sizeof(uint32_t))),
9446                *((uint32_t *)sp_sb_data + i));
9447     }
9448 }
9449 
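/*
 * Disable this function's slowpath status block and clear its CSTORM
 * status block and sync block areas.
 */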
9450 static void
9451 bxe_zero_sp_sb(struct bxe_softc *sc)
9452 {
9453     struct hc_sp_status_block_data sp_sb_data;
9454 
9455     memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9456 
9457     sp_sb_data.state           = SB_DISABLED;
9458     sp_sb_data.p_func.vf_valid = FALSE;
9459 
9460     bxe_wr_sp_sb_data(sc, &sp_sb_data);
9461 
9462     bxe_fill(sc,
9463              (BAR_CSTRORM_INTMEM +
9464               CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9465               0, CSTORM_SP_STATUS_BLOCK_SIZE);
9466     bxe_fill(sc,
9467              (BAR_CSTRORM_INTMEM +
9468               CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9469               0, CSTORM_SP_SYNC_BLOCK_SIZE);
9470 }
9471 
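/*
 * Associate a status block state machine with the given IGU SB id and
 * segment id and initialize its timer fields to their maximum values.
 */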
9472 static void
9473 bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9474                              int                       igu_sb_id,
9475                              int                       igu_seg_id)
9476 {
9477     hc_sm->igu_sb_id      = igu_sb_id;
9478     hc_sm->igu_seg_id     = igu_seg_id;
9479     hc_sm->timer_value    = 0xFF;
9480     hc_sm->time_to_expire = 0xFFFFFFFF;
9481 }
9482 
9483 static void
9484 bxe_map_sb_state_machines(struct hc_index_data *index_data)
9485 {
9486     /* zero out state machine indices */
9487 
9488     /* rx indices */
9489     index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9490 
9491     /* tx indices */
9492     index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9493     index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9494     index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9495     index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9496 
9497     /* map indices */
9498 
9499     /* rx indices */
9500     index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9501         (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9502 
9503     /* tx indices */
9504     index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9505         (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9506     index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9507         (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9508     index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9509         (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9510     index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9511         (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9512 }
9513 
9514 static void
9515 bxe_init_sb(struct bxe_softc *sc,
9516             bus_addr_t       busaddr,
9517             int              vfid,
9518             uint8_t          vf_valid,
9519             int              fw_sb_id,
9520             int              igu_sb_id)
9521 {
9522     struct hc_status_block_data_e2  sb_data_e2;
9523     struct hc_status_block_data_e1x sb_data_e1x;
9524     struct hc_status_block_sm       *hc_sm_p;
9525     uint32_t *sb_data_p;
9526     int igu_seg_id;
9527     int data_size;
9528 
9529     if (CHIP_INT_MODE_IS_BC(sc)) {
9530         igu_seg_id = HC_SEG_ACCESS_NORM;
9531     } else {
9532         igu_seg_id = IGU_SEG_ACCESS_NORM;
9533     }
9534 
9535     bxe_zero_fp_sb(sc, fw_sb_id);
9536 
9537     if (!CHIP_IS_E1x(sc)) {
9538         memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9539         sb_data_e2.common.state = SB_ENABLED;
9540         sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9541         sb_data_e2.common.p_func.vf_id = vfid;
9542         sb_data_e2.common.p_func.vf_valid = vf_valid;
9543         sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9544         sb_data_e2.common.same_igu_sb_1b = TRUE;
9545         sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9546         sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9547         hc_sm_p = sb_data_e2.common.state_machine;
9548         sb_data_p = (uint32_t *)&sb_data_e2;
9549         data_size = (sizeof(struct hc_status_block_data_e2) /
9550                      sizeof(uint32_t));
9551         bxe_map_sb_state_machines(sb_data_e2.index_data);
9552     } else {
9553         memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9554         sb_data_e1x.common.state = SB_ENABLED;
9555         sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9556         sb_data_e1x.common.p_func.vf_id = 0xff;
9557         sb_data_e1x.common.p_func.vf_valid = FALSE;
9558         sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9559         sb_data_e1x.common.same_igu_sb_1b = TRUE;
9560         sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9561         sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9562         hc_sm_p = sb_data_e1x.common.state_machine;
9563         sb_data_p = (uint32_t *)&sb_data_e1x;
9564         data_size = (sizeof(struct hc_status_block_data_e1x) /
9565                      sizeof(uint32_t));
9566         bxe_map_sb_state_machines(sb_data_e1x.index_data);
9567     }
9568 
9569     bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9570     bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9571 
9572     BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9573 
9574     /* write indices to HW - PCI guarantees endianity of regpairs */
9575     bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9576 }
9577 
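/*
 * Return the client queue zone id for a fastpath: on E1x it is the client id
 * offset by the port's client range, on later chips it equals the client id.
 */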
9578 static inline uint8_t
9579 bxe_fp_qzone_id(struct bxe_fastpath *fp)
9580 {
9581     if (CHIP_IS_E1x(fp->sc)) {
9582         return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9583     } else {
9584         return (fp->cl_id);
9585     }
9586 }
9587 
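/*
 * Return the USTORM internal memory offset where this fastpath's RX
 * producers (BD/CQE/SGE) are written, which differs between E1x and
 * later chips (see bxe_update_rx_prod()).
 */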
9588 static inline uint32_t
9589 bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9590                            struct bxe_fastpath *fp)
9591 {
9592     uint32_t offset = BAR_USTRORM_INTMEM;
9593 
9594     if (!CHIP_IS_E1x(sc)) {
9595         offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9596     } else {
9597         offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9598     }
9599 
9600     return (offset);
9601 }
9602 
9603 static void
9604 bxe_init_eth_fp(struct bxe_softc *sc,
9605                 int              idx)
9606 {
9607     struct bxe_fastpath *fp = &sc->fp[idx];
9608     uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9609     unsigned long q_type = 0;
9610     int cos;
9611 
9612     fp->sc    = sc;
9613     fp->index = idx;
9614 
9615     fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9616     fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9617 
9618     fp->cl_id = (CHIP_IS_E1x(sc)) ?
9619                     (SC_L_ID(sc) + idx) :
9620                     /* want client ID same as IGU SB ID for non-E1 */
9621                     fp->igu_sb_id;
9622     fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9623 
9624     /* setup sb indices */
9625     if (!CHIP_IS_E1x(sc)) {
9626         fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9627         fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9628     } else {
9629         fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9630         fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9631     }
9632 
9633     /* init shortcut */
9634     fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9635 
9636     fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9637 
9638     /*
9639      * XXX If multiple CoS is ever supported then each fastpath structure
9640      * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9641      */
9642     for (cos = 0; cos < sc->max_cos; cos++) {
9643         cids[cos] = idx;
9644     }
9645     fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9646 
9647     /* nothing more for a VF to do */
9648     if (IS_VF(sc)) {
9649         return;
9650     }
9651 
9652     bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9653                 fp->fw_sb_id, fp->igu_sb_id);
9654 
9655     bxe_update_fp_sb_idx(fp);
9656 
9657     /* Configure Queue State object */
9658     bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9659     bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9660 
9661     ecore_init_queue_obj(sc,
9662                          &sc->sp_objs[idx].q_obj,
9663                          fp->cl_id,
9664                          cids,
9665                          sc->max_cos,
9666                          SC_FUNC(sc),
9667                          BXE_SP(sc, q_rdata),
9668                          BXE_SP_MAPPING(sc, q_rdata),
9669                          q_type);
9670 
9671     /* configure classification DBs */
9672     ecore_init_mac_obj(sc,
9673                        &sc->sp_objs[idx].mac_obj,
9674                        fp->cl_id,
9675                        idx,
9676                        SC_FUNC(sc),
9677                        BXE_SP(sc, mac_rdata),
9678                        BXE_SP_MAPPING(sc, mac_rdata),
9679                        ECORE_FILTER_MAC_PENDING,
9680                        &sc->sp_state,
9681                        ECORE_OBJ_TYPE_RX_TX,
9682                        &sc->macs_pool);
9683 
9684     BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9685           idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9686 }
9687 
9688 static inline void
9689 bxe_update_rx_prod(struct bxe_softc    *sc,
9690                    struct bxe_fastpath *fp,
9691                    uint16_t            rx_bd_prod,
9692                    uint16_t            rx_cq_prod,
9693                    uint16_t            rx_sge_prod)
9694 {
9695     struct ustorm_eth_rx_producers rx_prods = { 0 };
9696     uint32_t i;
9697 
9698     /* update producers */
9699     rx_prods.bd_prod  = rx_bd_prod;
9700     rx_prods.cqe_prod = rx_cq_prod;
9701     rx_prods.sge_prod = rx_sge_prod;
9702 
9703     /*
9704      * Make sure that the BD and SGE data is updated before updating the
9705      * producers since FW might read the BD/SGE right after the producer
9706      * is updated.
9707      * This is only applicable for weak-ordered memory model archs such
9708      * as IA-64. The following barrier is also mandatory since FW will
9709      * as IA-64. The following barrier is also mandatory since the FW
9710      * assumes BDs must have buffers.
9711     wmb();
9712 
9713     for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9714         REG_WR(sc,
9715                (fp->ustorm_rx_prods_offset + (i * 4)),
9716                ((uint32_t *)&rx_prods)[i]);
9717     }
9718 
9719     wmb(); /* keep prod updates ordered */
9720 
9721     BLOGD(sc, DBG_RX,
9722           "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9723           fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9724 }
9725 
9726 static void
9727 bxe_init_rx_rings(struct bxe_softc *sc)
9728 {
9729     struct bxe_fastpath *fp;
9730     int i;
9731 
9732     for (i = 0; i < sc->num_queues; i++) {
9733         fp = &sc->fp[i];
9734 
9735         fp->rx_bd_cons = 0;
9736 
9737         /*
9738          * Activate the BD ring...
9739          * Warning, this will generate an interrupt (to the TSTORM)
9740          * so this can only be done after the chip is initialized
9741          */
9742         bxe_update_rx_prod(sc, fp,
9743                            fp->rx_bd_prod,
9744                            fp->rx_cq_prod,
9745                            fp->rx_sge_prod);
9746 
9747         if (i != 0) {
9748             continue;
9749         }
9750 
9751         if (CHIP_IS_E1(sc)) {
9752             REG_WR(sc,
9753                    (BAR_USTRORM_INTMEM +
9754                     USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9755                    U64_LO(fp->rcq_dma.paddr));
9756             REG_WR(sc,
9757                    (BAR_USTRORM_INTMEM +
9758                     USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9759                    U64_HI(fp->rcq_dma.paddr));
9760         }
9761     }
9762 }
9763 
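/* Reset the TX doorbell data and producer/consumer indices for one fastpath. */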
9764 static void
9765 bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9766 {
9767     SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9768     fp->tx_db.data.zero_fill1 = 0;
9769     fp->tx_db.data.prod = 0;
9770 
9771     fp->tx_pkt_prod = 0;
9772     fp->tx_pkt_cons = 0;
9773     fp->tx_bd_prod = 0;
9774     fp->tx_bd_cons = 0;
9775     fp->eth_q_stats.tx_pkts = 0;
9776 }
9777 
9778 static inline void
9779 bxe_init_tx_rings(struct bxe_softc *sc)
9780 {
9781     int i;
9782 
9783     for (i = 0; i < sc->num_queues; i++) {
9784         bxe_init_tx_ring_one(&sc->fp[i]);
9785     }
9786 }
9787 
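/*
 * Initialize the default (slowpath) status block: set up the attention
 * status block, cache the AEU enable bits for each dynamic attention group,
 * program the attention message address, and enable the slowpath status
 * block in the CSTORM.
 */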
9788 static void
9789 bxe_init_def_sb(struct bxe_softc *sc)
9790 {
9791     struct host_sp_status_block *def_sb = sc->def_sb;
9792     bus_addr_t mapping = sc->def_sb_dma.paddr;
9793     int igu_sp_sb_index;
9794     int igu_seg_id;
9795     int port = SC_PORT(sc);
9796     int func = SC_FUNC(sc);
9797     int reg_offset, reg_offset_en5;
9798     uint64_t section;
9799     int index, sindex;
9800     struct hc_sp_status_block_data sp_sb_data;
9801 
9802     memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9803 
9804     if (CHIP_INT_MODE_IS_BC(sc)) {
9805         igu_sp_sb_index = DEF_SB_IGU_ID;
9806         igu_seg_id = HC_SEG_ACCESS_DEF;
9807     } else {
9808         igu_sp_sb_index = sc->igu_dsb_id;
9809         igu_seg_id = IGU_SEG_ACCESS_DEF;
9810     }
9811 
9812     /* attentions */
9813     section = ((uint64_t)mapping +
9814                offsetof(struct host_sp_status_block, atten_status_block));
9815     def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9816     sc->attn_state = 0;
9817 
9818     reg_offset = (port) ?
9819                      MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9820                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9821     reg_offset_en5 = (port) ?
9822                          MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9823                          MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9824 
9825     for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9826         /* take care of sig[0]..sig[4] */
9827         for (sindex = 0; sindex < 4; sindex++) {
9828             sc->attn_group[index].sig[sindex] =
9829                 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9830         }
9831 
9832         if (!CHIP_IS_E1x(sc)) {
9833             /*
9834              * enable5 is separate from the rest of the registers,
9835              * and the address skip is 4 and not 16 between the
9836              * different groups
9837              */
9838             sc->attn_group[index].sig[4] =
9839                 REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9840         } else {
9841             sc->attn_group[index].sig[4] = 0;
9842         }
9843     }
9844 
9845     if (sc->devinfo.int_block == INT_BLOCK_HC) {
9846         reg_offset = (port) ?
9847                          HC_REG_ATTN_MSG1_ADDR_L :
9848                          HC_REG_ATTN_MSG0_ADDR_L;
9849         REG_WR(sc, reg_offset, U64_LO(section));
9850         REG_WR(sc, (reg_offset + 4), U64_HI(section));
9851     } else if (!CHIP_IS_E1x(sc)) {
9852         REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9853         REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9854     }
9855 
9856     section = ((uint64_t)mapping +
9857                offsetof(struct host_sp_status_block, sp_sb));
9858 
9859     bxe_zero_sp_sb(sc);
9860 
9861     /* PCI guarantees endianity of regpair */
9862     sp_sb_data.state           = SB_ENABLED;
9863     sp_sb_data.host_sb_addr.lo = U64_LO(section);
9864     sp_sb_data.host_sb_addr.hi = U64_HI(section);
9865     sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9866     sp_sb_data.igu_seg_id      = igu_seg_id;
9867     sp_sb_data.p_func.pf_id    = func;
9868     sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9869     sp_sb_data.p_func.vf_id    = 0xff;
9870 
9871     bxe_wr_sp_sb_data(sc, &sp_sb_data);
9872 
9873     bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9874 }
9875 
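/* Reset the slowpath queue (SPQ) producer state and pending command credit. */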
9876 static void
9877 bxe_init_sp_ring(struct bxe_softc *sc)
9878 {
9879     atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9880     sc->spq_prod_idx = 0;
9881     sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9882     sc->spq_prod_bd = sc->spq;
9883     sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9884 }
9885 
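/*
 * Chain the event queue pages into a ring by pointing the last descriptor
 * of each page at the next page, then reset the EQ producer/consumer
 * indices and the available EQ credit.
 */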
9886 static void
9887 bxe_init_eq_ring(struct bxe_softc *sc)
9888 {
9889     union event_ring_elem *elem;
9890     int i;
9891 
9892     for (i = 1; i <= NUM_EQ_PAGES; i++) {
9893         elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9894 
9895         elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9896                                                  BCM_PAGE_SIZE *
9897                                                  (i % NUM_EQ_PAGES)));
9898         elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9899                                                  BCM_PAGE_SIZE *
9900                                                  (i % NUM_EQ_PAGES)));
9901     }
9902 
9903     sc->eq_cons    = 0;
9904     sc->eq_prod    = NUM_EQ_DESC;
9905     sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9906 
9907     atomic_store_rel_long(&sc->eq_spq_left,
9908                           (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9909                                NUM_EQ_DESC) - 1));
9910 }
9911 
9912 static void
9913 bxe_init_internal_common(struct bxe_softc *sc)
9914 {
9915     int i;
9916 
9917     /*
9918      * Zero this manually as its initialization is currently missing
9919      * in the initTool.
9920      */
9921     for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9922         REG_WR(sc,
9923                (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9924                0);
9925     }
9926 
9927     if (!CHIP_IS_E1x(sc)) {
9928         REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9929                 CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9930     }
9931 }
9932 
9933 static void
9934 bxe_init_internal(struct bxe_softc *sc,
9935                   uint32_t         load_code)
9936 {
9937     switch (load_code) {
9938     case FW_MSG_CODE_DRV_LOAD_COMMON:
9939     case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9940         bxe_init_internal_common(sc);
9941         /* no break */
9942 
9943     case FW_MSG_CODE_DRV_LOAD_PORT:
9944         /* nothing to do */
9945         /* no break */
9946 
9947     case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9948         /* internal memory per function is initialized inside bxe_pf_init */
9949         break;
9950 
9951     default:
9952         BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9953         break;
9954     }
9955 }
9956 
9957 static void
9958 storm_memset_func_cfg(struct bxe_softc                         *sc,
9959                       struct tstorm_eth_function_common_config *tcfg,
9960                       uint16_t                                  abs_fid)
9961 {
9962     uint32_t addr;
9963     size_t size;
9964 
9965     addr = (BAR_TSTRORM_INTMEM +
9966             TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9967     size = sizeof(struct tstorm_eth_function_common_config);
9968     ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9969 }
9970 
9971 static void
9972 bxe_func_init(struct bxe_softc            *sc,
9973               struct bxe_func_init_params *p)
9974 {
9975     struct tstorm_eth_function_common_config tcfg = { 0 };
9976 
9977     if (CHIP_IS_E1x(sc)) {
9978         storm_memset_func_cfg(sc, &tcfg, p->func_id);
9979     }
9980 
9981     /* Enable the function in the FW */
9982     storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9983     storm_memset_func_en(sc, p->func_id, 1);
9984 
9985     /* spq */
9986     if (p->func_flgs & FUNC_FLG_SPQ) {
9987         storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9988         REG_WR(sc,
9989                (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9990                p->spq_prod);
9991     }
9992 }
9993 
9994 /*
9995  * Calculates the sum of vn_min_rates.
9996  * It's needed for further normalizing of the min_rates.
9997  * Returns:
9998  *   sum of vn_min_rates.
9999  *     or
10000  *   0 - if all the min_rates are 0.
10001  * In the latter case the fairness algorithm should be deactivated.
10002  * If not all min rates are zero then those that are zero will be set to the default minimum rate.
10003  */
10004 static void
10005 bxe_calc_vn_min(struct bxe_softc       *sc,
10006                 struct cmng_init_input *input)
10007 {
10008     uint32_t vn_cfg;
10009     uint32_t vn_min_rate;
10010     int all_zero = 1;
10011     int vn;
10012 
10013     for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10014         vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10015         vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10016                         FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10017 
10018         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10019             /* skip hidden VNs */
10020             vn_min_rate = 0;
10021         } else if (!vn_min_rate) {
10022             /* If min rate is zero - set it to 100 */
10023             vn_min_rate = DEF_MIN_RATE;
10024         } else {
10025             all_zero = 0;
10026         }
10027 
10028         input->vnic_min_rate[vn] = vn_min_rate;
10029     }
10030 
10031     /* if ETS or all min rates are zeros - disable fairness */
10032     if (BXE_IS_ETS_ENABLED(sc)) {
10033         input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10034         BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10035     } else if (all_zero) {
10036         input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10037         BLOGD(sc, DBG_LOAD,
10038               "Fairness disabled (all MIN values are zeroes)\n");
10039     } else {
10040         input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10041     }
10042 }
10043 
10044 static inline uint16_t
10045 bxe_extract_max_cfg(struct bxe_softc *sc,
10046                     uint32_t         mf_cfg)
10047 {
10048     uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10049                         FUNC_MF_CFG_MAX_BW_SHIFT);
10050 
10051     if (!max_cfg) {
10052         BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10053         max_cfg = 100;
10054     }
10055 
10056     return (max_cfg);
10057 }
10058 
10059 static void
10060 bxe_calc_vn_max(struct bxe_softc       *sc,
10061                 int                    vn,
10062                 struct cmng_init_input *input)
10063 {
10064     uint16_t vn_max_rate;
10065     uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10066     uint32_t max_cfg;
10067 
10068     if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10069         vn_max_rate = 0;
10070     } else {
10071         max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10072 
10073         if (IS_MF_SI(sc)) {
10074             /* max_cfg in percents of linkspeed */
10075             vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10076         } else { /* SD modes */
10077             /* max_cfg is absolute in 100Mb units */
10078             vn_max_rate = (max_cfg * 100);
10079         }
10080     }
10081 
10082     BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10083 
10084     input->vnic_max_rate[vn] = vn_max_rate;
10085 }
10086 
10087 static void
10088 bxe_cmng_fns_init(struct bxe_softc *sc,
10089                   uint8_t          read_cfg,
10090                   uint8_t          cmng_type)
10091 {
10092     struct cmng_init_input input;
10093     int vn;
10094 
10095     memset(&input, 0, sizeof(struct cmng_init_input));
10096 
10097     input.port_rate = sc->link_vars.line_speed;
10098 
10099     if (cmng_type == CMNG_FNS_MINMAX) {
10100         /* read mf conf from shmem */
10101         if (read_cfg) {
10102             bxe_read_mf_cfg(sc);
10103         }
10104 
10105         /* get VN min rate and enable fairness if not 0 */
10106         bxe_calc_vn_min(sc, &input);
10107 
10108         /* get VN max rate */
10109         if (sc->port.pmf) {
10110             for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10111                 bxe_calc_vn_max(sc, vn, &input);
10112             }
10113         }
10114 
10115         /* always enable rate shaping and fairness */
10116         input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10117 
10118         ecore_init_cmng(&input, &sc->cmng);
10119         return;
10120     }
10121 
10122     /* rate shaping and fairness are disabled */
10123     BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10124 }
10125 
10126 static int
10127 bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10128 {
10129     if (CHIP_REV_IS_SLOW(sc)) {
10130         return (CMNG_FNS_NONE);
10131     }
10132 
10133     if (IS_MF(sc)) {
10134         return (CMNG_FNS_MINMAX);
10135     }
10136 
10137     return (CMNG_FNS_NONE);
10138 }
10139 
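/*
 * Write the per-port congestion management configuration and the per-VN
 * rate shaping and fairness variables into XSTORM internal memory.
 */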
10140 static void
10141 storm_memset_cmng(struct bxe_softc *sc,
10142                   struct cmng_init *cmng,
10143                   uint8_t          port)
10144 {
10145     int vn;
10146     int func;
10147     uint32_t addr;
10148     size_t size;
10149 
10150     addr = (BAR_XSTRORM_INTMEM +
10151             XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10152     size = sizeof(struct cmng_struct_per_port);
10153     ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10154 
10155     for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10156         func = func_by_vn(sc, vn);
10157 
10158         addr = (BAR_XSTRORM_INTMEM +
10159                 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10160         size = sizeof(struct rate_shaping_vars_per_vn);
10161         ecore_storm_memset_struct(sc, addr, size,
10162                                   (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10163 
10164         addr = (BAR_XSTRORM_INTMEM +
10165                 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10166         size = sizeof(struct fairness_vars_per_vn);
10167         ecore_storm_memset_struct(sc, addr, size,
10168                                   (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10169     }
10170 }
10171 
10172 static void
10173 bxe_pf_init(struct bxe_softc *sc)
10174 {
10175     struct bxe_func_init_params func_init = { 0 };
10176     struct event_ring_data eq_data = { { 0 } };
10177     uint16_t flags;
10178 
10179     if (!CHIP_IS_E1x(sc)) {
10180         /* reset IGU PF statistics: MSIX + ATTN */
10181         /* PF */
10182         REG_WR(sc,
10183                (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10184                 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10185                 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10186                0);
10187         /* ATTN */
10188         REG_WR(sc,
10189                (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10190                 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10191                 (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10192                 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10193                0);
10194     }
10195 
10196     /* function setup flags */
10197     flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10198 
10199     /*
10200      * This flag is relevant for E1x only.
10201      * E2 doesn't have a TPA configuration at the function level.
10202      */
10203     flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10204 
10205     func_init.func_flgs = flags;
10206     func_init.pf_id     = SC_FUNC(sc);
10207     func_init.func_id   = SC_FUNC(sc);
10208     func_init.spq_map   = sc->spq_dma.paddr;
10209     func_init.spq_prod  = sc->spq_prod_idx;
10210 
10211     bxe_func_init(sc, &func_init);
10212 
10213     memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10214 
10215     /*
10216      * Congestion management values depend on the link rate.
10217      * There is no active link so initial link rate is set to 10Gbps.
10218      * When the link comes up the congestion management values are
10219      * re-calculated according to the actual link rate.
10220      */
10221     sc->link_vars.line_speed = SPEED_10000;
10222     bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10223 
10224     /* Only the PMF sets the HW */
10225     if (sc->port.pmf) {
10226         storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10227     }
10228 
10229     /* init Event Queue - PCI bus guarantees correct endianity */
10230     eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10231     eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10232     eq_data.producer     = sc->eq_prod;
10233     eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10234     eq_data.sb_id        = DEF_SB_ID;
10235     storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10236 }
10237 
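/*
 * Enable interrupts through the HC block: program the HC config register
 * according to the active interrupt mode (MSI-X/MSI/INTx) and, on non-E1
 * chips, set up the leading/trailing edge attention masks.
 */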
10238 static void
10239 bxe_hc_int_enable(struct bxe_softc *sc)
10240 {
10241     int port = SC_PORT(sc);
10242     uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10243     uint32_t val = REG_RD(sc, addr);
10244     uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10245     uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10246                            (sc->intr_count == 1)) ? TRUE : FALSE;
10247     uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10248 
10249     if (msix) {
10250         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10251                  HC_CONFIG_0_REG_INT_LINE_EN_0);
10252         val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10253                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10254         if (single_msix) {
10255             val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10256         }
10257     } else if (msi) {
10258         val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10259         val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10260                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10261                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10262     } else {
10263         val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10264                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10265                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10266                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10267 
10268         if (!CHIP_IS_E1(sc)) {
10269             BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10270                   val, port, addr);
10271 
10272             REG_WR(sc, addr, val);
10273 
10274             val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10275         }
10276     }
10277 
10278     if (CHIP_IS_E1(sc)) {
10279         REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10280     }
10281 
10282     BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10283           val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10284 
10285     REG_WR(sc, addr, val);
10286 
10287     /* ensure that HC_CONFIG is written before leading/trailing edge config */
10288     mb();
10289 
10290     if (!CHIP_IS_E1(sc)) {
10291         /* init leading/trailing edge */
10292         if (IS_MF(sc)) {
10293             val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10294             if (sc->port.pmf) {
10295                 /* enable nig and gpio3 attention */
10296                 val |= 0x1100;
10297             }
10298         } else {
10299             val = 0xffff;
10300         }
10301 
10302         REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10303         REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10304     }
10305 
10306     /* make sure that interrupts are indeed enabled from here on */
10307     mb();
10308 }
10309 
10310 static void
10311 bxe_igu_int_enable(struct bxe_softc *sc)
10312 {
10313     uint32_t val;
10314     uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10315     uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10316                            (sc->intr_count == 1)) ? TRUE : FALSE;
10317     uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10318 
10319     val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10320 
10321     if (msix) {
10322         val &= ~(IGU_PF_CONF_INT_LINE_EN |
10323                  IGU_PF_CONF_SINGLE_ISR_EN);
10324         val |= (IGU_PF_CONF_MSI_MSIX_EN |
10325                 IGU_PF_CONF_ATTN_BIT_EN);
10326         if (single_msix) {
10327             val |= IGU_PF_CONF_SINGLE_ISR_EN;
10328         }
10329     } else if (msi) {
10330         val &= ~IGU_PF_CONF_INT_LINE_EN;
10331         val |= (IGU_PF_CONF_MSI_MSIX_EN |
10332                 IGU_PF_CONF_ATTN_BIT_EN |
10333                 IGU_PF_CONF_SINGLE_ISR_EN);
10334     } else {
10335         val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10336         val |= (IGU_PF_CONF_INT_LINE_EN |
10337                 IGU_PF_CONF_ATTN_BIT_EN |
10338                 IGU_PF_CONF_SINGLE_ISR_EN);
10339     }
10340 
10341     /* clean previous status - need to configure igu prior to ack */
10342     if ((!msix) || single_msix) {
10343         REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10344         bxe_ack_int(sc);
10345     }
10346 
10347     val |= IGU_PF_CONF_FUNC_EN;
10348 
10349     BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10350           val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10351 
10352     REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10353 
10354     mb();
10355 
10356     /* init leading/trailing edge */
10357     if (IS_MF(sc)) {
10358         val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10359         if (sc->port.pmf) {
10360             /* enable nig and gpio3 attention */
10361             val |= 0x1100;
10362         }
10363     } else {
10364         val = 0xffff;
10365     }
10366 
10367     REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10368     REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10369 
10370     /* make sure that interrupts are indeed enabled from here on */
10371     mb();
10372 }
10373 
10374 static void
10375 bxe_int_enable(struct bxe_softc *sc)
10376 {
10377     if (sc->devinfo.int_block == INT_BLOCK_HC) {
10378         bxe_hc_int_enable(sc);
10379     } else {
10380         bxe_igu_int_enable(sc);
10381     }
10382 }
10383 
10384 static void
10385 bxe_hc_int_disable(struct bxe_softc *sc)
10386 {
10387     int port = SC_PORT(sc);
10388     uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10389     uint32_t val = REG_RD(sc, addr);
10390 
10391     /*
10392      * In E1 we must use only PCI configuration space to disable MSI/MSIX
10393      * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the
10394      * HC block.
10395      */
10396     if (CHIP_IS_E1(sc)) {
10397         /*
10398          * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register
10399          * to prevent the HC from sending interrupts after we exit the function
10400          */
10401         REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10402 
10403         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10404                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
10405                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10406     } else {
10407         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10408                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10409                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
10410                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10411     }
10412 
10413     BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10414 
10415     /* flush all outstanding writes */
10416     mb();
10417 
10418     REG_WR(sc, addr, val);
10419     if (REG_RD(sc, addr) != val) {
10420         BLOGE(sc, "proper val not read from HC IGU!\n");
10421     }
10422 }
10423 
10424 static void
10425 bxe_igu_int_disable(struct bxe_softc *sc)
10426 {
10427     uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10428 
10429     val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10430              IGU_PF_CONF_INT_LINE_EN |
10431              IGU_PF_CONF_ATTN_BIT_EN);
10432 
10433     BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10434 
10435     /* flush all outstanding writes */
10436     mb();
10437 
10438     REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10439     if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10440         BLOGE(sc, "proper val not read from IGU!\n");
10441     }
10442 }
10443 
10444 static void
10445 bxe_int_disable(struct bxe_softc *sc)
10446 {
10447     if (sc->devinfo.int_block == INT_BLOCK_HC) {
10448         bxe_hc_int_disable(sc);
10449     } else {
10450         bxe_igu_int_disable(sc);
10451     }
10452 }
10453 
10454 static void
10455 bxe_nic_init(struct bxe_softc *sc,
10456              int              load_code)
10457 {
10458     int i;
10459 
10460     for (i = 0; i < sc->num_queues; i++) {
10461         bxe_init_eth_fp(sc, i);
10462     }
10463 
10464     rmb(); /* ensure status block indices were read */
10465 
10466     bxe_init_rx_rings(sc);
10467     bxe_init_tx_rings(sc);
10468 
10469     if (IS_VF(sc)) {
10470         return;
10471     }
10472 
10473     /* initialize MOD_ABS interrupts */
10474     elink_init_mod_abs_int(sc, &sc->link_vars,
10475                            sc->devinfo.chip_id,
10476                            sc->devinfo.shmem_base,
10477                            sc->devinfo.shmem2_base,
10478                            SC_PORT(sc));
10479 
10480     bxe_init_def_sb(sc);
10481     bxe_update_dsb_idx(sc);
10482     bxe_init_sp_ring(sc);
10483     bxe_init_eq_ring(sc);
10484     bxe_init_internal(sc, load_code);
10485     bxe_pf_init(sc);
10486     bxe_stats_init(sc);
10487 
10488     /* flush all before enabling interrupts */
10489     mb();
10490 
10491     bxe_int_enable(sc);
10492 
10493     /* check for SPIO5 */
10494     bxe_attn_int_deasserted0(sc,
10495                              REG_RD(sc,
10496                                     (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10497                                      SC_PORT(sc)*4)) &
10498                              AEU_INPUTS_ATTN_BITS_SPIO5);
10499 }
10500 
10501 static inline void
10502 bxe_init_objs(struct bxe_softc *sc)
10503 {
10504     /* mcast rules must be added to tx if tx switching is enabled */
10505     ecore_obj_type o_type =
10506         (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10507                                          ECORE_OBJ_TYPE_RX;
10508 
10509     /* RX_MODE controlling object */
10510     ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10511 
10512     /* multicast configuration controlling object */
10513     ecore_init_mcast_obj(sc,
10514                          &sc->mcast_obj,
10515                          sc->fp[0].cl_id,
10516                          sc->fp[0].index,
10517                          SC_FUNC(sc),
10518                          SC_FUNC(sc),
10519                          BXE_SP(sc, mcast_rdata),
10520                          BXE_SP_MAPPING(sc, mcast_rdata),
10521                          ECORE_FILTER_MCAST_PENDING,
10522                          &sc->sp_state,
10523                          o_type);
10524 
10525     /* Setup CAM credit pools */
10526     ecore_init_mac_credit_pool(sc,
10527                                &sc->macs_pool,
10528                                SC_FUNC(sc),
10529                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10530                                                  VNICS_PER_PATH(sc));
10531 
10532     ecore_init_vlan_credit_pool(sc,
10533                                 &sc->vlans_pool,
10534                                 SC_ABS_FUNC(sc) >> 1,
10535                                 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10536                                                   VNICS_PER_PATH(sc));
10537 
10538     /* RSS configuration object */
10539     ecore_init_rss_config_obj(sc,
10540                               &sc->rss_conf_obj,
10541                               sc->fp[0].cl_id,
10542                               sc->fp[0].index,
10543                               SC_FUNC(sc),
10544                               SC_FUNC(sc),
10545                               BXE_SP(sc, rss_rdata),
10546                               BXE_SP_MAPPING(sc, rss_rdata),
10547                               ECORE_FILTER_RSS_CONF_PENDING,
10548                               &sc->sp_state, ECORE_OBJ_TYPE_RX);
10549 }
10550 
10551 /*
10552  * Initialize the function. This must be called before sending CLIENT_SETUP
10553  * for the first client.
10554  */
10555 static inline int
10556 bxe_func_start(struct bxe_softc *sc)
10557 {
10558     struct ecore_func_state_params func_params = { NULL };
10559     struct ecore_func_start_params *start_params = &func_params.params.start;
10560 
10561     /* Prepare parameters for function state transitions */
10562     bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10563 
10564     func_params.f_obj = &sc->func_obj;
10565     func_params.cmd = ECORE_F_CMD_START;
10566 
10567     /* Function parameters */
10568     start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10569     start_params->sd_vlan_tag = OVLAN(sc);
10570 
10571     if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10572         start_params->network_cos_mode = STATIC_COS;
10573     } else { /* CHIP_IS_E1X */
10574         start_params->network_cos_mode = FW_WRR;
10575     }
10576 
10577     //start_params->gre_tunnel_mode = 0;
10578     //start_params->gre_tunnel_rss  = 0;
10579 
10580     return (ecore_func_state_change(sc, &func_params));
10581 }
10582 
10583 static int
10584 bxe_set_power_state(struct bxe_softc *sc,
10585                     uint8_t          state)
10586 {
10587     uint16_t pmcsr;
10588 
10589     /* If there is no power capability, log a warning and succeed */
10590     if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10591         BLOGW(sc, "No power capability\n");
10592         return (0);
10593     }
10594 
10595     pmcsr = pci_read_config(sc->dev,
10596                             (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10597                             2);
10598 
10599     switch (state) {
10600     case PCI_PM_D0:
10601         pci_write_config(sc->dev,
10602                          (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10603                          ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10604 
10605         if (pmcsr & PCIM_PSTAT_DMASK) {
10606             /* delay required during transition out of D3hot */
10607             DELAY(20000);
10608         }
10609 
10610         break;
10611 
10612     case PCI_PM_D3hot:
10613         /* XXX if there are other clients above don't shut down the power */
10614 
10615         /* don't shut down the power for emulation and FPGA */
10616         if (CHIP_REV_IS_SLOW(sc)) {
10617             return (0);
10618         }
10619 
10620         pmcsr &= ~PCIM_PSTAT_DMASK;
10621         pmcsr |= PCIM_PSTAT_D3;
10622 
10623         if (sc->wol) {
10624             pmcsr |= PCIM_PSTAT_PMEENABLE;
10625         }
10626 
10627         pci_write_config(sc->dev,
10628                          (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10629                          pmcsr, 4);
10630 
10631         /*
10632          * No more memory access after this point until the device is brought back
10633          * to D0 state.
10634          */
10635         break;
10636 
10637     default:
10638         BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10639             state, pmcsr);
10640         return (-1);
10641     }
10642 
10643     return (0);
10644 }
10645 
10646 
10647 /* return true if succeeded to acquire the lock */
10648 static uint8_t
10649 bxe_trylock_hw_lock(struct bxe_softc *sc,
10650                     uint32_t         resource)
10651 {
10652     uint32_t lock_status;
10653     uint32_t resource_bit = (1 << resource);
10654     int func = SC_FUNC(sc);
10655     uint32_t hw_lock_control_reg;
10656 
10657     BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10658 
10659     /* Validating that the resource is within range */
10660     if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10661         BLOGD(sc, DBG_LOAD,
10662               "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10663               resource, HW_LOCK_MAX_RESOURCE_VALUE);
10664         return (FALSE);
10665     }
10666 
10667     if (func <= 5) {
10668         hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10669     } else {
10670         hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10671     }
10672 
10673     /* try to acquire the lock */
10674     REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10675     lock_status = REG_RD(sc, hw_lock_control_reg);
10676     if (lock_status & resource_bit) {
10677         return (TRUE);
10678     }
10679 
10680     BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10681         "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10682         lock_status, resource_bit);
10683 
10684     return (FALSE);
10685 }
10686 
10687 /*
10688  * Get the recovery leader resource id according to the engine this function
10689  * belongs to. Currently only 2 engines are supported.
10690  */
10691 static int
10692 bxe_get_leader_lock_resource(struct bxe_softc *sc)
10693 {
10694     if (SC_PATH(sc)) {
10695         return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10696     } else {
10697         return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10698     }
10699 }
10700 
10701 /* try to acquire a leader lock for current engine */
10702 static uint8_t
10703 bxe_trylock_leader_lock(struct bxe_softc *sc)
10704 {
10705     return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10706 }
10707 
10708 static int
10709 bxe_release_leader_lock(struct bxe_softc *sc)
10710 {
10711     return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10712 }
10713 
10714 /* close gates #2, #3 and #4 */
10715 static void
10716 bxe_set_234_gates(struct bxe_softc *sc,
10717                   uint8_t          close)
10718 {
10719     uint32_t val;
10720 
10721     /* gates #2 and #4a are closed/opened for "not E1" only */
10722     if (!CHIP_IS_E1(sc)) {
10723         /* #4 */
10724         REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10725         /* #2 */
10726         REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10727     }
10728 
10729     /* #3 */
10730     if (CHIP_IS_E1x(sc)) {
10731         /* prevent interrupts from HC on both ports */
10732         val = REG_RD(sc, HC_REG_CONFIG_1);
10733         REG_WR(sc, HC_REG_CONFIG_1,
10734                (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10735                (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10736 
10737         val = REG_RD(sc, HC_REG_CONFIG_0);
10738         REG_WR(sc, HC_REG_CONFIG_0,
10739                (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10740                (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10741     } else {
10742         /* Prevent incoming interrupts in IGU */
10743         val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10744 
10745         REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10746                (!close) ?
10747                (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10748                (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10749     }
10750 
10751     BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10752           close ? "closing" : "opening");
10753 
10754     wmb();
10755 }
10756 
10757 /* poll for pending writes bit, it should get cleared in no more than 1s */
10758 static int
10759 bxe_er_poll_igu_vq(struct bxe_softc *sc)
10760 {
10761     uint32_t cnt = 1000;
10762     uint32_t pend_bits = 0;
10763 
10764     do {
10765         pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10766 
10767         if (pend_bits == 0) {
10768             break;
10769         }
10770 
10771         DELAY(1000);
10772     } while (--cnt > 0);
10773 
10774     if (cnt == 0) {
10775         BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10776         return (-1);
10777     }
10778 
10779     return (0);
10780 }
10781 
10782 #define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10783 
10784 static void
10785 bxe_clp_reset_prep(struct bxe_softc *sc,
10786                    uint32_t         *magic_val)
10787 {
10788     /* Do some magic... */
10789     uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10790     *magic_val = val & SHARED_MF_CLP_MAGIC;
10791     MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10792 }
10793 
10794 /* restore the value of the 'magic' bit */
10795 static void
10796 bxe_clp_reset_done(struct bxe_softc *sc,
10797                    uint32_t         magic_val)
10798 {
10799     /* Restore the 'magic' bit value... */
10800     uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10801     MFCFG_WR(sc, shared_mf_config.clp_mb,
10802               (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10803 }
10804 
10805 /* prepare for MCP reset, takes care of CLP configurations */
10806 static void
10807 bxe_reset_mcp_prep(struct bxe_softc *sc,
10808                    uint32_t         *magic_val)
10809 {
10810     uint32_t shmem;
10811     uint32_t validity_offset;
10812 
10813     /* set `magic' bit in order to save MF config */
10814     if (!CHIP_IS_E1(sc)) {
10815         bxe_clp_reset_prep(sc, magic_val);
10816     }
10817 
10818     /* get shmem offset */
10819     shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10820     validity_offset =
10821         offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10822 
10823     /* Clear validity map flags */
10824     if (shmem > 0) {
10825         REG_WR(sc, shmem + validity_offset, 0);
10826     }
10827 }
10828 
10829 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10830 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
10831 
10832 static void
10833 bxe_mcp_wait_one(struct bxe_softc *sc)
10834 {
10835     /* special handling for emulation and FPGA (10 times longer) */
10836     if (CHIP_REV_IS_SLOW(sc)) {
10837         DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10838     } else {
10839         DELAY((MCP_ONE_TIMEOUT) * 1000);
10840     }
10841 }
10842 
10843 /* initialize shmem_base and wait for the validity signature to appear */
10844 static int
10845 bxe_init_shmem(struct bxe_softc *sc)
10846 {
10847     int cnt = 0;
10848     uint32_t val = 0;
10849 
10850     do {
10851         sc->devinfo.shmem_base     =
10852         sc->link_params.shmem_base =
10853             REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10854 
10855         if (sc->devinfo.shmem_base) {
10856             val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10857             if (val & SHR_MEM_VALIDITY_MB)
10858                 return (0);
10859         }
10860 
10861         bxe_mcp_wait_one(sc);
10862 
10863     } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10864 
10865     BLOGE(sc, "BAD MCP validity signature\n");
10866 
10867     return (-1);
10868 }
10869 
10870 static int
10871 bxe_reset_mcp_comp(struct bxe_softc *sc,
10872                    uint32_t         magic_val)
10873 {
10874     int rc = bxe_init_shmem(sc);
10875 
10876     /* Restore the `magic' bit value */
10877     if (!CHIP_IS_E1(sc)) {
10878         bxe_clp_reset_done(sc, magic_val);
10879     }
10880 
10881     return (rc);
10882 }
10883 
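/*
 * Prepare PXP for the chip reset by clearing the read start-init and
 * RBC-done indications (not needed on E1).
 */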
10884 static void
10885 bxe_pxp_prep(struct bxe_softc *sc)
10886 {
10887     if (!CHIP_IS_E1(sc)) {
10888         REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10889         REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10890         wmb();
10891     }
10892 }
10893 
10894 /*
10895  * Reset the whole chip except for:
10896  *      - PCIE core
10897  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10898  *      - IGU
10899  *      - MISC (including AEU)
10900  *      - GRC
10901  *      - RBCN, RBCP
10902  */
10903 static void
10904 bxe_process_kill_chip_reset(struct bxe_softc *sc,
10905                             uint8_t          global)
10906 {
10907     uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10908     uint32_t global_bits2, stay_reset2;
10909 
10910     /*
10911      * Bits that have to be set in reset_mask2 if we want to reset 'global'
10912      * (per chip) blocks.
10913      */
10914     global_bits2 =
10915         MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10916         MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10917 
10918     /*
10919      * Don't reset the following blocks.
10920      * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10921      *            reset, as in a 4-port device they might still be owned
10922      *            by the MCP (there is only one leader per path).
10923      */
10924     not_reset_mask1 =
10925         MISC_REGISTERS_RESET_REG_1_RST_HC |
10926         MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10927         MISC_REGISTERS_RESET_REG_1_RST_PXP;
10928 
10929     not_reset_mask2 =
10930         MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10931         MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10932         MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10933         MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10934         MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10935         MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10936         MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10937         MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10938         MISC_REGISTERS_RESET_REG_2_RST_ATC |
10939         MISC_REGISTERS_RESET_REG_2_PGLC |
10940         MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10941         MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10942         MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10943         MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10944         MISC_REGISTERS_RESET_REG_2_UMAC0 |
10945         MISC_REGISTERS_RESET_REG_2_UMAC1;
10946 
10947     /*
10948      * Keep the following blocks in reset:
10949      *  - all xxMACs are handled by the elink code.
10950      */
10951     stay_reset2 =
10952         MISC_REGISTERS_RESET_REG_2_XMAC |
10953         MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10954 
10955     /* Full reset masks according to the chip */
10956     reset_mask1 = 0xffffffff;
10957 
10958     if (CHIP_IS_E1(sc))
10959         reset_mask2 = 0xffff;
10960     else if (CHIP_IS_E1H(sc))
10961         reset_mask2 = 0x1ffff;
10962     else if (CHIP_IS_E2(sc))
10963         reset_mask2 = 0xfffff;
10964     else /* CHIP_IS_E3 */
10965         reset_mask2 = 0x3ffffff;
10966 
10967     /* Don't reset global blocks unless we need to */
10968     if (!global)
10969         reset_mask2 &= ~global_bits2;
10970 
10971     /*
10972      * In case of attention in the QM, we need to reset PXP
10973      * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10974      * because otherwise QM reset would release 'close the gates' shortly
10975      * before resetting the PXP, then the PSWRQ would send a write
10976      * request to PGLUE. Then when PXP is reset, PGLUE would try to
10977      * read the payload data from PSWWR, but PSWWR would not
10978      * respond. The write queue in PGLUE would stuck, dmae commands
10979      * would not return. Therefore it's important to reset the second
10980      * reset register (containing the
10981      * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10982      * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10983      * bit).
10984      */
10985     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10986            reset_mask2 & (~not_reset_mask2));
10987 
10988     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10989            reset_mask1 & (~not_reset_mask1));
10990 
10991     mb();
10992     wmb();
10993 
10994     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10995            reset_mask2 & (~stay_reset2));
10996 
10997     mb();
10998     wmb();
10999 
11000     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
11001     wmb();
11002 }
11003 
11004 static int
11005 bxe_process_kill(struct bxe_softc *sc,
11006                  uint8_t          global)
11007 {
11008     int cnt = 1000;
11009     uint32_t val = 0;
11010     uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
11011     uint32_t tags_63_32 = 0;
11012 
11013     /* Empty the Tetris buffer, wait for 1s */
11014     do {
11015         sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
11016         blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11017         port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11018         port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11019         pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11020         if (CHIP_IS_E3(sc)) {
11021             tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11022         }
11023 
11024         if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11025             ((port_is_idle_0 & 0x1) == 0x1) &&
11026             ((port_is_idle_1 & 0x1) == 0x1) &&
11027             (pgl_exp_rom2 == 0xffffffff) &&
11028             (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11029             break;
11030         DELAY(1000);
11031     } while (cnt-- > 0);
11032 
11033     if (cnt <= 0) {
11034         BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11035                   "are still outstanding read requests after 1s! "
11036                   "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11037                   "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11038               sr_cnt, blk_cnt, port_is_idle_0,
11039               port_is_idle_1, pgl_exp_rom2);
11040         return (-1);
11041     }
11042 
11043     mb();
11044 
11045     /* Close gates #2, #3 and #4 */
11046     bxe_set_234_gates(sc, TRUE);
11047 
11048     /* Poll for IGU VQs for 57712 and newer chips */
11049     if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11050         return (-1);
11051     }
11052 
11053     /* XXX indicate that "process kill" is in progress to MCP */
11054 
11055     /* clear "unprepared" bit */
11056     REG_WR(sc, MISC_REG_UNPREPARED, 0);
11057     mb();
11058 
11059     /* Make sure all is written to the chip before the reset */
11060     wmb();
11061 
11062     /*
11063      * Wait for 1ms to empty GLUE and PCI-E core queues,
11064      * PSWHST, GRC and PSWRD Tetris buffer.
11065      */
11066     DELAY(1000);
11067 
11068     /* Prepare for chip reset: */
11069     /* MCP */
11070     if (global) {
11071         bxe_reset_mcp_prep(sc, &val);
11072     }
11073 
11074     /* PXP */
11075     bxe_pxp_prep(sc);
11076     mb();
11077 
11078     /* reset the chip */
11079     bxe_process_kill_chip_reset(sc, global);
11080     mb();
11081 
11082     /* clear errors in PGB */
11083     if (!CHIP_IS_E1(sc))
11084         REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11085 
11086     /* Recover after reset: */
11087     /* MCP */
11088     if (global && bxe_reset_mcp_comp(sc, val)) {
11089         return (-1);
11090     }
11091 
11092     /* XXX add resetting the NO_MCP mode DB here */
11093 
11094     /* Open the gates #2, #3 and #4 */
11095     bxe_set_234_gates(sc, FALSE);
11096 
11097     /* XXX
11098      * IGU/AEU preparation: bring back the AEU/IGU to a reset state,
11099      * re-enable attentions
11100      */
11101 
11102     return (0);
11103 }
11104 
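/*
 * Recovery leader path: perform the "process kill" reset of the engine on
 * behalf of all its functions, then give up leadership.
 */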
11105 static int
11106 bxe_leader_reset(struct bxe_softc *sc)
11107 {
11108     int rc = 0;
11109     uint8_t global = bxe_reset_is_global(sc);
11110     uint32_t load_code;
11111 
11112     /*
11113      * If not going to reset MCP, load "fake" driver to reset HW while
11114      * driver is owner of the HW.
11115      */
11116     if (!global && !BXE_NOMCP(sc)) {
11117         load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11118                                    DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11119         if (!load_code) {
11120             BLOGE(sc, "MCP response failure, aborting\n");
11121             rc = -1;
11122             goto exit_leader_reset;
11123         }
11124 
11125         if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11126             (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11127             BLOGE(sc, "MCP unexpected response, aborting\n");
11128             rc = -1;
11129             goto exit_leader_reset2;
11130         }
11131 
11132         load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11133         if (!load_code) {
11134             BLOGE(sc, "MCP response failure, aborting\n");
11135             rc = -1;
11136             goto exit_leader_reset2;
11137         }
11138     }
11139 
11140     /* try to recover after the failure */
11141     if (bxe_process_kill(sc, global)) {
11142         BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11143         rc = -1;
11144         goto exit_leader_reset2;
11145     }
11146 
11147     /*
11148      * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11149      * state.
11150      */
11151     bxe_set_reset_done(sc);
11152     if (global) {
11153         bxe_clear_reset_global(sc);
11154     }
11155 
11156 exit_leader_reset2:
11157 
11158     /* unload the "fake" driver if it was loaded */
11159     if (!global && !BXE_NOMCP(sc)) {
11160         bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11161         bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11162     }
11163 
11164 exit_leader_reset:
11165 
11166     sc->is_leader = 0;
11167     bxe_release_leader_lock(sc);
11168 
11169     mb();
11170     return (rc);
11171 }
11172 
11173 /*
11174  * prepare INIT transition, parameters configured:
11175  *   - HC configuration
11176  *   - Queue's CDU context
11177  */
11178 static void
11179 bxe_pf_q_prep_init(struct bxe_softc               *sc,
11180                    struct bxe_fastpath            *fp,
11181                    struct ecore_queue_init_params *init_params)
11182 {
11183     uint8_t cos;
11184     int cxt_index, cxt_offset;
11185 
11186     bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11187     bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11188 
11189     bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11190     bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11191 
11192     /* HC rate */
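    /* convert coalescing ticks into a rate: 1000000 / ticks, or 0 (disabled) when ticks is 0 */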
11193     init_params->rx.hc_rate =
11194         sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11195     init_params->tx.hc_rate =
11196         sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11197 
11198     /* FW SB ID */
11199     init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11200 
11201     /* CQ index among the SB indices */
11202     init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11203     init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11204 
11205     /* set maximum number of COSs supported by this queue */
11206     init_params->max_cos = sc->max_cos;
11207 
11208     BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11209           fp->index, init_params->max_cos);
11210 
11211     /* set the context pointers queue object */
11212     for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11213         /* XXX change index/cid here if ever support multiple tx CoS */
11214         /* fp->txdata[cos]->cid */
11215         cxt_index = fp->index / ILT_PAGE_CIDS;
11216         cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11217         init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11218     }
11219 }
11220 
11221 /* set flags that are common for the Tx-only and not normal connections */
11222 static unsigned long
11223 bxe_get_common_flags(struct bxe_softc    *sc,
11224                      struct bxe_fastpath *fp,
11225                      uint8_t             zero_stats)
11226 {
11227     unsigned long flags = 0;
11228 
11229     /* PF driver will always initialize the Queue to an ACTIVE state */
11230     bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11231 
11232     /*
11233      * tx only connections collect statistics (on the same index as the
11234      * parent connection). The statistics are zeroed when the parent
11235      * connection is initialized.
11236      */
11237 
11238     bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11239     if (zero_stats) {
11240         bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11241     }
11242 
11243     /*
11244      * tx only connections can support tx-switching, though their
11245      * CoS-ness doesn't survive the loopback
11246      */
11247     if (sc->flags & BXE_TX_SWITCHING) {
11248         bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11249     }
11250 
11251     bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11252 
11253     return (flags);
11254 }
11255 
11256 static unsigned long
11257 bxe_get_q_flags(struct bxe_softc    *sc,
11258                 struct bxe_fastpath *fp,
11259                 uint8_t             leading)
11260 {
11261     unsigned long flags = 0;
11262 
11263     if (IS_MF_SD(sc)) {
11264         bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11265     }
11266 
11267     if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11268         bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11269         bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11270     }
11271 
11272     if (leading) {
11273         bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11274         bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11275     }
11276 
11277     bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11278 
11279     /* merge with common flags */
11280     return (flags | bxe_get_common_flags(sc, fp, TRUE));
11281 }
11282 
11283 static void
11284 bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11285                       struct bxe_fastpath               *fp,
11286                       struct ecore_general_setup_params *gen_init,
11287                       uint8_t                           cos)
11288 {
11289     gen_init->stat_id = bxe_stats_id(fp);
11290     gen_init->spcl_id = fp->cl_id;
11291     gen_init->mtu = sc->mtu;
11292     gen_init->cos = cos;
11293 }
11294 
11295 static void
11296 bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11297                  struct bxe_fastpath           *fp,
11298                  struct rxq_pause_params       *pause,
11299                  struct ecore_rxq_setup_params *rxq_init)
11300 {
11301     uint8_t max_sge = 0;
11302     uint16_t sge_sz = 0;
11303     uint16_t tpa_agg_size = 0;
11304 
11305     pause->sge_th_lo = SGE_TH_LO(sc);
11306     pause->sge_th_hi = SGE_TH_HI(sc);
11307 
11308     /* validate SGE ring has enough to cross high threshold */
11309     if (sc->dropless_fc &&
11310             (pause->sge_th_hi + FW_PREFETCH_CNT) >
11311             (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11312         BLOGW(sc, "sge ring threshold limit\n");
11313     }
11314 
11315     /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11316     tpa_agg_size = (2 * sc->mtu);
11317     if (tpa_agg_size < sc->max_aggregation_size) {
11318         tpa_agg_size = sc->max_aggregation_size;
11319     }
11320 
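    /*
     * Worst case number of SGEs per packet: the number of SGE pages an
     * MTU-sized frame spans, rounded up to whole SGE entries.
     */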
11321     max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11322     max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11323                    (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11324     sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11325 
11326     /* pause - not for e1 */
11327     if (!CHIP_IS_E1(sc)) {
11328         pause->bd_th_lo = BD_TH_LO(sc);
11329         pause->bd_th_hi = BD_TH_HI(sc);
11330 
11331         pause->rcq_th_lo = RCQ_TH_LO(sc);
11332         pause->rcq_th_hi = RCQ_TH_HI(sc);
11333 
11334         /* validate rings have enough entries to cross high thresholds */
11335         if (sc->dropless_fc &&
11336             pause->bd_th_hi + FW_PREFETCH_CNT >
11337             sc->rx_ring_size) {
11338             BLOGW(sc, "rx bd ring threshold limit\n");
11339         }
11340 
11341         if (sc->dropless_fc &&
11342             pause->rcq_th_hi + FW_PREFETCH_CNT >
11343             RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11344             BLOGW(sc, "rcq ring threshold limit\n");
11345         }
11346 
11347         pause->pri_map = 1;
11348     }
11349 
11350     /* rxq setup */
11351     rxq_init->dscr_map   = fp->rx_dma.paddr;
11352     rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11353     rxq_init->rcq_map    = fp->rcq_dma.paddr;
11354     rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11355 
11356     /*
11357      * This should be the maximum number of data bytes that may be
11358      * placed on the BD (not including padding).
11359      */
11360     rxq_init->buf_sz = (fp->rx_buf_size -
11361                         IP_HEADER_ALIGNMENT_PADDING);
11362 
11363     rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11364     rxq_init->tpa_agg_sz      = tpa_agg_size;
11365     rxq_init->sge_buf_sz      = sge_sz;
11366     rxq_init->max_sges_pkt    = max_sge;
11367     rxq_init->rss_engine_id   = SC_FUNC(sc);
11368     rxq_init->mcast_engine_id = SC_FUNC(sc);
11369 
11370     /*
11371      * Maximum number of simultaneous TPA aggregations for this Queue.
11372      * For PF Clients it should be the maximum available number.
11373      * VF driver(s) may want to define it to a smaller value.
11374      */
11375     rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11376 
11377     rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11378     rxq_init->fw_sb_id = fp->fw_sb_id;
11379 
11380     rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11381 
11382     /*
11383      * configure silent vlan removal
11384      * if multi function mode is afex, then mask default vlan
11385      */
11386     if (IS_MF_AFEX(sc)) {
11387         rxq_init->silent_removal_value =
11388             sc->devinfo.mf_info.afex_def_vlan_tag;
11389         rxq_init->silent_removal_mask = EVL_VLID_MASK;
11390     }
11391 }
11392 
11393 static void
11394 bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11395                  struct bxe_fastpath           *fp,
11396                  struct ecore_txq_setup_params *txq_init,
11397                  uint8_t                       cos)
11398 {
11399     /*
11400      * XXX If multiple CoS is ever supported then each fastpath structure
11401      * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11402      * fp->txdata[cos]->tx_dma.paddr;
11403      */
11404     txq_init->dscr_map     = fp->tx_dma.paddr;
11405     txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11406     txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11407     txq_init->fw_sb_id     = fp->fw_sb_id;
11408 
11409     /*
11410      * set the TSS leading client id for TX classification to the
11411      * leading RSS client id
11412      */
11413     txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11414 }
11415 
11416 /*
11417  * This function performs 2 steps in a queue state machine:
11418  *   1) RESET->INIT
11419  *   2) INIT->SETUP
11420  */
11421 static int
11422 bxe_setup_queue(struct bxe_softc    *sc,
11423                 struct bxe_fastpath *fp,
11424                 uint8_t             leading)
11425 {
11426     struct ecore_queue_state_params q_params = { NULL };
11427     struct ecore_queue_setup_params *setup_params =
11428                         &q_params.params.setup;
11429     int rc;
11430 
11431     BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11432 
11433     bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11434 
11435     q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11436 
11437     /* we want to wait for completion in this context */
11438     bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11439 
11440     /* prepare the INIT parameters */
11441     bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11442 
11443     /* Set the command */
11444     q_params.cmd = ECORE_Q_CMD_INIT;
11445 
11446     /* Change the state to INIT */
11447     rc = ecore_queue_state_change(sc, &q_params);
11448     if (rc) {
11449         BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11450         return (rc);
11451     }
11452 
11453     BLOGD(sc, DBG_LOAD, "init complete\n");
11454 
11455     /* now move the Queue to the SETUP state */
11456     memset(setup_params, 0, sizeof(*setup_params));
11457 
11458     /* set Queue flags */
11459     setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11460 
11461     /* set general SETUP parameters */
11462     bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11463                           FIRST_TX_COS_INDEX);
11464 
11465     bxe_pf_rx_q_prep(sc, fp,
11466                      &setup_params->pause_params,
11467                      &setup_params->rxq_params);
11468 
11469     bxe_pf_tx_q_prep(sc, fp,
11470                      &setup_params->txq_params,
11471                      FIRST_TX_COS_INDEX);
11472 
11473     /* Set the command */
11474     q_params.cmd = ECORE_Q_CMD_SETUP;
11475 
11476     /* change the state to SETUP */
11477     rc = ecore_queue_state_change(sc, &q_params);
11478     if (rc) {
11479         BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11480         return (rc);
11481     }
11482 
11483     return (rc);
11484 }
11485 
11486 static int
11487 bxe_setup_leading(struct bxe_softc *sc)
11488 {
11489     return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11490 }
11491 
11492 static int
11493 bxe_config_rss_pf(struct bxe_softc            *sc,
11494                   struct ecore_rss_config_obj *rss_obj,
11495                   uint8_t                     config_hash)
11496 {
11497     struct ecore_config_rss_params params = { NULL };
11498     int i;
11499 
11500     /*
11501      * Although RSS is meaningless when there is a single HW queue, we
11502      * still need it enabled in order to have HW Rx hash generated.
11503      */
11504 
11505     params.rss_obj = rss_obj;
11506 
11507     bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11508 
11509     bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11510 
11511     /* RSS configuration */
11512     bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11513     bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11514     bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11515     bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11516     if (rss_obj->udp_rss_v4) {
11517         bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11518     }
11519     if (rss_obj->udp_rss_v6) {
11520         bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11521     }
11522 
11523     /* Hash bits */
11524     params.rss_result_mask = MULTI_MASK;
11525 
11526     memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11527 
11528     if (config_hash) {
11529         /* RSS keys */
11530         for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11531             params.rss_key[i] = arc4random();
11532         }
11533 
11534         bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11535     }
11536 
11537     return (ecore_config_rss(sc, &params));
11538 }
11539 
11540 static int
11541 bxe_config_rss_eth(struct bxe_softc *sc,
11542                    uint8_t          config_hash)
11543 {
11544     return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11545 }
11546 
11547 static int
11548 bxe_init_rss_pf(struct bxe_softc *sc)
11549 {
11550     uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11551     int i;
11552 
11553     /*
11554      * Prepare the initial contents of the indirection table if
11555      * RSS is enabled
11556      */
11557     for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11558         sc->rss_conf_obj.ind_table[i] =
11559             (sc->fp->cl_id + (i % num_eth_queues));
11560     }
11561 
11562     if (sc->udp_rss) {
11563         sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11564     }
11565 
11566     /*
11567      * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11568      * per-port, so if explicit configuration is needed, do it only
11569      * for a PMF.
11570      *
11571      * For 57712 and newer it's a per-function configuration.
11572      */
11573     return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11574 }
11575 
11576 static int
11577 bxe_set_mac_one(struct bxe_softc          *sc,
11578                 uint8_t                   *mac,
11579                 struct ecore_vlan_mac_obj *obj,
11580                 uint8_t                   set,
11581                 int                       mac_type,
11582                 unsigned long             *ramrod_flags)
11583 {
11584     struct ecore_vlan_mac_ramrod_params ramrod_param;
11585     int rc;
11586 
11587     memset(&ramrod_param, 0, sizeof(ramrod_param));
11588 
11589     /* fill in general parameters */
11590     ramrod_param.vlan_mac_obj = obj;
11591     ramrod_param.ramrod_flags = *ramrod_flags;
11592 
11593     /* fill a user request section if needed */
11594     if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11595         memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11596 
11597         bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11598 
11599         /* Set the command: ADD or DEL */
11600         ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11601                                             ECORE_VLAN_MAC_DEL;
11602     }
11603 
11604     rc = ecore_config_vlan_mac(sc, &ramrod_param);
11605 
11606     if (rc == ECORE_EXISTS) {
11607         BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11608         /* do not treat adding the same MAC as an error */
11609         rc = 0;
11610     } else if (rc < 0) {
11611         BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11612     }
11613 
11614     return (rc);
11615 }
11616 
11617 static int
11618 bxe_set_eth_mac(struct bxe_softc *sc,
11619                 uint8_t          set)
11620 {
11621     unsigned long ramrod_flags = 0;
11622 
11623     BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11624 
11625     bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11626 
11627     /* Eth MAC is set on RSS leading client (fp[0]) */
11628     return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11629                             &sc->sp_objs->mac_obj,
11630                             set, ECORE_ETH_MAC, &ramrod_flags));
11631 }
11632 
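/* return the index of the currently active PHY (internal or one of the external PHYs) */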
11633 static int
11634 bxe_get_cur_phy_idx(struct bxe_softc *sc)
11635 {
11636     uint32_t sel_phy_idx = 0;
11637 
11638     if (sc->link_params.num_phys <= 1) {
11639         return (ELINK_INT_PHY);
11640     }
11641 
11642     if (sc->link_vars.link_up) {
11643         sel_phy_idx = ELINK_EXT_PHY1;
11644         /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11645         if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11646             (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11647              ELINK_SUPPORTED_FIBRE))
11648             sel_phy_idx = ELINK_EXT_PHY2;
11649     } else {
11650         switch (elink_phy_selection(&sc->link_params)) {
11651         case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11652         case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11653         case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11654                sel_phy_idx = ELINK_EXT_PHY1;
11655                break;
11656         case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11657         case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11658                sel_phy_idx = ELINK_EXT_PHY2;
11659                break;
11660         }
11661     }
11662 
11663     return (sel_phy_idx);
11664 }
11665 
11666 static int
11667 bxe_get_link_cfg_idx(struct bxe_softc *sc)
11668 {
11669     uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11670 
11671     /*
11672      * The selected (activated) PHY index is always the post-swap index
11673      * (in case PHY swapping is enabled). So when swapping is enabled, we
11674      * need to reverse it to get the configuration index.
11675      */
11676 
11677     if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11678         if (sel_phy_idx == ELINK_EXT_PHY1)
11679             sel_phy_idx = ELINK_EXT_PHY2;
11680         else if (sel_phy_idx == ELINK_EXT_PHY2)
11681             sel_phy_idx = ELINK_EXT_PHY1;
11682     }
11683 
11684     return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11685 }
11686 
11687 static void
11688 bxe_set_requested_fc(struct bxe_softc *sc)
11689 {
11690     /*
11691      * Initialize the link parameters structure variables.
11692      * It is recommended to turn off RX flow control for jumbo frames
11693      * for better performance.
11694      */
11695     if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11696         sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11697     } else {
11698         sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11699     }
11700 }
11701 
11702 static void
11703 bxe_calc_fc_adv(struct bxe_softc *sc)
11704 {
11705     uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11706 
11707 
11708     sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11709                                            ADVERTISED_Pause);
11710 
11711     switch (sc->link_vars.ieee_fc &
11712             MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11713 
11714     case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11715         sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11716                                           ADVERTISED_Pause);
11717         break;
11718 
11719     case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11720         sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11721         break;
11722 
11723     default:
11724         break;
11725 
11726     }
11727 }
11728 
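/* return the effective line speed, scaled/capped by the multi-function bandwidth configuration */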
11729 static uint16_t
11730 bxe_get_mf_speed(struct bxe_softc *sc)
11731 {
11732     uint16_t line_speed = sc->link_vars.line_speed;
11733     if (IS_MF(sc)) {
11734         uint16_t maxCfg =
11735             bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11736 
11737         /* calculate the current MAX line speed limit for the MF devices */
11738         if (IS_MF_SI(sc)) {
11739             line_speed = (line_speed * maxCfg) / 100;
11740         } else { /* SD mode */
11741             uint16_t vn_max_rate = maxCfg * 100;
11742 
11743             if (vn_max_rate < line_speed) {
11744                 line_speed = vn_max_rate;
11745             }
11746         }
11747     }
11748 
11749     return (line_speed);
11750 }
11751 
11752 static void
11753 bxe_fill_report_data(struct bxe_softc            *sc,
11754                      struct bxe_link_report_data *data)
11755 {
11756     uint16_t line_speed = bxe_get_mf_speed(sc);
11757 
11758     memset(data, 0, sizeof(*data));
11759 
11760     /* fill the report data with the effective line speed */
11761     data->line_speed = line_speed;
11762 
11763     /* Link is down */
11764     if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11765         bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11766     }
11767 
11768     /* Full DUPLEX */
11769     if (sc->link_vars.duplex == DUPLEX_FULL) {
11770         bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11771     }
11772 
11773     /* Rx Flow Control is ON */
11774     if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11775         bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11776     }
11777 
11778     /* Tx Flow Control is ON */
11779     if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11780         bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11781     }
11782 }
11783 
11784 /* report link status to OS, should be called under phy_lock */
11785 static void
11786 bxe_link_report_locked(struct bxe_softc *sc)
11787 {
11788     struct bxe_link_report_data cur_data;
11789 
11790     /* reread mf_cfg */
11791     if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11792         bxe_read_mf_cfg(sc);
11793     }
11794 
11795     /* Read the current link report info */
11796     bxe_fill_report_data(sc, &cur_data);
11797 
11798     /* Don't report link down or exactly the same link status twice */
11799     if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11800         (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11801                       &sc->last_reported_link.link_report_flags) &&
11802          bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11803                       &cur_data.link_report_flags))) {
11804         return;
11805     }
11806 
11807     ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
11808                    cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11809     sc->link_cnt++;
11810 
11811     ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11812     /* report new link params and remember the state for the next time */
11813     memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11814 
11815     if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11816                      &cur_data.link_report_flags)) {
11817         if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11818     } else {
11819         const char *duplex;
11820         const char *flow;
11821 
11822         if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11823                                    &cur_data.link_report_flags)) {
11824             duplex = "full";
11825             ELINK_DEBUG_P0(sc, "link set to full duplex\n");
11826         } else {
11827             duplex = "half";
11828             ELINK_DEBUG_P0(sc, "link set to half duplex\n");
11829         }
11830 
11831         /*
11832          * Handle the FC at the end so that only these flags would be
11833          * possibly set. This way we may easily check if there is no FC
11834          * enabled.
11835          */
11836         if (cur_data.link_report_flags) {
11837             if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11838                              &cur_data.link_report_flags) &&
11839                 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11840                              &cur_data.link_report_flags)) {
11841                 flow = "ON - receive & transmit";
11842             } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11843                                     &cur_data.link_report_flags) &&
11844                        !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11845                                      &cur_data.link_report_flags)) {
11846                 flow = "ON - receive";
11847             } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11848                                      &cur_data.link_report_flags) &&
11849                        bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11850                                     &cur_data.link_report_flags)) {
11851                 flow = "ON - transmit";
11852             } else {
11853                 flow = "none"; /* possible? */
11854             }
11855         } else {
11856             flow = "none";
11857         }
11858 
11859         if_link_state_change(sc->ifp, LINK_STATE_UP);
11860         BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11861               cur_data.line_speed, duplex, flow);
11862     }
11863 }
11864 
11865 static void
11866 bxe_link_report(struct bxe_softc *sc)
11867 {
11868     bxe_acquire_phy_lock(sc);
11869     bxe_link_report_locked(sc);
11870     bxe_release_phy_lock(sc);
11871 }
11872 
11873 static void
11874 bxe_link_status_update(struct bxe_softc *sc)
11875 {
11876     if (sc->state != BXE_STATE_OPEN) {
11877         return;
11878     }
11879 
11880     if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11881         elink_link_status_update(&sc->link_params, &sc->link_vars);
11882     } else {
11883         sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11884                                   ELINK_SUPPORTED_10baseT_Full |
11885                                   ELINK_SUPPORTED_100baseT_Half |
11886                                   ELINK_SUPPORTED_100baseT_Full |
11887                                   ELINK_SUPPORTED_1000baseT_Full |
11888                                   ELINK_SUPPORTED_2500baseX_Full |
11889                                   ELINK_SUPPORTED_10000baseT_Full |
11890                                   ELINK_SUPPORTED_TP |
11891                                   ELINK_SUPPORTED_FIBRE |
11892                                   ELINK_SUPPORTED_Autoneg |
11893                                   ELINK_SUPPORTED_Pause |
11894                                   ELINK_SUPPORTED_Asym_Pause);
11895         sc->port.advertising[0] = sc->port.supported[0];
11896 
11897         sc->link_params.sc                = sc;
11898         sc->link_params.port              = SC_PORT(sc);
11899         sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11900         sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11901         sc->link_params.req_line_speed[0] = SPEED_10000;
11902         sc->link_params.speed_cap_mask[0] = 0x7f0000;
11903         sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11904 
11905         if (CHIP_REV_IS_FPGA(sc)) {
11906             sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11907             sc->link_vars.line_speed  = ELINK_SPEED_1000;
11908             sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11909                                          LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11910         } else {
11911             sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11912             sc->link_vars.line_speed  = ELINK_SPEED_10000;
11913             sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11914                                          LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11915         }
11916 
11917         sc->link_vars.link_up = 1;
11918 
11919         sc->link_vars.duplex    = DUPLEX_FULL;
11920         sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11921 
11922         if (IS_PF(sc)) {
11923             REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11924             bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11925             bxe_link_report(sc);
11926         }
11927     }
11928 
11929     if (IS_PF(sc)) {
11930         if (sc->link_vars.link_up) {
11931             bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11932         } else {
11933             bxe_stats_handle(sc, STATS_EVENT_STOP);
11934         }
11935         bxe_link_report(sc);
11936     } else {
11937         bxe_link_report(sc);
11938         bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11939     }
11940 }
11941 
11942 static int
11943 bxe_initial_phy_init(struct bxe_softc *sc,
11944                      int              load_mode)
11945 {
11946     int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11947     uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11948     struct elink_params *lp = &sc->link_params;
11949 
11950     bxe_set_requested_fc(sc);
11951 
11952     if (CHIP_REV_IS_SLOW(sc)) {
11953         uint32_t bond = CHIP_BOND_ID(sc);
11954         uint32_t feat = 0;
11955 
11956         if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11957             feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11958         } else if (bond & 0x4) {
11959             if (CHIP_IS_E3(sc)) {
11960                 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11961             } else {
11962                 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11963             }
11964         } else if (bond & 0x8) {
11965             if (CHIP_IS_E3(sc)) {
11966                 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11967             } else {
11968                 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11969             }
11970         }
11971 
11972         /* disable EMAC for E3 and above */
11973         if (bond & 0x2) {
11974             feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11975         }
11976 
11977         sc->link_params.feature_config_flags |= feat;
11978     }
11979 
11980     bxe_acquire_phy_lock(sc);
11981 
11982     if (load_mode == LOAD_DIAG) {
11983         lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11984         /* Prefer doing PHY loopback at 10G speed, if possible */
11985         if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11986             if (lp->speed_cap_mask[cfg_idx] &
11987                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11988                 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11989             } else {
11990                 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11991             }
11992         }
11993     }
11994 
11995     if (load_mode == LOAD_LOOPBACK_EXT) {
11996         lp->loopback_mode = ELINK_LOOPBACK_EXT;
11997     }
11998 
11999     rc = elink_phy_init(&sc->link_params, &sc->link_vars);
12000 
12001     bxe_release_phy_lock(sc);
12002 
12003     bxe_calc_fc_adv(sc);
12004 
12005     if (sc->link_vars.link_up) {
12006         bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12007         bxe_link_report(sc);
12008     }
12009 
12010     if (!CHIP_REV_IS_SLOW(sc)) {
12011         bxe_periodic_start(sc);
12012     }
12013 
12014     sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12015     return (rc);
12016 }
12017 
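/*
 * if_foreach_llmaddr() callback: record a pointer to each multicast MAC into
 * the next free element of the pre-allocated list array.
 */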
12018 static u_int
12019 bxe_push_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12020 {
12021     struct ecore_mcast_list_elem *mc_mac = arg;
12022 
12023     mc_mac += cnt;
12024     mc_mac->mac = (uint8_t *)LLADDR(sdl);
12025 
12026     return (1);
12027 }
12028 
12029 static int
12030 bxe_init_mcast_macs_list(struct bxe_softc                 *sc,
12031                          struct ecore_mcast_ramrod_params *p)
12032 {
12033     if_t ifp = sc->ifp;
12034     int mc_count;
12035     struct ecore_mcast_list_elem *mc_mac;
12036 
12037     ECORE_LIST_INIT(&p->mcast_list);
12038     p->mcast_list_len = 0;
12039 
12040     /* XXXGL: multicast count may change later */
12041     mc_count = if_llmaddr_count(ifp);
12042 
12043     if (!mc_count) {
12044         return (0);
12045     }
12046 
12047     mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12048                     (M_NOWAIT | M_ZERO));
12049     if (!mc_mac) {
12050         BLOGE(sc, "Failed to allocate temp mcast list\n");
12051         return (-1);
12052     }
12053     bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12054     if_foreach_llmaddr(ifp, bxe_push_maddr, mc_mac);
12055 
12056     for (int i = 0; i < mc_count; i ++) {
12057         ECORE_LIST_PUSH_TAIL(&mc_mac[i].link, &p->mcast_list);
12058         BLOGD(sc, DBG_LOAD,
12059               "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n",
12060               mc_mac[i].mac[0], mc_mac[i].mac[1], mc_mac[i].mac[2],
12061               mc_mac[i].mac[3], mc_mac[i].mac[4], mc_mac[i].mac[5],
12062               mc_count);
12063     }
12064 
12065     p->mcast_list_len = mc_count;
12066 
12067     return (0);
12068 }
12069 
12070 static void
12071 bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12072 {
12073     struct ecore_mcast_list_elem *mc_mac =
12074         ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12075                                struct ecore_mcast_list_elem,
12076                                link);
12077 
12078     if (mc_mac) {
12079         /* only a single free as all mc_macs are in the same heap array */
12080         free(mc_mac, M_DEVBUF);
12081     }
12082 }
12083 static int
12084 bxe_set_mc_list(struct bxe_softc *sc)
12085 {
12086     struct ecore_mcast_ramrod_params rparam = { NULL };
12087     int rc = 0;
12088 
12089     rparam.mcast_obj = &sc->mcast_obj;
12090 
12091     BXE_MCAST_LOCK(sc);
12092 
12093     /* first, clear all configured multicast MACs */
12094     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12095     if (rc < 0) {
12096         BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12097         /* Manual backport parts of FreeBSD upstream r284470. */
12098         BXE_MCAST_UNLOCK(sc);
12099         return (rc);
12100     }
12101 
12102     /* configure a new MACs list */
12103     rc = bxe_init_mcast_macs_list(sc, &rparam);
12104     if (rc) {
12105         BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12106         BXE_MCAST_UNLOCK(sc);
12107         return (rc);
12108     }
12109 
12110     /* Now add the new MACs */
12111     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12112     if (rc < 0) {
12113         BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12114     }
12115 
12116     bxe_free_mcast_macs_list(&rparam);
12117 
12118     BXE_MCAST_UNLOCK(sc);
12119 
12120     return (rc);
12121 }
12122 
12123 struct bxe_set_addr_ctx {
12124    struct bxe_softc *sc;
12125    unsigned long ramrod_flags;
12126    int rc;
12127 };
12128 
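/*
 * if_foreach_lladdr() callback: schedule an ADD of each unicast MAC to the
 * ECORE UC list, remembering the first hard failure in the context.
 */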
12129 static u_int
12130 bxe_set_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12131 {
12132     struct bxe_set_addr_ctx *ctx = arg;
12133     struct ecore_vlan_mac_obj *mac_obj = &ctx->sc->sp_objs->mac_obj;
12134     int rc;
12135 
12136     if (ctx->rc < 0)
12137         return (0);
12138 
12139     rc = bxe_set_mac_one(ctx->sc, (uint8_t *)LLADDR(sdl), mac_obj, TRUE,
12140                          ECORE_UC_LIST_MAC, &ctx->ramrod_flags);
12141 
12142     /* do not treat adding same MAC as an error */
12143     if (rc == -EEXIST)
12144         BLOGD(ctx->sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12145     else if (rc < 0) {
12146             BLOGE(ctx->sc, "Failed to schedule ADD operations (%d)\n", rc);
12147             ctx->rc = rc;
12148     }
12149 
12150     return (1);
12151 }
12152 
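/*
 * Reprogram the unicast MAC filters: delete all currently configured MACs,
 * schedule an ADD for every link-level address on the interface and then
 * execute the pending commands.
 */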
12153 static int
12154 bxe_set_uc_list(struct bxe_softc *sc)
12155 {
12156     if_t ifp = sc->ifp;
12157     struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12158     struct bxe_set_addr_ctx ctx = { sc, 0, 0 };
12159     int rc;
12160 
12161     /* first schedule a cleanup of the old configuration */
12162     rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12163     if (rc < 0) {
12164         BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12165         return (rc);
12166     }
12167 
12168     if_foreach_lladdr(ifp, bxe_set_addr, &ctx);
12169     if (ctx.rc < 0)
12170         return (ctx.rc);
12171 
12172     /* Execute the pending commands */
12173     bit_set(&ctx.ramrod_flags, RAMROD_CONT);
12174     return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12175                             ECORE_UC_LIST_MAC, &ctx.ramrod_flags));
12176 }
12177 
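/*
 * Derive the RX filtering mode (normal/allmulti/promisc) from the interface
 * flags and the multicast/unicast lists, then program it into the chip.
 */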
12178 static void
12179 bxe_set_rx_mode(struct bxe_softc *sc)
12180 {
12181     if_t ifp = sc->ifp;
12182     uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12183 
12184     if (sc->state != BXE_STATE_OPEN) {
12185         BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12186         return;
12187     }
12188 
12189     BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12190 
12191     if (if_getflags(ifp) & IFF_PROMISC) {
12192         rx_mode = BXE_RX_MODE_PROMISC;
12193     } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12194                ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12195                 CHIP_IS_E1(sc))) {
12196         rx_mode = BXE_RX_MODE_ALLMULTI;
12197     } else {
12198         if (IS_PF(sc)) {
12199             /* some multicasts */
12200             if (bxe_set_mc_list(sc) < 0) {
12201                 rx_mode = BXE_RX_MODE_ALLMULTI;
12202             }
12203             if (bxe_set_uc_list(sc) < 0) {
12204                 rx_mode = BXE_RX_MODE_PROMISC;
12205             }
12206         }
12207     }
12208 
12209     sc->rx_mode = rx_mode;
12210 
12211     /* schedule the rx_mode command */
12212     if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12213         BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12214         bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12215         return;
12216     }
12217 
12218     if (IS_PF(sc)) {
12219         bxe_set_storm_rx_mode(sc);
12220     }
12221 }
12222 
12223 
12224 /* update flags in shmem */
12225 static void
12226 bxe_update_drv_flags(struct bxe_softc *sc,
12227                      uint32_t         flags,
12228                      uint32_t         set)
12229 {
12230     uint32_t drv_flags;
12231 
12232     if (SHMEM2_HAS(sc, drv_flags)) {
12233         bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12234         drv_flags = SHMEM2_RD(sc, drv_flags);
12235 
12236         if (set) {
12237             SET_FLAGS(drv_flags, flags);
12238         } else {
12239             RESET_FLAGS(drv_flags, flags);
12240         }
12241 
12242         SHMEM2_WR(sc, drv_flags, drv_flags);
12243         BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12244 
12245         bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12246     }
12247 }
12248 
12249 /* periodic timer callout routine, only runs when the interface is up */
12250 
12251 static void
12252 bxe_periodic_callout_func(void *xsc)
12253 {
12254     struct bxe_softc *sc = (struct bxe_softc *)xsc;
12255     int i;
12256 
12257     if (!BXE_CORE_TRYLOCK(sc)) {
12258         /* just bail and try again next time */
12259 
12260         if ((sc->state == BXE_STATE_OPEN) &&
12261             (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12262             /* schedule the next periodic callout */
12263             callout_reset(&sc->periodic_callout, hz,
12264                           bxe_periodic_callout_func, sc);
12265         }
12266 
12267         return;
12268     }
12269 
12270     if ((sc->state != BXE_STATE_OPEN) ||
12271         (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12272         BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12273         BXE_CORE_UNLOCK(sc);
12274         return;
12275     }
12276 
12277 
12278     /* Check for TX timeouts on any fastpath. */
12279     FOR_EACH_QUEUE(sc, i) {
12280         if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12281             /* Ruh-Roh, chip was reset! */
12282             break;
12283         }
12284     }
12285 
12286     if (!CHIP_REV_IS_SLOW(sc)) {
12287         /*
12288          * This barrier is needed to ensure the ordering between the writing
12289          * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12290          * the reading here.
12291          */
12292         mb();
12293         if (sc->port.pmf) {
12294             bxe_acquire_phy_lock(sc);
12295             elink_period_func(&sc->link_params, &sc->link_vars);
12296             bxe_release_phy_lock(sc);
12297         }
12298     }
12299 
12300     if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12301         int mb_idx = SC_FW_MB_IDX(sc);
12302         uint32_t drv_pulse;
12303         uint32_t mcp_pulse;
12304 
12305         ++sc->fw_drv_pulse_wr_seq;
12306         sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12307 
12308         drv_pulse = sc->fw_drv_pulse_wr_seq;
12309         bxe_drv_pulse(sc);
12310 
12311         mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12312                      MCP_PULSE_SEQ_MASK);
12313 
12314         /*
12315          * The delta between driver pulse and mcp response should
12316          * be 1 (before mcp response) or 0 (after mcp response).
12317          */
12318         if ((drv_pulse != mcp_pulse) &&
12319             (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12320             /* someone lost a heartbeat... */
12321             BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12322                   drv_pulse, mcp_pulse);
12323         }
12324     }
12325 
12326     /* state is BXE_STATE_OPEN */
12327     bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12328 
12329     BXE_CORE_UNLOCK(sc);
12330 
12331     if ((sc->state == BXE_STATE_OPEN) &&
12332         (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12333         /* schedule the next periodic callout */
12334         callout_reset(&sc->periodic_callout, hz,
12335                       bxe_periodic_callout_func, sc);
12336     }
12337 }
12338 
12339 static void
12340 bxe_periodic_start(struct bxe_softc *sc)
12341 {
12342     atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12343     callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12344 }
12345 
12346 static void
12347 bxe_periodic_stop(struct bxe_softc *sc)
12348 {
12349     atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12350     callout_drain(&sc->periodic_callout);
12351 }
12352 
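/*
 * Parity/error recovery state machine. Iterates until recovery completes,
 * is deferred (re-scheduled via the error timeout task) or fails permanently.
 */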
12353 void
12354 bxe_parity_recover(struct bxe_softc *sc)
12355 {
12356     uint8_t global = FALSE;
12357     uint32_t error_recovered, error_unrecovered;
12358 
12359 
12360     if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12361         (sc->state == BXE_STATE_ERROR)) {
12362         BLOGE(sc, "RECOVERY failed, "
12363             "stack notified driver is NOT running! "
12364             "Please reboot/power cycle the system.\n");
12365         return;
12366     }
12367 
12368     while (1) {
12369         BLOGD(sc, DBG_SP,
12370            "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n",
12371             __func__, sc, sc->state, sc->recovery_state, sc->error_status);
12372 
12373         switch(sc->recovery_state) {
12374 
12375         case BXE_RECOVERY_INIT:
12376             bxe_chk_parity_attn(sc, &global, FALSE);
12377 
12378             if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ||
12379                 (sc->error_status & BXE_ERR_MCP_ASSERT) ||
12380                 (sc->error_status & BXE_ERR_GLOBAL)) {
12381 
12382                 BXE_CORE_LOCK(sc);
12383                 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12384                     bxe_periodic_stop(sc);
12385                 }
12386                 bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
12387                 sc->state = BXE_STATE_ERROR;
12388                 sc->recovery_state = BXE_RECOVERY_FAILED;
12389                 BLOGE(sc, " No Recovery tried for error 0x%x"
12390                     " stack notified driver is NOT running!"
12391                     " Please reboot/power cycle the system.\n",
12392                     sc->error_status);
12393                 BXE_CORE_UNLOCK(sc);
12394                 return;
12395             }
12396 
12397 
12398             /* Try to get a LEADER_LOCK HW lock */
12399             if (bxe_trylock_leader_lock(sc)) {
12400 
12401                 bxe_set_reset_in_progress(sc);
12402                 /*
12403                  * Check if there is a global attention and if
12404                  * there was a global attention, set the global
12405                  * reset bit.
12406                  */
12407                 if (global) {
12408                     bxe_set_reset_global(sc);
12409                 }
12410                 sc->is_leader = 1;
12411             }
12412 
12413             /* Stop the periodic callout if the interface is still marked running */
12414 
12415             if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12416                 bxe_periodic_stop(sc);
12417             }
12418 
12419             BXE_CORE_LOCK(sc);
12420             bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
12421             sc->recovery_state = BXE_RECOVERY_WAIT;
12422             BXE_CORE_UNLOCK(sc);
12423 
12424             /*
12425              * Ensure "is_leader", MCP command sequence and
12426              * "recovery_state" update values are seen on other
12427              * CPUs.
12428              */
12429             mb();
12430             break;
12431         case BXE_RECOVERY_WAIT:
12432 
12433             if (sc->is_leader) {
12434                 int other_engine = SC_PATH(sc) ? 0 : 1;
12435                 bool other_load_status =
12436                     bxe_get_load_status(sc, other_engine);
12437                 bool load_status =
12438                     bxe_get_load_status(sc, SC_PATH(sc));
12439                 global = bxe_reset_is_global(sc);
12440 
12441                 /*
12442                  * In case of a parity in a global block, let
12443                  * the first leader that performs a
12444                  * leader_reset() reset the global blocks in
12445                  * order to clear global attentions. Otherwise
12446                  * the gates will remain closed for that
12447                  * engine.
12448                  */
12449                 if (load_status ||
12450                     (global && other_load_status)) {
12451                     /*
12452                      * Wait until all other functions get
12453                      * down.
12454                      */
12455                     taskqueue_enqueue_timeout(taskqueue_thread,
12456                         &sc->sp_err_timeout_task, hz/10);
12457                     return;
12458                 } else {
12459                     /*
12460                      * If all other functions got down
12461                      * try to bring the chip back to
12462                      * normal. In any case it's an exit
12463                      * point for a leader.
12464                      */
12465                     if (bxe_leader_reset(sc)) {
12466                         BLOGE(sc, "RECOVERY failed, "
12467                             "stack notified driver is NOT running!\n");
12468                         sc->recovery_state = BXE_RECOVERY_FAILED;
12469                         sc->state = BXE_STATE_ERROR;
12470                         mb();
12471                         return;
12472                     }
12473 
12474                     /*
12475                      * If we are here, it means that the
12476                      * leader has succeeded and doesn't
12477                      * want to be a leader anymore. Try
12478                      * to continue as a non-leader.
12479                      */
12480                     break;
12481                 }
12482 
12483             } else { /* non-leader */
12484                 if (!bxe_reset_is_done(sc, SC_PATH(sc))) {
12485                     /*
12486                      * Try to get a LEADER_LOCK HW lock, since
12487                      * a former leader may have been unloaded
12488                      * by the user or may have released
12489                      * leadership for another reason.
12491                      */
12492                     if (bxe_trylock_leader_lock(sc)) {
12493                         /*
12494                          * I'm a leader now! Restart a
12495                          * switch case.
12496                          */
12497                         sc->is_leader = 1;
12498                         break;
12499                     }
12500 
12501                     taskqueue_enqueue_timeout(taskqueue_thread,
12502                         &sc->sp_err_timeout_task, hz/10);
12503                     return;
12504 
12505                 } else {
12506                     /*
12507                      * If there was a global attention, wait
12508                      * for it to be cleared.
12509                      */
12510                     if (bxe_reset_is_global(sc)) {
12511                         taskqueue_enqueue_timeout(taskqueue_thread,
12512                             &sc->sp_err_timeout_task, hz/10);
12513                         return;
12514                      }
12515 
12516                      error_recovered =
12517                          sc->eth_stats.recoverable_error;
12518                      error_unrecovered =
12519                          sc->eth_stats.unrecoverable_error;
12520                      BXE_CORE_LOCK(sc);
12521                      sc->recovery_state =
12522                          BXE_RECOVERY_NIC_LOADING;
12523                      if (bxe_nic_load(sc, LOAD_NORMAL)) {
12524                          error_unrecovered++;
12525                          sc->recovery_state = BXE_RECOVERY_FAILED;
12526                          sc->state = BXE_STATE_ERROR;
12527                          BLOGE(sc, "Recovery is NOT successful, "
12528                             "state=0x%x recovery_state=0x%x error=%x\n",
12529                             sc->state, sc->recovery_state, sc->error_status);
12530                          sc->error_status = 0;
12531                      } else {
12532                          sc->recovery_state =
12533                              BXE_RECOVERY_DONE;
12534                          error_recovered++;
12535                          BLOGI(sc, "Recovery is successful from errors %x,"
12536                             " state=0x%x"
12537                             " recovery_state=0x%x\n", sc->error_status,
12538                             sc->state, sc->recovery_state);
12539                          mb();
12540                      }
12541                      sc->error_status = 0;
12542                      BXE_CORE_UNLOCK(sc);
12543                      sc->eth_stats.recoverable_error =
12544                          error_recovered;
12545                      sc->eth_stats.unrecoverable_error =
12546                          error_unrecovered;
12547 
12548                      return;
12549                  }
12550              }
12551          default:
12552              return;
12553          }
12554     }
12555 }
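
      /*
       * Error handling entry point: if an error is pending, disable
       * interrupts, report link down and kick off the parity recovery state
       * machine, unless a recovery is already in progress
       * (BXE_RECOVERY_WAIT).
       */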
12556 void
12557 bxe_handle_error(struct bxe_softc * sc)
12558 {
12559 
12560     if(sc->recovery_state == BXE_RECOVERY_WAIT) {
12561         return;
12562     }
12563     if(sc->error_status) {
12564         if (sc->state == BXE_STATE_OPEN)  {
12565             bxe_int_disable(sc);
12566         }
12567         if (sc->link_vars.link_up) {
12568             if_link_state_change(sc->ifp, LINK_STATE_DOWN);
12569         }
12570         sc->recovery_state = BXE_RECOVERY_INIT;
12571         BLOGI(sc, "bxe%d: Recovery started errors 0x%x recovery state 0x%x\n",
12572             sc->unit, sc->error_status, sc->recovery_state);
12573         bxe_parity_recover(sc);
12574    }
12575 }
12576 
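      /*
       * Deferred error handling task. Re-queued from bxe_parity_recover()
       * while waiting for other functions to unload; optionally takes a GRC
       * dump and re-enters the recovery flow until it reaches
       * BXE_RECOVERY_DONE or BXE_RECOVERY_FAILED.
       */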
12577 static void
12578 bxe_sp_err_timeout_task(void *arg, int pending)
12579 {
12580 
12581     struct bxe_softc *sc = (struct bxe_softc *)arg;
12582 
12583     BLOGD(sc, DBG_SP,
12584         "%s state = 0x%x rec state=0x%x error_status=%x\n",
12585         __func__, sc->state, sc->recovery_state, sc->error_status);
12586 
12587     if((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12588        (sc->state == BXE_STATE_ERROR)) {
12589         return;
12590     }
12591     /* if can be taken */
12592     if ((sc->error_status) && (sc->trigger_grcdump)) {
12593         bxe_grc_dump(sc);
12594     }
12595     if (sc->recovery_state != BXE_RECOVERY_DONE) {
12596         bxe_handle_error(sc);
12597         bxe_parity_recover(sc);
12598     } else if (sc->error_status) {
12599         bxe_handle_error(sc);
12600     }
12601 
12602     return;
12603 }
12604 
12605 /* start the controller */
12606 static __noinline int
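      /*
       * Called with the core lock held. Allocates fastpath and firmware
       * statistics memory, negotiates the load with the MCP (or falls back
       * to bxe_nic_load_no_mcp() when no MCP is present), initializes the
       * HW, attaches interrupts, sets up the leading and non-default queues,
       * RSS and the unicast MAC, and finally marks the interface
       * IFF_DRV_RUNNING.
       */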
12607 bxe_nic_load(struct bxe_softc *sc,
12608              int              load_mode)
12609 {
12610     uint32_t val;
12611     int load_code = 0;
12612     int i, rc = 0;
12613 
12614     BXE_CORE_LOCK_ASSERT(sc);
12615 
12616     BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12617 
12618     sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12619 
12620     if (IS_PF(sc)) {
12621         /* must be called before memory allocation and HW init */
12622         bxe_ilt_set_info(sc);
12623     }
12624 
12625     sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12626 
12627     bxe_set_fp_rx_buf_size(sc);
12628 
12629     if (bxe_alloc_fp_buffers(sc) != 0) {
12630         BLOGE(sc, "Failed to allocate fastpath memory\n");
12631         sc->state = BXE_STATE_CLOSED;
12632         rc = ENOMEM;
12633         goto bxe_nic_load_error0;
12634     }
12635 
12636     if (bxe_alloc_mem(sc) != 0) {
12637         sc->state = BXE_STATE_CLOSED;
12638         rc = ENOMEM;
12639         goto bxe_nic_load_error0;
12640     }
12641 
12642     if (bxe_alloc_fw_stats_mem(sc) != 0) {
12643         sc->state = BXE_STATE_CLOSED;
12644         rc = ENOMEM;
12645         goto bxe_nic_load_error0;
12646     }
12647 
12648     if (IS_PF(sc)) {
12649         /* set pf load just before approaching the MCP */
12650         bxe_set_pf_load(sc);
12651 
12652         /* if MCP exists send load request and analyze response */
12653         if (!BXE_NOMCP(sc)) {
12654             /* attempt to load pf */
12655             if (bxe_nic_load_request(sc, &load_code) != 0) {
12656                 sc->state = BXE_STATE_CLOSED;
12657                 rc = ENXIO;
12658                 goto bxe_nic_load_error1;
12659             }
12660 
12661             /* what did the MCP say? */
12662             if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12663                 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12664                 sc->state = BXE_STATE_CLOSED;
12665                 rc = ENXIO;
12666                 goto bxe_nic_load_error2;
12667             }
12668         } else {
12669             BLOGI(sc, "Device has no MCP!\n");
12670             load_code = bxe_nic_load_no_mcp(sc);
12671         }
12672 
12673         /* mark PMF if applicable */
12674         bxe_nic_load_pmf(sc, load_code);
12675 
12676         /* Init Function state controlling object */
12677         bxe_init_func_obj(sc);
12678 
12679         /* Initialize HW */
12680         if (bxe_init_hw(sc, load_code) != 0) {
12681             BLOGE(sc, "HW init failed\n");
12682             bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12683             sc->state = BXE_STATE_CLOSED;
12684             rc = ENXIO;
12685             goto bxe_nic_load_error2;
12686         }
12687     }
12688 
12689     /* set ALWAYS_ALIVE bit in shmem */
12690     sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12691     bxe_drv_pulse(sc);
12692     sc->flags |= BXE_NO_PULSE;
12693 
12694     /* attach interrupts */
12695     if (bxe_interrupt_attach(sc) != 0) {
12696         sc->state = BXE_STATE_CLOSED;
12697         rc = ENXIO;
12698         goto bxe_nic_load_error2;
12699     }
12700 
12701     bxe_nic_init(sc, load_code);
12702 
12703     /* Init per-function objects */
12704     if (IS_PF(sc)) {
12705         bxe_init_objs(sc);
12706         // XXX bxe_iov_nic_init(sc);
12707 
12708         /* set AFEX default VLAN tag to an invalid value */
12709         sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12710         // XXX bxe_nic_load_afex_dcc(sc, load_code);
12711 
12712         sc->state = BXE_STATE_OPENING_WAITING_PORT;
12713         rc = bxe_func_start(sc);
12714         if (rc) {
12715             BLOGE(sc, "Function start failed! rc = %d\n", rc);
12716             bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12717             sc->state = BXE_STATE_ERROR;
12718             goto bxe_nic_load_error3;
12719         }
12720 
12721         /* send LOAD_DONE command to MCP */
12722         if (!BXE_NOMCP(sc)) {
12723             load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12724             if (!load_code) {
12725                 BLOGE(sc, "MCP response failure, aborting\n");
12726                 sc->state = BXE_STATE_ERROR;
12727                 rc = ENXIO;
12728                 goto bxe_nic_load_error3;
12729             }
12730         }
12731 
12732         rc = bxe_setup_leading(sc);
12733         if (rc) {
12734             BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12735             sc->state = BXE_STATE_ERROR;
12736             goto bxe_nic_load_error3;
12737         }
12738 
12739         FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12740             rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12741             if (rc) {
12742                 BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12743                 sc->state = BXE_STATE_ERROR;
12744                 goto bxe_nic_load_error3;
12745             }
12746         }
12747 
12748         rc = bxe_init_rss_pf(sc);
12749         if (rc) {
12750             BLOGE(sc, "PF RSS init failed\n");
12751             sc->state = BXE_STATE_ERROR;
12752             goto bxe_nic_load_error3;
12753         }
12754     }
12755     /* XXX VF */
12756 
12757     /* now when Clients are configured we are ready to work */
12758     sc->state = BXE_STATE_OPEN;
12759 
12760     /* Configure a ucast MAC */
12761     if (IS_PF(sc)) {
12762         rc = bxe_set_eth_mac(sc, TRUE);
12763     }
12764     if (rc) {
12765         BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12766         sc->state = BXE_STATE_ERROR;
12767         goto bxe_nic_load_error3;
12768     }
12769 
12770     if (sc->port.pmf) {
12771         rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12772         if (rc) {
12773             sc->state = BXE_STATE_ERROR;
12774             goto bxe_nic_load_error3;
12775         }
12776     }
12777 
12778     sc->link_params.feature_config_flags &=
12779         ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12780 
12781     /* start fast path */
12782 
12783     /* Initialize Rx filter */
12784     bxe_set_rx_mode(sc);
12785 
12786     /* start the Tx */
12787     switch (/* XXX load_mode */LOAD_OPEN) {
12788     case LOAD_NORMAL:
12789     case LOAD_OPEN:
12790         break;
12791 
12792     case LOAD_DIAG:
12793     case LOAD_LOOPBACK_EXT:
12794         sc->state = BXE_STATE_DIAG;
12795         break;
12796 
12797     default:
12798         break;
12799     }
12800 
12801     if (sc->port.pmf) {
12802         bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12803     } else {
12804         bxe_link_status_update(sc);
12805     }
12806 
12807     /* start the periodic timer callout */
12808     bxe_periodic_start(sc);
12809 
12810     if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12811         /* mark driver is loaded in shmem2 */
12812         val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12813         SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12814                   (val |
12815                    DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12816                    DRV_FLAGS_CAPABILITIES_LOADED_L2));
12817     }
12818 
12819     /* wait for all pending SP commands to complete */
12820     if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12821         BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12822         bxe_periodic_stop(sc);
12823         bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12824         return (ENXIO);
12825     }
12826 
12827     /* Tell the stack the driver is running! */
12828     if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12829 
12830     BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12831 
12832     return (0);
12833 
12834 bxe_nic_load_error3:
12835 
12836     if (IS_PF(sc)) {
12837         bxe_int_disable_sync(sc, 1);
12838 
12839         /* clean out queued objects */
12840         bxe_squeeze_objects(sc);
12841     }
12842 
12843     bxe_interrupt_detach(sc);
12844 
12845 bxe_nic_load_error2:
12846 
12847     if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12848         bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12849         bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12850     }
12851 
12852     sc->port.pmf = 0;
12853 
12854 bxe_nic_load_error1:
12855 
12856     /* clear pf_load status, as it was already set */
12857     if (IS_PF(sc)) {
12858         bxe_clear_pf_load(sc);
12859     }
12860 
12861 bxe_nic_load_error0:
12862 
12863     bxe_free_fw_stats_mem(sc);
12864     bxe_free_fp_buffers(sc);
12865     bxe_free_mem(sc);
12866 
12867     return (rc);
12868 }
12869 
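      /*
       * Bring the controller up with the core lock held. If a parity error
       * was left pending from a previous unload, the first function loaded
       * on the engine attempts the recovery (leader reset) before calling
       * bxe_nic_load().
       */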
12870 static int
12871 bxe_init_locked(struct bxe_softc *sc)
12872 {
12873     int other_engine = SC_PATH(sc) ? 0 : 1;
12874     uint8_t other_load_status, load_status;
12875     uint8_t global = FALSE;
12876     int rc;
12877 
12878     BXE_CORE_LOCK_ASSERT(sc);
12879 
12880     /* check if the driver is already running */
12881     if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12882         BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12883         return (0);
12884     }
12885 
12886     if((sc->state == BXE_STATE_ERROR) &&
12887         (sc->recovery_state == BXE_RECOVERY_FAILED)) {
12888         BLOGE(sc, "Initialization not done, "
12889                   "as previous recovery failed. "
12890                   "Reboot/Power-cycle the system\n");
12891         return (ENXIO);
12892     }
12893 
12894 
12895     bxe_set_power_state(sc, PCI_PM_D0);
12896 
12897     /*
12898      * If parity occurred during the unload, then attentions and/or
12899      * RECOVERY_IN_PROGRESS may still be set. If so we want the first function
12900      * loaded on the current engine to complete the recovery. Parity recovery
12901      * is only relevant for PF driver.
12902      */
12903     if (IS_PF(sc)) {
12904         other_load_status = bxe_get_load_status(sc, other_engine);
12905         load_status = bxe_get_load_status(sc, SC_PATH(sc));
12906 
12907         if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12908             bxe_chk_parity_attn(sc, &global, TRUE)) {
12909             do {
12910                 /*
12911                  * If there are attentions and they are in global blocks, set
12912                  * the GLOBAL_RESET bit regardless whether it will be this
12913                  * function that will complete the recovery or not.
12914                  */
12915                 if (global) {
12916                     bxe_set_reset_global(sc);
12917                 }
12918 
12919                 /*
12920                  * Only the first function on the current engine should try
12921                  * to recover in open. In case of attentions in global blocks
12922                  * only the first in the chip should try to recover.
12923                  */
12924                 if ((!load_status && (!global || !other_load_status)) &&
12925                     bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12926                     BLOGI(sc, "Recovered during init\n");
12927                     break;
12928                 }
12929 
12930                 /* recovery has failed... */
12931                 bxe_set_power_state(sc, PCI_PM_D3hot);
12932                 sc->recovery_state = BXE_RECOVERY_FAILED;
12933 
12934                 BLOGE(sc, "Recovery flow hasn't properly "
12935                           "completed yet, try again later. "
12936                           "If you still see this message after a "
12937                           "few retries then power cycle is required.\n");
12938 
12939                 rc = ENXIO;
12940                 goto bxe_init_locked_done;
12941             } while (0);
12942         }
12943     }
12944 
12945     sc->recovery_state = BXE_RECOVERY_DONE;
12946 
12947     rc = bxe_nic_load(sc, LOAD_OPEN);
12948 
12949 bxe_init_locked_done:
12950 
12951     if (rc) {
12952         /* Tell the stack the driver is NOT running! */
12953         BLOGE(sc, "Initialization failed, "
12954                   "stack notified driver is NOT running!\n");
12955         if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12956     }
12957 
12958     return (rc);
12959 }
12960 
12961 static int
12962 bxe_stop_locked(struct bxe_softc *sc)
12963 {
12964     BXE_CORE_LOCK_ASSERT(sc);
12965     return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12966 }
12967 
12968 /*
12969  * Handles controller initialization when called from an unlocked routine.
12970  * ifconfig calls this function.
12971  *
12972  * Returns:
12973  *   void
12974  */
12975 static void
12976 bxe_init(void *xsc)
12977 {
12978     struct bxe_softc *sc = (struct bxe_softc *)xsc;
12979 
12980     BXE_CORE_LOCK(sc);
12981     bxe_init_locked(sc);
12982     BXE_CORE_UNLOCK(sc);
12983 }
12984 
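      /*
       * Allocate and initialize the ifnet: register the supported media
       * types, install the ioctl/transmit/init callbacks, advertise the
       * checksum/TSO/VLAN capabilities and attach the interface to the
       * Ethernet layer.
       */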
12985 static int
12986 bxe_init_ifnet(struct bxe_softc *sc)
12987 {
12988     if_t ifp;
12989     int capabilities;
12990 
12991     /* ifconfig entrypoint for media type/status reporting */
12992     ifmedia_init(&sc->ifmedia, IFM_IMASK,
12993                  bxe_ifmedia_update,
12994                  bxe_ifmedia_status);
12995 
12996     /* set the default interface values */
12997     ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12998     ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12999     ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
13000 
13001     sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
13002     BLOGI(sc, "IFMEDIA flags: %x\n", sc->ifmedia.ifm_media);
13003 
13004     /* allocate the ifnet structure */
13005     if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
13006         BLOGE(sc, "Interface allocation failed!\n");
13007         return (ENXIO);
13008     }
13009 
13010     if_setsoftc(ifp, sc);
13011     if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
13012     if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
13013     if_setioctlfn(ifp, bxe_ioctl);
13014     if_setstartfn(ifp, bxe_tx_start);
13015     if_setgetcounterfn(ifp, bxe_get_counter);
13016     if_settransmitfn(ifp, bxe_tx_mq_start);
13017     if_setqflushfn(ifp, bxe_mq_flush);
13018     if_setinitfn(ifp, bxe_init);
13019     if_setmtu(ifp, sc->mtu);
13020     if_sethwassist(ifp, (CSUM_IP      |
13021                         CSUM_TCP      |
13022                         CSUM_UDP      |
13023                         CSUM_TSO      |
13024                         CSUM_TCP_IPV6 |
13025                         CSUM_UDP_IPV6));
13026 
13027     capabilities =
13028         (IFCAP_VLAN_MTU       |
13029          IFCAP_VLAN_HWTAGGING |
13030          IFCAP_VLAN_HWTSO     |
13031          IFCAP_VLAN_HWFILTER  |
13032          IFCAP_VLAN_HWCSUM    |
13033          IFCAP_HWCSUM         |
13034          IFCAP_JUMBO_MTU      |
13035          IFCAP_LRO            |
13036          IFCAP_TSO4           |
13037          IFCAP_TSO6           |
13038          IFCAP_WOL_MAGIC);
13039     if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
13040     if_setcapenable(ifp, if_getcapabilities(ifp));
13041     if_setbaudrate(ifp, IF_Gbps(10));
13042 /* XXX */
13043     if_setsendqlen(ifp, sc->tx_ring_size);
13044     if_setsendqready(ifp);
13045 /* XXX */
13046 
13047     sc->ifp = ifp;
13048 
13049     /* attach to the Ethernet interface list */
13050     ether_ifattach(ifp, sc->link_params.mac_addr);
13051 
13052     /* Attach driver debugnet methods. */
13053     DEBUGNET_SET(ifp, bxe);
13054 
13055     return (0);
13056 }
13057 
13058 static void
13059 bxe_deallocate_bars(struct bxe_softc *sc)
13060 {
13061     int i;
13062 
13063     for (i = 0; i < MAX_BARS; i++) {
13064         if (sc->bar[i].resource != NULL) {
13065             bus_release_resource(sc->dev,
13066                                  SYS_RES_MEMORY,
13067                                  sc->bar[i].rid,
13068                                  sc->bar[i].resource);
13069             BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
13070                   i, PCIR_BAR(i));
13071         }
13072     }
13073 }
13074 
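      /*
       * Map the memory BARs used by the device (0, 2 and 4) and cache the
       * bus tag/handle and KVA for each in sc->bar[].
       */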
13075 static int
13076 bxe_allocate_bars(struct bxe_softc *sc)
13077 {
13078     u_int flags;
13079     int i;
13080 
13081     memset(sc->bar, 0, sizeof(sc->bar));
13082 
13083     for (i = 0; i < MAX_BARS; i++) {
13084 
13085         /* memory resources reside at BARs 0, 2, 4 */
13086         /* Run `pciconf -lb` to see mappings */
13087         if ((i != 0) && (i != 2) && (i != 4)) {
13088             continue;
13089         }
13090 
13091         sc->bar[i].rid = PCIR_BAR(i);
13092 
13093         flags = RF_ACTIVE;
13094         if (i == 0) {
13095             flags |= RF_SHAREABLE;
13096         }
13097 
13098         if ((sc->bar[i].resource =
13099              bus_alloc_resource_any(sc->dev,
13100                                     SYS_RES_MEMORY,
13101                                     &sc->bar[i].rid,
13102                                     flags)) == NULL) {
13103             return (0);
13104         }
13105 
13106         sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
13107         sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
13108         sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
13109 
13110         BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
13111               i, PCIR_BAR(i),
13112               rman_get_start(sc->bar[i].resource),
13113               rman_get_end(sc->bar[i].resource),
13114               rman_get_size(sc->bar[i].resource),
13115               (uintmax_t)sc->bar[i].kva);
13116     }
13117 
13118     return (0);
13119 }
13120 
13121 static void
13122 bxe_get_function_num(struct bxe_softc *sc)
13123 {
13124     uint32_t val = 0;
13125 
13126     /*
13127      * Read the ME register to get the function number. The ME register
13128      * holds the relative-function number and absolute-function number. The
13129      * absolute-function number appears only in E2 and above. Before that
13130      * these bits always contained zero, therefore we cannot blindly use them.
13131      */
13132 
13133     val = REG_RD(sc, BAR_ME_REGISTER);
13134 
13135     sc->pfunc_rel =
13136         (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
13137     sc->path_id =
13138         (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
13139 
13140     if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13141         sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
13142     } else {
13143         sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
13144     }
13145 
13146     BLOGD(sc, DBG_LOAD,
13147           "Relative function %d, Absolute function %d, Path %d\n",
13148           sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
13149 }
13150 
13151 static uint32_t
13152 bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
13153 {
13154     uint32_t shmem2_size;
13155     uint32_t offset;
13156     uint32_t mf_cfg_offset_value;
13157 
13158     /* Non 57712 */
13159     offset = (SHMEM_RD(sc, func_mb) +
13160               (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
13161 
13162     /* 57712 plus */
13163     if (sc->devinfo.shmem2_base != 0) {
13164         shmem2_size = SHMEM2_RD(sc, size);
13165         if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
13166             mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
13167             if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
13168                 offset = mf_cfg_offset_value;
13169             }
13170         }
13171     }
13172 
13173     return (offset);
13174 }
13175 
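      /*
       * Read a register from the device's PCIe capability block. Returns 0
       * (and logs an error) if the PCIe capability cannot be located.
       */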
13176 static uint32_t
13177 bxe_pcie_capability_read(struct bxe_softc *sc,
13178                          int    reg,
13179                          int    width)
13180 {
13181     int pcie_reg;
13182 
13183     /* ensure PCIe capability is enabled */
13184     if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
13185         if (pcie_reg != 0) {
13186             BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
13187             return (pci_read_config(sc->dev, (pcie_reg + reg), width));
13188         }
13189     }
13190 
13191     BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
13192 
13193     return (0);
13194 }
13195 
13196 static uint8_t
13197 bxe_is_pcie_pending(struct bxe_softc *sc)
13198 {
13199     return (bxe_pcie_capability_read(sc, PCIER_DEVICE_STA, 2) &
13200             PCIEM_STA_TRANSACTION_PND);
13201 }
13202 
13203 /*
13204  * Walk the PCI capabilities list for the device to find what features are
13205  * supported. These capabilities may be enabled/disabled by firmware so it's
13206  * best to walk the list rather than make assumptions.
13207  */
13208 static void
13209 bxe_probe_pci_caps(struct bxe_softc *sc)
13210 {
13211     uint16_t link_status;
13212     int reg;
13213 
13214     /* check if PCI Power Management is enabled */
13215     if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
13216         if (reg != 0) {
13217             BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
13218 
13219             sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
13220             sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
13221         }
13222     }
13223 
13224     link_status = bxe_pcie_capability_read(sc, PCIER_LINK_STA, 2);
13225 
13226     /* handle PCIe 2.0 workarounds for 57710 */
13227     if (CHIP_IS_E1(sc)) {
13228         /* workaround for 57710 errata E4_57710_27462 */
13229         sc->devinfo.pcie_link_speed =
13230             (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
13231 
13232         /* workaround for 57710 errata E4_57710_27488 */
13233         sc->devinfo.pcie_link_width =
13234             ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13235         if (sc->devinfo.pcie_link_speed > 1) {
13236             sc->devinfo.pcie_link_width =
13237                 ((link_status & PCIEM_LINK_STA_WIDTH) >> 4) >> 1;
13238         }
13239     } else {
13240         sc->devinfo.pcie_link_speed =
13241             (link_status & PCIEM_LINK_STA_SPEED);
13242         sc->devinfo.pcie_link_width =
13243             ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13244     }
13245 
13246     BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
13247           sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
13248 
13249     sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13250     sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13251 
13252     /* check if MSI capability is enabled */
13253     if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13254         if (reg != 0) {
13255             BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13256 
13257             sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13258             sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13259         }
13260     }
13261 
13262     /* check if MSI-X capability is enabled */
13263     if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13264         if (reg != 0) {
13265             BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13266 
13267             sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13268             sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13269         }
13270     }
13271 }
13272 
13273 static int
13274 bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13275 {
13276     struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13277     uint32_t val;
13278 
13279     /* get the outer vlan if we're in switch-dependent mode */
13280 
13281     val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13282     mf_info->ext_id = (uint16_t)val;
13283 
13284     mf_info->multi_vnics_mode = 1;
13285 
13286     if (!VALID_OVLAN(mf_info->ext_id)) {
13287         BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13288         return (1);
13289     }
13290 
13291     /* get the capabilities */
13292     if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13293         FUNC_MF_CFG_PROTOCOL_ISCSI) {
13294         mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13295     } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13296                FUNC_MF_CFG_PROTOCOL_FCOE) {
13297         mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13298     } else {
13299         mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13300     }
13301 
13302     mf_info->vnics_per_port =
13303         (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13304 
13305     return (0);
13306 }
13307 
13308 static uint32_t
13309 bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13310 {
13311     uint32_t retval = 0;
13312     uint32_t val;
13313 
13314     val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13315 
13316     if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13317         if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13318             retval |= MF_PROTO_SUPPORT_ETHERNET;
13319         }
13320         if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13321             retval |= MF_PROTO_SUPPORT_ISCSI;
13322         }
13323         if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13324             retval |= MF_PROTO_SUPPORT_FCOE;
13325         }
13326     }
13327 
13328     return (retval);
13329 }
13330 
13331 static int
13332 bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13333 {
13334     struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13335     uint32_t val;
13336 
13337     /*
13338      * There is no outer vlan if we're in switch-independent mode.
13339      * If the mac is valid then assume multi-function.
13340      */
13341 
13342     val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13343 
13344     mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13345 
13346     mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13347 
13348     mf_info->vnics_per_port =
13349         (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13350 
13351     return (0);
13352 }
13353 
13354 static int
13355 bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13356 {
13357     struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13358     uint32_t e1hov_tag;
13359     uint32_t func_config;
13360     uint32_t niv_config;
13361 
13362     mf_info->multi_vnics_mode = 1;
13363 
13364     e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13365     func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13366     niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13367 
13368     mf_info->ext_id =
13369         (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13370                    FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13371 
13372     mf_info->default_vlan =
13373         (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13374                    FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13375 
13376     mf_info->niv_allowed_priorities =
13377         (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13378                   FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13379 
13380     mf_info->niv_default_cos =
13381         (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13382                   FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13383 
13384     mf_info->afex_vlan_mode =
13385         ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13386          FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13387 
13388     mf_info->niv_mba_enabled =
13389         ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13390          FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13391 
13392     mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13393 
13394     mf_info->vnics_per_port =
13395         (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13396 
13397     return (0);
13398 }
13399 
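      /*
       * Sanity check the multi-function configuration read from shmem:
       * hidden functions, vnic/ovlan consistency in switch-dependent mode
       * and duplicate outer VLANs on the same port are all rejected.
       */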
13400 static int
13401 bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13402 {
13403     struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13404     uint32_t mf_cfg1;
13405     uint32_t mf_cfg2;
13406     uint32_t ovlan1;
13407     uint32_t ovlan2;
13408     uint8_t i, j;
13409 
13410     BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13411           SC_PORT(sc));
13412     BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13413           mf_info->mf_config[SC_VN(sc)]);
13414     BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13415           mf_info->multi_vnics_mode);
13416     BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13417           mf_info->vnics_per_port);
13418     BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13419           mf_info->ext_id);
13420     BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13421           mf_info->min_bw[0], mf_info->min_bw[1],
13422           mf_info->min_bw[2], mf_info->min_bw[3]);
13423     BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13424           mf_info->max_bw[0], mf_info->max_bw[1],
13425           mf_info->max_bw[2], mf_info->max_bw[3]);
13426     BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13427           sc->mac_addr_str);
13428 
13429     /* various MF mode sanity checks... */
13430 
13431     if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13432         BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13433               SC_PORT(sc));
13434         return (1);
13435     }
13436 
13437     if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13438         BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13439               mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13440         return (1);
13441     }
13442 
13443     if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13444         /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13445         if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13446             BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13447                   SC_VN(sc), OVLAN(sc));
13448             return (1);
13449         }
13450 
13451         if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13452             BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13453                   mf_info->multi_vnics_mode, OVLAN(sc));
13454             return (1);
13455         }
13456 
13457         /*
13458          * Verify all functions are either MF or SF mode. If MF, make
13459          * sure that all non-hidden functions have a valid ovlan. If SF,
13460          * make sure that all non-hidden functions have an invalid ovlan.
13461          */
13462         FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13463             mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13464             ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13465             if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13466                 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13467                  ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13468                 BLOGE(sc, "mf_mode=SD function %d MF config "
13469                           "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13470                       i, mf_info->multi_vnics_mode, ovlan1);
13471                 return (1);
13472             }
13473         }
13474 
13475         /* Verify all funcs on the same port each have a different ovlan. */
13476         FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13477             mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13478             ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13479             /* iterate from the next function on the port to the max func */
13480             for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13481                 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13482                 ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13483                 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13484                     VALID_OVLAN(ovlan1) &&
13485                     !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13486                     VALID_OVLAN(ovlan2) &&
13487                     (ovlan1 == ovlan2)) {
13488                     BLOGE(sc, "mf_mode=SD functions %d and %d "
13489                               "have the same ovlan (%d)\n",
13490                           i, j, ovlan1);
13491                     return (1);
13492                 }
13493             }
13494         }
13495     } /* MULTI_FUNCTION_SD */
13496 
13497     return (0);
13498 }
13499 
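      /*
       * Determine the multi-function mode (switch-dependent,
       * switch-independent, AFEX or single function) from shmem and pull in
       * the per-vnic parameters, including the min/max bandwidth values used
       * for congestion management.
       */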
13500 static int
13501 bxe_get_mf_cfg_info(struct bxe_softc *sc)
13502 {
13503     struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13504     uint32_t val, mac_upper;
13505     uint8_t i, vnic;
13506 
13507     /* initialize mf_info defaults */
13508     mf_info->vnics_per_port   = 1;
13509     mf_info->multi_vnics_mode = FALSE;
13510     mf_info->path_has_ovlan   = FALSE;
13511     mf_info->mf_mode          = SINGLE_FUNCTION;
13512 
13513     if (!CHIP_IS_MF_CAP(sc)) {
13514         return (0);
13515     }
13516 
13517     if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13518         BLOGE(sc, "Invalid mf_cfg_base!\n");
13519         return (1);
13520     }
13521 
13522     /* get the MF mode (switch dependent / independent / single-function) */
13523 
13524     val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13525 
13526     switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13527     {
13528     case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13529 
13530         mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13531 
13532         /* check for legal upper mac bytes */
13533         if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13534             mf_info->mf_mode = MULTI_FUNCTION_SI;
13535         } else {
13536             BLOGE(sc, "Invalid config for Switch Independent mode\n");
13537         }
13538 
13539         break;
13540 
13541     case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13542     case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13543 
13544         /* get outer vlan configuration */
13545         val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13546 
13547         if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13548             FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13549             mf_info->mf_mode = MULTI_FUNCTION_SD;
13550         } else {
13551             BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13552         }
13553 
13554         break;
13555 
13556     case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13557 
13558         /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13559         return (0);
13560 
13561     case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13562 
13563         /*
13564          * Mark MF mode as NIV if MCP version includes NPAR-SD support
13565          * and the MAC address is valid.
13566          */
13567         mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13568 
13569         if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13570             (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13571             mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13572         } else {
13573             BLOGE(sc, "Invalid config for AFEX mode\n");
13574         }
13575 
13576         break;
13577 
13578     default:
13579 
13580         BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13581               (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13582 
13583         return (1);
13584     }
13585 
13586     /* set path mf_mode (which could be different than function mf_mode) */
13587     if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13588         mf_info->path_has_ovlan = TRUE;
13589     } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13590         /*
13591          * Decide on path multi vnics mode. If we're not in MF mode and we are
13592          * in 4-port mode, it is enough to check vnic-0 of the other port on
13593          * the same path.
13594          */
13595         if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13596             uint8_t other_port = !(PORT_ID(sc) & 1);
13597             uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13598 
13599             val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13600 
13601             mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13602         }
13603     }
13604 
13605     if (mf_info->mf_mode == SINGLE_FUNCTION) {
13606         /* invalid MF config */
13607         if (SC_VN(sc) >= 1) {
13608             BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13609             return (1);
13610         }
13611 
13612         return (0);
13613     }
13614 
13615     /* get the MF configuration */
13616     mf_info->mf_config[SC_VN(sc)] =
13617         MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13618 
13619     switch(mf_info->mf_mode)
13620     {
13621     case MULTI_FUNCTION_SD:
13622 
13623         bxe_get_shmem_mf_cfg_info_sd(sc);
13624         break;
13625 
13626     case MULTI_FUNCTION_SI:
13627 
13628         bxe_get_shmem_mf_cfg_info_si(sc);
13629         break;
13630 
13631     case MULTI_FUNCTION_AFEX:
13632 
13633         bxe_get_shmem_mf_cfg_info_niv(sc);
13634         break;
13635 
13636     default:
13637 
13638         BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13639               mf_info->mf_mode);
13640         return (1);
13641     }
13642 
13643     /* get the congestion management parameters */
13644 
13645     vnic = 0;
13646     FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13647         /* get min/max bw */
13648         val = MFCFG_RD(sc, func_mf_config[i].config);
13649         mf_info->min_bw[vnic] =
13650             ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13651         mf_info->max_bw[vnic] =
13652             ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13653         vnic++;
13654     }
13655 
13656     return (bxe_check_valid_mf_cfg(sc));
13657 }
13658 
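      /*
       * Read the per-port hardware and link configuration from shmem (lane
       * config, speed capabilities, PHY configuration), resolve the MF
       * configuration and extract the MAC address programmed for this
       * function.
       */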
13659 static int
13660 bxe_get_shmem_info(struct bxe_softc *sc)
13661 {
13662     int port;
13663     uint32_t mac_hi, mac_lo, val;
13664 
13665     port = SC_PORT(sc);
13666     mac_hi = mac_lo = 0;
13667 
13668     sc->link_params.sc   = sc;
13669     sc->link_params.port = port;
13670 
13671     /* get the hardware config info */
13672     sc->devinfo.hw_config =
13673         SHMEM_RD(sc, dev_info.shared_hw_config.config);
13674     sc->devinfo.hw_config2 =
13675         SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13676 
13677     sc->link_params.hw_led_mode =
13678         ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13679          SHARED_HW_CFG_LED_MODE_SHIFT);
13680 
13681     /* get the port feature config */
13682     sc->port.config =
13683         SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13684 
13685     /* get the link params */
13686     sc->link_params.speed_cap_mask[0] =
13687         SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13688     sc->link_params.speed_cap_mask[1] =
13689         SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13690 
13691     /* get the lane config */
13692     sc->link_params.lane_config =
13693         SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13694 
13695     /* get the link config */
13696     val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13697     sc->port.link_config[ELINK_INT_PHY] = val;
13698     sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13699     sc->port.link_config[ELINK_EXT_PHY1] =
13700         SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13701 
13702     /* get the override preemphasis flag and enable it or turn it off */
13703     val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13704     if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13705         sc->link_params.feature_config_flags |=
13706             ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13707     } else {
13708         sc->link_params.feature_config_flags &=
13709             ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13710     }
13711 
13712     /* get the initial value of the link params */
13713     sc->link_params.multi_phy_config =
13714         SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13715 
13716     /* get external phy info */
13717     sc->port.ext_phy_config =
13718         SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13719 
13720     /* get the multifunction configuration */
13721     bxe_get_mf_cfg_info(sc);
13722 
13723     /* get the mac address */
13724     if (IS_MF(sc)) {
13725         mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13726         mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13727     } else {
13728         mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13729         mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13730     }
13731 
13732     if ((mac_lo == 0) && (mac_hi == 0)) {
13733         *sc->mac_addr_str = 0;
13734         BLOGE(sc, "No Ethernet address programmed!\n");
13735     } else {
13736         sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13737         sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13738         sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13739         sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13740         sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13741         sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13742         snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13743                  "%02x:%02x:%02x:%02x:%02x:%02x",
13744                  sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13745                  sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13746                  sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13747         BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13748     }
13749 
13750     return (0);
13751 }
13752 
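      /*
       * Validate the loader tunables, replace out-of-range values with safe
       * defaults and copy the results into the softc, including the number
       * of queues to use for MSI/MSI-X.
       */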
13753 static void
13754 bxe_get_tunable_params(struct bxe_softc *sc)
13755 {
13756     /* sanity checks */
13757 
13758     if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13759         (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13760         (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13761         BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13762         bxe_interrupt_mode = INTR_MODE_MSIX;
13763     }
13764 
13765     if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13766         BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13767         bxe_queue_count = 0;
13768     }
13769 
13770     if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13771         if (bxe_max_rx_bufs == 0) {
13772             bxe_max_rx_bufs = RX_BD_USABLE;
13773         } else {
13774             BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13775             bxe_max_rx_bufs = 2048;
13776         }
13777     }
13778 
13779     if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13780         BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13781         bxe_hc_rx_ticks = 25;
13782     }
13783 
13784     if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13785         BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13786         bxe_hc_tx_ticks = 50;
13787     }
13788 
13789     if (bxe_max_aggregation_size == 0) {
13790         bxe_max_aggregation_size = TPA_AGG_SIZE;
13791     }
13792 
13793     if (bxe_max_aggregation_size > 0xffff) {
13794         BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13795               bxe_max_aggregation_size);
13796         bxe_max_aggregation_size = TPA_AGG_SIZE;
13797     }
13798 
13799     if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13800         BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13801         bxe_mrrs = -1;
13802     }
13803 
13804     if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13805         BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13806         bxe_autogreeen = 0;
13807     }
13808 
13809     if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13810         BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13811         bxe_udp_rss = 0;
13812     }
13813 
13814     /* pull in user settings */
13815 
13816     sc->interrupt_mode       = bxe_interrupt_mode;
13817     sc->max_rx_bufs          = bxe_max_rx_bufs;
13818     sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13819     sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13820     sc->max_aggregation_size = bxe_max_aggregation_size;
13821     sc->mrrs                 = bxe_mrrs;
13822     sc->autogreeen           = bxe_autogreeen;
13823     sc->udp_rss              = bxe_udp_rss;
13824 
13825     if (bxe_interrupt_mode == INTR_MODE_INTX) {
13826         sc->num_queues = 1;
13827     } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13828         sc->num_queues =
13829             min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13830                 MAX_RSS_CHAINS);
13831         if (sc->num_queues > mp_ncpus) {
13832             sc->num_queues = mp_ncpus;
13833         }
13834     }
13835 
13836     BLOGD(sc, DBG_LOAD,
13837           "User Config: "
13838           "debug=0x%lx "
13839           "interrupt_mode=%d "
13840           "queue_count=%d "
13841           "hc_rx_ticks=%d "
13842           "hc_tx_ticks=%d "
13843           "rx_budget=%d "
13844           "max_aggregation_size=%d "
13845           "mrrs=%d "
13846           "autogreeen=%d "
13847           "udp_rss=%d\n",
13848           bxe_debug,
13849           sc->interrupt_mode,
13850           sc->num_queues,
13851           sc->hc_rx_ticks,
13852           sc->hc_tx_ticks,
13853           bxe_rx_budget,
13854           sc->max_aggregation_size,
13855           sc->mrrs,
13856           sc->autogreeen,
13857           sc->udp_rss);
13858 }
13859 
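      /*
       * Map the PHY media type reported by the link code to an ifmedia word
       * in sc->media and return the corresponding port type (fibre, twinax,
       * TP, ...).
       */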
13860 static int
13861 bxe_media_detect(struct bxe_softc *sc)
13862 {
13863     int port_type;
13864     uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13865 
13866     switch (sc->link_params.phy[phy_idx].media_type) {
13867     case ELINK_ETH_PHY_SFPP_10G_FIBER:
13868     case ELINK_ETH_PHY_XFP_FIBER:
13869         BLOGI(sc, "Found 10Gb Fiber media.\n");
13870         sc->media = IFM_10G_SR;
13871         port_type = PORT_FIBRE;
13872         break;
13873     case ELINK_ETH_PHY_SFP_1G_FIBER:
13874         BLOGI(sc, "Found 1Gb Fiber media.\n");
13875         sc->media = IFM_1000_SX;
13876         port_type = PORT_FIBRE;
13877         break;
13878     case ELINK_ETH_PHY_KR:
13879     case ELINK_ETH_PHY_CX4:
13880         BLOGI(sc, "Found 10GBase-CX4 media.\n");
13881         sc->media = IFM_10G_CX4;
13882         port_type = PORT_FIBRE;
13883         break;
13884     case ELINK_ETH_PHY_DA_TWINAX:
13885         BLOGI(sc, "Found 10Gb Twinax media.\n");
13886         sc->media = IFM_10G_TWINAX;
13887         port_type = PORT_DA;
13888         break;
13889     case ELINK_ETH_PHY_BASE_T:
13890         if (sc->link_params.speed_cap_mask[0] &
13891             PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13892             BLOGI(sc, "Found 10GBase-T media.\n");
13893             sc->media = IFM_10G_T;
13894             port_type = PORT_TP;
13895         } else {
13896             BLOGI(sc, "Found 1000Base-T media.\n");
13897             sc->media = IFM_1000_T;
13898             port_type = PORT_TP;
13899         }
13900         break;
13901     case ELINK_ETH_PHY_NOT_PRESENT:
13902         BLOGI(sc, "Media not present.\n");
13903         sc->media = 0;
13904         port_type = PORT_OTHER;
13905         break;
13906     case ELINK_ETH_PHY_UNSPECIFIED:
13907     default:
13908         BLOGI(sc, "Unknown media!\n");
13909         sc->media = 0;
13910         port_type = PORT_OTHER;
13911         break;
13912     }
13913     return port_type;
13914 }
13915 
13916 #define GET_FIELD(value, fname)                     \
13917     (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13918 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13919 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
13920 
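      /*
       * Discover this function's IGU status block layout. In backward
       * compatible mode it is computed from the function/vnic id; otherwise
       * the IGU mapping CAM is scanned to find the default SB and the range
       * of non-default SBs.
       */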
13921 static int
13922 bxe_get_igu_cam_info(struct bxe_softc *sc)
13923 {
13924     int pfid = SC_FUNC(sc);
13925     int igu_sb_id;
13926     uint32_t val;
13927     uint8_t fid, igu_sb_cnt = 0;
13928 
13929     sc->igu_base_sb = 0xff;
13930 
13931     if (CHIP_INT_MODE_IS_BC(sc)) {
13932         int vn = SC_VN(sc);
13933         igu_sb_cnt = sc->igu_sb_cnt;
13934         sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13935                            FP_SB_MAX_E1x);
13936         sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13937                           (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13938         return (0);
13939     }
13940 
13941     /* IGU in normal mode - read CAM */
13942     for (igu_sb_id = 0;
13943          igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13944          igu_sb_id++) {
13945         val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13946         if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13947             continue;
13948         }
13949         fid = IGU_FID(val);
13950         if ((fid & IGU_FID_ENCODE_IS_PF)) {
13951             if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13952                 continue;
13953             }
13954             if (IGU_VEC(val) == 0) {
13955                 /* default status block */
13956                 sc->igu_dsb_id = igu_sb_id;
13957             } else {
13958                 if (sc->igu_base_sb == 0xff) {
13959                     sc->igu_base_sb = igu_sb_id;
13960                 }
13961                 igu_sb_cnt++;
13962             }
13963         }
13964     }
13965 
13966     /*
13967      * Due to new PF resource allocation by MFW T7.4 and above, it's possible
13968      * that the number of CAM entries will not equal the value advertised in
13969      * PCI. The driver should use the minimum of the two as the actual status
13970      * block count.
13971      */
13972     sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13973 
13974     if (igu_sb_cnt == 0) {
13975         BLOGE(sc, "CAM configuration error\n");
13976         return (-1);
13977     }
13978 
13979     return (0);
13980 }
13981 
13982 /*
13983  * Gather various information from the device config space, the device itself,
13984  * shmem, and the user input.
13985  */
13986 static int
13987 bxe_get_device_info(struct bxe_softc *sc)
13988 {
13989     uint32_t val;
13990     int rc;
13991 
13992     /* Get the data for the device */
13993     sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13994     sc->devinfo.device_id    = pci_get_device(sc->dev);
13995     sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13996     sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13997 
13998     /* get the chip revision (chip metal comes from pci config space) */
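    /*
     * The assembled chip_id layout (decoded again by the debug print below):
     *   bits [31:16] chip number, bits [15:12] chip revision,
     *   bits [11:4]  chip metal,  bits [3:0]   bond id.
     */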
13999     sc->devinfo.chip_id     =
14000     sc->link_params.chip_id =
14001         (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
14002          ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
14003          (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
14004          ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
14005 
14006     /* force 57811 according to MISC register */
14007     if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
14008         if (CHIP_IS_57810(sc)) {
14009             sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
14010                                    (sc->devinfo.chip_id & 0x0000ffff));
14011         } else if (CHIP_IS_57810_MF(sc)) {
14012             sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
14013                                    (sc->devinfo.chip_id & 0x0000ffff));
14014         }
14015         sc->devinfo.chip_id |= 0x1;
14016     }
14017 
14018     BLOGD(sc, DBG_LOAD,
14019           "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
14020           sc->devinfo.chip_id,
14021           ((sc->devinfo.chip_id >> 16) & 0xffff),
14022           ((sc->devinfo.chip_id >> 12) & 0xf),
14023           ((sc->devinfo.chip_id >>  4) & 0xff),
14024           ((sc->devinfo.chip_id >>  0) & 0xf));
14025 
14026     val = (REG_RD(sc, 0x2874) & 0x55);
14027     if ((sc->devinfo.chip_id & 0x1) ||
14028         (CHIP_IS_E1(sc) && val) ||
14029         (CHIP_IS_E1H(sc) && (val == 0x55))) {
14030         sc->flags |= BXE_ONE_PORT_FLAG;
14031         BLOGD(sc, DBG_LOAD, "single port device\n");
14032     }
14033 
14034     /* set the doorbell size */
14035     sc->doorbell_size = (1 << BXE_DB_SHIFT);
14036 
14037     /* determine whether the device is in 2 port or 4 port mode */
14038     sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
14039     if (CHIP_IS_E2E3(sc)) {
14040         /*
14041          * Read port4mode_en_ovwr[0]:
14042          *   If 1, four port mode is in port4mode_en_ovwr[1].
14043          *   If 0, four port mode is in port4mode_en[0].
14044          */
14045         val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
14046         if (val & 1) {
14047             val = ((val >> 1) & 1);
14048         } else {
14049             val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
14050         }
14051 
14052         sc->devinfo.chip_port_mode =
14053             (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
14054 
14055         BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
14056     }
14057 
14058     /* get the function and path info for the device */
14059     bxe_get_function_num(sc);
14060 
14061     /* get the shared memory base address */
14062     sc->devinfo.shmem_base     =
14063     sc->link_params.shmem_base =
14064         REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
14065     sc->devinfo.shmem2_base =
14066         REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
14067                                   MISC_REG_GENERIC_CR_0));
14068 
14069     BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
14070           sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
14071 
14072     if (!sc->devinfo.shmem_base) {
14073         /* this should ONLY prevent upcoming shmem reads */
14074         BLOGI(sc, "MCP not active\n");
14075         sc->flags |= BXE_NO_MCP_FLAG;
14076         return (0);
14077     }
14078 
14079     /* make sure the shared memory contents are valid */
14080     val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
14081     if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
14082         (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
14083         BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
14084         return (0);
14085     }
14086     BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
14087 
14088     /* get the bootcode version */
14089     sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
14090     snprintf(sc->devinfo.bc_ver_str,
14091              sizeof(sc->devinfo.bc_ver_str),
14092              "%d.%d.%d",
14093              ((sc->devinfo.bc_ver >> 24) & 0xff),
14094              ((sc->devinfo.bc_ver >> 16) & 0xff),
14095              ((sc->devinfo.bc_ver >>  8) & 0xff));
14096     BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
14097 
14098     /* get the bootcode shmem address */
14099     sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
14100     BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
14101 
14102     /* clean indirect addresses as they're not used */
14103     pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
14104     if (IS_PF(sc)) {
14105         REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
14106         REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
14107         REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
14108         REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
14109         if (CHIP_IS_E1x(sc)) {
14110             REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
14111             REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
14112             REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
14113             REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
14114         }
14115 
14116         /*
14117          * Enable internal target-read (in case we are probed after PF
14118          * FLR). Must be done prior to any BAR read access. Only for
14119          * 57712 and up
14120          */
14121         if (!CHIP_IS_E1x(sc)) {
14122             REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
14123         }
14124     }
14125 
14126     /* get the nvram size */
14127     val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
14128     sc->devinfo.flash_size =
14129         (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
14130     BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
14131 
14132     /* get PCI capabilites */
14133     bxe_probe_pci_caps(sc);
14134 
14135     bxe_set_power_state(sc, PCI_PM_D0);
14136 
14137     /* get various configuration parameters from shmem */
14138     bxe_get_shmem_info(sc);
14139 
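    /*
     * Size the count of (non-default) IGU status blocks from the MSI-X
     * capability: the table-size field of the MSI-X Message Control register
     * is used directly; without MSI-X only a single status block is assumed.
     */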
14140     if (sc->devinfo.pcie_msix_cap_reg != 0) {
14141         val = pci_read_config(sc->dev,
14142                               (sc->devinfo.pcie_msix_cap_reg +
14143                                PCIR_MSIX_CTRL),
14144                               2);
14145         sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
14146     } else {
14147         sc->igu_sb_cnt = 1;
14148     }
14149 
14150     sc->igu_base_addr = BAR_IGU_INTMEM;
14151 
14152     /* initialize IGU parameters */
14153     if (CHIP_IS_E1x(sc)) {
14154         sc->devinfo.int_block = INT_BLOCK_HC;
14155         sc->igu_dsb_id = DEF_SB_IGU_ID;
14156         sc->igu_base_sb = 0;
14157     } else {
14158         sc->devinfo.int_block = INT_BLOCK_IGU;
14159 
14160         /* do not allow device reset during IGU info preocessing */
14161         bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14162 
14163         val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
14164 
14165         if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14166             int tout = 5000;
14167 
14168             BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
14169 
14170             val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
14171             REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
14172             REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
14173 
14174             while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
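            /* poll for the memories reset to complete (up to 5000 x 1ms, ~5 seconds) */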
14175                 tout--;
14176                 DELAY(1000);
14177             }
14178 
14179             if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14180                 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
14181                 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14182                 return (-1);
14183             }
14184         }
14185 
14186         if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14187             BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
14188             sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
14189         } else {
14190             BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
14191         }
14192 
14193         rc = bxe_get_igu_cam_info(sc);
14194 
14195         bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14196 
14197         if (rc) {
14198             return (rc);
14199         }
14200     }
14201 
14202     /*
14203      * Get base FW non-default (fast path) status block ID. This value is
14204      * used to initialize the fw_sb_id saved on the fp/queue structure to
14205      * determine the id used by the FW.
14206      */
14207     if (CHIP_IS_E1x(sc)) {
14208         sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
14209     } else {
14210         /*
14211          * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
14212          * the same queue are indicated on the same IGU SB). So we prefer
14213          * FW and IGU SBs to be the same value.
14214          */
14215         sc->base_fw_ndsb = sc->igu_base_sb;
14216     }
14217 
14218     BLOGD(sc, DBG_LOAD,
14219           "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
14220           sc->igu_dsb_id, sc->igu_base_sb,
14221           sc->igu_sb_cnt, sc->base_fw_ndsb);
14222 
14223     elink_phy_probe(&sc->link_params);
14224 
14225     return (0);
14226 }
14227 
14228 static void
14229 bxe_link_settings_supported(struct bxe_softc *sc,
14230                             uint32_t         switch_cfg)
14231 {
14232     uint32_t cfg_size = 0;
14233     uint32_t idx;
14234     uint8_t port = SC_PORT(sc);
14235 
14236     /* aggregation of supported attributes of all external phys */
14237     sc->port.supported[0] = 0;
14238     sc->port.supported[1] = 0;
14239 
14240     switch (sc->link_params.num_phys) {
14241     case 1:
14242         sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
14243         cfg_size = 1;
14244         break;
14245     case 2:
14246         sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
14247         cfg_size = 1;
14248         break;
14249     case 3:
14250         if (sc->link_params.multi_phy_config &
14251             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14252             sc->port.supported[1] =
14253                 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14254             sc->port.supported[0] =
14255                 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14256         } else {
14257             sc->port.supported[0] =
14258                 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14259             sc->port.supported[1] =
14260                 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14261         }
14262         cfg_size = 2;
14263         break;
14264     }
14265 
14266     if (!(sc->port.supported[0] || sc->port.supported[1])) {
14267         BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14268               SHMEM_RD(sc,
14269                        dev_info.port_hw_config[port].external_phy_config),
14270               SHMEM_RD(sc,
14271                        dev_info.port_hw_config[port].external_phy_config2));
14272         return;
14273     }
14274 
14275     if (CHIP_IS_E3(sc))
14276         sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14277     else {
14278         switch (switch_cfg) {
14279         case ELINK_SWITCH_CFG_1G:
14280             sc->port.phy_addr =
14281                 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14282             break;
14283         case ELINK_SWITCH_CFG_10G:
14284             sc->port.phy_addr =
14285                 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14286             break;
14287         default:
14288             BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14289                   sc->port.link_config[0]);
14290             return;
14291         }
14292     }
14293 
14294     BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14295 
14296     /* mask what we support according to speed_cap_mask per configuration */
14297     for (idx = 0; idx < cfg_size; idx++) {
14298         if (!(sc->link_params.speed_cap_mask[idx] &
14299               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14300             sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14301         }
14302 
14303         if (!(sc->link_params.speed_cap_mask[idx] &
14304               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14305             sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14306         }
14307 
14308         if (!(sc->link_params.speed_cap_mask[idx] &
14309               PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14310             sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14311         }
14312 
14313         if (!(sc->link_params.speed_cap_mask[idx] &
14314               PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14315             sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14316         }
14317 
14318         if (!(sc->link_params.speed_cap_mask[idx] &
14319               PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14320             sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14321         }
14322 
14323         if (!(sc->link_params.speed_cap_mask[idx] &
14324               PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14325             sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14326         }
14327 
14328         if (!(sc->link_params.speed_cap_mask[idx] &
14329               PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14330             sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14331         }
14332 
14333         if (!(sc->link_params.speed_cap_mask[idx] &
14334               PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14335             sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14336         }
14337     }
14338 
14339     BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14340           sc->port.supported[0], sc->port.supported[1]);
14341     ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
14342                    sc->port.supported[0], sc->port.supported[1]);
14343 }
14344 
14345 static void
14346 bxe_link_settings_requested(struct bxe_softc *sc)
14347 {
14348     uint32_t link_config;
14349     uint32_t idx;
14350     uint32_t cfg_size = 0;
14351 
14352     sc->port.advertising[0] = 0;
14353     sc->port.advertising[1] = 0;
14354 
14355     switch (sc->link_params.num_phys) {
14356     case 1:
14357     case 2:
14358         cfg_size = 1;
14359         break;
14360     case 3:
14361         cfg_size = 2;
14362         break;
14363     }
14364 
14365     for (idx = 0; idx < cfg_size; idx++) {
14366         sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14367         link_config = sc->port.link_config[idx];
14368 
14369         switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14370         case PORT_FEATURE_LINK_SPEED_AUTO:
14371             if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14372                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14373                 sc->port.advertising[idx] |= sc->port.supported[idx];
14374                 if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14375                     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14376                     sc->port.advertising[idx] |=
14377                         (ELINK_SUPPORTED_100baseT_Half |
14378                          ELINK_SUPPORTED_100baseT_Full);
14379             } else {
14380                 /* force 10G, no AN */
14381                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14382                 sc->port.advertising[idx] |=
14383                     (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14384                 continue;
14385             }
14386             break;
14387 
14388         case PORT_FEATURE_LINK_SPEED_10M_FULL:
14389             if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14390                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14391                 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14392                                               ADVERTISED_TP);
14393             } else {
14394                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14395                           "speed_cap_mask=0x%08x\n",
14396                       link_config, sc->link_params.speed_cap_mask[idx]);
14397                 return;
14398             }
14399             break;
14400 
14401         case PORT_FEATURE_LINK_SPEED_10M_HALF:
14402             if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14403                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14404                 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14405                 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14406                                               ADVERTISED_TP);
14407                 ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
14408                                sc->link_params.req_duplex[idx]);
14409             } else {
14410                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14411                           "speed_cap_mask=0x%08x\n",
14412                       link_config, sc->link_params.speed_cap_mask[idx]);
14413                 return;
14414             }
14415             break;
14416 
14417         case PORT_FEATURE_LINK_SPEED_100M_FULL:
14418             if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14419                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14420                 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14421                                               ADVERTISED_TP);
14422             } else {
14423                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14424                           "speed_cap_mask=0x%08x\n",
14425                       link_config, sc->link_params.speed_cap_mask[idx]);
14426                 return;
14427             }
14428             break;
14429 
14430         case PORT_FEATURE_LINK_SPEED_100M_HALF:
14431             if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14432                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14433                 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14434                 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14435                                               ADVERTISED_TP);
14436             } else {
14437                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14438                           "speed_cap_mask=0x%08x\n",
14439                       link_config, sc->link_params.speed_cap_mask[idx]);
14440                 return;
14441             }
14442             break;
14443 
14444         case PORT_FEATURE_LINK_SPEED_1G:
14445             if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14446                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14447                 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14448                                               ADVERTISED_TP);
14449             } else {
14450                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14451                           "speed_cap_mask=0x%08x\n",
14452                       link_config, sc->link_params.speed_cap_mask[idx]);
14453                 return;
14454             }
14455             break;
14456 
14457         case PORT_FEATURE_LINK_SPEED_2_5G:
14458             if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14459                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14460                 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14461                                               ADVERTISED_TP);
14462             } else {
14463                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14464                           "speed_cap_mask=0x%08x\n",
14465                       link_config, sc->link_params.speed_cap_mask[idx]);
14466                 return;
14467             }
14468             break;
14469 
14470         case PORT_FEATURE_LINK_SPEED_10G_CX4:
14471             if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14472                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14473                 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14474                                               ADVERTISED_FIBRE);
14475             } else {
14476                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14477                           "speed_cap_mask=0x%08x\n",
14478                       link_config, sc->link_params.speed_cap_mask[idx]);
14479                 return;
14480             }
14481             break;
14482 
14483         case PORT_FEATURE_LINK_SPEED_20G:
14484             sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14485             break;
14486 
14487         default:
14488             BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14489                       "speed_cap_mask=0x%08x\n",
14490                   link_config, sc->link_params.speed_cap_mask[idx]);
14491             sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14492             sc->port.advertising[idx] = sc->port.supported[idx];
14493             break;
14494         }
14495 
14496         sc->link_params.req_flow_ctrl[idx] =
14497             (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14498 
14499         if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14500             if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14501                 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14502             } else {
14503                 bxe_set_requested_fc(sc);
14504             }
14505         }
14506 
14507         BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14508                             "req_flow_ctrl=0x%x advertising=0x%x\n",
14509               sc->link_params.req_line_speed[idx],
14510               sc->link_params.req_duplex[idx],
14511               sc->link_params.req_flow_ctrl[idx],
14512               sc->port.advertising[idx]);
14513         ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
14514                            "advertising=0x%x\n",
14515                        sc->link_params.req_line_speed[idx],
14516                        sc->link_params.req_duplex[idx],
14517                        sc->port.advertising[idx]);
14518     }
14519 }
14520 
14521 static void
14522 bxe_get_phy_info(struct bxe_softc *sc)
14523 {
14524     uint8_t port = SC_PORT(sc);
14525     uint32_t config = sc->port.config;
14526     uint32_t eee_mode;
14527 
14528     /* shmem data already read in bxe_get_shmem_info() */
14529 
14530     ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14531                         "link_config0=0x%08x\n",
14532                sc->link_params.lane_config,
14533                sc->link_params.speed_cap_mask[0],
14534                sc->port.link_config[0]);
14535 
14536 
14537     bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14538     bxe_link_settings_requested(sc);
14539 
14540     if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14541         sc->link_params.feature_config_flags |=
14542             ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14543     } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14544         sc->link_params.feature_config_flags &=
14545             ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14546     } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14547         sc->link_params.feature_config_flags |=
14548             ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14549     }
14550 
14551     /* configure link feature according to nvram value */
14552     eee_mode =
14553         (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14554           PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14555          PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14556     if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14557         sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14558                                     ELINK_EEE_MODE_ENABLE_LPI |
14559                                     ELINK_EEE_MODE_OUTPUT_TIME);
14560     } else {
14561         sc->link_params.eee_mode = 0;
14562     }
14563 
14564     /* get the media type */
14565     bxe_media_detect(sc);
14566     ELINK_DEBUG_P1(sc, "detected media type 0x%x\n", sc->media);
14567 }
14568 
14569 static void
14570 bxe_get_params(struct bxe_softc *sc)
14571 {
14572     /* get user tunable params */
14573     bxe_get_tunable_params(sc);
14574 
14575     /* select the RX and TX ring sizes */
14576     sc->tx_ring_size = TX_BD_USABLE;
14577     sc->rx_ring_size = RX_BD_USABLE;
14578 
14579     /* XXX disable WoL */
14580     sc->wol = 0;
14581 }
14582 
14583 static void
14584 bxe_set_modes_bitmap(struct bxe_softc *sc)
14585 {
14586     uint32_t flags = 0;
14587 
14588     if (CHIP_REV_IS_FPGA(sc)) {
14589         SET_FLAGS(flags, MODE_FPGA);
14590     } else if (CHIP_REV_IS_EMUL(sc)) {
14591         SET_FLAGS(flags, MODE_EMUL);
14592     } else {
14593         SET_FLAGS(flags, MODE_ASIC);
14594     }
14595 
14596     if (CHIP_IS_MODE_4_PORT(sc)) {
14597         SET_FLAGS(flags, MODE_PORT4);
14598     } else {
14599         SET_FLAGS(flags, MODE_PORT2);
14600     }
14601 
14602     if (CHIP_IS_E2(sc)) {
14603         SET_FLAGS(flags, MODE_E2);
14604     } else if (CHIP_IS_E3(sc)) {
14605         SET_FLAGS(flags, MODE_E3);
14606         if (CHIP_REV(sc) == CHIP_REV_Ax) {
14607             SET_FLAGS(flags, MODE_E3_A0);
14608         } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14609             SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14610         }
14611     }
14612 
14613     if (IS_MF(sc)) {
14614         SET_FLAGS(flags, MODE_MF);
14615         switch (sc->devinfo.mf_info.mf_mode) {
14616         case MULTI_FUNCTION_SD:
14617             SET_FLAGS(flags, MODE_MF_SD);
14618             break;
14619         case MULTI_FUNCTION_SI:
14620             SET_FLAGS(flags, MODE_MF_SI);
14621             break;
14622         case MULTI_FUNCTION_AFEX:
14623             SET_FLAGS(flags, MODE_MF_AFEX);
14624             break;
14625         }
14626     } else {
14627         SET_FLAGS(flags, MODE_SF);
14628     }
14629 
14630 #if defined(__LITTLE_ENDIAN)
14631     SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14632 #else /* __BIG_ENDIAN */
14633     SET_FLAGS(flags, MODE_BIG_ENDIAN);
14634 #endif
14635 
14636     INIT_MODE_FLAGS(sc) = flags;
14637 }
14638 
14639 static int
14640 bxe_alloc_hsi_mem(struct bxe_softc *sc)
14641 {
14642     struct bxe_fastpath *fp;
14643     bus_addr_t busaddr;
14644     int max_agg_queues;
14645     int max_segments;
14646     bus_size_t max_size;
14647     bus_size_t max_seg_size;
14648     char buf[32];
14649     int rc;
14650     int i, j;
14651 
14652     /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */
14653 
14654     /* allocate the parent bus DMA tag */
14655     rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14656                             1,                        /* alignment */
14657                             0,                        /* boundary limit */
14658                             BUS_SPACE_MAXADDR,        /* restricted low */
14659                             BUS_SPACE_MAXADDR,        /* restricted hi */
14660                             NULL,                     /* addr filter() */
14661                             NULL,                     /* addr filter() arg */
14662                             BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14663                             BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14664                             BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14665                             0,                        /* flags */
14666                             NULL,                     /* lock() */
14667                             NULL,                     /* lock() arg */
14668                             &sc->parent_dma_tag);     /* returned dma tag */
14669     if (rc != 0) {
14670         BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14671         return (1);
14672     }
14673 
14674     /************************/
14675     /* DEFAULT STATUS BLOCK */
14676     /************************/
14677 
14678     if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14679                       &sc->def_sb_dma, "default status block") != 0) {
14680         /* XXX */
14681         bus_dma_tag_destroy(sc->parent_dma_tag);
14682         return (1);
14683     }
14684 
14685     sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14686 
14687     /***************/
14688     /* EVENT QUEUE */
14689     /***************/
14690 
14691     if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14692                       &sc->eq_dma, "event queue") != 0) {
14693         /* XXX */
14694         bxe_dma_free(sc, &sc->def_sb_dma);
14695         sc->def_sb = NULL;
14696         bus_dma_tag_destroy(sc->parent_dma_tag);
14697         return (1);
14698     }
14699 
14700     sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
14701 
14702     /*************/
14703     /* SLOW PATH */
14704     /*************/
14705 
14706     if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14707                       &sc->sp_dma, "slow path") != 0) {
14708         /* XXX */
14709         bxe_dma_free(sc, &sc->eq_dma);
14710         sc->eq = NULL;
14711         bxe_dma_free(sc, &sc->def_sb_dma);
14712         sc->def_sb = NULL;
14713         bus_dma_tag_destroy(sc->parent_dma_tag);
14714         return (1);
14715     }
14716 
14717     sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14718 
14719     /*******************/
14720     /* SLOW PATH QUEUE */
14721     /*******************/
14722 
14723     if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14724                       &sc->spq_dma, "slow path queue") != 0) {
14725         /* XXX */
14726         bxe_dma_free(sc, &sc->sp_dma);
14727         sc->sp = NULL;
14728         bxe_dma_free(sc, &sc->eq_dma);
14729         sc->eq = NULL;
14730         bxe_dma_free(sc, &sc->def_sb_dma);
14731         sc->def_sb = NULL;
14732         bus_dma_tag_destroy(sc->parent_dma_tag);
14733         return (1);
14734     }
14735 
14736     sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14737 
14738     /***************************/
14739     /* FW DECOMPRESSION BUFFER */
14740     /***************************/
14741 
14742     if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14743                       "fw decompression buffer") != 0) {
14744         /* XXX */
14745         bxe_dma_free(sc, &sc->spq_dma);
14746         sc->spq = NULL;
14747         bxe_dma_free(sc, &sc->sp_dma);
14748         sc->sp = NULL;
14749         bxe_dma_free(sc, &sc->eq_dma);
14750         sc->eq = NULL;
14751         bxe_dma_free(sc, &sc->def_sb_dma);
14752         sc->def_sb = NULL;
14753         bus_dma_tag_destroy(sc->parent_dma_tag);
14754         return (1);
14755     }
14756 
14757     sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14758 
14759     if ((sc->gz_strm =
14760          malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14761         /* XXX */
14762         bxe_dma_free(sc, &sc->gz_buf_dma);
14763         sc->gz_buf = NULL;
14764         bxe_dma_free(sc, &sc->spq_dma);
14765         sc->spq = NULL;
14766         bxe_dma_free(sc, &sc->sp_dma);
14767         sc->sp = NULL;
14768         bxe_dma_free(sc, &sc->eq_dma);
14769         sc->eq = NULL;
14770         bxe_dma_free(sc, &sc->def_sb_dma);
14771         sc->def_sb = NULL;
14772         bus_dma_tag_destroy(sc->parent_dma_tag);
14773         return (1);
14774     }
14775 
14776     /*************/
14777     /* FASTPATHS */
14778     /*************/
14779 
14780     /* allocate DMA memory for each fastpath structure */
14781     for (i = 0; i < sc->num_queues; i++) {
14782         fp = &sc->fp[i];
14783         fp->sc    = sc;
14784         fp->index = i;
14785 
14786         /*******************/
14787         /* FP STATUS BLOCK */
14788         /*******************/
14789 
14790         snprintf(buf, sizeof(buf), "fp %d status block", i);
14791         if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14792                           &fp->sb_dma, buf) != 0) {
14793             /* XXX unwind and free previous fastpath allocations */
14794             BLOGE(sc, "Failed to alloc %s\n", buf);
14795             return (1);
14796         } else {
14797             if (CHIP_IS_E2E3(sc)) {
14798                 fp->status_block.e2_sb =
14799                     (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14800             } else {
14801                 fp->status_block.e1x_sb =
14802                     (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14803             }
14804         }
14805 
14806         /******************/
14807         /* FP TX BD CHAIN */
14808         /******************/
14809 
14810         snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14811         if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14812                           &fp->tx_dma, buf) != 0) {
14813             /* XXX unwind and free previous fastpath allocations */
14814             BLOGE(sc, "Failed to alloc %s\n", buf);
14815             return (1);
14816         } else {
14817             fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14818         }
14819 
14820         /* link together the tx bd chain pages */
14821         for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14822             /* index into the tx bd chain array to last entry per page */
14823             struct eth_tx_next_bd *tx_next_bd =
14824                 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14825             /* point to the next page and wrap from last page */
14826             busaddr = (fp->tx_dma.paddr +
14827                        (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14828             tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14829             tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14830         }
14831 
14832         /******************/
14833         /* FP RX BD CHAIN */
14834         /******************/
14835 
14836         snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14837         if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14838                           &fp->rx_dma, buf) != 0) {
14839             /* XXX unwind and free previous fastpath allocations */
14840             BLOGE(sc, "Failed to alloc %s\n", buf);
14841             return (1);
14842         } else {
14843             fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14844         }
14845 
14846         /* link together the rx bd chain pages */
14847         for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14848             /* index into the rx bd chain array to last entry per page */
14849             struct eth_rx_bd *rx_bd =
14850                 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14851             /* point to the next page and wrap from last page */
14852             busaddr = (fp->rx_dma.paddr +
14853                        (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14854             rx_bd->addr_hi = htole32(U64_HI(busaddr));
14855             rx_bd->addr_lo = htole32(U64_LO(busaddr));
14856         }
14857 
14858         /*******************/
14859         /* FP RX RCQ CHAIN */
14860         /*******************/
14861 
14862         snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14863         if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14864                           &fp->rcq_dma, buf) != 0) {
14865             /* XXX unwind and free previous fastpath allocations */
14866             BLOGE(sc, "Failed to alloc %s\n", buf);
14867             return (1);
14868         } else {
14869             fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14870         }
14871 
14872         /* link together the rcq chain pages */
14873         for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14874             /* index into the rcq chain array to last entry per page */
14875             struct eth_rx_cqe_next_page *rx_cqe_next =
14876                 (struct eth_rx_cqe_next_page *)
14877                 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14878             /* point to the next page and wrap from last page */
14879             busaddr = (fp->rcq_dma.paddr +
14880                        (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14881             rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14882             rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14883         }
14884 
14885         /*******************/
14886         /* FP RX SGE CHAIN */
14887         /*******************/
14888 
14889         snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14890         if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14891                           &fp->rx_sge_dma, buf) != 0) {
14892             /* XXX unwind and free previous fastpath allocations */
14893             BLOGE(sc, "Failed to alloc %s\n", buf);
14894             return (1);
14895         } else {
14896             fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14897         }
14898 
14899         /* link together the sge chain pages */
14900         for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14901             /* index into the rcq chain array to last entry per page */
14902             struct eth_rx_sge *rx_sge =
14903                 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14904             /* point to the next page and wrap from last page */
14905             busaddr = (fp->rx_sge_dma.paddr +
14906                        (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14907             rx_sge->addr_hi = htole32(U64_HI(busaddr));
14908             rx_sge->addr_lo = htole32(U64_LO(busaddr));
14909         }
14910 
14911         /***********************/
14912         /* FP TX MBUF DMA MAPS */
14913         /***********************/
14914 
14915         /* set required sizes before mapping to conserve resources */
14916         if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14917             max_size     = BXE_TSO_MAX_SIZE;
14918             max_segments = BXE_TSO_MAX_SEGMENTS;
14919             max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14920         } else {
14921             max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14922             max_segments = BXE_MAX_SEGMENTS;
14923             max_seg_size = MCLBYTES;
14924         }
14925 
14926         /* create a dma tag for the tx mbufs */
14927         rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14928                                 1,                  /* alignment */
14929                                 0,                  /* boundary limit */
14930                                 BUS_SPACE_MAXADDR,  /* restricted low */
14931                                 BUS_SPACE_MAXADDR,  /* restricted hi */
14932                                 NULL,               /* addr filter() */
14933                                 NULL,               /* addr filter() arg */
14934                                 max_size,           /* max map size */
14935                                 max_segments,       /* num discontinuous */
14936                                 max_seg_size,       /* max seg size */
14937                                 0,                  /* flags */
14938                                 NULL,               /* lock() */
14939                                 NULL,               /* lock() arg */
14940                                 &fp->tx_mbuf_tag);  /* returned dma tag */
14941         if (rc != 0) {
14942             /* XXX unwind and free previous fastpath allocations */
14943             BLOGE(sc, "Failed to create dma tag for "
14944                       "'fp %d tx mbufs' (%d)\n", i, rc);
14945             return (1);
14946         }
14947 
14948         /* create dma maps for each of the tx mbuf clusters */
14949         for (j = 0; j < TX_BD_TOTAL; j++) {
14950             if (bus_dmamap_create(fp->tx_mbuf_tag,
14951                                   BUS_DMA_NOWAIT,
14952                                   &fp->tx_mbuf_chain[j].m_map)) {
14953                 /* XXX unwind and free previous fastpath allocations */
14954                 BLOGE(sc, "Failed to create dma map for "
14955                           "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14956                 return (1);
14957             }
14958         }
14959 
14960         /***********************/
14961         /* FP RX MBUF DMA MAPS */
14962         /***********************/
14963 
14964         /* create a dma tag for the rx mbufs */
14965         rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14966                                 1,                  /* alignment */
14967                                 0,                  /* boundary limit */
14968                                 BUS_SPACE_MAXADDR,  /* restricted low */
14969                                 BUS_SPACE_MAXADDR,  /* restricted hi */
14970                                 NULL,               /* addr filter() */
14971                                 NULL,               /* addr filter() arg */
14972                                 MJUM9BYTES,         /* max map size */
14973                                 1,                  /* num discontinuous */
14974                                 MJUM9BYTES,         /* max seg size */
14975                                 0,                  /* flags */
14976                                 NULL,               /* lock() */
14977                                 NULL,               /* lock() arg */
14978                                 &fp->rx_mbuf_tag);  /* returned dma tag */
14979         if (rc != 0) {
14980             /* XXX unwind and free previous fastpath allocations */
14981             BLOGE(sc, "Failed to create dma tag for "
14982                       "'fp %d rx mbufs' (%d)\n", i, rc);
14983             return (1);
14984         }
14985 
14986         /* create dma maps for each of the rx mbuf clusters */
14987         for (j = 0; j < RX_BD_TOTAL; j++) {
14988             if (bus_dmamap_create(fp->rx_mbuf_tag,
14989                                   BUS_DMA_NOWAIT,
14990                                   &fp->rx_mbuf_chain[j].m_map)) {
14991                 /* XXX unwind and free previous fastpath allocations */
14992                 BLOGE(sc, "Failed to create dma map for "
14993                           "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14994                 return (1);
14995             }
14996         }
14997 
14998         /* create dma map for the spare rx mbuf cluster */
14999         if (bus_dmamap_create(fp->rx_mbuf_tag,
15000                               BUS_DMA_NOWAIT,
15001                               &fp->rx_mbuf_spare_map)) {
15002             /* XXX unwind and free previous fastpath allocations */
15003             BLOGE(sc, "Failed to create dma map for "
15004                       "'fp %d spare rx mbuf' (%d)\n", i, rc);
15005             return (1);
15006         }
15007 
15008         /***************************/
15009         /* FP RX SGE MBUF DMA MAPS */
15010         /***************************/
15011 
15012         /* create a dma tag for the rx sge mbufs */
15013         rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15014                                 1,                  /* alignment */
15015                                 0,                  /* boundary limit */
15016                                 BUS_SPACE_MAXADDR,  /* restricted low */
15017                                 BUS_SPACE_MAXADDR,  /* restricted hi */
15018                                 NULL,               /* addr filter() */
15019                                 NULL,               /* addr filter() arg */
15020                                 BCM_PAGE_SIZE,      /* max map size */
15021                                 1,                  /* num discontinuous */
15022                                 BCM_PAGE_SIZE,      /* max seg size */
15023                                 0,                  /* flags */
15024                                 NULL,               /* lock() */
15025                                 NULL,               /* lock() arg */
15026                                 &fp->rx_sge_mbuf_tag); /* returned dma tag */
15027         if (rc != 0) {
15028             /* XXX unwind and free previous fastpath allocations */
15029             BLOGE(sc, "Failed to create dma tag for "
15030                       "'fp %d rx sge mbufs' (%d)\n", i, rc);
15031             return (1);
15032         }
15033 
15034         /* create dma maps for the rx sge mbuf clusters */
15035         for (j = 0; j < RX_SGE_TOTAL; j++) {
15036             if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15037                                   BUS_DMA_NOWAIT,
15038                                   &fp->rx_sge_mbuf_chain[j].m_map)) {
15039                 /* XXX unwind and free previous fastpath allocations */
15040                 BLOGE(sc, "Failed to create dma map for "
15041                           "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
15042                 return (1);
15043             }
15044         }
15045 
15046         /* create dma map for the spare rx sge mbuf cluster */
15047         if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15048                               BUS_DMA_NOWAIT,
15049                               &fp->rx_sge_mbuf_spare_map)) {
15050             /* XXX unwind and free previous fastpath allocations */
15051             BLOGE(sc, "Failed to create dma map for "
15052                       "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
15053             return (1);
15054         }
15055 
15056         /***************************/
15057         /* FP RX TPA MBUF DMA MAPS */
15058         /***************************/
15059 
15060         /* create dma maps for the rx tpa mbuf clusters */
15061         max_agg_queues = MAX_AGG_QS(sc);
15062 
15063         for (j = 0; j < max_agg_queues; j++) {
15064             if (bus_dmamap_create(fp->rx_mbuf_tag,
15065                                   BUS_DMA_NOWAIT,
15066                                   &fp->rx_tpa_info[j].bd.m_map)) {
15067                 /* XXX unwind and free previous fastpath allocations */
15068                 BLOGE(sc, "Failed to create dma map for "
15069                           "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
15070                 return (1);
15071             }
15072         }
15073 
15074         /* create dma map for the spare rx tpa mbuf cluster */
15075         if (bus_dmamap_create(fp->rx_mbuf_tag,
15076                               BUS_DMA_NOWAIT,
15077                               &fp->rx_tpa_info_mbuf_spare_map)) {
15078             /* XXX unwind and free previous fastpath allocations */
15079             BLOGE(sc, "Failed to create dma map for "
15080                       "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
15081             return (1);
15082         }
15083 
15084         bxe_init_sge_ring_bit_mask(fp);
15085     }
15086 
15087     return (0);
15088 }
15089 
15090 static void
15091 bxe_free_hsi_mem(struct bxe_softc *sc)
15092 {
15093     struct bxe_fastpath *fp;
15094     int max_agg_queues;
15095     int i, j;
15096 
15097     if (sc->parent_dma_tag == NULL) {
15098         return; /* assume nothing was allocated */
15099     }
15100 
15101     for (i = 0; i < sc->num_queues; i++) {
15102         fp = &sc->fp[i];
15103 
15104         /*******************/
15105         /* FP STATUS BLOCK */
15106         /*******************/
15107 
15108         bxe_dma_free(sc, &fp->sb_dma);
15109         memset(&fp->status_block, 0, sizeof(fp->status_block));
15110 
15111         /******************/
15112         /* FP TX BD CHAIN */
15113         /******************/
15114 
15115         bxe_dma_free(sc, &fp->tx_dma);
15116         fp->tx_chain = NULL;
15117 
15118         /******************/
15119         /* FP RX BD CHAIN */
15120         /******************/
15121 
15122         bxe_dma_free(sc, &fp->rx_dma);
15123         fp->rx_chain = NULL;
15124 
15125         /*******************/
15126         /* FP RX RCQ CHAIN */
15127         /*******************/
15128 
15129         bxe_dma_free(sc, &fp->rcq_dma);
15130         fp->rcq_chain = NULL;
15131 
15132         /*******************/
15133         /* FP RX SGE CHAIN */
15134         /*******************/
15135 
15136         bxe_dma_free(sc, &fp->rx_sge_dma);
15137         fp->rx_sge_chain = NULL;
15138 
15139         /***********************/
15140         /* FP TX MBUF DMA MAPS */
15141         /***********************/
15142 
15143         if (fp->tx_mbuf_tag != NULL) {
15144             for (j = 0; j < TX_BD_TOTAL; j++) {
15145                 if (fp->tx_mbuf_chain[j].m_map != NULL) {
15146                     bus_dmamap_unload(fp->tx_mbuf_tag,
15147                                       fp->tx_mbuf_chain[j].m_map);
15148                     bus_dmamap_destroy(fp->tx_mbuf_tag,
15149                                        fp->tx_mbuf_chain[j].m_map);
15150                 }
15151             }
15152 
15153             bus_dma_tag_destroy(fp->tx_mbuf_tag);
15154             fp->tx_mbuf_tag = NULL;
15155         }
15156 
15157         /***********************/
15158         /* FP RX MBUF DMA MAPS */
15159         /***********************/
15160 
15161         if (fp->rx_mbuf_tag != NULL) {
15162             for (j = 0; j < RX_BD_TOTAL; j++) {
15163                 if (fp->rx_mbuf_chain[j].m_map != NULL) {
15164                     bus_dmamap_unload(fp->rx_mbuf_tag,
15165                                       fp->rx_mbuf_chain[j].m_map);
15166                     bus_dmamap_destroy(fp->rx_mbuf_tag,
15167                                        fp->rx_mbuf_chain[j].m_map);
15168                 }
15169             }
15170 
15171             if (fp->rx_mbuf_spare_map != NULL) {
15172                 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15173                 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15174             }
15175 
15176             /***************************/
15177             /* FP RX TPA MBUF DMA MAPS */
15178             /***************************/
15179 
15180             max_agg_queues = MAX_AGG_QS(sc);
15181 
15182             for (j = 0; j < max_agg_queues; j++) {
15183                 if (fp->rx_tpa_info[j].bd.m_map != NULL) {
15184                     bus_dmamap_unload(fp->rx_mbuf_tag,
15185                                       fp->rx_tpa_info[j].bd.m_map);
15186                     bus_dmamap_destroy(fp->rx_mbuf_tag,
15187                                        fp->rx_tpa_info[j].bd.m_map);
15188                 }
15189             }
15190 
15191             if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
15192                 bus_dmamap_unload(fp->rx_mbuf_tag,
15193                                   fp->rx_tpa_info_mbuf_spare_map);
15194                 bus_dmamap_destroy(fp->rx_mbuf_tag,
15195                                    fp->rx_tpa_info_mbuf_spare_map);
15196             }
15197 
15198             bus_dma_tag_destroy(fp->rx_mbuf_tag);
15199             fp->rx_mbuf_tag = NULL;
15200         }
15201 
15202         /***************************/
15203         /* FP RX SGE MBUF DMA MAPS */
15204         /***************************/
15205 
15206         if (fp->rx_sge_mbuf_tag != NULL) {
15207             for (j = 0; j < RX_SGE_TOTAL; j++) {
15208                 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
15209                     bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15210                                       fp->rx_sge_mbuf_chain[j].m_map);
15211                     bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15212                                        fp->rx_sge_mbuf_chain[j].m_map);
15213                 }
15214             }
15215 
15216             if (fp->rx_sge_mbuf_spare_map != NULL) {
15217                 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15218                                   fp->rx_sge_mbuf_spare_map);
15219                 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15220                                    fp->rx_sge_mbuf_spare_map);
15221             }
15222 
15223             bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
15224             fp->rx_sge_mbuf_tag = NULL;
15225         }
15226     }
15227 
15228     /***************************/
15229     /* FW DECOMPRESSION BUFFER */
15230     /***************************/
15231 
15232     bxe_dma_free(sc, &sc->gz_buf_dma);
15233     sc->gz_buf = NULL;
15234     free(sc->gz_strm, M_DEVBUF);
15235     sc->gz_strm = NULL;
15236 
15237     /*******************/
15238     /* SLOW PATH QUEUE */
15239     /*******************/
15240 
15241     bxe_dma_free(sc, &sc->spq_dma);
15242     sc->spq = NULL;
15243 
15244     /*************/
15245     /* SLOW PATH */
15246     /*************/
15247 
15248     bxe_dma_free(sc, &sc->sp_dma);
15249     sc->sp = NULL;
15250 
15251     /***************/
15252     /* EVENT QUEUE */
15253     /***************/
15254 
15255     bxe_dma_free(sc, &sc->eq_dma);
15256     sc->eq = NULL;
15257 
15258     /************************/
15259     /* DEFAULT STATUS BLOCK */
15260     /************************/
15261 
15262     bxe_dma_free(sc, &sc->def_sb_dma);
15263     sc->def_sb = NULL;
15264 
15265     bus_dma_tag_destroy(sc->parent_dma_tag);
15266     sc->parent_dma_tag = NULL;
15267 }
15268 
15269 /*
15270  * A previous driver DMAE transaction may have occurred when the pre-boot stage
15271  * ended and boot began, invalidating the addresses of the transaction and
15272  * setting the was-error bit in the PCI block, which causes all hw-to-host PCIe
15273  * transactions to time out. If this happened, we want to clear the interrupt
15274  * which detected this from the pglueb, as well as the was-done bit.
15275  */
15276 static void
15277 bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15278 {
15279     uint32_t val;
15280 
15281     if (!CHIP_IS_E1x(sc)) {
15282         val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15283         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15284             BLOGD(sc, DBG_LOAD,
15285                   "Clearing 'was-error' bit that was set in pglueb\n");
15286             REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15287         }
15288     }
15289 }
15290 
15291 static int
15292 bxe_prev_mcp_done(struct bxe_softc *sc)
15293 {
15294     uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15295                                  DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15296     if (!rc) {
15297         BLOGE(sc, "MCP response failure, aborting\n");
15298         return (-1);
15299     }
15300 
15301     return (0);
15302 }
15303 
15304 static struct bxe_prev_list_node *
15305 bxe_prev_path_get_entry(struct bxe_softc *sc)
15306 {
15307     struct bxe_prev_list_node *tmp;
15308 
15309     LIST_FOREACH(tmp, &bxe_prev_list, node) {
15310         if ((sc->pcie_bus == tmp->bus) &&
15311             (sc->pcie_device == tmp->slot) &&
15312             (SC_PATH(sc) == tmp->path)) {
15313             return (tmp);
15314         }
15315     }
15316 
15317     return (NULL);
15318 }
15319 
15320 static uint8_t
15321 bxe_prev_is_path_marked(struct bxe_softc *sc)
15322 {
15323     struct bxe_prev_list_node *tmp;
15324     int rc = FALSE;
15325 
15326     mtx_lock(&bxe_prev_mtx);
15327 
15328     tmp = bxe_prev_path_get_entry(sc);
15329     if (tmp) {
15330         if (tmp->aer) {
15331             BLOGD(sc, DBG_LOAD,
15332                   "Path %d/%d/%d was marked by AER\n",
15333                   sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15334         } else {
15335             rc = TRUE;
15336             BLOGD(sc, DBG_LOAD,
15337                   "Path %d/%d/%d was already cleaned from previous drivers\n",
15338                   sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15339         }
15340     }
15341 
15342     mtx_unlock(&bxe_prev_mtx);
15343 
15344     return (rc);
15345 }
15346 
15347 static int
15348 bxe_prev_mark_path(struct bxe_softc *sc,
15349                    uint8_t          after_undi)
15350 {
15351     struct bxe_prev_list_node *tmp;
15352 
15353     mtx_lock(&bxe_prev_mtx);
15354 
15355     /* Check whether the entry for this path already exists */
15356     tmp = bxe_prev_path_get_entry(sc);
15357     if (tmp) {
15358         if (!tmp->aer) {
15359             BLOGD(sc, DBG_LOAD,
15360                   "Re-marking AER in path %d/%d/%d\n",
15361                   sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15362         } else {
15363             BLOGD(sc, DBG_LOAD,
15364                   "Removing AER indication from path %d/%d/%d\n",
15365                   sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15366             tmp->aer = 0;
15367         }
15368 
15369         mtx_unlock(&bxe_prev_mtx);
15370         return (0);
15371     }
15372 
15373     mtx_unlock(&bxe_prev_mtx);
15374 
15375     /* Create an entry for this path and add it */
15376     tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15377                  (M_NOWAIT | M_ZERO));
15378     if (!tmp) {
15379         BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15380         return (-1);
15381     }
15382 
15383     tmp->bus  = sc->pcie_bus;
15384     tmp->slot = sc->pcie_device;
15385     tmp->path = SC_PATH(sc);
15386     tmp->aer  = 0;
15387     tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15388 
15389     mtx_lock(&bxe_prev_mtx);
15390 
15391     BLOGD(sc, DBG_LOAD,
15392           "Marked path %d/%d/%d - finished previous unload\n",
15393           sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15394     LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15395 
15396     mtx_unlock(&bxe_prev_mtx);
15397 
15398     return (0);
15399 }
15400 
15401 static int
15402 bxe_do_flr(struct bxe_softc *sc)
15403 {
15404     int i;
15405 
15406     /* only E2 and onwards support FLR */
15407     if (CHIP_IS_E1x(sc)) {
15408         BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15409         return (-1);
15410     }
15411 
15412     /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15413     if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15414         BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15415               sc->devinfo.bc_ver);
15416         return (-1);
15417     }
15418 
15419     /* Wait for Transaction Pending bit clean */
15420     for (i = 0; i < 4; i++) {
15421         if (i) {
15422             DELAY(((1 << (i - 1)) * 100) * 1000);
15423         }
15424 
15425         if (!bxe_is_pcie_pending(sc)) {
15426             goto clear;
15427         }
15428     }
15429 
15430     BLOGE(sc, "PCIE transaction is not cleared, "
15431               "proceeding with reset anyway\n");
15432 
15433 clear:
15434 
15435     BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15436     bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15437 
15438     return (0);
15439 }
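
/*
 * Illustration (added for clarity, not from the original sources): the
 * transaction-pending poll above uses an exponential backoff. With
 * DELAY(((1 << (i - 1)) * 100) * 1000) the waits before polls 2, 3 and 4
 * work out to roughly 100 ms, 200 ms and 400 ms, after which the code
 * proceeds with the reset anyway.
 */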
15440 
15441 struct bxe_mac_vals {
15442     uint32_t xmac_addr;
15443     uint32_t xmac_val;
15444     uint32_t emac_addr;
15445     uint32_t emac_val;
15446     uint32_t umac_addr;
15447     uint32_t umac_val;
15448     uint32_t bmac_addr;
15449     uint32_t bmac_val[2];
15450 };
15451 
15452 static void
15453 bxe_prev_unload_close_mac(struct bxe_softc *sc,
15454                           struct bxe_mac_vals *vals)
15455 {
15456     uint32_t val, base_addr, offset, mask, reset_reg;
15457     uint8_t mac_stopped = FALSE;
15458     uint8_t port = SC_PORT(sc);
15459     uint32_t wb_data[2];
15460 
15461     /* reset addresses as they also mark which values were changed */
15462     vals->bmac_addr = 0;
15463     vals->umac_addr = 0;
15464     vals->xmac_addr = 0;
15465     vals->emac_addr = 0;
15466 
15467     reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15468 
15469     if (!CHIP_IS_E3(sc)) {
15470         val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15471         mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15472         if ((mask & reset_reg) && val) {
15473             BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15474             base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15475                                     : NIG_REG_INGRESS_BMAC0_MEM;
15476             offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15477                                     : BIGMAC_REGISTER_BMAC_CONTROL;
15478 
15479             /*
15480              * use rd/wr since we cannot use dmae. This is safe
15481              * since MCP won't access the bus due to the request
15482              * to unload, and no function on the path can be
15483              * loaded at this time.
15484              */
15485             wb_data[0] = REG_RD(sc, base_addr + offset);
15486             wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15487             vals->bmac_addr = base_addr + offset;
15488             vals->bmac_val[0] = wb_data[0];
15489             vals->bmac_val[1] = wb_data[1];
15490             wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15491             REG_WR(sc, vals->bmac_addr, wb_data[0]);
15492             REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15493         }
15494 
15495         BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15496         vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15497         vals->emac_val = REG_RD(sc, vals->emac_addr);
15498         REG_WR(sc, vals->emac_addr, 0);
15499         mac_stopped = TRUE;
15500     } else {
15501         if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15502             BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15503             base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15504             val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15505             REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15506             REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15507             vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15508             vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15509             REG_WR(sc, vals->xmac_addr, 0);
15510             mac_stopped = TRUE;
15511         }
15512 
15513         mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15514         if (mask & reset_reg) {
15515             BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15516             base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15517             vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15518             vals->umac_val = REG_RD(sc, vals->umac_addr);
15519             REG_WR(sc, vals->umac_addr, 0);
15520             mac_stopped = TRUE;
15521         }
15522     }
15523 
15524     if (mac_stopped) {
15525         DELAY(20000);
15526     }
15527 }
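
/*
 * Illustration (added for clarity, not from the original sources): the
 * bxe_mac_vals addresses saved above double as "touched" markers. A MAC's
 * *_addr field stays zero unless its Rx path was actually disabled here, so
 * bxe_prev_unload_common() later restores only the registers whose saved
 * address is non-zero.
 */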
15528 
15529 #define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15530 #define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15531 #define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15532 #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
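
/*
 * Illustration (added for clarity, not from the original sources): the UNDI
 * producer word packs the RCQ producer into bits 15:0 and the BD producer
 * into bits 31:16. A raw value of 0x00120034 therefore unpacks to
 * bd = 0x0012 and rcq = 0x0034, and BXE_PREV_UNDI_PROD(0x0034, 0x0012)
 * re-packs it to 0x00120034.
 */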
15533 
15534 static void
15535 bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15536                          uint8_t          port,
15537                          uint8_t          inc)
15538 {
15539     uint16_t rcq, bd;
15540     uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15541 
15542     rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15543     bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15544 
15545     tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15546     REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15547 
15548     BLOGD(sc, DBG_LOAD,
15549           "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15550           port, bd, rcq);
15551 }
15552 
15553 static int
15554 bxe_prev_unload_common(struct bxe_softc *sc)
15555 {
15556     uint32_t reset_reg, tmp_reg = 0, rc;
15557     uint8_t prev_undi = FALSE;
15558     struct bxe_mac_vals mac_vals;
15559     uint32_t timer_count = 1000;
15560     uint32_t prev_brb;
15561 
15562     /*
15563      * It is possible that a previous function received the 'common' answer
15564      * but has not loaded yet, creating a scenario where multiple functions
15565      * receive 'common' on the same path.
15566      */
15567     BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15568 
15569     memset(&mac_vals, 0, sizeof(mac_vals));
15570 
15571     if (bxe_prev_is_path_marked(sc)) {
15572         return (bxe_prev_mcp_done(sc));
15573     }
15574 
15575     reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15576 
15577     /* Reset should be performed after BRB is emptied */
15578     if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15579         /* Close the MAC Rx to prevent BRB from filling up */
15580         bxe_prev_unload_close_mac(sc, &mac_vals);
15581 
15582         /* close LLH filters towards the BRB */
15583         elink_set_rx_filter(&sc->link_params, 0);
15584 
15585         /*
15586          * Check if the UNDI driver was previously loaded.
15587          * UNDI driver initializes CID offset for normal bell to 0x7
15588          */
15589         if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15590             tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15591             if (tmp_reg == 0x7) {
15592                 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15593                 prev_undi = TRUE;
15594                 /* clear the UNDI indication */
15595                 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15596                 /* clear possible idle check errors */
15597                 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15598             }
15599         }
15600 
15601         /* wait until BRB is empty */
15602         tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15603         while (timer_count) {
15604             prev_brb = tmp_reg;
15605 
15606             tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15607             if (!tmp_reg) {
15608                 break;
15609             }
15610 
15611             BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15612 
15613             /* reset timer as long as BRB actually gets emptied */
15614             if (prev_brb > tmp_reg) {
15615                 timer_count = 1000;
15616             } else {
15617                 timer_count--;
15618             }
15619 
15620             /* If UNDI resides in memory, manually increment it */
15621             if (prev_undi) {
15622                 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15623             }
15624 
15625             DELAY(10);
15626         }
15627 
15628         if (!timer_count) {
15629             BLOGE(sc, "Failed to empty BRB\n");
15630         }
15631     }
15632 
15633     /* No packets are in the pipeline, path is ready for reset */
15634     bxe_reset_common(sc);
15635 
15636     if (mac_vals.xmac_addr) {
15637         REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15638     }
15639     if (mac_vals.umac_addr) {
15640         REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15641     }
15642     if (mac_vals.emac_addr) {
15643         REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15644     }
15645     if (mac_vals.bmac_addr) {
15646         REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15647         REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15648     }
15649 
15650     rc = bxe_prev_mark_path(sc, prev_undi);
15651     if (rc) {
15652         bxe_prev_mcp_done(sc);
15653         return (rc);
15654     }
15655 
15656     return (bxe_prev_mcp_done(sc));
15657 }
15658 
15659 static int
15660 bxe_prev_unload_uncommon(struct bxe_softc *sc)
15661 {
15662     int rc;
15663 
15664     BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15665 
15666     /* Test if previous unload process was already finished for this path */
15667     if (bxe_prev_is_path_marked(sc)) {
15668         return (bxe_prev_mcp_done(sc));
15669     }
15670 
15671     BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15672 
15673     /*
15674      * If function has FLR capabilities, and existing FW version matches
15675      * the one required, then FLR will be sufficient to clean any residue
15676      * left by previous driver
15677      */
15678     rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15679     if (!rc) {
15680         /* fw version is good */
15681         BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15682         rc = bxe_do_flr(sc);
15683     }
15684 
15685     if (!rc) {
15686         /* FLR was performed */
15687         BLOGD(sc, DBG_LOAD, "FLR successful\n");
15688         return (0);
15689     }
15690 
15691     BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15692 
15693     /* Close the MCP request, return failure */
15694     rc = bxe_prev_mcp_done(sc);
15695     if (!rc) {
15696         rc = BXE_PREV_WAIT_NEEDED;
15697     }
15698 
15699     return (rc);
15700 }
15701 
15702 static int
15703 bxe_prev_unload(struct bxe_softc *sc)
15704 {
15705     int time_counter = 10;
15706     uint32_t fw, hw_lock_reg, hw_lock_val;
15707     uint32_t rc = 0;
15708 
15709     /*
15710      * Clear HW from errors which may have resulted from an interrupted
15711      * DMAE transaction.
15712      */
15713     bxe_prev_interrupted_dmae(sc);
15714 
15715     /* Release previously held locks */
15716     hw_lock_reg =
15717         (SC_FUNC(sc) <= 5) ?
15718             (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15719             (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
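    /*
     * Illustration (added for clarity, not from the original sources):
     * functions 0..5 use MISC_REG_DRIVER_CONTROL_1 + func * 8, while
     * functions 6 and 7 use MISC_REG_DRIVER_CONTROL_7 and
     * MISC_REG_DRIVER_CONTROL_7 + 8 respectively.
     */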
15720 
15721     hw_lock_val = (REG_RD(sc, hw_lock_reg));
15722     if (hw_lock_val) {
15723         if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15724             BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15725             REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15726                    (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15727         }
15728         BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15729         REG_WR(sc, hw_lock_reg, 0xffffffff);
15730     } else {
15731         BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15732     }
15733 
15734     if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15735         BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15736         REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15737     }
15738 
15739     do {
15740         /* Lock MCP using an unload request */
15741         fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15742         if (!fw) {
15743             BLOGE(sc, "MCP response failure, aborting\n");
15744             rc = -1;
15745             break;
15746         }
15747 
15748         if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15749             rc = bxe_prev_unload_common(sc);
15750             break;
15751         }
15752 
15753         /* non-common reply from MCP might require looping */
15754         rc = bxe_prev_unload_uncommon(sc);
15755         if (rc != BXE_PREV_WAIT_NEEDED) {
15756             break;
15757         }
15758 
15759         DELAY(20000);
15760     } while (--time_counter);
15761 
15762     if (!time_counter || rc) {
15763         BLOGE(sc, "Failed to unload previous driver!"
15764             " time_counter %d rc %d\n", time_counter, rc);
15765         rc = -1;
15766     }
15767 
15768     return (rc);
15769 }
15770 
15771 void
15772 bxe_dcbx_set_state(struct bxe_softc *sc,
15773                    uint8_t          dcb_on,
15774                    uint32_t         dcbx_enabled)
15775 {
15776     if (!CHIP_IS_E1x(sc)) {
15777         sc->dcb_state = dcb_on;
15778         sc->dcbx_enabled = dcbx_enabled;
15779     } else {
15780         sc->dcb_state = FALSE;
15781         sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15782     }
15783     BLOGD(sc, DBG_LOAD,
15784           "DCB state [%s:%s]\n",
15785           dcb_on ? "ON" : "OFF",
15786           (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15787           (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15788           (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15789           "on-chip with negotiation" : "invalid");
15790 }
15791 
15792 /* must be called after sriov-enable */
15793 static int
15794 bxe_set_qm_cid_count(struct bxe_softc *sc)
15795 {
15796     int cid_count = BXE_L2_MAX_CID(sc);
15797 
15798     if (IS_SRIOV(sc)) {
15799         cid_count += BXE_VF_CIDS;
15800     }
15801 
15802     if (CNIC_SUPPORT(sc)) {
15803         cid_count += CNIC_CID_MAX;
15804     }
15805 
15806     return (roundup(cid_count, QM_CID_ROUND));
15807 }
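
/*
 * Illustration (added for clarity, not from the original sources): the QM CID
 * count is the L2 CID count, plus the VF CIDs when SR-IOV is active and the
 * CNIC CIDs when CNIC is supported, rounded up to the next multiple of
 * QM_CID_ROUND by the roundup() above.
 */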
15808 
15809 static void
15810 bxe_init_multi_cos(struct bxe_softc *sc)
15811 {
15812     int pri, cos;
15813 
15814     uint32_t pri_map = 0; /* XXX change to user config */
15815 
15816     for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15817         cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15818         if (cos < sc->max_cos) {
15819             sc->prio_to_cos[pri] = cos;
15820         } else {
15821             BLOGW(sc, "Invalid COS %d for priority %d "
15822                       "(max COS is %d), setting to 0\n",
15823                   cos, pri, (sc->max_cos - 1));
15824             sc->prio_to_cos[pri] = 0;
15825         }
15826     }
15827 }
15828 
15829 static int
15830 bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15831 {
15832     struct bxe_softc *sc;
15833     int error, result;
15834 
15835     result = 0;
15836     error = sysctl_handle_int(oidp, &result, 0, req);
15837 
15838     if (error || !req->newptr) {
15839         return (error);
15840     }
15841 
15842     if (result == 1) {
15843         uint32_t  temp;
15844         sc = (struct bxe_softc *)arg1;
15845 
15846         BLOGI(sc, "... dumping driver state ...\n");
15847         temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15848         BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15849     }
15850 
15851     return (error);
15852 }
15853 
15854 static int
15855 bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15856 {
15857     struct bxe_softc *sc = (struct bxe_softc *)arg1;
15858     uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15859     uint32_t *offset;
15860     uint64_t value = 0;
15861     int index = (int)arg2;
15862 
15863     if (index >= BXE_NUM_ETH_STATS) {
15864         BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15865         return (-1);
15866     }
15867 
15868     offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15869 
15870     switch (bxe_eth_stats_arr[index].size) {
15871     case 4:
15872         value = (uint64_t)*offset;
15873         break;
15874     case 8:
15875         value = HILO_U64(*offset, *(offset + 1));
15876         break;
15877     default:
15878         BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15879               index, bxe_eth_stats_arr[index].size);
15880         return (-1);
15881     }
15882 
15883     return (sysctl_handle_64(oidp, &value, 0, req));
15884 }
15885 
15886 static int
15887 bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15888 {
15889     struct bxe_softc *sc = (struct bxe_softc *)arg1;
15890     uint32_t *eth_stats;
15891     uint32_t *offset;
15892     uint64_t value = 0;
15893     uint32_t q_stat = (uint32_t)arg2;
15894     uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15895     uint32_t index = (q_stat & 0xffff);
15896 
15897     eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15898 
15899     if (index >= BXE_NUM_ETH_Q_STATS) {
15900         BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15901         return (-1);
15902     }
15903 
15904     offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15905 
15906     switch (bxe_eth_q_stats_arr[index].size) {
15907     case 4:
15908         value = (uint64_t)*offset;
15909         break;
15910     case 8:
15911         value = HILO_U64(*offset, *(offset + 1));
15912         break;
15913     default:
15914         BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15915               index, bxe_eth_q_stats_arr[index].size);
15916         return (-1);
15917     }
15918 
15919     return (sysctl_handle_64(oidp, &value, 0, req));
15920 }
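
/*
 * Illustration (added for clarity, not from the original sources): arg2 for
 * this handler is encoded by bxe_add_sysctls() as ((fp_index << 16) | index),
 * so a q_stat of 0x00030007, for example, selects eth_q_stats entry 7 of
 * fastpath queue 3, matching the decode above.
 */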
15921 
15922 static void
15923 bxe_force_link_reset(struct bxe_softc *sc)
15924 {
15925     bxe_acquire_phy_lock(sc);
15926     elink_link_reset(&sc->link_params, &sc->link_vars, 1);
15927     bxe_release_phy_lock(sc);
15928 }
15929 
15930 static int
15931 bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
15932 {
15933         struct bxe_softc *sc = (struct bxe_softc *)arg1;
15934         uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
15935         int rc = 0;
15936         int error;
15937         int result;
15938 
15939 
15940         error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
15941 
15942         if (error || !req->newptr) {
15943                 return (error);
15944         }
15945         if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) {
15946                 BLOGW(sc, "invalid pause param (%d) - use integers between 0 & 8\n", sc->bxe_pause_param);
15947                 sc->bxe_pause_param = 8;
15948         }
15949 
15950         result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
15951 
15952 
15953         if ((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
15954                 BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param);
15955                 return (-EINVAL);
15956         }
15957 
15958         if (IS_MF(sc))
15959                 return (0);
15960         sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
15961         if (result & ELINK_FLOW_CTRL_RX)
15962                 sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
15963 
15964         if (result & ELINK_FLOW_CTRL_TX)
15965                 sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
15966         if (sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
15967                 sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
15968 
15969         if (result & 0x400) {
15970                 if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
15971                         sc->link_params.req_flow_ctrl[cfg_idx] =
15972                                 ELINK_FLOW_CTRL_AUTO;
15973                 }
15974                 sc->link_params.req_fc_auto_adv = 0;
15975                 if (result & ELINK_FLOW_CTRL_RX)
15976                         sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
15977 
15978                 if (result & ELINK_FLOW_CTRL_TX)
15979                         sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
15980                 if (!sc->link_params.req_fc_auto_adv)
15981                         sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
15982         }
15983         if (IS_PF(sc)) {
15984                 if (sc->link_vars.link_up) {
15985                         bxe_stats_handle(sc, STATS_EVENT_STOP);
15986                 }
15987                 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
15988                         bxe_force_link_reset(sc);
15989                         bxe_acquire_phy_lock(sc);
15990 
15991                         rc = elink_phy_init(&sc->link_params, &sc->link_vars);
15992 
15993                         bxe_release_phy_lock(sc);
15994 
15995                         bxe_calc_fc_adv(sc);
15996                 }
15997         }
15998         return (rc);
15999 }
16000 
16001 
16002 static void
16003 bxe_add_sysctls(struct bxe_softc *sc)
16004 {
16005     struct sysctl_ctx_list *ctx;
16006     struct sysctl_oid_list *children;
16007     struct sysctl_oid *queue_top, *queue;
16008     struct sysctl_oid_list *queue_top_children, *queue_children;
16009     char queue_num_buf[32];
16010     uint32_t q_stat;
16011     int i, j;
16012 
16013     ctx = device_get_sysctl_ctx(sc->dev);
16014     children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
16015 
16016     SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
16017                       CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
16018                       "version");
16019 
16020     snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
16021              BCM_5710_FW_MAJOR_VERSION,
16022              BCM_5710_FW_MINOR_VERSION,
16023              BCM_5710_FW_REVISION_VERSION,
16024              BCM_5710_FW_ENGINEERING_VERSION);
16025 
16026     snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
16027         ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
16028          (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
16029          (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
16030          (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
16031                                                                 "Unknown"));
16032     SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
16033                     CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
16034                     "multifunction vnics per port");
16035 
16036     snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
16037         ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
16038          (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
16039          (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
16040                                               "???GT/s"),
16041         sc->devinfo.pcie_link_width);
16042 
16043     sc->debug = bxe_debug;
16044 
16045     SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
16046                       CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
16047                       "bootcode version");
16048     SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
16049                       CTLFLAG_RD, sc->fw_ver_str, 0,
16050                       "firmware version");
16051     SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
16052                       CTLFLAG_RD, sc->mf_mode_str, 0,
16053                       "multifunction mode");
16054     SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
16055                       CTLFLAG_RD, sc->mac_addr_str, 0,
16056                       "mac address");
16057     SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
16058                       CTLFLAG_RD, sc->pci_link_str, 0,
16059                       "pci link status");
16060     SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
16061                     CTLFLAG_RW, &sc->debug,
16062                     "debug logging mode");
16063 
16064     sc->trigger_grcdump = 0;
16065     SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
16066                    CTLFLAG_RW, &sc->trigger_grcdump, 0,
16067                    "trigger grcdump; should be invoked"
16068                    " before collecting grcdump");
16069 
16070     sc->grcdump_started = 0;
16071     sc->grcdump_done = 0;
16072     SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
16073                    CTLFLAG_RD, &sc->grcdump_done, 0,
16074                    "set by driver when grcdump is done");
16075 
16076     sc->rx_budget = bxe_rx_budget;
16077     SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
16078                     CTLFLAG_RW, &sc->rx_budget, 0,
16079                     "rx processing budget");
16080 
16081     SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
16082         CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16083         bxe_sysctl_pauseparam, "IU",
16084         "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
16085 
16086 
16087     SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
16088         CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16089         bxe_sysctl_state, "IU", "dump driver state");
16090 
16091     for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
16092         SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
16093             bxe_eth_stats_arr[i].string,
16094             CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
16095             bxe_sysctl_eth_stat, "LU", bxe_eth_stats_arr[i].string);
16096     }
16097 
16098     /* add a new parent node for all queues "dev.bxe.#.queue" */
16099     queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
16100         CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "queue");
16101     queue_top_children = SYSCTL_CHILDREN(queue_top);
16102 
16103     for (i = 0; i < sc->num_queues; i++) {
16104         /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
16105         snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
16106         queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
16107             queue_num_buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "single queue");
16108         queue_children = SYSCTL_CHILDREN(queue);
16109 
16110         for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
16111             q_stat = ((i << 16) | j);
16112             SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
16113                  bxe_eth_q_stats_arr[j].string,
16114                  CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, q_stat,
16115                  bxe_sysctl_eth_q_stat, "LU", bxe_eth_q_stats_arr[j].string);
16116         }
16117     }
16118 }
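
/*
 * Illustration (added for clarity, not from the original sources): with the
 * nodes created above, per-queue statistics appear under sysctl paths of the
 * form dev.bxe.<unit>.queue.<queue#>.<stat name>, alongside the top-level
 * dev.bxe.<unit> strings and handlers registered earlier in this function.
 */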
16119 
16120 static int
16121 bxe_alloc_buf_rings(struct bxe_softc *sc)
16122 {
16123     int i;
16124     struct bxe_fastpath *fp;
16125 
16126     for (i = 0; i < sc->num_queues; i++) {
16127 
16128         fp = &sc->fp[i];
16129 
16130         fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
16131                                    M_NOWAIT, &fp->tx_mtx);
16132         if (fp->tx_br == NULL)
16133             return (-1);
16134     }
16135 
16136     return (0);
16137 }
16138 
16139 static void
16140 bxe_free_buf_rings(struct bxe_softc *sc)
16141 {
16142     int i;
16143     struct bxe_fastpath *fp;
16144 
16145     for (i = 0; i < sc->num_queues; i++) {
16146 
16147         fp = &sc->fp[i];
16148 
16149         if (fp->tx_br) {
16150             buf_ring_free(fp->tx_br, M_DEVBUF);
16151             fp->tx_br = NULL;
16152         }
16153     }
16154 }
16155 
16156 static void
16157 bxe_init_fp_mutexs(struct bxe_softc *sc)
16158 {
16159     int i;
16160     struct bxe_fastpath *fp;
16161 
16162     for (i = 0; i < sc->num_queues; i++) {
16163 
16164         fp = &sc->fp[i];
16165 
16166         snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
16167             "bxe%d_fp%d_tx_lock", sc->unit, i);
16168         mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
16169 
16170         snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
16171             "bxe%d_fp%d_rx_lock", sc->unit, i);
16172         mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
16173     }
16174 }
16175 
16176 static void
16177 bxe_destroy_fp_mutexs(struct bxe_softc *sc)
16178 {
16179     int i;
16180     struct bxe_fastpath *fp;
16181 
16182     for (i = 0; i < sc->num_queues; i++) {
16183 
16184         fp = &sc->fp[i];
16185 
16186         if (mtx_initialized(&fp->tx_mtx)) {
16187             mtx_destroy(&fp->tx_mtx);
16188         }
16189 
16190         if (mtx_initialized(&fp->rx_mtx)) {
16191             mtx_destroy(&fp->rx_mtx);
16192         }
16193     }
16194 }
16195 
16196 
16197 /*
16198  * Device attach function.
16199  *
16200  * Allocates device resources, performs secondary chip identification, and
16201  * initializes driver instance variables. This function is called from driver
16202  * load after a successful probe.
16203  *
16204  * Returns:
16205  *   0 = Success, >0 = Failure
16206  */
16207 static int
16208 bxe_attach(device_t dev)
16209 {
16210     struct bxe_softc *sc;
16211 
16212     sc = device_get_softc(dev);
16213 
16214     BLOGD(sc, DBG_LOAD, "Starting attach...\n");
16215 
16216     sc->state = BXE_STATE_CLOSED;
16217 
16218     sc->dev  = dev;
16219     sc->unit = device_get_unit(dev);
16220 
16221     BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
16222 
16223     sc->pcie_bus    = pci_get_bus(dev);
16224     sc->pcie_device = pci_get_slot(dev);
16225     sc->pcie_func   = pci_get_function(dev);
16226 
16227     /* enable bus master capability */
16228     pci_enable_busmaster(dev);
16229 
16230     /* get the BARs */
16231     if (bxe_allocate_bars(sc) != 0) {
16232         return (ENXIO);
16233     }
16234 
16235     /* initialize the mutexes */
16236     bxe_init_mutexes(sc);
16237 
16238     /* prepare the periodic callout */
16239     callout_init(&sc->periodic_callout, 1);
16240 
16241     /* prepare the chip taskqueue */
16242     sc->chip_tq_flags = CHIP_TQ_NONE;
16243     snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
16244              "bxe%d_chip_tq", sc->unit);
16245     TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
16246     sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
16247                                    taskqueue_thread_enqueue,
16248                                    &sc->chip_tq);
16249     taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
16250                             "%s", sc->chip_tq_name);
16251 
16252     TIMEOUT_TASK_INIT(taskqueue_thread,
16253         &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task,  sc);
16254 
16255 
16256     /* get device info and set params */
16257     if (bxe_get_device_info(sc) != 0) {
16258         BLOGE(sc, "getting device info\n");
16259         bxe_deallocate_bars(sc);
16260         pci_disable_busmaster(dev);
16261         return (ENXIO);
16262     }
16263 
16264     /* get final misc params */
16265     bxe_get_params(sc);
16266 
16267     /* set the default MTU (changed via ifconfig) */
16268     sc->mtu = ETHERMTU;
16269 
16270     bxe_set_modes_bitmap(sc);
16271 
16272     /* XXX
16273      * If in AFEX mode and the function is configured for FCoE
16274      * then bail... no L2 allowed.
16275      */
16276 
16277     /* get phy settings from shmem and 'and' against admin settings */
16278     bxe_get_phy_info(sc);
16279 
16280     /* initialize the FreeBSD ifnet interface */
16281     if (bxe_init_ifnet(sc) != 0) {
16282         bxe_release_mutexes(sc);
16283         bxe_deallocate_bars(sc);
16284         pci_disable_busmaster(dev);
16285         return (ENXIO);
16286     }
16287 
16288     if (bxe_add_cdev(sc) != 0) {
16289         if (sc->ifp != NULL) {
16290             ether_ifdetach(sc->ifp);
16291         }
16292         ifmedia_removeall(&sc->ifmedia);
16293         bxe_release_mutexes(sc);
16294         bxe_deallocate_bars(sc);
16295         pci_disable_busmaster(dev);
16296         return (ENXIO);
16297     }
16298 
16299     /* allocate device interrupts */
16300     if (bxe_interrupt_alloc(sc) != 0) {
16301         bxe_del_cdev(sc);
16302         if (sc->ifp != NULL) {
16303             ether_ifdetach(sc->ifp);
16304         }
16305         ifmedia_removeall(&sc->ifmedia);
16306         bxe_release_mutexes(sc);
16307         bxe_deallocate_bars(sc);
16308         pci_disable_busmaster(dev);
16309         return (ENXIO);
16310     }
16311 
16312     bxe_init_fp_mutexs(sc);
16313 
16314     if (bxe_alloc_buf_rings(sc) != 0) {
16315 	bxe_free_buf_rings(sc);
16316         bxe_interrupt_free(sc);
16317         bxe_del_cdev(sc);
16318         if (sc->ifp != NULL) {
16319             ether_ifdetach(sc->ifp);
16320         }
16321         ifmedia_removeall(&sc->ifmedia);
16322         bxe_release_mutexes(sc);
16323         bxe_deallocate_bars(sc);
16324         pci_disable_busmaster(dev);
16325         return (ENXIO);
16326     }
16327 
16328     /* allocate ilt */
16329     if (bxe_alloc_ilt_mem(sc) != 0) {
16330 	bxe_free_buf_rings(sc);
16331         bxe_interrupt_free(sc);
16332         bxe_del_cdev(sc);
16333         if (sc->ifp != NULL) {
16334             ether_ifdetach(sc->ifp);
16335         }
16336         ifmedia_removeall(&sc->ifmedia);
16337         bxe_release_mutexes(sc);
16338         bxe_deallocate_bars(sc);
16339         pci_disable_busmaster(dev);
16340         return (ENXIO);
16341     }
16342 
16343     /* allocate the host hardware/software hsi structures */
16344     if (bxe_alloc_hsi_mem(sc) != 0) {
16345         bxe_free_ilt_mem(sc);
16346 	bxe_free_buf_rings(sc);
16347         bxe_interrupt_free(sc);
16348         bxe_del_cdev(sc);
16349         if (sc->ifp != NULL) {
16350             ether_ifdetach(sc->ifp);
16351         }
16352         ifmedia_removeall(&sc->ifmedia);
16353         bxe_release_mutexes(sc);
16354         bxe_deallocate_bars(sc);
16355         pci_disable_busmaster(dev);
16356         return (ENXIO);
16357     }
16358 
16359     /* need to reset chip if UNDI was active */
16360     if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16361         /* init fw_seq */
16362         sc->fw_seq =
16363             (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16364              DRV_MSG_SEQ_NUMBER_MASK);
16365         BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16366         bxe_prev_unload(sc);
16367     }
16368 
16369 #if 1
16370     /* XXX */
16371     bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16372 #else
16373     if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16374         SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16375         SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16376         SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16377         bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16378         bxe_dcbx_init_params(sc);
16379     } else {
16380         bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16381     }
16382 #endif
16383 
16384     /* calculate qm_cid_count */
16385     sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16386     BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16387 
16388     sc->max_cos = 1;
16389     bxe_init_multi_cos(sc);
16390 
16391     bxe_add_sysctls(sc);
16392 
16393     return (0);
16394 }
16395 
16396 /*
16397  * Device detach function.
16398  *
16399  * Stops the controller, resets the controller, and releases resources.
16400  *
16401  * Returns:
16402  *   0 = Success, >0 = Failure
16403  */
16404 static int
16405 bxe_detach(device_t dev)
16406 {
16407     struct bxe_softc *sc;
16408     if_t ifp;
16409 
16410     sc = device_get_softc(dev);
16411 
16412     BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16413 
16414     ifp = sc->ifp;
16415     if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16416         BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16417         return(EBUSY);
16418     }
16419 
16420     bxe_del_cdev(sc);
16421 
16422     /* stop the periodic callout */
16423     bxe_periodic_stop(sc);
16424 
16425     /* stop the chip taskqueue */
16426     atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16427     if (sc->chip_tq) {
16428         taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16429         taskqueue_free(sc->chip_tq);
16430         sc->chip_tq = NULL;
16431         taskqueue_drain_timeout(taskqueue_thread,
16432             &sc->sp_err_timeout_task);
16433     }
16434 
16435     /* stop and reset the controller if it was open */
16436     if (sc->state != BXE_STATE_CLOSED) {
16437         BXE_CORE_LOCK(sc);
16438         bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16439         sc->state = BXE_STATE_DISABLED;
16440         BXE_CORE_UNLOCK(sc);
16441     }
16442 
16443     /* release the network interface */
16444     if (ifp != NULL) {
16445         ether_ifdetach(ifp);
16446     }
16447     ifmedia_removeall(&sc->ifmedia);
16448 
16449     /* XXX do the following based on driver state... */
16450 
16451     /* free the host hardware/software hsi structures */
16452     bxe_free_hsi_mem(sc);
16453 
16454     /* free ilt */
16455     bxe_free_ilt_mem(sc);
16456 
16457     bxe_free_buf_rings(sc);
16458 
16459     /* release the interrupts */
16460     bxe_interrupt_free(sc);
16461 
16462     /* Release the mutexes*/
16463     bxe_destroy_fp_mutexs(sc);
16464     bxe_release_mutexes(sc);
16465 
16466 
16467     /* Release the PCIe BAR mapped memory */
16468     bxe_deallocate_bars(sc);
16469 
16470     /* Release the FreeBSD interface. */
16471     if (sc->ifp != NULL) {
16472         if_free(sc->ifp);
16473     }
16474 
16475     pci_disable_busmaster(dev);
16476 
16477     return (0);
16478 }
16479 
16480 /*
16481  * Device shutdown function.
16482  *
16483  * Stops and resets the controller.
16484  *
16485  * Returns:
16486  *   0 = Success
16487  */
16488 static int
16489 bxe_shutdown(device_t dev)
16490 {
16491     struct bxe_softc *sc;
16492 
16493     sc = device_get_softc(dev);
16494 
16495     BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16496 
16497     /* stop the periodic callout */
16498     bxe_periodic_stop(sc);
16499 
16500     if (sc->state != BXE_STATE_CLOSED) {
16501     	BXE_CORE_LOCK(sc);
16502     	bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16503     	BXE_CORE_UNLOCK(sc);
16504     }
16505 
16506     return (0);
16507 }
16508 
16509 void
16510 bxe_igu_ack_sb(struct bxe_softc *sc,
16511                uint8_t          igu_sb_id,
16512                uint8_t          segment,
16513                uint16_t         index,
16514                uint8_t          op,
16515                uint8_t          update)
16516 {
16517     uint32_t igu_addr = sc->igu_base_addr;
16518     igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16519     bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16520 }
16521 
16522 static void
16523 bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16524                      uint8_t          func,
16525                      uint8_t          idu_sb_id,
16526                      uint8_t          is_pf)
16527 {
16528     uint32_t data, ctl, cnt = 100;
16529     uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16530     uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16531     uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16532     uint32_t sb_bit =  1 << (idu_sb_id%32);
16533     uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16534     uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16535 
16536     /* Not supported in BC mode */
16537     if (CHIP_INT_MODE_IS_BC(sc)) {
16538         return;
16539     }
16540 
16541     data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16542              IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16543             IGU_REGULAR_CLEANUP_SET |
16544             IGU_REGULAR_BCLEANUP);
16545 
16546     ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16547            (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16548            (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16549 
16550     BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16551             data, igu_addr_data);
16552     REG_WR(sc, igu_addr_data, data);
16553 
16554     bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16555                       BUS_SPACE_BARRIER_WRITE);
16556     mb();
16557 
16558     BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16559             ctl, igu_addr_ctl);
16560     REG_WR(sc, igu_addr_ctl, ctl);
16561 
16562     bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16563                       BUS_SPACE_BARRIER_WRITE);
16564     mb();
16565 
16566     /* wait for clean up to finish */
16567     while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16568         DELAY(20000);
16569     }
16570 
16571     if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16572         BLOGD(sc, DBG_LOAD,
16573               "Unable to finish IGU cleanup: "
16574               "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16575               idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16576     }
16577 }
16578 
16579 static void
16580 bxe_igu_clear_sb(struct bxe_softc *sc,
16581                  uint8_t          idu_sb_id)
16582 {
16583     bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16584 }
16585 
16586 
16587 
16588 
16589 
16590 
16591 
16592 /*******************/
16593 /* ECORE CALLBACKS */
16594 /*******************/
16595 
16596 static void
16597 bxe_reset_common(struct bxe_softc *sc)
16598 {
16599     uint32_t val = 0x1400;
16600 
16601     /* reset_common */
16602     REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16603 
16604     if (CHIP_IS_E3(sc)) {
16605         val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16606         val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16607     }
16608 
16609     REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16610 }
16611 
16612 static void
16613 bxe_common_init_phy(struct bxe_softc *sc)
16614 {
16615     uint32_t shmem_base[2];
16616     uint32_t shmem2_base[2];
16617 
16618     /* Avoid common init in case MFW supports LFA */
16619     if (SHMEM2_RD(sc, size) >
16620         (uint32_t)offsetof(struct shmem2_region,
16621                            lfa_host_addr[SC_PORT(sc)])) {
16622         return;
16623     }
16624 
16625     shmem_base[0]  = sc->devinfo.shmem_base;
16626     shmem2_base[0] = sc->devinfo.shmem2_base;
16627 
16628     if (!CHIP_IS_E1x(sc)) {
16629         shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16630         shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16631     }
16632 
16633     bxe_acquire_phy_lock(sc);
16634     elink_common_init_phy(sc, shmem_base, shmem2_base,
16635                           sc->devinfo.chip_id, 0);
16636     bxe_release_phy_lock(sc);
16637 }
16638 
16639 static void
16640 bxe_pf_disable(struct bxe_softc *sc)
16641 {
16642     uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16643 
16644     val &= ~IGU_PF_CONF_FUNC_EN;
16645 
16646     REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16647     REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16648     REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16649 }
16650 
16651 static void
16652 bxe_init_pxp(struct bxe_softc *sc)
16653 {
16654     uint16_t devctl;
16655     int r_order, w_order;
16656 
16657     devctl = bxe_pcie_capability_read(sc, PCIER_DEVICE_CTL, 2);
16658 
16659     BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16660 
16661     w_order = ((devctl & PCIEM_CTL_MAX_PAYLOAD) >> 5);
16662 
16663     if (sc->mrrs == -1) {
16664         r_order = ((devctl & PCIEM_CTL_MAX_READ_REQUEST) >> 12);
16665     } else {
16666         BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16667         r_order = sc->mrrs;
16668     }
16669 
16670     ecore_init_pxp_arb(sc, r_order, w_order);
16671 }
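
/*
 * Illustration (added for clarity, not from the original sources): w_order
 * and r_order above are the raw 3-bit PCIe encodings where size is
 * 128 << order bytes. A devctl with max-payload bits 7:5 equal to 0 and
 * max-read-request bits 14:12 equal to 2 thus yields w_order = 0 (128-byte
 * payload) and r_order = 2 (512-byte read request), unless r_order is
 * overridden by the mrrs tunable.
 */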
16672 
16673 static uint32_t
16674 bxe_get_pretend_reg(struct bxe_softc *sc)
16675 {
16676     uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16677     uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16678     return (base + (SC_ABS_FUNC(sc)) * stride);
16679 }
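
/*
 * Illustration (added for clarity, not from the original sources): the
 * pretend registers sit at a fixed per-function stride, so absolute function
 * N maps to PXP2_REG_PGL_PRETEND_FUNC_F0 + N * (F1 - F0); absolute function 0
 * uses F0 and absolute function 1 uses F1.
 */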
16680 
16681 /*
16682  * Called only on E1H or E2.
16683  * When pretending to be PF, the pretend value is the function number 0..7.
16684  * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
16685  * combination.
16686  */
16687 static int
16688 bxe_pretend_func(struct bxe_softc *sc,
16689                  uint16_t         pretend_func_val)
16690 {
16691     uint32_t pretend_reg;
16692 
16693     if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16694         return (-1);
16695     }
16696 
16697     /* get my own pretend register */
16698     pretend_reg = bxe_get_pretend_reg(sc);
16699     REG_WR(sc, pretend_reg, pretend_func_val);
16700     REG_RD(sc, pretend_reg);
16701     return (0);
16702 }
16703 
16704 static void
16705 bxe_iov_init_dmae(struct bxe_softc *sc)
16706 {
16707     return;
16708 }
16709 
16710 static void
16711 bxe_iov_init_dq(struct bxe_softc *sc)
16712 {
16713     return;
16714 }
16715 
16716 /* send a NIG loopback debug packet */
16717 static void
16718 bxe_lb_pckt(struct bxe_softc *sc)
16719 {
16720     uint32_t wb_write[3];
16721 
16722     /* Ethernet source and destination addresses */
16723     wb_write[0] = 0x55555555;
16724     wb_write[1] = 0x55555555;
16725     wb_write[2] = 0x20;     /* SOP */
16726     REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16727 
16728     /* NON-IP protocol */
16729     wb_write[0] = 0x09000000;
16730     wb_write[1] = 0x55555555;
16731     wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16732     REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16733 }
16734 
16735 /*
16736  * Some of the internal memories are not directly readable from the driver.
16737  * To test them we send debug packets.
16738  */
16739 static int
16740 bxe_int_mem_test(struct bxe_softc *sc)
16741 {
16742     int factor;
16743     int count, i;
16744     uint32_t val = 0;
16745 
16746     if (CHIP_REV_IS_FPGA(sc)) {
16747         factor = 120;
16748     } else if (CHIP_REV_IS_EMUL(sc)) {
16749         factor = 200;
16750     } else {
16751         factor = 1;
16752     }
16753 
16754     /* disable inputs of parser neighbor blocks */
16755     REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16756     REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16757     REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16758     REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16759 
16760     /*  write 0 to parser credits for CFC search request */
16761     REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16762 
16763     /* send Ethernet packet */
16764     bxe_lb_pckt(sc);
16765 
16766     /* TODO: do I reset the NIG statistics? */
16767     /* Wait until NIG register shows 1 packet of size 0x10 */
16768     count = 1000 * factor;
16769     while (count) {
16770         bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16771         val = *BXE_SP(sc, wb_data[0]);
16772         if (val == 0x10) {
16773             break;
16774         }
16775 
16776         DELAY(10000);
16777         count--;
16778     }
16779 
16780     if (val != 0x10) {
16781         BLOGE(sc, "NIG timeout val=0x%x\n", val);
16782         return (-1);
16783     }
16784 
16785     /* wait until PRS register shows 1 packet */
16786     count = (1000 * factor);
16787     while (count) {
16788         val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16789         if (val == 1) {
16790             break;
16791         }
16792 
16793         DELAY(10000);
16794         count--;
16795     }
16796 
16797     if (val != 0x1) {
16798         BLOGE(sc, "PRS timeout val=0x%x\n", val);
16799         return (-2);
16800     }
16801 
16802     /* Reset and init BRB, PRS */
16803     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16804     DELAY(50000);
16805     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16806     DELAY(50000);
16807     ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16808     ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16809 
16810     /* Disable inputs of parser neighbor blocks */
16811     REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16812     REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16813     REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16814     REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16815 
16816     /* Write 0 to parser credits for CFC search request */
16817     REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16818 
16819     /* send 10 Ethernet packets */
16820     for (i = 0; i < 10; i++) {
16821         bxe_lb_pckt(sc);
16822     }
16823 
16824     /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16825     count = (1000 * factor);
16826     while (count) {
16827         bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16828         val = *BXE_SP(sc, wb_data[0]);
16829         if (val == 0xb0) {
16830             break;
16831         }
16832 
16833         DELAY(10000);
16834         count--;
16835     }
16836 
16837     if (val != 0xb0) {
16838         BLOGE(sc, "NIG timeout val=0x%x\n", val);
16839         return (-3);
16840     }
16841 
16842     /* Wait until PRS register shows 2 packets */
16843     val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16844     if (val != 2) {
16845         BLOGE(sc, "PRS timeout val=0x%x\n", val);
16846     }
16847 
16848     /* Write 1 to parser credits for CFC search request */
16849     REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16850 
16851     /* Wait until PRS register shows 3 packets */
16852     DELAY(10000 * factor);
16853 
16854     /* verify that the PRS register now shows 3 packets */
16855     val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16856     if (val != 3) {
16857         BLOGE(sc, "PRS timeout val=0x%x\n", val);
16858     }
16859 
16860     /* clear NIG EOP FIFO */
16861     for (i = 0; i < 11; i++) {
16862         REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16863     }
16864 
16865     val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16866     if (val != 1) {
16867         BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16868         return (-4);
16869     }
16870 
16871     /* Reset and init BRB, PRS, NIG */
16872     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16873     DELAY(50000);
16874     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16875     DELAY(50000);
16876     ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16877     ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16878     if (!CNIC_SUPPORT(sc)) {
16879         /* set NIC mode */
16880         REG_WR(sc, PRS_REG_NIC_MODE, 1);
16881     }
16882 
16883     /* Enable inputs of parser neighbor blocks */
16884     REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16885     REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16886     REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16887     REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16888 
16889     return (0);
16890 }
16891 
16892 static void
16893 bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16894 {
16895     int is_required;
16896     uint32_t val;
16897     int port;
16898 
16899     is_required = 0;
16900     val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16901            SHARED_HW_CFG_FAN_FAILURE_MASK);
16902 
16903     if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16904         is_required = 1;
16905     }
16906     /*
16907      * The fan failure mechanism is usually related to the PHY type since
16908      * the power consumption of the board is affected by the PHY. Currently,
16909      * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16910      */
16911     else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16912         for (port = PORT_0; port < PORT_MAX; port++) {
16913             is_required |= elink_fan_failure_det_req(sc,
16914                                                      sc->devinfo.shmem_base,
16915                                                      sc->devinfo.shmem2_base,
16916                                                      port);
16917         }
16918     }
16919 
16920     BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16921 
16922     if (is_required == 0) {
16923         return;
16924     }
16925 
16926     /* Fan failure is indicated by SPIO 5 */
16927     bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16928 
16929     /* set to active low mode */
16930     val = REG_RD(sc, MISC_REG_SPIO_INT);
16931     val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16932     REG_WR(sc, MISC_REG_SPIO_INT, val);
16933 
16934     /* enable interrupt to signal the IGU */
16935     val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16936     val |= MISC_SPIO_SPIO5;
16937     REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16938 }
16939 
16940 static void
16941 bxe_enable_blocks_attention(struct bxe_softc *sc)
16942 {
16943     uint32_t val;
16944 
16945     REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16946     if (!CHIP_IS_E1x(sc)) {
16947         REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16948     } else {
16949         REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16950     }
16951     REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16952     REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16953     /*
16954      * mask read length error interrupts in brb for parser
16955      * (parsing unit and 'checksum and crc' unit)
16956      * these errors are legal (PU reads fixed length and CAC can cause
16957      * read length error on truncated packets)
16958      */
16959     REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16960     REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16961     REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16962     REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16963     REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16964     REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16965 /*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16966 /*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16967     REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16968     REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16969     REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16970 /*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16971 /*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16972     REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16973     REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16974     REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16975     REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16976 /*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16977 /*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16978 
16979     val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16980            PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16981            PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16982     if (!CHIP_IS_E1x(sc)) {
16983         val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16984                 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16985     }
16986     REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16987 
16988     REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16989     REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16990     REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16991 /*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16992 
16993     if (!CHIP_IS_E1x(sc)) {
16994         /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16995         REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16996     }
16997 
16998     REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16999     REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
17000 /*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
17001     REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
17002 }
17003 
17004 /**
17005  * bxe_init_hw_common - initialize the HW at the COMMON phase.
17006  *
17007  * @sc:     driver handle
17008  */
17009 static int
17010 bxe_init_hw_common(struct bxe_softc *sc)
17011 {
17012     uint8_t abs_func_id;
17013     uint32_t val;
17014 
17015     BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
17016           SC_ABS_FUNC(sc));
17017 
17018     /*
17019      * take the RESET lock to protect undi_unload flow from accessing
17020      * registers while we are resetting the chip
17021      */
17022     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17023 
17024     bxe_reset_common(sc);
17025 
17026     REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
17027 
17028     val = 0xfffc;
17029     if (CHIP_IS_E3(sc)) {
17030         val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
17031         val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
17032     }
17033 
17034     REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
17035 
17036     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17037 
17038     ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
17039     BLOGD(sc, DBG_LOAD, "after misc block init\n");
17040 
17041     if (!CHIP_IS_E1x(sc)) {
17042         /*
17043          * In 4-port or 2-port mode we need to turn off master-enable for
17044          * everyone. After that we turn it back on for ourselves. So, we
17045          * disregard multi-function and always disable all functions on the
17046          * given path; this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1.
17047          */
17048         for (abs_func_id = SC_PATH(sc);
17049              abs_func_id < (E2_FUNC_MAX * 2);
17050              abs_func_id += 2) {
17051             if (abs_func_id == SC_ABS_FUNC(sc)) {
17052                 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17053                 continue;
17054             }
17055 
17056             bxe_pretend_func(sc, abs_func_id);
17057 
17058             /* clear pf enable */
17059             bxe_pf_disable(sc);
17060 
17061             bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17062         }
17063     }
17064 
17065     BLOGD(sc, DBG_LOAD, "after pf disable\n");
17066 
17067     ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
17068 
17069     if (CHIP_IS_E1(sc)) {
17070         /*
17071          * enable HW interrupt from PXP on USDM overflow
17072          * bit 16 on INT_MASK_0
17073          */
17074         REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
17075     }
17076 
17077     ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
17078     bxe_init_pxp(sc);
17079 
17080 #ifdef __BIG_ENDIAN
17081     REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
17082     REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
17083     REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
17084     REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
17085     REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
17086     /* make sure this value is 0 */
17087     REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
17088 
17089     //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
17090     REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
17091     REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
17092     REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
17093     REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
17094 #endif
17095 
17096     ecore_ilt_init_page_size(sc, INITOP_SET);
17097 
17098     if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
17099         REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
17100     }
17101 
17102     /* let the HW do its magic... */
17103     DELAY(100000);
17104 
17105     /* finish PXP init */
17106     val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
17107     if (val != 1) {
17108         BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
17109             val);
17110         return (-1);
17111     }
17112     val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
17113     if (val != 1) {
17114         BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
17115         return (-1);
17116     }
17117 
17118     BLOGD(sc, DBG_LOAD, "after pxp init\n");
17119 
17120     /*
17121      * Timer bug workaround for E2 only. We need to set the entire ILT to have
17122      * entries with value "0" and valid bit on. This needs to be done by the
17123      * first PF that is loaded in a path (i.e. common phase)
17124      */
17125     if (!CHIP_IS_E1x(sc)) {
17126 /*
17127  * In E2 there is a bug in the timers block that can cause function 6 / 7
17128  * (i.e. vnic3) to start even if it is marked as "scan-off".
17129  * This occurs when a different function (func2,3) is being marked
17130  * as "scan-off". Real-life scenario for example: if a driver is being
17131  * load-unloaded while func6,7 are down. This will cause the timer to access
17132  * the ilt, translate to a logical address and send a request to read/write.
17133  * Since the ilt for the function that is down is not valid, this will cause
17134  * a translation error which is unrecoverable.
17135  * The Workaround is intended to make sure that when this happens nothing
17136  * fatal will occur. The workaround:
17137  *  1.  First PF driver which loads on a path will:
17138  *      a.  After taking the chip out of reset, by using pretend,
17139  *          it will write "0" to the following registers of
17140  *          the other vnics.
17141  *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
17142  *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
17143  *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
17144  *          And for itself it will write '1' to
17145  *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
17146  *          dmae-operations (writing to pram for example.)
17147  *          note: this could be done for functions 6,7 only, but it is
17148  *            cleaner this way.
17149  *      b.  Write zero+valid to the entire ILT.
17150  *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
17151  *          VNIC3 (of that port). The range allocated will be the
17152  *          entire ILT. This is needed to prevent an ILT range error.
17153  *  2.  Any PF driver load flow:
17154  *      a.  ILT update with the physical addresses of the allocated
17155  *          logical pages.
17156  *      b.  Wait 20msec. - note that this timeout is needed to make
17157  *          sure there are no requests in one of the PXP internal
17158  *          queues with "old" ILT addresses.
17159  *      c.  PF enable in the PGLC.
17160  *      d.  Clear the was_error of the PF in the PGLC. (could have
17161  *          occurred while driver was down)
17162  *      e.  PF enable in the CFC (WEAK + STRONG)
17163  *      f.  Timers scan enable
17164  *  3.  PF driver unload flow:
17165  *      a.  Clear the Timers scan_en.
17166  *      b.  Polling for scan_on=0 for that PF.
17167  *      c.  Clear the PF enable bit in the PXP.
17168  *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
17169  *      e.  Write zero+valid to all ILT entries (The valid bit must
17170  *          stay set)
17171  *      f.  If this is VNIC 3 of a port then also init
17172  *          first_timers_ilt_entry to zero and last_timers_ilt_entry
17173  *          to the last entry in the ILT.
17174  *
17175  *      Notes:
17176  *      Currently the PF error in the PGLC is non-recoverable.
17177  *      In the future there will be a recovery routine for this error.
17178  *      Currently attention is masked.
17179  *      Having an MCP lock on the load/unload process does not guarantee that
17180  *      there is no Timer disable during Func6/7 enable. This is because the
17181  *      Timers scan is currently being cleared by the MCP on FLR.
17182  *      Step 2.d can be done only for PF6/7 and the driver can also check if
17183  *      there is error before clearing it. But the flow above is simpler and
17184  *      more general.
17185  *      All ILT entries are written with zero+valid, and not just the PF6/7
17186  *      ILT entries, since in the future the ILT entry allocation for
17187  *      PFs might be dynamic.
17188  */
17189         struct ilt_client_info ilt_cli;
17190         struct ecore_ilt ilt;
17191 
17192         memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
17193         memset(&ilt, 0, sizeof(struct ecore_ilt));
17194 
17195         /* initialize dummy TM client */
17196         ilt_cli.start      = 0;
17197         ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
17198         ilt_cli.client_num = ILT_CLIENT_TM;
17199 
17200         /*
17201          * Step 1: set zeroes to all ilt page entries with valid bit on
17202          * Step 2: set the timers first/last ilt entry to point
17203          * to the entire range to prevent an ILT range error for the
17204          * 3rd/4th vnic (this code assumes the vnic exists)
17205          *
17206          * both steps performed by call to ecore_ilt_client_init_op()
17207          * with dummy TM client
17208          *
17209          * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
17210          * and its counterpart are split registers
17211          */
17212 
17213         bxe_pretend_func(sc, (SC_PATH(sc) + 6));
17214         ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
17215         bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17216 
17217         REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
17218         REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
17219         REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
17220     }
17221 
17222     REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
17223     REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
17224 
17225     if (!CHIP_IS_E1x(sc)) {
17226         int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
17227                      (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
17228 
17229         ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
17230         ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
17231 
17232         /* let the HW do its magic... */
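        /*
         * Editorial note: on a production ASIC "factor" is 0, so the loop
         * below performs a single 200 ms delay and one read of
         * ATC_REG_ATC_INIT_DONE; the larger factors only extend the poll
         * budget for FPGA and emulation platforms.
         */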
17233         do {
17234             DELAY(200000);
17235             val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
17236         } while (factor-- && (val != 1));
17237 
17238         if (val != 1) {
17239             BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
17240             return (-1);
17241         }
17242     }
17243 
17244     BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
17245 
17246     ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
17247 
17248     bxe_iov_init_dmae(sc);
17249 
17250     /* clean the DMAE memory */
17251     sc->dmae_ready = 1;
17252     ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
17253 
17254     ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
17255 
17256     ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
17257 
17258     ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
17259 
17260     ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
17261 
17262     bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
17263     bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
17264     bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
17265     bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
17266 
17267     ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
17268 
17269     /* QM queues pointers table */
17270     ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17271 
17272     /* soft reset pulse */
17273     REG_WR(sc, QM_REG_SOFT_RESET, 1);
17274     REG_WR(sc, QM_REG_SOFT_RESET, 0);
17275 
17276     if (CNIC_SUPPORT(sc))
17277         ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17278 
17279     ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17280     REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17281     if (!CHIP_REV_IS_SLOW(sc)) {
17282         /* enable hw interrupt from doorbell Q */
17283         REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17284     }
17285 
17286     ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17287 
17288     ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17289     REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17290 
17291     if (!CHIP_IS_E1(sc)) {
17292         REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17293     }
17294 
17295     if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17296         if (IS_MF_AFEX(sc)) {
17297             /*
17298              * configure that AFEX and VLAN headers must be
17299              * received in AFEX mode
17300              */
17301             REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17302             REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17303             REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17304             REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17305             REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17306         } else {
17307             /*
17308              * Bit-map indicating which L2 hdrs may appear
17309              * after the basic Ethernet header
17310              */
17311             REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17312                    sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17313         }
17314     }
17315 
17316     ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17317     ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17318     ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17319     ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17320 
17321     if (!CHIP_IS_E1x(sc)) {
17322         /* reset VFC memories */
17323         REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17324                VFC_MEMORIES_RST_REG_CAM_RST |
17325                VFC_MEMORIES_RST_REG_RAM_RST);
17326         REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17327                VFC_MEMORIES_RST_REG_CAM_RST |
17328                VFC_MEMORIES_RST_REG_RAM_RST);
17329 
17330         DELAY(20000);
17331     }
17332 
17333     ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17334     ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17335     ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17336     ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17337 
17338     /* sync semi rtc */
17339     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17340            0x80000000);
17341     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17342            0x80000000);
17343 
17344     ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17345     ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17346     ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17347 
17348     if (!CHIP_IS_E1x(sc)) {
17349         if (IS_MF_AFEX(sc)) {
17350             /*
17351              * configure that AFEX and VLAN headers must be
17352              * sent in AFEX mode
17353              */
17354             REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17355             REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17356             REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17357             REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17358             REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17359         } else {
17360             REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17361                    sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17362         }
17363     }
17364 
17365     REG_WR(sc, SRC_REG_SOFT_RST, 1);
17366 
17367     ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17368 
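    /*
     * Editorial note: the writes below appear to seed the SRC (searcher)
     * block's hash key used by the CNIC offload path; the key words are
     * carried over verbatim from the original driver, not derived here.
     */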
17369     if (CNIC_SUPPORT(sc)) {
17370         REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17371         REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17372         REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17373         REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17374         REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17375         REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17376         REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17377         REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17378         REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17379         REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17380     }
17381     REG_WR(sc, SRC_REG_SOFT_RST, 0);
17382 
17383     if (sizeof(union cdu_context) != 1024) {
17384         /* we currently assume that a context is 1024 bytes */
17385         BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17386               (long)sizeof(union cdu_context));
17387     }
17388 
17389     ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
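    /*
     * Editorial note: the literal 1024 in the value below presumably mirrors
     * the 1024-byte cdu_context size checked just above; the remaining
     * shifted fields are chip parameters carried over from the original
     * driver unchanged.
     */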
17390     val = (4 << 24) + (0 << 12) + 1024;
17391     REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17392 
17393     ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17394 
17395     REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17396     /* enable context validation interrupt from CFC */
17397     REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17398 
17399     /* set the thresholds to prevent CFC/CDU race */
17400     REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17401     ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17402 
17403     if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17404         REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17405     }
17406 
17407     ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17408     ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17409 
17410     /* Reset PCIE errors for debug */
17411     REG_WR(sc, 0x2814, 0xffffffff);
17412     REG_WR(sc, 0x3820, 0xffffffff);
17413 
17414     if (!CHIP_IS_E1x(sc)) {
17415         REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17416                (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17417                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17418         REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17419                (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17420                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17421                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17422         REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17423                (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17424                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17425                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17426     }
17427 
17428     ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17429 
17430     if (!CHIP_IS_E1(sc)) {
17431         /* in E3 this is done in the per-port section */
17432         if (!CHIP_IS_E3(sc))
17433             REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17434     }
17435 
17436     if (CHIP_IS_E1H(sc)) {
17437         /* not applicable for E2 (and above ...) */
17438         REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17439     }
17440 
17441     if (CHIP_REV_IS_SLOW(sc)) {
17442         DELAY(200000);
17443     }
17444 
17445     /* finish CFC init */
17446     val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17447     if (val != 1) {
17448         BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17449         return (-1);
17450     }
17451     val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17452     if (val != 1) {
17453         BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17454         return (-1);
17455     }
17456     val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17457     if (val != 1) {
17458         BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17459         return (-1);
17460     }
17461     REG_WR(sc, CFC_REG_DEBUG0, 0);
17462 
17463     if (CHIP_IS_E1(sc)) {
17464         /* read NIG statistic to see if this is our first up since powerup */
17465         bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17466         val = *BXE_SP(sc, wb_data[0]);
17467 
17468         /* do internal memory self test */
17469         if ((val == 0) && bxe_int_mem_test(sc)) {
17470             BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17471             return (-1);
17472         }
17473     }
17474 
17475     bxe_setup_fan_failure_detection(sc);
17476 
17477     /* clear PXP2 attentions */
17478     REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17479 
17480     bxe_enable_blocks_attention(sc);
17481 
17482     if (!CHIP_REV_IS_SLOW(sc)) {
17483         ecore_enable_blocks_parity(sc);
17484     }
17485 
17486     if (!BXE_NOMCP(sc)) {
17487         if (CHIP_IS_E1x(sc)) {
17488             bxe_common_init_phy(sc);
17489         }
17490     }
17491 
17492     return (0);
17493 }
17494 
17495 /**
17496  * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17497  *
17498  * @sc:     driver handle
17499  */
17500 static int
17501 bxe_init_hw_common_chip(struct bxe_softc *sc)
17502 {
17503     int rc = bxe_init_hw_common(sc);
17504 
17505     if (rc) {
17506         BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17507         return (rc);
17508     }
17509 
17510     /* In E2 2-PORT mode, same ext phy is used for the two paths */
17511     if (!BXE_NOMCP(sc)) {
17512         bxe_common_init_phy(sc);
17513     }
17514 
17515     return (0);
17516 }
17517 
17518 static int
17519 bxe_init_hw_port(struct bxe_softc *sc)
17520 {
17521     int port = SC_PORT(sc);
17522     int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17523     uint32_t low, high;
17524     uint32_t val;
17525 
17526     BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17527 
17528     REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17529 
17530     ecore_init_block(sc, BLOCK_MISC, init_phase);
17531     ecore_init_block(sc, BLOCK_PXP, init_phase);
17532     ecore_init_block(sc, BLOCK_PXP2, init_phase);
17533 
17534     /*
17535      * Timers bug workaround: the common phase disables the pf_master bit
17536      * in pglue, so we need to enable it here before any DMAE accesses are
17537      * attempted. Therefore we manually added the enable-master to the
17538      * port phase (it also happens in the function phase).
17539      */
17540     if (!CHIP_IS_E1x(sc)) {
17541         REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17542     }
17543 
17544     ecore_init_block(sc, BLOCK_ATC, init_phase);
17545     ecore_init_block(sc, BLOCK_DMAE, init_phase);
17546     ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17547     ecore_init_block(sc, BLOCK_QM, init_phase);
17548 
17549     ecore_init_block(sc, BLOCK_TCM, init_phase);
17550     ecore_init_block(sc, BLOCK_UCM, init_phase);
17551     ecore_init_block(sc, BLOCK_CCM, init_phase);
17552     ecore_init_block(sc, BLOCK_XCM, init_phase);
17553 
17554     /* QM cid (connection) count */
17555     ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17556 
17557     if (CNIC_SUPPORT(sc)) {
17558         ecore_init_block(sc, BLOCK_TM, init_phase);
17559         REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17560         REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17561     }
17562 
17563     ecore_init_block(sc, BLOCK_DORQ, init_phase);
17564 
17565     ecore_init_block(sc, BLOCK_BRB1, init_phase);
17566 
17567     if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17568         if (IS_MF(sc)) {
17569             low = (BXE_ONE_PORT(sc) ? 160 : 246);
17570         } else if (sc->mtu > 4096) {
17571             if (BXE_ONE_PORT(sc)) {
17572                 low = 160;
17573             } else {
17574                 val = sc->mtu;
17575                 /* (24*1024 + val*4)/256 */
17576                 low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17577             }
17578         } else {
17579             low = (BXE_ONE_PORT(sc) ? 80 : 160);
17580         }
17581         high = (low + 56); /* 14*1024/256 */
17582         REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17583         REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17584     }
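    /*
     * Editorial note: a worked example of the pause-threshold arithmetic
     * above, assuming a hypothetical MTU of 9000 on a two-port E1x device
     * in single-function mode:
     *   low  = 96 + (9000 / 64) + 1 = 237    (~ (24*1024 + 9000*4)/256)
     *   high = 237 + 56 = 293                (low plus 14*1024/256)
     * The units are presumably 256-byte BRB blocks, per the /256 scaling
     * in the formulas.
     */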
17585 
17586     if (CHIP_IS_MODE_4_PORT(sc)) {
17587         REG_WR(sc, SC_PORT(sc) ?
17588                BRB1_REG_MAC_GUARANTIED_1 :
17589                BRB1_REG_MAC_GUARANTIED_0, 40);
17590     }
17591 
17592     ecore_init_block(sc, BLOCK_PRS, init_phase);
17593     if (CHIP_IS_E3B0(sc)) {
17594         if (IS_MF_AFEX(sc)) {
17595             /* configure headers for AFEX mode */
17596             REG_WR(sc, SC_PORT(sc) ?
17597                    PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17598                    PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17599             REG_WR(sc, SC_PORT(sc) ?
17600                    PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17601                    PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17602             REG_WR(sc, SC_PORT(sc) ?
17603                    PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17604                    PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17605         } else {
17606             /* Ovlan exists only if we are in multi-function +
17607              * switch-dependent mode; in switch-independent mode there
17608              * are no ovlan headers
17609              */
17610             REG_WR(sc, SC_PORT(sc) ?
17611                    PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17612                    PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17613                    (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17614         }
17615     }
17616 
17617     ecore_init_block(sc, BLOCK_TSDM, init_phase);
17618     ecore_init_block(sc, BLOCK_CSDM, init_phase);
17619     ecore_init_block(sc, BLOCK_USDM, init_phase);
17620     ecore_init_block(sc, BLOCK_XSDM, init_phase);
17621 
17622     ecore_init_block(sc, BLOCK_TSEM, init_phase);
17623     ecore_init_block(sc, BLOCK_USEM, init_phase);
17624     ecore_init_block(sc, BLOCK_CSEM, init_phase);
17625     ecore_init_block(sc, BLOCK_XSEM, init_phase);
17626 
17627     ecore_init_block(sc, BLOCK_UPB, init_phase);
17628     ecore_init_block(sc, BLOCK_XPB, init_phase);
17629 
17630     ecore_init_block(sc, BLOCK_PBF, init_phase);
17631 
17632     if (CHIP_IS_E1x(sc)) {
17633         /* configure PBF to work without PAUSE mtu 9000 */
17634         REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17635 
17636         /* update threshold */
17637         REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17638         /* update init credit */
17639         REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17640 
17641         /* probe changes */
17642         REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17643         DELAY(50);
17644         REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17645     }
17646 
17647     if (CNIC_SUPPORT(sc)) {
17648         ecore_init_block(sc, BLOCK_SRC, init_phase);
17649     }
17650 
17651     ecore_init_block(sc, BLOCK_CDU, init_phase);
17652     ecore_init_block(sc, BLOCK_CFC, init_phase);
17653 
17654     if (CHIP_IS_E1(sc)) {
17655         REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17656         REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17657     }
17658     ecore_init_block(sc, BLOCK_HC, init_phase);
17659 
17660     ecore_init_block(sc, BLOCK_IGU, init_phase);
17661 
17662     ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17663     /* init aeu_mask_attn_func_0/1:
17664      *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17665      *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17666      *             bits 4-7 are used for "per vn group attention" */
17667     val = IS_MF(sc) ? 0xF7 : 0x7;
17668     /* Enable DCBX attention for all but E1 */
17669     val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17670     REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17671 
17672     ecore_init_block(sc, BLOCK_NIG, init_phase);
17673 
17674     if (!CHIP_IS_E1x(sc)) {
17675         /* Bit-map indicating which L2 hdrs may appear after the
17676          * basic Ethernet header
17677          */
17678         if (IS_MF_AFEX(sc)) {
17679             REG_WR(sc, SC_PORT(sc) ?
17680                    NIG_REG_P1_HDRS_AFTER_BASIC :
17681                    NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17682         } else {
17683             REG_WR(sc, SC_PORT(sc) ?
17684                    NIG_REG_P1_HDRS_AFTER_BASIC :
17685                    NIG_REG_P0_HDRS_AFTER_BASIC,
17686                    IS_MF_SD(sc) ? 7 : 6);
17687         }
17688 
17689         if (CHIP_IS_E3(sc)) {
17690             REG_WR(sc, SC_PORT(sc) ?
17691                    NIG_REG_LLH1_MF_MODE :
17692                    NIG_REG_LLH_MF_MODE, IS_MF(sc));
17693         }
17694     }
17695     if (!CHIP_IS_E3(sc)) {
17696         REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17697     }
17698 
17699     if (!CHIP_IS_E1(sc)) {
17700         /* 0x2 disable mf_ov, 0x1 enable */
17701         REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17702                (IS_MF_SD(sc) ? 0x1 : 0x2));
17703 
17704         if (!CHIP_IS_E1x(sc)) {
17705             val = 0;
17706             switch (sc->devinfo.mf_info.mf_mode) {
17707             case MULTI_FUNCTION_SD:
17708                 val = 1;
17709                 break;
17710             case MULTI_FUNCTION_SI:
17711             case MULTI_FUNCTION_AFEX:
17712                 val = 2;
17713                 break;
17714             }
17715 
17716             REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17717                         NIG_REG_LLH0_CLS_TYPE), val);
17718         }
17719         REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17720         REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17721         REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17722     }
17723 
17724     /* If SPIO5 is set to generate interrupts, enable it for this port */
17725     val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17726     if (val & MISC_SPIO_SPIO5) {
17727         uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17728                                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17729         val = REG_RD(sc, reg_addr);
17730         val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17731         REG_WR(sc, reg_addr, val);
17732     }
17733 
17734     return (0);
17735 }
17736 
17737 static uint32_t
17738 bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17739                        uint32_t         reg,
17740                        uint32_t         expected,
17741                        uint32_t         poll_count)
17742 {
17743     uint32_t cur_cnt = poll_count;
17744     uint32_t val;
17745 
17746     while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17747         DELAY(FLR_WAIT_INTERVAL);
17748     }
17749 
17750     return (val);
17751 }
17752 
17753 static int
17754 bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17755                               uint32_t         reg,
17756                               char             *msg,
17757                               uint32_t         poll_cnt)
17758 {
17759     uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17760 
17761     if (val != 0) {
17762         BLOGE(sc, "%s usage count=%d\n", msg, val);
17763         return (1);
17764     }
17765 
17766     return (0);
17767 }
17768 
17769 /* Common routines with VF FLR cleanup */
17770 static uint32_t
17771 bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17772 {
17773     /* adjust polling timeout */
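    /*
     * Editorial note: each poll iteration waits FLR_WAIT_INTERVAL usec, so
     * the multipliers below stretch the total FLR wait budget by roughly
     * 2000x on emulation and 120x on FPGA; with the FLR_WAIT_* values
     * defined earlier in this file that is on the order of 20 s and 1.2 s
     * respectively, versus 10 ms on a production ASIC.
     */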
17774     if (CHIP_REV_IS_EMUL(sc)) {
17775         return (FLR_POLL_CNT * 2000);
17776     }
17777 
17778     if (CHIP_REV_IS_FPGA(sc)) {
17779         return (FLR_POLL_CNT * 120);
17780     }
17781 
17782     return (FLR_POLL_CNT);
17783 }
17784 
17785 static int
17786 bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17787                            uint32_t         poll_cnt)
17788 {
17789     /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17790     if (bxe_flr_clnup_poll_hw_counter(sc,
17791                                       CFC_REG_NUM_LCIDS_INSIDE_PF,
17792                                       "CFC PF usage counter timed out",
17793                                       poll_cnt)) {
17794         return (1);
17795     }
17796 
17797     /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17798     if (bxe_flr_clnup_poll_hw_counter(sc,
17799                                       DORQ_REG_PF_USAGE_CNT,
17800                                       "DQ PF usage counter timed out",
17801                                       poll_cnt)) {
17802         return (1);
17803     }
17804 
17805     /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17806     if (bxe_flr_clnup_poll_hw_counter(sc,
17807                                       QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17808                                       "QM PF usage counter timed out",
17809                                       poll_cnt)) {
17810         return (1);
17811     }
17812 
17813     /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17814     if (bxe_flr_clnup_poll_hw_counter(sc,
17815                                       TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17816                                       "Timers VNIC usage counter timed out",
17817                                       poll_cnt)) {
17818         return (1);
17819     }
17820 
17821     if (bxe_flr_clnup_poll_hw_counter(sc,
17822                                       TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17823                                       "Timers NUM_SCANS usage counter timed out",
17824                                       poll_cnt)) {
17825         return (1);
17826     }
17827 
17828     /* Wait for the DMAE PF usage counter to zero */
17829     if (bxe_flr_clnup_poll_hw_counter(sc,
17830                                       dmae_reg_go_c[INIT_DMAE_C(sc)],
17831                                       "DMAE dommand register timed out",
17832                                       poll_cnt)) {
17833         return (1);
17834     }
17835 
17836     return (0);
17837 }
17838 
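/*
 * Editorial note: each helper below shifts a field value into its position
 * within the SDM "operation generator" command word and masks it to the
 * field width; bxe_send_final_clnup() ORs these together to build the
 * final-cleanup command it writes to XSDM_REG_OPERATION_GEN.
 */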
17839 #define OP_GEN_PARAM(param)                                            \
17840     (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17841 #define OP_GEN_TYPE(type)                                           \
17842     (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17843 #define OP_GEN_AGG_VECT(index)                                             \
17844     (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17845 
17846 static int
17847 bxe_send_final_clnup(struct bxe_softc *sc,
17848                      uint8_t          clnup_func,
17849                      uint32_t         poll_cnt)
17850 {
17851     uint32_t op_gen_command = 0;
17852     uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17853                           CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17854     int ret = 0;
17855 
17856     if (REG_RD(sc, comp_addr)) {
17857         BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17858         return (1);
17859     }
17860 
17861     op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17862     op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17863     op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17864     op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17865 
17866     BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17867     REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17868 
17869     if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17870         BLOGE(sc, "FW final cleanup did not succeed\n");
17871         BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17872               (REG_RD(sc, comp_addr)));
17873         bxe_panic(sc, ("FLR cleanup failed\n"));
17874         return (1);
17875     }
17876 
17877     /* Zero completion for next FLR */
17878     REG_WR(sc, comp_addr, 0);
17879 
17880     return (ret);
17881 }
17882 
17883 static void
17884 bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17885                        struct pbf_pN_buf_regs *regs,
17886                        uint32_t               poll_count)
17887 {
17888     uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17889     uint32_t cur_cnt = poll_count;
17890 
17891     crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17892     crd = crd_start = REG_RD(sc, regs->crd);
17893     init_crd = REG_RD(sc, regs->init_crd);
17894 
17895     BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17896     BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17897     BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17898 
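    /*
     * Editorial note: the signed-then-unsigned subtraction in the loop
     * condition below computes how many credits have been freed since the
     * starting snapshot in a wraparound-safe way, so the poll still
     * terminates correctly if the freed-credit counter rolls over.
     */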
17899     while ((crd != init_crd) &&
17900            ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17901             (init_crd - crd_start))) {
17902         if (cur_cnt--) {
17903             DELAY(FLR_WAIT_INTERVAL);
17904             crd = REG_RD(sc, regs->crd);
17905             crd_freed = REG_RD(sc, regs->crd_freed);
17906         } else {
17907             BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17908             BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17909             BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17910             break;
17911         }
17912     }
17913 
17914     BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17915           poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17916 }
17917 
17918 static void
17919 bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17920                        struct pbf_pN_cmd_regs *regs,
17921                        uint32_t               poll_count)
17922 {
17923     uint32_t occup, to_free, freed, freed_start;
17924     uint32_t cur_cnt = poll_count;
17925 
17926     occup = to_free = REG_RD(sc, regs->lines_occup);
17927     freed = freed_start = REG_RD(sc, regs->lines_freed);
17928 
17929     BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17930     BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17931 
17932     while (occup &&
17933            ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17934         if (cur_cnt--) {
17935             DELAY(FLR_WAIT_INTERVAL);
17936             occup = REG_RD(sc, regs->lines_occup);
17937             freed = REG_RD(sc, regs->lines_freed);
17938         } else {
17939             BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17940             BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17941             BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17942             break;
17943         }
17944     }
17945 
17946     BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17947           poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17948 }
17949 
17950 static void
17951 bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17952 {
17953     struct pbf_pN_cmd_regs cmd_regs[] = {
17954         {0, (CHIP_IS_E3B0(sc)) ?
17955             PBF_REG_TQ_OCCUPANCY_Q0 :
17956             PBF_REG_P0_TQ_OCCUPANCY,
17957             (CHIP_IS_E3B0(sc)) ?
17958             PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17959             PBF_REG_P0_TQ_LINES_FREED_CNT},
17960         {1, (CHIP_IS_E3B0(sc)) ?
17961             PBF_REG_TQ_OCCUPANCY_Q1 :
17962             PBF_REG_P1_TQ_OCCUPANCY,
17963             (CHIP_IS_E3B0(sc)) ?
17964             PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17965             PBF_REG_P1_TQ_LINES_FREED_CNT},
17966         {4, (CHIP_IS_E3B0(sc)) ?
17967             PBF_REG_TQ_OCCUPANCY_LB_Q :
17968             PBF_REG_P4_TQ_OCCUPANCY,
17969             (CHIP_IS_E3B0(sc)) ?
17970             PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17971             PBF_REG_P4_TQ_LINES_FREED_CNT}
17972     };
17973 
17974     struct pbf_pN_buf_regs buf_regs[] = {
17975         {0, (CHIP_IS_E3B0(sc)) ?
17976             PBF_REG_INIT_CRD_Q0 :
17977             PBF_REG_P0_INIT_CRD ,
17978             (CHIP_IS_E3B0(sc)) ?
17979             PBF_REG_CREDIT_Q0 :
17980             PBF_REG_P0_CREDIT,
17981             (CHIP_IS_E3B0(sc)) ?
17982             PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17983             PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17984         {1, (CHIP_IS_E3B0(sc)) ?
17985             PBF_REG_INIT_CRD_Q1 :
17986             PBF_REG_P1_INIT_CRD,
17987             (CHIP_IS_E3B0(sc)) ?
17988             PBF_REG_CREDIT_Q1 :
17989             PBF_REG_P1_CREDIT,
17990             (CHIP_IS_E3B0(sc)) ?
17991             PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17992             PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17993         {4, (CHIP_IS_E3B0(sc)) ?
17994             PBF_REG_INIT_CRD_LB_Q :
17995             PBF_REG_P4_INIT_CRD,
17996             (CHIP_IS_E3B0(sc)) ?
17997             PBF_REG_CREDIT_LB_Q :
17998             PBF_REG_P4_CREDIT,
17999             (CHIP_IS_E3B0(sc)) ?
18000             PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
18001             PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
18002     };
18003 
18004     int i;
18005 
18006     /* Verify the command queues are flushed P0, P1, P4 */
18007     for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
18008         bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
18009     }
18010 
18011     /* Verify the transmission buffers are flushed P0, P1, P4 */
18012     for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
18013         bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
18014     }
18015 }
18016 
18017 static void
18018 bxe_hw_enable_status(struct bxe_softc *sc)
18019 {
18020     uint32_t val;
18021 
18022     val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
18023     BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
18024 
18025     val = REG_RD(sc, PBF_REG_DISABLE_PF);
18026     BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
18027 
18028     val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
18029     BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
18030 
18031     val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
18032     BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
18033 
18034     val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
18035     BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
18036 
18037     val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
18038     BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
18039 
18040     val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
18041     BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
18042 
18043     val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
18044     BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
18045 }
18046 
18047 static int
18048 bxe_pf_flr_clnup(struct bxe_softc *sc)
18049 {
18050     uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
18051 
18052     BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
18053 
18054     /* Re-enable PF target read access */
18055     REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
18056 
18057     /* Poll HW usage counters */
18058     BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
18059     if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
18060         return (-1);
18061     }
18062 
18063     /* Zero the igu 'trailing edge' and 'leading edge' */
18064 
18065     /* Send the FW cleanup command */
18066     if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
18067         return (-1);
18068     }
18069 
18070     /* ATC cleanup */
18071 
18072     /* Verify TX hw is flushed */
18073     bxe_tx_hw_flushed(sc, poll_cnt);
18074 
18075     /* Wait 100ms (not adjusted according to platform) */
18076     DELAY(100000);
18077 
18078     /* Verify no pending pci transactions */
18079     if (bxe_is_pcie_pending(sc)) {
18080         BLOGE(sc, "PCIE Transactions still pending\n");
18081     }
18082 
18083     /* Debug */
18084     bxe_hw_enable_status(sc);
18085 
18086     /*
18087      * Master enable - needed because WB DMAE writes are performed before
18088      * this register is re-initialized as part of the regular function init
18089      */
18090     REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18091 
18092     return (0);
18093 }
18094 
18095 static int
18096 bxe_init_hw_func(struct bxe_softc *sc)
18097 {
18098     int port = SC_PORT(sc);
18099     int func = SC_FUNC(sc);
18100     int init_phase = PHASE_PF0 + func;
18101     struct ecore_ilt *ilt = sc->ilt;
18102     uint16_t cdu_ilt_start;
18103     uint32_t addr, val;
18104     uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
18105     int i, main_mem_width, rc;
18106 
18107     BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
18108 
18109     /* FLR cleanup */
18110     if (!CHIP_IS_E1x(sc)) {
18111         rc = bxe_pf_flr_clnup(sc);
18112         if (rc) {
18113             BLOGE(sc, "FLR cleanup failed!\n");
18114             // XXX bxe_fw_dump(sc);
18115             // XXX bxe_idle_chk(sc);
18116             return (rc);
18117         }
18118     }
18119 
18120     /* set MSI reconfigure capability */
18121     if (sc->devinfo.int_block == INT_BLOCK_HC) {
18122         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
18123         val = REG_RD(sc, addr);
18124         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
18125         REG_WR(sc, addr, val);
18126     }
18127 
18128     ecore_init_block(sc, BLOCK_PXP, init_phase);
18129     ecore_init_block(sc, BLOCK_PXP2, init_phase);
18130 
18131     ilt = sc->ilt;
18132     cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
18133 
18134     for (i = 0; i < L2_ILT_LINES(sc); i++) {
18135         ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
18136         ilt->lines[cdu_ilt_start + i].page_mapping =
18137             sc->context[i].vcxt_dma.paddr;
18138         ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
18139     }
18140     ecore_ilt_init_op(sc, INITOP_SET);
18141 
18142     /* Set NIC mode */
18143     REG_WR(sc, PRS_REG_NIC_MODE, 1);
18144     BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
18145 
18146     if (!CHIP_IS_E1x(sc)) {
18147         uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
18148 
18149         /* Turn on a single ISR mode in IGU if the driver is going to use
18150          * INT#x or MSI
18151          */
18152         if (sc->interrupt_mode != INTR_MODE_MSIX) {
18153             pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
18154         }
18155 
18156         /*
18157          * Timers bug workaround: function init part.
18158          * We need to wait 20 msec after initializing the ILT to make
18159          * sure there are no requests in one of the PXP internal queues
18160          * with "old" ILT addresses.
18161          */
18162         DELAY(20000);
18163 
18164         /*
18165          * Master enable - needed because WB DMAE writes are performed
18166          * before this register is re-initialized as part of the regular
18167          * function init
18168          */
18169         REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18170         /* Enable the function in IGU */
18171         REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
18172     }
18173 
18174     sc->dmae_ready = 1;
18175 
18176     ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
18177 
18178     if (!CHIP_IS_E1x(sc))
18179         REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
18180 
18181     ecore_init_block(sc, BLOCK_ATC, init_phase);
18182     ecore_init_block(sc, BLOCK_DMAE, init_phase);
18183     ecore_init_block(sc, BLOCK_NIG, init_phase);
18184     ecore_init_block(sc, BLOCK_SRC, init_phase);
18185     ecore_init_block(sc, BLOCK_MISC, init_phase);
18186     ecore_init_block(sc, BLOCK_TCM, init_phase);
18187     ecore_init_block(sc, BLOCK_UCM, init_phase);
18188     ecore_init_block(sc, BLOCK_CCM, init_phase);
18189     ecore_init_block(sc, BLOCK_XCM, init_phase);
18190     ecore_init_block(sc, BLOCK_TSEM, init_phase);
18191     ecore_init_block(sc, BLOCK_USEM, init_phase);
18192     ecore_init_block(sc, BLOCK_CSEM, init_phase);
18193     ecore_init_block(sc, BLOCK_XSEM, init_phase);
18194 
18195     if (!CHIP_IS_E1x(sc))
18196         REG_WR(sc, QM_REG_PF_EN, 1);
18197 
18198     if (!CHIP_IS_E1x(sc)) {
18199         REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18200         REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18201         REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18202         REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18203     }
18204     ecore_init_block(sc, BLOCK_QM, init_phase);
18205 
18206     ecore_init_block(sc, BLOCK_TM, init_phase);
18207     ecore_init_block(sc, BLOCK_DORQ, init_phase);
18208 
18209     bxe_iov_init_dq(sc);
18210 
18211     ecore_init_block(sc, BLOCK_BRB1, init_phase);
18212     ecore_init_block(sc, BLOCK_PRS, init_phase);
18213     ecore_init_block(sc, BLOCK_TSDM, init_phase);
18214     ecore_init_block(sc, BLOCK_CSDM, init_phase);
18215     ecore_init_block(sc, BLOCK_USDM, init_phase);
18216     ecore_init_block(sc, BLOCK_XSDM, init_phase);
18217     ecore_init_block(sc, BLOCK_UPB, init_phase);
18218     ecore_init_block(sc, BLOCK_XPB, init_phase);
18219     ecore_init_block(sc, BLOCK_PBF, init_phase);
18220     if (!CHIP_IS_E1x(sc))
18221         REG_WR(sc, PBF_REG_DISABLE_PF, 0);
18222 
18223     ecore_init_block(sc, BLOCK_CDU, init_phase);
18224 
18225     ecore_init_block(sc, BLOCK_CFC, init_phase);
18226 
18227     if (!CHIP_IS_E1x(sc))
18228         REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
18229 
18230     if (IS_MF(sc)) {
18231         REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
18232         REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
18233     }
18234 
18235     ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
18236 
18237     /* HC init per function */
18238     if (sc->devinfo.int_block == INT_BLOCK_HC) {
18239         if (CHIP_IS_E1H(sc)) {
18240             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18241 
18242             REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18243             REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18244         }
18245         ecore_init_block(sc, BLOCK_HC, init_phase);
18246 
18247     } else {
18248         int num_segs, sb_idx, prod_offset;
18249 
18250         REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18251 
18252         if (!CHIP_IS_E1x(sc)) {
18253             REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18254             REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18255         }
18256 
18257         ecore_init_block(sc, BLOCK_IGU, init_phase);
18258 
18259         if (!CHIP_IS_E1x(sc)) {
18260             int dsb_idx = 0;
18261             /**
18262              * Producer memory:
18263              * E2 mode: address 0-135 match to the mapping memory;
18264              * 136 - PF0 default prod; 137 - PF1 default prod;
18265              * 138 - PF2 default prod; 139 - PF3 default prod;
18266              * 140 - PF0 attn prod;    141 - PF1 attn prod;
18267              * 142 - PF2 attn prod;    143 - PF3 attn prod;
18268              * 144-147 reserved.
18269              *
18270              * E1.5 mode - in backward compatible mode,
18271              * for non-default SBs, each even line in the memory
18272              * holds the U producer and each odd line holds
18273              * the C producer. The first 128 producers are for
18274              * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
18275              * producers are for the DSB for each PF.
18276              * Each PF has five segments: (the order inside each
18277              * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
18278              * 132-135 C prods; 136-139 X prods; 140-143 T prods;
18279              * 144-147 attn prods;
18280              */
18281             /* non-default-status-blocks */
18282             num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18283                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
18284             for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18285                 prod_offset = (sc->igu_base_sb + sb_idx) *
18286                     num_segs;
18287 
18288                 for (i = 0; i < num_segs; i++) {
18289                     addr = IGU_REG_PROD_CONS_MEMORY +
18290                             (prod_offset + i) * 4;
18291                     REG_WR(sc, addr, 0);
18292                 }
18293                 /* send consumer update with value 0 */
18294                 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18295                            USTORM_ID, 0, IGU_INT_NOP, 1);
18296                 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18297             }
18298 
18299             /* default-status-blocks */
18300             num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18301                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18302 
18303             if (CHIP_IS_MODE_4_PORT(sc))
18304                 dsb_idx = SC_FUNC(sc);
18305             else
18306                 dsb_idx = SC_VN(sc);
18307 
18308             prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18309                        IGU_BC_BASE_DSB_PROD + dsb_idx :
18310                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
18311 
18312             /*
18313              * igu prods come in chunks of E1HVN_MAX (4) -
18314              * it does not matter what the current chip mode is
18315              */
18316             for (i = 0; i < (num_segs * E1HVN_MAX);
18317                  i += E1HVN_MAX) {
18318                 addr = IGU_REG_PROD_CONS_MEMORY +
18319                             (prod_offset + i)*4;
18320                 REG_WR(sc, addr, 0);
18321             }
18322             /* send consumer update with 0 */
18323             if (CHIP_INT_MODE_IS_BC(sc)) {
18324                 bxe_ack_sb(sc, sc->igu_dsb_id,
18325                            USTORM_ID, 0, IGU_INT_NOP, 1);
18326                 bxe_ack_sb(sc, sc->igu_dsb_id,
18327                            CSTORM_ID, 0, IGU_INT_NOP, 1);
18328                 bxe_ack_sb(sc, sc->igu_dsb_id,
18329                            XSTORM_ID, 0, IGU_INT_NOP, 1);
18330                 bxe_ack_sb(sc, sc->igu_dsb_id,
18331                            TSTORM_ID, 0, IGU_INT_NOP, 1);
18332                 bxe_ack_sb(sc, sc->igu_dsb_id,
18333                            ATTENTION_ID, 0, IGU_INT_NOP, 1);
18334             } else {
18335                 bxe_ack_sb(sc, sc->igu_dsb_id,
18336                            USTORM_ID, 0, IGU_INT_NOP, 1);
18337                 bxe_ack_sb(sc, sc->igu_dsb_id,
18338                            ATTENTION_ID, 0, IGU_INT_NOP, 1);
18339             }
18340             bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18341 
18342             /* !!! these should become driver const once
18343                rf-tool supports split-68 const */
18344             REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18345             REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18346             REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18347             REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18348             REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18349             REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18350         }
18351     }
18352 
18353     /* Reset PCIE errors for debug */
18354     REG_WR(sc, 0x2114, 0xffffffff);
18355     REG_WR(sc, 0x2120, 0xffffffff);
18356 
18357     if (CHIP_IS_E1x(sc)) {
18358         main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18359         main_mem_base = HC_REG_MAIN_MEMORY +
18360                 SC_PORT(sc) * (main_mem_size * 4);
18361         main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18362         main_mem_width = 8;
18363 
18364         val = REG_RD(sc, main_mem_prty_clr);
18365         if (val) {
18366             BLOGD(sc, DBG_LOAD,
18367                   "Parity errors in HC block during function init (0x%x)!\n",
18368                   val);
18369         }
18370 
18371         /* Clear "false" parity errors in MSI-X table */
18372         for (i = main_mem_base;
18373              i < main_mem_base + main_mem_size * 4;
18374              i += main_mem_width) {
18375             bxe_read_dmae(sc, i, main_mem_width / 4);
18376             bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18377                            i, main_mem_width / 4);
18378         }
18379         /* Clear HC parity attention */
18380         REG_RD(sc, main_mem_prty_clr);
18381     }
18382 
18383 #if 1
18384     /* Enable STORMs SP logging */
18385     REG_WR8(sc, BAR_USTRORM_INTMEM +
18386            USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18387     REG_WR8(sc, BAR_TSTRORM_INTMEM +
18388            TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18389     REG_WR8(sc, BAR_CSTRORM_INTMEM +
18390            CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18391     REG_WR8(sc, BAR_XSTRORM_INTMEM +
18392            XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18393 #endif
18394 
18395     elink_phy_probe(&sc->link_params);
18396 
18397     return (0);
18398 }
18399 
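/*
 * Reset the physical link through elink while holding the PHY lock. If the
 * bootcode (MCP) is not available the link cannot be reset; a warning is
 * logged except on slow (emulation/FPGA) chip revisions.
 */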
18400 static void
18401 bxe_link_reset(struct bxe_softc *sc)
18402 {
18403     if (!BXE_NOMCP(sc)) {
18404         bxe_acquire_phy_lock(sc);
18405         elink_lfa_reset(&sc->link_params, &sc->link_vars);
18406         bxe_release_phy_lock(sc);
18407     } else {
18408         if (!CHIP_REV_IS_SLOW(sc)) {
18409             BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18410         }
18411     }
18412 }
18413 
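/*
 * Quiesce a port on the way down: reset the physical link, mask the port's
 * NIG interrupts, stop accepting packets into the BRB, clear the AEU
 * attention mask, and warn if BRB blocks are still occupied afterwards.
 */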
18414 static void
18415 bxe_reset_port(struct bxe_softc *sc)
18416 {
18417     int port = SC_PORT(sc);
18418     uint32_t val;
18419 
18420     ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");
18421     /* reset physical Link */
18422     bxe_link_reset(sc);
18423 
18424     REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18425 
18426     /* Do not rcv packets to BRB */
18427     REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18428     /* Do not direct rcv packets that are not for MCP to the BRB */
18429     REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18430                NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18431 
18432     /* Configure AEU */
18433     REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18434 
18435     DELAY(100000);
18436 
18437     /* Check for BRB port occupancy */
18438     val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18439     if (val) {
18440         BLOGD(sc, DBG_LOAD,
18441               "BRB1 is not empty, %d blocks are occupied\n", val);
18442     }
18443 
18444     /* TODO: Close Doorbell port? */
18445 }
18446 
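/*
 * Write a single ILT (on-chip address translation) entry: the 64-bit bus
 * address is split into two dwords and written via DMAE to the PXP2
 * register pair appropriate for the chip (E1 vs. later).
 */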
18447 static void
18448 bxe_ilt_wr(struct bxe_softc *sc,
18449            uint32_t         index,
18450            bus_addr_t       addr)
18451 {
18452     int reg;
18453     uint32_t wb_write[2];
18454 
18455     if (CHIP_IS_E1(sc)) {
18456         reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18457     } else {
18458         reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18459     }
18460 
18461     wb_write[0] = ONCHIP_ADDR1(addr);
18462     wb_write[1] = ONCHIP_ADDR2(addr);
18463     REG_WR_DMAE(sc, reg, wb_write, 2);
18464 }
18465 
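/* Zero out every ILT entry owned by the given function. */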
18466 static void
18467 bxe_clear_func_ilt(struct bxe_softc *sc,
18468                    uint32_t         func)
18469 {
18470     uint32_t i, base = FUNC_ILT_BASE(func);
18471     for (i = base; i < base + ILT_PER_FUNC; i++) {
18472         bxe_ilt_wr(sc, i, 0);
18473     }
18474 }
18475 
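/*
 * Tear down the per-function hardware state: disable the function in all
 * four STORM processors, mark its fastpath and slowpath status blocks
 * disabled, zero the SPQ data, clear the HC/IGU leading and trailing edge
 * masks, wait for the CNIC timer scan to stop, clear the function's ILT
 * (including the E2 vnic-3 timers workaround), disable the PF on non-E1x
 * chips, and mark DMAE as unusable.
 */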
18476 static void
18477 bxe_reset_func(struct bxe_softc *sc)
18478 {
18479     struct bxe_fastpath *fp;
18480     int port = SC_PORT(sc);
18481     int func = SC_FUNC(sc);
18482     int i;
18483 
18484     /* Disable the function in the FW */
18485     REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18486     REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18487     REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18488     REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18489 
18490     /* FP SBs */
18491     FOR_EACH_ETH_QUEUE(sc, i) {
18492         fp = &sc->fp[i];
18493         REG_WR8(sc, BAR_CSTRORM_INTMEM +
18494                 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18495                 SB_DISABLED);
18496     }
18497 
18498     /* SP SB */
18499     REG_WR8(sc, BAR_CSTRORM_INTMEM +
18500             CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18501             SB_DISABLED);
18502 
18503     for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18504         REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18505     }
18506 
18507     /* Configure IGU */
18508     if (sc->devinfo.int_block == INT_BLOCK_HC) {
18509         REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18510         REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18511     } else {
18512         REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18513         REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18514     }
18515 
18516     if (CNIC_LOADED(sc)) {
18517         /* Disable Timer scan */
18518         REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18519         /*
18520          * Wait for at least 10ms and up to 2 second for the timers
18521          * scan to complete
18522          */
18523         for (i = 0; i < 200; i++) {
18524             DELAY(10000);
18525             if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18526                 break;
18527         }
18528     }
18529 
18530     /* Clear ILT */
18531     bxe_clear_func_ilt(sc, func);
18532 
18533     /*
18534      * Timers workaround bug for E2: if this is vnic-3,
18535      * we need to set the entire ILT range for the timers (TM) client.
18536      */
18537     if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18538         struct ilt_client_info ilt_cli;
18539         /* use dummy TM client */
18540         memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18541         ilt_cli.start = 0;
18542         ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18543         ilt_cli.client_num = ILT_CLIENT_TM;
18544 
18545         ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18546     }
18547 
18548     /* this assumes that reset_port() was called before reset_func() */
18549     if (!CHIP_IS_E1x(sc)) {
18550         bxe_pf_disable(sc);
18551     }
18552 
18553     sc->dmae_ready = 0;
18554 }
18555 
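/* The gunzip init/end hooks below are currently no-ops in this driver. */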
18556 static int
18557 bxe_gunzip_init(struct bxe_softc *sc)
18558 {
18559     return (0);
18560 }
18561 
18562 static void
18563 bxe_gunzip_end(struct bxe_softc *sc)
18564 {
18565     return;
18566 }
18567 
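/*
 * Select the firmware data for the detected chip family (E1, E1H, or E2+):
 * run the matching ecore init routine and point sc->iro_array at the
 * corresponding IRO table. Returns -1 for an unsupported chip revision.
 */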
18568 static int
18569 bxe_init_firmware(struct bxe_softc *sc)
18570 {
18571     if (CHIP_IS_E1(sc)) {
18572         ecore_init_e1_firmware(sc);
18573         sc->iro_array = e1_iro_arr;
18574     } else if (CHIP_IS_E1H(sc)) {
18575         ecore_init_e1h_firmware(sc);
18576         sc->iro_array = e1h_iro_arr;
18577     } else if (!CHIP_IS_E1x(sc)) {
18578         ecore_init_e2_firmware(sc);
18579         sc->iro_array = e2_iro_arr;
18580     } else {
18581         BLOGE(sc, "Unsupported chip revision\n");
18582         return (-1);
18583     }
18584 
18585     return (0);
18586 }
18587 
18588 static void
18589 bxe_release_firmware(struct bxe_softc *sc)
18590 {
18591     /* Do nothing */
18592     return;
18593 }
18594 
18595 static int
18596 ecore_gunzip(struct bxe_softc *sc,
18597              const uint8_t    *zbuf,
18598              int              len)
18599 {
18600     /* XXX : Implement... */
18601     BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18602     return (FALSE);
18603 }
18604 
18605 static void
18606 ecore_reg_wr_ind(struct bxe_softc *sc,
18607                  uint32_t         addr,
18608                  uint32_t         val)
18609 {
18610     bxe_reg_wr_ind(sc, addr, val);
18611 }
18612 
18613 static void
18614 ecore_write_dmae_phys_len(struct bxe_softc *sc,
18615                           bus_addr_t       phys_addr,
18616                           uint32_t         addr,
18617                           uint32_t         len)
18618 {
18619     bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18620 }
18621 
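/*
 * Copy a dword array into STORM internal memory starting at 'addr', one
 * register write per dword. 'size' is given in bytes.
 */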
18622 void
18623 ecore_storm_memset_struct(struct bxe_softc *sc,
18624                           uint32_t         addr,
18625                           size_t           size,
18626                           uint32_t         *data)
18627 {
18628     uint8_t i;
18629     for (i = 0; i < size/4; i++) {
18630         REG_WR(sc, addr + (i * 4), data[i]);
18631     }
18632 }
18633 
18634 
18635 /*
18636  * character device - ioctl interface definitions
18637  */
18638 
18639 
18640 #include "bxe_dump.h"
18641 #include "bxe_ioctl.h"
18642 #include <sys/conf.h>
18643 
18644 static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18645                 struct thread *td);
18646 
18647 static struct cdevsw bxe_cdevsw = {
18648     .d_version = D_VERSION,
18649     .d_ioctl = bxe_eioctl,
18650     .d_name = "bxecnic",
18651 };
18652 
18653 #define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18654 
18655 
18656 #define DUMP_ALL_PRESETS        0x1FFF
18657 #define DUMP_MAX_PRESETS        13
18658 #define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18659 #define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18660 #define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18661 #define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18662 #define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18663 
18664 #define IS_REG_IN_PRESET(presets, idx)  \
18665                 ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18666 
18667 
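/*
 * Return the number of 32-bit registers contained in the given dump preset
 * for the current chip, or 0 if the chip is not recognized.
 */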
18668 static int
18669 bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18670 {
18671     if (CHIP_IS_E1(sc))
18672         return dump_num_registers[0][preset-1];
18673     else if (CHIP_IS_E1H(sc))
18674         return dump_num_registers[1][preset-1];
18675     else if (CHIP_IS_E2(sc))
18676         return dump_num_registers[2][preset-1];
18677     else if (CHIP_IS_E3A0(sc))
18678         return dump_num_registers[3][preset-1];
18679     else if (CHIP_IS_E3B0(sc))
18680         return dump_num_registers[4][preset-1];
18681     else
18682         return 0;
18683 }
18684 
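/* Total GRC dump payload length, in dwords, summed over all presets. */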
18685 static int
18686 bxe_get_total_regs_len32(struct bxe_softc *sc)
18687 {
18688     uint32_t preset_idx;
18689     int regdump_len32 = 0;
18690 
18691 
18692     /* Calculate the total preset regs length */
18693     for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18694         regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18695     }
18696 
18697     return regdump_len32;
18698 }
18699 
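/*
 * The helpers below return the chip-specific tables (and their sizes) used
 * by bxe_read_pages_regs() to dump "paged" registers on E2/E3 chips.
 */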
18700 static const uint32_t *
18701 __bxe_get_page_addr_ar(struct bxe_softc *sc)
18702 {
18703     if (CHIP_IS_E2(sc))
18704         return page_vals_e2;
18705     else if (CHIP_IS_E3(sc))
18706         return page_vals_e3;
18707     else
18708         return NULL;
18709 }
18710 
18711 static uint32_t
18712 __bxe_get_page_reg_num(struct bxe_softc *sc)
18713 {
18714     if (CHIP_IS_E2(sc))
18715         return PAGE_MODE_VALUES_E2;
18716     else if (CHIP_IS_E3(sc))
18717         return PAGE_MODE_VALUES_E3;
18718     else
18719         return 0;
18720 }
18721 
18722 static const uint32_t *
18723 __bxe_get_page_write_ar(struct bxe_softc *sc)
18724 {
18725     if (CHIP_IS_E2(sc))
18726         return page_write_regs_e2;
18727     else if (CHIP_IS_E3(sc))
18728         return page_write_regs_e3;
18729     else
18730         return NULL;
18731 }
18732 
18733 static uint32_t
18734 __bxe_get_page_write_num(struct bxe_softc *sc)
18735 {
18736     if (CHIP_IS_E2(sc))
18737         return PAGE_WRITE_REGS_E2;
18738     else if (CHIP_IS_E3(sc))
18739         return PAGE_WRITE_REGS_E3;
18740     else
18741         return 0;
18742 }
18743 
18744 static const struct reg_addr *
18745 __bxe_get_page_read_ar(struct bxe_softc *sc)
18746 {
18747     if (CHIP_IS_E2(sc))
18748         return page_read_regs_e2;
18749     else if (CHIP_IS_E3(sc))
18750         return page_read_regs_e3;
18751     else
18752         return NULL;
18753 }
18754 
18755 static uint32_t
18756 __bxe_get_page_read_num(struct bxe_softc *sc)
18757 {
18758     if (CHIP_IS_E2(sc))
18759         return PAGE_READ_REGS_E2;
18760     else if (CHIP_IS_E3(sc))
18761         return PAGE_READ_REGS_E3;
18762     else
18763         return 0;
18764 }
18765 
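/*
 * The next two helpers report whether a register (or wide-register) dump
 * table entry applies to the current chip revision.
 */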
18766 static bool
18767 bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18768 {
18769     if (CHIP_IS_E1(sc))
18770         return IS_E1_REG(reg_info->chips);
18771     else if (CHIP_IS_E1H(sc))
18772         return IS_E1H_REG(reg_info->chips);
18773     else if (CHIP_IS_E2(sc))
18774         return IS_E2_REG(reg_info->chips);
18775     else if (CHIP_IS_E3A0(sc))
18776         return IS_E3A0_REG(reg_info->chips);
18777     else if (CHIP_IS_E3B0(sc))
18778         return IS_E3B0_REG(reg_info->chips);
18779     else
18780         return 0;
18781 }
18782 
18783 static bool
18784 bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18785 {
18786     if (CHIP_IS_E1(sc))
18787         return IS_E1_REG(wreg_info->chips);
18788     else if (CHIP_IS_E1H(sc))
18789         return IS_E1H_REG(wreg_info->chips);
18790     else if (CHIP_IS_E2(sc))
18791         return IS_E2_REG(wreg_info->chips);
18792     else if (CHIP_IS_E3A0(sc))
18793         return IS_E3A0_REG(wreg_info->chips);
18794     else if (CHIP_IS_E3B0(sc))
18795         return IS_E3B0_REG(wreg_info->chips);
18796     else
18797         return 0;
18798 }
18799 
18800 /**
18801  * bxe_read_pages_regs - read "paged" registers
18802  *
18803  * @sc          device handle
18804  * @p           output buffer
18805  *
18806  * Reads "paged" memories: memories that may only be read by first writing to a
18807  * specific address ("write address") and then reading from a specific address
18808  * ("read address"). There may be more than one write address per "page" and
18809  * more than one read address per write address.
18810  */
18811 static void
18812 bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18813 {
18814     uint32_t i, j, k, n;
18815 
18816     /* addresses of the paged registers */
18817     const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18818     /* number of paged registers */
18819     int num_pages = __bxe_get_page_reg_num(sc);
18820     /* write addresses */
18821     const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18822     /* number of write addresses */
18823     int write_num = __bxe_get_page_write_num(sc);
18824     /* read addresses info */
18825     const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18826     /* number of read addresses */
18827     int read_num = __bxe_get_page_read_num(sc);
18828     uint32_t addr, size;
18829 
18830     for (i = 0; i < num_pages; i++) {
18831         for (j = 0; j < write_num; j++) {
18832             REG_WR(sc, write_addr[j], page_addr[i]);
18833 
18834             for (k = 0; k < read_num; k++) {
18835                 if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18836                     size = read_addr[k].size;
18837                     for (n = 0; n < size; n++) {
18838                         addr = read_addr[k].addr + n*4;
18839                         *p++ = REG_RD(sc, addr);
18840                     }
18841                 }
18842             }
18843         }
18844     }
18845     return;
18846 }
18847 
18848 
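/*
 * Read all registers belonging to one dump preset into 'p': the idle-check
 * registers, the regular registers, the CAM (wreg) registers together with
 * their associated read_regs, and, on E2/E3, the paged registers.
 */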
18849 static int
18850 bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18851 {
18852     uint32_t i, j, addr;
18853     const struct wreg_addr *wreg_addr_p = NULL;
18854 
18855     if (CHIP_IS_E1(sc))
18856         wreg_addr_p = &wreg_addr_e1;
18857     else if (CHIP_IS_E1H(sc))
18858         wreg_addr_p = &wreg_addr_e1h;
18859     else if (CHIP_IS_E2(sc))
18860         wreg_addr_p = &wreg_addr_e2;
18861     else if (CHIP_IS_E3A0(sc))
18862         wreg_addr_p = &wreg_addr_e3;
18863     else if (CHIP_IS_E3B0(sc))
18864         wreg_addr_p = &wreg_addr_e3b0;
18865     else
18866         return (-1);
18867 
18868     /* Read the idle_chk registers */
18869     for (i = 0; i < IDLE_REGS_COUNT; i++) {
18870         if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18871             IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18872             for (j = 0; j < idle_reg_addrs[i].size; j++)
18873                 *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18874         }
18875     }
18876 
18877     /* Read the regular registers */
18878     for (i = 0; i < REGS_COUNT; i++) {
18879         if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18880             IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18881             for (j = 0; j < reg_addrs[i].size; j++)
18882                 *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18883         }
18884     }
18885 
18886     /* Read the CAM registers */
18887     if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18888         IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18889         for (i = 0; i < wreg_addr_p->size; i++) {
18890             *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18891 
18892             /* In case of a wreg_addr register, read additional
18893              * registers from the read_regs array.
18894              */
18895             for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18896                 addr = *(wreg_addr_p->read_regs);
18897                 *p++ = REG_RD(sc, addr + j*4);
18898             }
18899         }
18900     }
18901 
18902     /* Paged registers are supported in E2 & E3 only */
18903     if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18904         /* Read "paged" registers */
18905         bxe_read_pages_regs(sc, p, preset);
18906     }
18907 
18908     return 0;
18909 }
18910 
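/*
 * Collect a GRC register dump into sc->grc_dump. Parity attentions are
 * disabled on both paths while the registers are read (the dump touches
 * registers that were never written) and re-enabled afterwards. Presets
 * containing IORs are skipped. When the interface is up, the addresses and
 * sizes of the main DMA allocations and the DMAE command memory are also
 * logged to aid debugging.
 */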
18911 int
18912 bxe_grc_dump(struct bxe_softc *sc)
18913 {
18914     int rval = 0;
18915     uint32_t preset_idx;
18916     uint8_t *buf;
18917     uint32_t size;
18918     struct  dump_header *d_hdr;
18919     uint32_t i;
18920     uint32_t reg_val;
18921     uint32_t reg_addr;
18922     uint32_t cmd_offset;
18923     struct ecore_ilt *ilt = SC_ILT(sc);
18924     struct bxe_fastpath *fp;
18925     struct ilt_client_info *ilt_cli;
18926     int grc_dump_size;
18927 
18928 
18929     if (sc->grcdump_done || sc->grcdump_started)
18930         return (rval);
18931 
18932     sc->grcdump_started = 1;
18933     BLOGI(sc, "Started collecting grcdump\n");
18934 
18935     grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18936                 sizeof(struct  dump_header);
18937 
18938     sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18939 
18940     if (sc->grc_dump == NULL) {
18941         BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
18942         return(ENOMEM);
18943     }
18944 
18945 
18946 
18947     /* Disable parity attentions for the duration of the dump, since it
18948      * may cause false alarms by reading registers that were never written.
18949      * We re-enable parity attentions right after the dump.
18950      */
18951 
18952     /* Disable parity on path 0 */
18953     bxe_pretend_func(sc, 0);
18954 
18955     ecore_disable_blocks_parity(sc);
18956 
18957     /* Disable parity on path 1 */
18958     bxe_pretend_func(sc, 1);
18959     ecore_disable_blocks_parity(sc);
18960 
18961     /* Return to current function */
18962     bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18963 
18964     buf = sc->grc_dump;
18965     d_hdr = sc->grc_dump;
18966 
18967     d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18968     d_hdr->version = BNX2X_DUMP_VERSION;
18969     d_hdr->preset = DUMP_ALL_PRESETS;
18970 
18971     if (CHIP_IS_E1(sc)) {
18972         d_hdr->dump_meta_data = DUMP_CHIP_E1;
18973     } else if (CHIP_IS_E1H(sc)) {
18974         d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18975     } else if (CHIP_IS_E2(sc)) {
18976         d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18977                 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18978     } else if (CHIP_IS_E3A0(sc)) {
18979         d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18980                 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18981     } else if (CHIP_IS_E3B0(sc)) {
18982         d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18983                 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18984     }
18985 
18986     buf += sizeof(struct  dump_header);
18987 
18988     for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18989 
18990         /* Skip presets with IOR */
18991         if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18992             (preset_idx == 11))
18993             continue;
18994 
18995         rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18996 
18997         if (rval)
18998             break;
18999 
19000         size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
19001 
19002         buf += size;
19003     }
19004 
19005     bxe_pretend_func(sc, 0);
19006     ecore_clear_blocks_parity(sc);
19007     ecore_enable_blocks_parity(sc);
19008 
19009     bxe_pretend_func(sc, 1);
19010     ecore_clear_blocks_parity(sc);
19011     ecore_enable_blocks_parity(sc);
19012 
19013     /* Return to current function */
19014     bxe_pretend_func(sc, SC_ABS_FUNC(sc));
19015 
19016 
19017 
19018     if (sc->state == BXE_STATE_OPEN) {
19019         if (sc->fw_stats_req != NULL) {
19020             BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
19021                   (uintmax_t)sc->fw_stats_req_mapping,
19022                   (uintmax_t)sc->fw_stats_data_mapping,
19023                   sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
19024         }
19025         if (sc->def_sb != NULL) {
19026             BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
19027                   (void *)sc->def_sb_dma.paddr, sc->def_sb,
19028                   sizeof(struct host_sp_status_block));
19029         }
19030         if (sc->eq_dma.vaddr != NULL) {
19031             BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
19032                   (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
19033         }
19034         if (sc->sp_dma.vaddr != NULL) {
19035             BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
19036                   (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
19037                   sizeof(struct bxe_slowpath));
19038         }
19039         if (sc->spq_dma.vaddr != NULL) {
19040             BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
19041                   (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
19042         }
19043         if (sc->gz_buf_dma.vaddr != NULL) {
19044             BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
19045                   (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
19046                   FW_BUF_SIZE);
19047         }
19048         for (i = 0; i < sc->num_queues; i++) {
19049             fp = &sc->fp[i];
19050             if (fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
19051                 fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
19052                 fp->rx_sge_dma.vaddr != NULL) {
19053 
19054                 BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
19055                       (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
19056                       sizeof(union bxe_host_hc_status_block));
19057                 BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19058                       (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
19059                       (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
19060                 BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19061                       (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
19062                       (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
19063                 BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
19064                       (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
19065                       (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
19066                 BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19067                       (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
19068                       (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
19069             }
19070         }
19071         if (ilt != NULL) {
19072             ilt_cli = &ilt->clients[1];
19073             if (ilt->lines != NULL) {
19074                 for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
19075                     BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
19076                           (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
19077                           ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
19078                 }
19079             }
19080         }
19081 
19082 
19083         cmd_offset = DMAE_REG_CMD_MEM;
19084         for (i = 0; i < 224; i++) {
19085             reg_addr = (cmd_offset + (i * 4));
19086             reg_val = REG_RD(sc, reg_addr);
19087             BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n", i,
19088                   reg_addr, reg_val);
19089         }
19090     }
19091 
19092     BLOGI(sc, "Collection of grcdump done\n");
19093     sc->grcdump_done = 1;
19094     return(rval);
19095 }
19096 
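/*
 * Create the ioctl character device for this instance: allocate the eeprom
 * staging buffer, create a /dev node named after the interface, and stash
 * the softc pointer in si_drv1 for bxe_eioctl().
 */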
19097 static int
19098 bxe_add_cdev(struct bxe_softc *sc)
19099 {
19100     sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
19101 
19102     if (sc->eeprom == NULL) {
19103         BLOGW(sc, "Unable to alloc for eeprom size buffer\n");
19104         return (-1);
19105     }
19106 
19107     sc->ioctl_dev = make_dev(&bxe_cdevsw,
19108                             sc->ifp->if_dunit,
19109                             UID_ROOT,
19110                             GID_WHEEL,
19111                             0600,
19112                             "%s",
19113                             if_name(sc->ifp));
19114 
19115     if (sc->ioctl_dev == NULL) {
19116         free(sc->eeprom, M_DEVBUF);
19117         sc->eeprom = NULL;
19118         return (-1);
19119     }
19120 
19121     sc->ioctl_dev->si_drv1 = sc;
19122 
19123     return (0);
19124 }
19125 
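/* Destroy the ioctl character device and release the eeprom staging buffer. */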
19126 static void
19127 bxe_del_cdev(struct bxe_softc *sc)
19128 {
19129     if (sc->ioctl_dev != NULL)
19130         destroy_dev(sc->ioctl_dev);
19131 
19132     if (sc->eeprom != NULL) {
19133         free(sc->eeprom, M_DEVBUF);
19134         sc->eeprom = NULL;
19135     }
19136     sc->ioctl_dev = NULL;
19137 
19138     return;
19139 }
19140 
19141 static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
19142 {
19143 
19144     if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
19145         return FALSE;
19146 
19147     return TRUE;
19148 }
19149 
19150 
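/*
 * The next two helpers write/read the NVRAM (eeprom). Access is only
 * allowed while the interface is running; otherwise -EAGAIN is returned.
 */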
19151 static int
19152 bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19153 {
19154     int rval = 0;
19155 
19156     if(!bxe_is_nvram_accessible(sc)) {
19157         BLOGW(sc, "Cannot access eeprom when interface is down\n");
19158         return (-EAGAIN);
19159     }
19160     rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);
19161 
19162 
19163     return (rval);
19164 }
19165 
19166 static int
19167 bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19168 {
19169     int rval = 0;
19170 
19171     if(!bxe_is_nvram_accessible(sc)) {
19172         BLOGW(sc, "Cannot access eeprom when interface is down\n");
19173         return (-EAGAIN);
19174     }
19175     rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);
19176 
19177     return (rval);
19178 }
19179 
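/*
 * Handle a BXE_EEPROM ioctl: for SET, copy the user data into the staging
 * buffer and write it to NVRAM; for GET, read from NVRAM into the staging
 * buffer and copy it back out to user space.
 */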
19180 static int
19181 bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
19182 {
19183     int rval = 0;
19184 
19185     switch (eeprom->eeprom_cmd) {
19186 
19187     case BXE_EEPROM_CMD_SET_EEPROM:
19188 
19189         rval = copyin(eeprom->eeprom_data, sc->eeprom,
19190                        eeprom->eeprom_data_len);
19191 
19192         if (rval)
19193             break;
19194 
19195         rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19196                        eeprom->eeprom_data_len);
19197         break;
19198 
19199     case BXE_EEPROM_CMD_GET_EEPROM:
19200 
19201         rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19202                        eeprom->eeprom_data_len);
19203 
19204         if (rval) {
19205             break;
19206         }
19207 
19208         rval = copyout(sc->eeprom, eeprom->eeprom_data,
19209                        eeprom->eeprom_data_len);
19210         break;
19211 
19212     default:
19213         rval = EINVAL;
19214         break;
19215     }
19216 
19217     if (rval) {
19218         BLOGW(sc, "ioctl cmd %d  failed rval %d\n", eeprom->eeprom_cmd, rval);
19219     }
19220 
19221     return (rval);
19222 }
19223 
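/*
 * Fill a bxe_dev_setting_t with the current link configuration: supported
 * and advertised modes, speed and duplex (when the link is up), detected
 * media type, external PHY address, and autonegotiation state.
 */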
19224 static int
19225 bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
19226 {
19227     uint32_t ext_phy_config;
19228     int port = SC_PORT(sc);
19229     int cfg_idx = bxe_get_link_cfg_idx(sc);
19230 
19231     dev_p->supported = sc->port.supported[cfg_idx] |
19232             (sc->port.supported[cfg_idx ^ 1] &
19233             (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
19234     dev_p->advertising = sc->port.advertising[cfg_idx];
19235     if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
19236         ELINK_ETH_PHY_SFP_1G_FIBER) {
19237         dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
19238         dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
19239     }
19240     if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
19241         !(sc->flags & BXE_MF_FUNC_DIS)) {
19242         dev_p->duplex = sc->link_vars.duplex;
19243         if (IS_MF(sc) && !BXE_NOMCP(sc))
19244             dev_p->speed = bxe_get_mf_speed(sc);
19245         else
19246             dev_p->speed = sc->link_vars.line_speed;
19247     } else {
19248         dev_p->duplex = DUPLEX_UNKNOWN;
19249         dev_p->speed = SPEED_UNKNOWN;
19250     }
19251 
19252     dev_p->port = bxe_media_detect(sc);
19253 
19254     ext_phy_config = SHMEM_RD(sc,
19255                          dev_info.port_hw_config[port].external_phy_config);
19256     if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
19257         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
19258         dev_p->phy_address =  sc->port.phy_addr;
19259     else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19260             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
19261         ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19262             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
19263         dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
19264     else
19265         dev_p->phy_address = 0;
19266 
19267     if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
19268         dev_p->autoneg = AUTONEG_ENABLE;
19269     else
19270        dev_p->autoneg = AUTONEG_DISABLE;
19271 
19272 
19273     return 0;
19274 }
19275 
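/*
 * Character device ioctl handler. Dispatches the grcdump size/collect
 * requests, driver/device info queries, register and PCI config space
 * read/write commands, the permanent MAC address query, and eeprom access.
 */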
19276 static int
19277 bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19278         struct thread *td)
19279 {
19280     struct bxe_softc    *sc;
19281     int                 rval = 0;
19282     bxe_grcdump_t       *dump = NULL;
19283     int grc_dump_size;
19284     bxe_drvinfo_t   *drv_infop = NULL;
19285     bxe_dev_setting_t  *dev_p;
19286     bxe_dev_setting_t  dev_set;
19287     bxe_get_regs_t  *reg_p;
19288     bxe_reg_rdw_t *reg_rdw_p;
19289     bxe_pcicfg_rdw_t *cfg_rdw_p;
19290     bxe_perm_mac_addr_t *mac_addr_p;
19291 
19292 
19293     if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19294         return ENXIO;
19295 
19296     dump = (bxe_grcdump_t *)data;
19297 
19298     switch(cmd) {
19299 
19300         case BXE_GRC_DUMP_SIZE:
19301             dump->pci_func = sc->pcie_func;
19302             dump->grcdump_size =
19303                 (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19304                      sizeof(struct  dump_header);
19305             break;
19306 
19307         case BXE_GRC_DUMP:
19308 
19309             grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19310                                 sizeof(struct  dump_header);
19311             if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
19312                 (dump->grcdump_size < grc_dump_size)) {
19313                 rval = EINVAL;
19314                 break;
19315             }
19316 
19317             if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
19318                 (!sc->grcdump_started)) {
19319                 rval =  bxe_grc_dump(sc);
19320             }
19321 
19322             if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
19323                 (sc->grc_dump != NULL))  {
19324                 dump->grcdump_dwords = grc_dump_size >> 2;
19325                 rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19326                 free(sc->grc_dump, M_DEVBUF);
19327                 sc->grc_dump = NULL;
19328                 sc->grcdump_started = 0;
19329                 sc->grcdump_done = 0;
19330             }
19331 
19332             break;
19333 
19334         case BXE_DRV_INFO:
19335             drv_infop = (bxe_drvinfo_t *)data;
19336             snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
19337             snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
19338                 BXE_DRIVER_VERSION);
19339             snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
19340                 sc->devinfo.bc_ver_str);
19341             snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
19342                 "%s", sc->fw_ver_str);
19343             drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
19344             drv_infop->reg_dump_len =
19345                 (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
19346                     + sizeof(struct  dump_header);
19347             snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
19348                 sc->pcie_bus, sc->pcie_device, sc->pcie_func);
19349             break;
19350 
19351         case BXE_DEV_SETTING:
19352             dev_p = (bxe_dev_setting_t *)data;
19353             bxe_get_settings(sc, &dev_set);
19354             dev_p->supported = dev_set.supported;
19355             dev_p->advertising = dev_set.advertising;
19356             dev_p->speed = dev_set.speed;
19357             dev_p->duplex = dev_set.duplex;
19358             dev_p->port = dev_set.port;
19359             dev_p->phy_address = dev_set.phy_address;
19360             dev_p->autoneg = dev_set.autoneg;
19361 
19362             break;
19363 
19364         case BXE_GET_REGS:
19365 
19366             reg_p = (bxe_get_regs_t *)data;
19367             grc_dump_size = reg_p->reg_buf_len;
19368 
19369             if((!sc->grcdump_done) && (!sc->grcdump_started)) {
19370                 bxe_grc_dump(sc);
19371             }
19372             if((sc->grcdump_done) && (sc->grcdump_started) &&
19373                 (sc->grc_dump != NULL))  {
19374                 rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
19375                 free(sc->grc_dump, M_DEVBUF);
19376                 sc->grc_dump = NULL;
19377                 sc->grcdump_started = 0;
19378                 sc->grcdump_done = 0;
19379             }
19380 
19381             break;
19382 
19383         case BXE_RDW_REG:
19384             reg_rdw_p = (bxe_reg_rdw_t *)data;
19385             if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
19386                 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19387                 reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
19388 
19389             if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
19390                 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19391                 REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
19392 
19393             break;
19394 
19395         case BXE_RDW_PCICFG:
19396             cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
19397             if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
19398 
19399                 cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
19400                                          cfg_rdw_p->cfg_width);
19401 
19402             } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
19403                 pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
19404                             cfg_rdw_p->cfg_width);
19405             } else {
19406                 BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
19407             }
19408             break;
19409 
19410         case BXE_MAC_ADDR:
19411             mac_addr_p = (bxe_perm_mac_addr_t *)data;
19412             snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
19413                 sc->mac_addr_str);
19414             break;
19415 
19416         case BXE_EEPROM:
19417             rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
19418             break;
19419 
19420 
19421         default:
19422             break;
19423     }
19424 
19425     return (rval);
19426 }
19427 
19428 #ifdef DEBUGNET
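/*
 * debugnet (netdump) support: report the receive-queue and cluster
 * parameters, and provide minimal transmit/poll hooks that run in polled
 * mode while the kernel dumps over the network.
 */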
19429 static void
19430 bxe_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
19431 {
19432 	struct bxe_softc *sc;
19433 
19434 	sc = if_getsoftc(ifp);
19435 	BXE_CORE_LOCK(sc);
19436 	*nrxr = sc->num_queues;
19437 	*ncl = DEBUGNET_MAX_IN_FLIGHT;
19438 	*clsize = sc->fp[0].mbuf_alloc_size;
19439 	BXE_CORE_UNLOCK(sc);
19440 }
19441 
19442 static void
19443 bxe_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused)
19444 {
19445 }
19446 
19447 static int
19448 bxe_debugnet_transmit(struct ifnet *ifp, struct mbuf *m)
19449 {
19450 	struct bxe_softc *sc;
19451 	int error;
19452 
19453 	sc = if_getsoftc(ifp);
19454 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
19455 	    IFF_DRV_RUNNING || !sc->link_vars.link_up)
19456 		return (ENOENT);
19457 
19458 	error = bxe_tx_encap(&sc->fp[0], &m);
19459 	if (error != 0 && m != NULL)
19460 		m_freem(m);
19461 	return (error);
19462 }
19463 
19464 static int
19465 bxe_debugnet_poll(struct ifnet *ifp, int count)
19466 {
19467 	struct bxe_softc *sc;
19468 	int i;
19469 
19470 	sc = if_getsoftc(ifp);
19471 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
19472 	    !sc->link_vars.link_up)
19473 		return (ENOENT);
19474 
19475 	for (i = 0; i < sc->num_queues; i++)
19476 		(void)bxe_rxeof(sc, &sc->fp[i]);
19477 	(void)bxe_txeof(sc, &sc->fp[0]);
19478 	return (0);
19479 }
19480 #endif /* DEBUGNET */
19481