xref: /freebsd/sys/dev/bxe/bxe.c (revision 8881d206f4e68b564c2c5f50fc717086fc3e827a)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #define BXE_DRIVER_VERSION "1.78.91"
33 
34 #include "bxe.h"
35 #include "ecore_sp.h"
36 #include "ecore_init.h"
37 #include "ecore_init_ops.h"
38 
39 #include "57710_int_offsets.h"
40 #include "57711_int_offsets.h"
41 #include "57712_int_offsets.h"
42 
43 /*
44  * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
45  * explicitly here for older kernels that don't include this changeset.
46  */
47 #ifndef CTLTYPE_U64
48 #define CTLTYPE_U64      CTLTYPE_QUAD
49 #define sysctl_handle_64 sysctl_handle_quad
50 #endif
51 
52 /*
53  * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
54  * here as zero (0) for older kernels that don't include this changeset,
55  * thereby masking the functionality.
56  */
57 #ifndef CSUM_TCP_IPV6
58 #define CSUM_TCP_IPV6 0
59 #define CSUM_UDP_IPV6 0
60 #endif
61 
62 #define BXE_DEF_SB_ATT_IDX 0x0001
63 #define BXE_DEF_SB_IDX     0x0002
64 
65 /*
66  * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the
67  * per-function HW initialization.
68  */
69 #define FLR_WAIT_USEC     10000 /* 10 msecs */
70 #define FLR_WAIT_INTERVAL 50    /* usecs */
71 #define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
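/*
 * Illustrative sketch (not part of the driver): the FLR macros above are
 * meant to drive a bounded polling loop, i.e. read a status register up to
 * FLR_POLL_CNT times with FLR_WAIT_INTERVAL usecs between reads, giving the
 * 10 msec total budget. A hypothetical loop would look roughly like:
 *
 *     uint32_t cnt;
 *     for (cnt = 0; cnt < FLR_POLL_CNT; cnt++) {
 *         if (REG_RD(sc, some_flr_status_reg) == expected) {
 *             break;
 *         }
 *         DELAY(FLR_WAIT_INTERVAL);
 *     }
 *
 * where 'some_flr_status_reg' and 'expected' are placeholders for the
 * per-block registers actually polled by bxe_pf_flr_clnup().
 */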
72 
73 struct pbf_pN_buf_regs {
74     int pN;
75     uint32_t init_crd;
76     uint32_t crd;
77     uint32_t crd_freed;
78 };
79 
80 struct pbf_pN_cmd_regs {
81     int pN;
82     uint32_t lines_occup;
83     uint32_t lines_freed;
84 };
85 
86 /*
87  * PCI Device ID Table used by bxe_probe().
88  */
89 #define BXE_DEVDESC_MAX 64
90 static struct bxe_device_type bxe_devs[] = {
91     {
92         BRCM_VENDORID,
93         CHIP_NUM_57710,
94         PCI_ANY_ID, PCI_ANY_ID,
95         "QLogic NetXtreme II BCM57710 10GbE"
96     },
97     {
98         BRCM_VENDORID,
99         CHIP_NUM_57711,
100         PCI_ANY_ID, PCI_ANY_ID,
101         "QLogic NetXtreme II BCM57711 10GbE"
102     },
103     {
104         BRCM_VENDORID,
105         CHIP_NUM_57711E,
106         PCI_ANY_ID, PCI_ANY_ID,
107         "QLogic NetXtreme II BCM57711E 10GbE"
108     },
109     {
110         BRCM_VENDORID,
111         CHIP_NUM_57712,
112         PCI_ANY_ID, PCI_ANY_ID,
113         "QLogic NetXtreme II BCM57712 10GbE"
114     },
115     {
116         BRCM_VENDORID,
117         CHIP_NUM_57712_MF,
118         PCI_ANY_ID, PCI_ANY_ID,
119         "QLogic NetXtreme II BCM57712 MF 10GbE"
120     },
121     {
122         BRCM_VENDORID,
123         CHIP_NUM_57800,
124         PCI_ANY_ID, PCI_ANY_ID,
125         "QLogic NetXtreme II BCM57800 10GbE"
126     },
127     {
128         BRCM_VENDORID,
129         CHIP_NUM_57800_MF,
130         PCI_ANY_ID, PCI_ANY_ID,
131         "QLogic NetXtreme II BCM57800 MF 10GbE"
132     },
133     {
134         BRCM_VENDORID,
135         CHIP_NUM_57810,
136         PCI_ANY_ID, PCI_ANY_ID,
137         "QLogic NetXtreme II BCM57810 10GbE"
138     },
139     {
140         BRCM_VENDORID,
141         CHIP_NUM_57810_MF,
142         PCI_ANY_ID, PCI_ANY_ID,
143         "QLogic NetXtreme II BCM57810 MF 10GbE"
144     },
145     {
146         BRCM_VENDORID,
147         CHIP_NUM_57811,
148         PCI_ANY_ID, PCI_ANY_ID,
149         "QLogic NetXtreme II BCM57811 10GbE"
150     },
151     {
152         BRCM_VENDORID,
153         CHIP_NUM_57811_MF,
154         PCI_ANY_ID, PCI_ANY_ID,
155         "QLogic NetXtreme II BCM57811 MF 10GbE"
156     },
157     {
158         BRCM_VENDORID,
159         CHIP_NUM_57840_4_10,
160         PCI_ANY_ID, PCI_ANY_ID,
161         "QLogic NetXtreme II BCM57840 4x10GbE"
162     },
163     {
164         QLOGIC_VENDORID,
165         CHIP_NUM_57840_4_10,
166         PCI_ANY_ID, PCI_ANY_ID,
167         "QLogic NetXtreme II BCM57840 4x10GbE"
168     },
169     {
170         BRCM_VENDORID,
171         CHIP_NUM_57840_2_20,
172         PCI_ANY_ID, PCI_ANY_ID,
173         "QLogic NetXtreme II BCM57840 2x20GbE"
174     },
175     {
176         BRCM_VENDORID,
177         CHIP_NUM_57840_MF,
178         PCI_ANY_ID, PCI_ANY_ID,
179         "QLogic NetXtreme II BCM57840 MF 10GbE"
180     },
181     {
182         0, 0, 0, 0, NULL
183     }
184 };
185 
186 MALLOC_DECLARE(M_BXE_ILT);
187 MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
188 
189 /*
190  * FreeBSD device entry points.
191  */
192 static int bxe_probe(device_t);
193 static int bxe_attach(device_t);
194 static int bxe_detach(device_t);
195 static int bxe_shutdown(device_t);
196 
197 
198 /*
199  * FreeBSD KLD module/device interface event handler method.
200  */
201 static device_method_t bxe_methods[] = {
202     /* Device interface (device_if.h) */
203     DEVMETHOD(device_probe,     bxe_probe),
204     DEVMETHOD(device_attach,    bxe_attach),
205     DEVMETHOD(device_detach,    bxe_detach),
206     DEVMETHOD(device_shutdown,  bxe_shutdown),
207     /* Bus interface (bus_if.h) */
208     DEVMETHOD(bus_print_child,  bus_generic_print_child),
209     DEVMETHOD(bus_driver_added, bus_generic_driver_added),
210     KOBJMETHOD_END
211 };
212 
213 /*
214  * FreeBSD KLD Module data declaration
215  */
216 static driver_t bxe_driver = {
217     "bxe",                   /* module name */
218     bxe_methods,             /* event handler */
219     sizeof(struct bxe_softc) /* extra data */
220 };
221 
222 /*
223  * FreeBSD dev class is needed to manage dev instances and
224  * to associate with a bus type
225  */
226 static devclass_t bxe_devclass;
227 
228 MODULE_DEPEND(bxe, pci, 1, 1, 1);
229 MODULE_DEPEND(bxe, ether, 1, 1, 1);
230 DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
231 
232 DEBUGNET_DEFINE(bxe);
233 
234 /* resources needed for unloading a previously loaded device */
235 
236 #define BXE_PREV_WAIT_NEEDED 1
237 struct mtx bxe_prev_mtx;
238 MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
239 struct bxe_prev_list_node {
240     LIST_ENTRY(bxe_prev_list_node) node;
241     uint8_t bus;
242     uint8_t slot;
243     uint8_t path;
244     uint8_t aer; /* XXX automatic error recovery */
245     uint8_t undi;
246 };
247 static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
248 
249 static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
250 
251 /* Tunable device values... */
252 
253 SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
254     "bxe driver parameters");
255 
256 /* Debug */
257 unsigned long bxe_debug = 0;
258 SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
259              &bxe_debug, 0, "Debug logging mode");
260 
261 /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
262 static int bxe_interrupt_mode = INTR_MODE_MSIX;
263 SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
264            &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
265 
266 /* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
267 static int bxe_queue_count = 4;
268 SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
269            &bxe_queue_count, 0, "Multi-Queue queue count");
270 
271 /* max number of buffers per queue (default RX_BD_USABLE) */
272 static int bxe_max_rx_bufs = 0;
273 SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
274            &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
275 
276 /* Host interrupt coalescing RX tick timer (usecs) */
277 static int bxe_hc_rx_ticks = 25;
278 SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
279            &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
280 
281 /* Host interrupt coalescing TX tick timer (usecs) */
282 static int bxe_hc_tx_ticks = 50;
283 SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
284            &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
285 
286 /* Maximum number of Rx packets to process at a time */
287 static int bxe_rx_budget = 0xffffffff;
288 SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
289            &bxe_rx_budget, 0, "Rx processing budget");
290 
291 /* Maximum LRO aggregation size */
292 static int bxe_max_aggregation_size = 0;
293 SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
294            &bxe_max_aggregation_size, 0, "max aggregation size");
295 
296 /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
297 static int bxe_mrrs = -1;
298 SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
299            &bxe_mrrs, 0, "PCIe maximum read request size");
300 
301 /* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
302 static int bxe_autogreeen = 0;
303 SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
304            &bxe_autogreeen, 0, "AutoGrEEEn support");
305 
306 /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
307 static int bxe_udp_rss = 0;
308 SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
309            &bxe_udp_rss, 0, "UDP RSS support");
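/*
 * Illustrative note: the CTLFLAG_RDTUN entries above are loader tunables,
 * so they can be set before the driver attaches, e.g. in /boot/loader.conf:
 *
 *     hw.bxe.debug=0x1
 *     hw.bxe.interrupt_mode=2
 *     hw.bxe.queue_count=8
 *
 * The values shown are examples only; see the individual descriptions above
 * for the accepted ranges.
 */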
310 
311 
312 #define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
313 
314 #define STATS_OFFSET32(stat_name)                   \
315     (offsetof(struct bxe_eth_stats, stat_name) / 4)
316 
317 #define Q_STATS_OFFSET32(stat_name)                   \
318     (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
319 
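/*
 * Illustrative sketch (assumptions noted): each table entry below records a
 * dword offset into the stats structure plus a size of 4 or 8 bytes. A
 * consumer can view the structure as an array of uint32_t and read either a
 * single dword or a 64-bit value split across the "_hi"/"_lo" pair, roughly:
 *
 *     uint32_t *p = (uint32_t *)&sc->eth_stats + bxe_eth_stats_arr[i].offset;
 *     uint64_t v  = (bxe_eth_stats_arr[i].size == 4) ?
 *                       *p : HILO_U64(*p, *(p + 1));
 *
 * HILO_U64() is assumed here to combine the high/low dwords; the actual
 * sysctl plumbing that consumes these tables lives further down in this file.
 */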
320 static const struct {
321     uint32_t offset;
322     uint32_t size;
323     uint32_t flags;
324 #define STATS_FLAGS_PORT  1
325 #define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
326 #define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
327     char string[STAT_NAME_LEN];
328 } bxe_eth_stats_arr[] = {
329     { STATS_OFFSET32(total_bytes_received_hi),
330                 8, STATS_FLAGS_BOTH, "rx_bytes" },
331     { STATS_OFFSET32(error_bytes_received_hi),
332                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
333     { STATS_OFFSET32(total_unicast_packets_received_hi),
334                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
335     { STATS_OFFSET32(total_multicast_packets_received_hi),
336                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
337     { STATS_OFFSET32(total_broadcast_packets_received_hi),
338                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
339     { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
340                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
341     { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
342                 8, STATS_FLAGS_PORT, "rx_align_errors" },
343     { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
344                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
345     { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
346                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
347     { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
348                 8, STATS_FLAGS_PORT, "rx_fragments" },
349     { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
350                 8, STATS_FLAGS_PORT, "rx_jabbers" },
351     { STATS_OFFSET32(no_buff_discard_hi),
352                 8, STATS_FLAGS_BOTH, "rx_discards" },
353     { STATS_OFFSET32(mac_filter_discard),
354                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
355     { STATS_OFFSET32(mf_tag_discard),
356                 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
357     { STATS_OFFSET32(pfc_frames_received_hi),
358                 8, STATS_FLAGS_PORT, "pfc_frames_received" },
359     { STATS_OFFSET32(pfc_frames_sent_hi),
360                 8, STATS_FLAGS_PORT, "pfc_frames_sent" },
361     { STATS_OFFSET32(brb_drop_hi),
362                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
363     { STATS_OFFSET32(brb_truncate_hi),
364                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
365     { STATS_OFFSET32(pause_frames_received_hi),
366                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
367     { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
368                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
369     { STATS_OFFSET32(nig_timer_max),
370                 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
371     { STATS_OFFSET32(total_bytes_transmitted_hi),
372                 8, STATS_FLAGS_BOTH, "tx_bytes" },
373     { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
374                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
375     { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
376                 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
377     { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
378                 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
379     { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
380                 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
381     { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
382                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
383     { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
384                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
385     { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
386                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
387     { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
388                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
389     { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
390                 8, STATS_FLAGS_PORT, "tx_deferred" },
391     { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
392                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
393     { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
394                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
395     { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
396                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
397     { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
398                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
399     { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
400                 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
401     { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
402                 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
403     { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
404                 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
405     { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
406                 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
407     { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
408                 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
409     { STATS_OFFSET32(etherstatspktsover1522octets_hi),
410                 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
411     { STATS_OFFSET32(pause_frames_sent_hi),
412                 8, STATS_FLAGS_PORT, "tx_pause_frames" },
413     { STATS_OFFSET32(total_tpa_aggregations_hi),
414                 8, STATS_FLAGS_FUNC, "tpa_aggregations" },
415     { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
416                 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
417     { STATS_OFFSET32(total_tpa_bytes_hi),
418                 8, STATS_FLAGS_FUNC, "tpa_bytes"},
419     { STATS_OFFSET32(eee_tx_lpi),
420                 4, STATS_FLAGS_PORT, "eee_tx_lpi"},
421     { STATS_OFFSET32(rx_calls),
422                 4, STATS_FLAGS_FUNC, "rx_calls"},
423     { STATS_OFFSET32(rx_pkts),
424                 4, STATS_FLAGS_FUNC, "rx_pkts"},
425     { STATS_OFFSET32(rx_tpa_pkts),
426                 4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
427     { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
428                 4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
429     { STATS_OFFSET32(rx_bxe_service_rxsgl),
430                 4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
431     { STATS_OFFSET32(rx_jumbo_sge_pkts),
432                 4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
433     { STATS_OFFSET32(rx_soft_errors),
434                 4, STATS_FLAGS_FUNC, "rx_soft_errors"},
435     { STATS_OFFSET32(rx_hw_csum_errors),
436                 4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
437     { STATS_OFFSET32(rx_ofld_frames_csum_ip),
438                 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
439     { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
440                 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
441     { STATS_OFFSET32(rx_budget_reached),
442                 4, STATS_FLAGS_FUNC, "rx_budget_reached"},
443     { STATS_OFFSET32(tx_pkts),
444                 4, STATS_FLAGS_FUNC, "tx_pkts"},
445     { STATS_OFFSET32(tx_soft_errors),
446                 4, STATS_FLAGS_FUNC, "tx_soft_errors"},
447     { STATS_OFFSET32(tx_ofld_frames_csum_ip),
448                 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
449     { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
450                 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
451     { STATS_OFFSET32(tx_ofld_frames_csum_udp),
452                 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
453     { STATS_OFFSET32(tx_ofld_frames_lso),
454                 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
455     { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
456                 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
457     { STATS_OFFSET32(tx_encap_failures),
458                 4, STATS_FLAGS_FUNC, "tx_encap_failures"},
459     { STATS_OFFSET32(tx_hw_queue_full),
460                 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
461     { STATS_OFFSET32(tx_hw_max_queue_depth),
462                 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
463     { STATS_OFFSET32(tx_dma_mapping_failure),
464                 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
465     { STATS_OFFSET32(tx_max_drbr_queue_depth),
466                 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
467     { STATS_OFFSET32(tx_window_violation_std),
468                 4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
469     { STATS_OFFSET32(tx_window_violation_tso),
470                 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
471     { STATS_OFFSET32(tx_chain_lost_mbuf),
472                 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
473     { STATS_OFFSET32(tx_frames_deferred),
474                 4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
475     { STATS_OFFSET32(tx_queue_xoff),
476                 4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
477     { STATS_OFFSET32(mbuf_defrag_attempts),
478                 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
479     { STATS_OFFSET32(mbuf_defrag_failures),
480                 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
481     { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
482                 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
483     { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
484                 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
485     { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
486                 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
487     { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
488                 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
489     { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
490                 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
491     { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
492                 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
493     { STATS_OFFSET32(mbuf_alloc_tx),
494                 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
495     { STATS_OFFSET32(mbuf_alloc_rx),
496                 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
497     { STATS_OFFSET32(mbuf_alloc_sge),
498                 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
499     { STATS_OFFSET32(mbuf_alloc_tpa),
500                 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
501     { STATS_OFFSET32(tx_queue_full_return),
502                 4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
503     { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
504                 4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
505     { STATS_OFFSET32(tx_request_link_down_failures),
506                 4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
507     { STATS_OFFSET32(bd_avail_too_less_failures),
508                 4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
509     { STATS_OFFSET32(tx_mq_not_empty),
510                 4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
511     { STATS_OFFSET32(nsegs_path1_errors),
512                 4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
513     { STATS_OFFSET32(nsegs_path2_errors),
514                 4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
515 
516 
517 };
518 
519 static const struct {
520     uint32_t offset;
521     uint32_t size;
522     char string[STAT_NAME_LEN];
523 } bxe_eth_q_stats_arr[] = {
524     { Q_STATS_OFFSET32(total_bytes_received_hi),
525                 8, "rx_bytes" },
526     { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
527                 8, "rx_ucast_packets" },
528     { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
529                 8, "rx_mcast_packets" },
530     { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
531                 8, "rx_bcast_packets" },
532     { Q_STATS_OFFSET32(no_buff_discard_hi),
533                 8, "rx_discards" },
534     { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
535                 8, "tx_bytes" },
536     { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
537                 8, "tx_ucast_packets" },
538     { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
539                 8, "tx_mcast_packets" },
540     { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
541                 8, "tx_bcast_packets" },
542     { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
543                 8, "tpa_aggregations" },
544     { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
545                 8, "tpa_aggregated_frames"},
546     { Q_STATS_OFFSET32(total_tpa_bytes_hi),
547                 8, "tpa_bytes"},
548     { Q_STATS_OFFSET32(rx_calls),
549                 4, "rx_calls"},
550     { Q_STATS_OFFSET32(rx_pkts),
551                 4, "rx_pkts"},
552     { Q_STATS_OFFSET32(rx_tpa_pkts),
553                 4, "rx_tpa_pkts"},
554     { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
555                 4, "rx_erroneous_jumbo_sge_pkts"},
556     { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
557                 4, "rx_bxe_service_rxsgl"},
558     { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
559                 4, "rx_jumbo_sge_pkts"},
560     { Q_STATS_OFFSET32(rx_soft_errors),
561                 4, "rx_soft_errors"},
562     { Q_STATS_OFFSET32(rx_hw_csum_errors),
563                 4, "rx_hw_csum_errors"},
564     { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
565                 4, "rx_ofld_frames_csum_ip"},
566     { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
567                 4, "rx_ofld_frames_csum_tcp_udp"},
568     { Q_STATS_OFFSET32(rx_budget_reached),
569                 4, "rx_budget_reached"},
570     { Q_STATS_OFFSET32(tx_pkts),
571                 4, "tx_pkts"},
572     { Q_STATS_OFFSET32(tx_soft_errors),
573                 4, "tx_soft_errors"},
574     { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
575                 4, "tx_ofld_frames_csum_ip"},
576     { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
577                 4, "tx_ofld_frames_csum_tcp"},
578     { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
579                 4, "tx_ofld_frames_csum_udp"},
580     { Q_STATS_OFFSET32(tx_ofld_frames_lso),
581                 4, "tx_ofld_frames_lso"},
582     { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
583                 4, "tx_ofld_frames_lso_hdr_splits"},
584     { Q_STATS_OFFSET32(tx_encap_failures),
585                 4, "tx_encap_failures"},
586     { Q_STATS_OFFSET32(tx_hw_queue_full),
587                 4, "tx_hw_queue_full"},
588     { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
589                 4, "tx_hw_max_queue_depth"},
590     { Q_STATS_OFFSET32(tx_dma_mapping_failure),
591                 4, "tx_dma_mapping_failure"},
592     { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
593                 4, "tx_max_drbr_queue_depth"},
594     { Q_STATS_OFFSET32(tx_window_violation_std),
595                 4, "tx_window_violation_std"},
596     { Q_STATS_OFFSET32(tx_window_violation_tso),
597                 4, "tx_window_violation_tso"},
598     { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
599                 4, "tx_chain_lost_mbuf"},
600     { Q_STATS_OFFSET32(tx_frames_deferred),
601                 4, "tx_frames_deferred"},
602     { Q_STATS_OFFSET32(tx_queue_xoff),
603                 4, "tx_queue_xoff"},
604     { Q_STATS_OFFSET32(mbuf_defrag_attempts),
605                 4, "mbuf_defrag_attempts"},
606     { Q_STATS_OFFSET32(mbuf_defrag_failures),
607                 4, "mbuf_defrag_failures"},
608     { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
609                 4, "mbuf_rx_bd_alloc_failed"},
610     { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
611                 4, "mbuf_rx_bd_mapping_failed"},
612     { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
613                 4, "mbuf_rx_tpa_alloc_failed"},
614     { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
615                 4, "mbuf_rx_tpa_mapping_failed"},
616     { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
617                 4, "mbuf_rx_sge_alloc_failed"},
618     { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
619                 4, "mbuf_rx_sge_mapping_failed"},
620     { Q_STATS_OFFSET32(mbuf_alloc_tx),
621                 4, "mbuf_alloc_tx"},
622     { Q_STATS_OFFSET32(mbuf_alloc_rx),
623                 4, "mbuf_alloc_rx"},
624     { Q_STATS_OFFSET32(mbuf_alloc_sge),
625                 4, "mbuf_alloc_sge"},
626     { Q_STATS_OFFSET32(mbuf_alloc_tpa),
627                 4, "mbuf_alloc_tpa"},
628     { Q_STATS_OFFSET32(tx_queue_full_return),
629                 4, "tx_queue_full_return"},
630     { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
631                 4, "bxe_tx_mq_sc_state_failures"},
632     { Q_STATS_OFFSET32(tx_request_link_down_failures),
633                 4, "tx_request_link_down_failures"},
634     { Q_STATS_OFFSET32(bd_avail_too_less_failures),
635                 4, "bd_avail_too_less_failures"},
636     { Q_STATS_OFFSET32(tx_mq_not_empty),
637                 4, "tx_mq_not_empty"},
638     { Q_STATS_OFFSET32(nsegs_path1_errors),
639                 4, "nsegs_path1_errors"},
640     { Q_STATS_OFFSET32(nsegs_path2_errors),
641                 4, "nsegs_path2_errors"}
642 
643 
644 };
645 
646 #define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
647 #define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
648 
649 
650 static void    bxe_cmng_fns_init(struct bxe_softc *sc,
651                                  uint8_t          read_cfg,
652                                  uint8_t          cmng_type);
653 static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
654 static void    storm_memset_cmng(struct bxe_softc *sc,
655                                  struct cmng_init *cmng,
656                                  uint8_t          port);
657 static void    bxe_set_reset_global(struct bxe_softc *sc);
658 static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
659 static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
660                                  int              engine);
661 static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
662 static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
663                                    uint8_t          *global,
664                                    uint8_t          print);
665 static void    bxe_int_disable(struct bxe_softc *sc);
666 static int     bxe_release_leader_lock(struct bxe_softc *sc);
667 static void    bxe_pf_disable(struct bxe_softc *sc);
668 static void    bxe_free_fp_buffers(struct bxe_softc *sc);
669 static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
670                                       struct bxe_fastpath *fp,
671                                       uint16_t            rx_bd_prod,
672                                       uint16_t            rx_cq_prod,
673                                       uint16_t            rx_sge_prod);
674 static void    bxe_link_report_locked(struct bxe_softc *sc);
675 static void    bxe_link_report(struct bxe_softc *sc);
676 static void    bxe_link_status_update(struct bxe_softc *sc);
677 static void    bxe_periodic_callout_func(void *xsc);
678 static void    bxe_periodic_start(struct bxe_softc *sc);
679 static void    bxe_periodic_stop(struct bxe_softc *sc);
680 static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
681                                     uint16_t prev_index,
682                                     uint16_t index);
683 static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
684                                      int                 queue);
685 static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
686                                      uint16_t            index);
687 static uint8_t bxe_txeof(struct bxe_softc *sc,
688                          struct bxe_fastpath *fp);
689 static void    bxe_task_fp(struct bxe_fastpath *fp);
690 static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
691                                      struct mbuf      *m,
692                                      uint8_t          contents);
693 static int     bxe_alloc_mem(struct bxe_softc *sc);
694 static void    bxe_free_mem(struct bxe_softc *sc);
695 static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
696 static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
697 static int     bxe_interrupt_attach(struct bxe_softc *sc);
698 static void    bxe_interrupt_detach(struct bxe_softc *sc);
699 static void    bxe_set_rx_mode(struct bxe_softc *sc);
700 static int     bxe_init_locked(struct bxe_softc *sc);
701 static int     bxe_stop_locked(struct bxe_softc *sc);
702 static void    bxe_sp_err_timeout_task(void *arg, int pending);
703 void           bxe_parity_recover(struct bxe_softc *sc);
704 void           bxe_handle_error(struct bxe_softc *sc);
705 static __noinline int bxe_nic_load(struct bxe_softc *sc,
706                                    int              load_mode);
707 static __noinline int bxe_nic_unload(struct bxe_softc *sc,
708                                      uint32_t         unload_mode,
709                                      uint8_t          keep_link);
710 
711 static void bxe_handle_sp_tq(void *context, int pending);
712 static void bxe_handle_fp_tq(void *context, int pending);
713 
714 static int bxe_add_cdev(struct bxe_softc *sc);
715 static void bxe_del_cdev(struct bxe_softc *sc);
716 int bxe_grc_dump(struct bxe_softc *sc);
717 static int bxe_alloc_buf_rings(struct bxe_softc *sc);
718 static void bxe_free_buf_rings(struct bxe_softc *sc);
719 
720 /* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
721 uint32_t
722 calc_crc32(uint8_t  *crc32_packet,
723            uint32_t crc32_length,
724            uint32_t crc32_seed,
725            uint8_t  complement)
726 {
727    uint32_t byte         = 0;
728    uint32_t bit          = 0;
729    uint8_t  msb          = 0;
730    uint32_t temp         = 0;
731    uint32_t shft         = 0;
732    uint8_t  current_byte = 0;
733    uint32_t crc32_result = crc32_seed;
734    const uint32_t CRC32_POLY = 0x1edc6f41;
735 
736    if ((crc32_packet == NULL) ||
737        (crc32_length == 0) ||
738        ((crc32_length % 8) != 0))
739     {
740         return (crc32_result);
741     }
742 
743     for (byte = 0; byte < crc32_length; byte = byte + 1)
744     {
745         current_byte = crc32_packet[byte];
746         for (bit = 0; bit < 8; bit = bit + 1)
747         {
748             /* msb = crc32_result[31]; */
749             msb = (uint8_t)(crc32_result >> 31);
750 
751             crc32_result = crc32_result << 1;
752 
753             /* if (msb != current_byte[bit]) */
754             if (msb != (0x1 & (current_byte >> bit)))
755             {
756                 crc32_result = crc32_result ^ CRC32_POLY;
757                 /* crc32_result[0] = 1 */
758                 crc32_result |= 1;
759             }
760         }
761     }
762 
763     /* Last step is to:
764      * 1. "mirror" every bit
765      * 2. swap the 4 bytes
766      * 3. complement each bit
767      */
768 
769     /* Mirror */
770     temp = crc32_result;
771     shft = sizeof(crc32_result) * 8 - 1;
772 
773     for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
774     {
775         temp <<= 1;
776         temp |= crc32_result & 1;
777         shft-- ;
778     }
779 
780     /* temp[31-bit] = crc32_result[bit] */
781     temp <<= shft;
782 
783     /* Swap */
784     /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
785     {
786         uint32_t t0, t1, t2, t3;
787         t0 = (0x000000ff & (temp >> 24));
788         t1 = (0x0000ff00 & (temp >> 8));
789         t2 = (0x00ff0000 & (temp << 8));
790         t3 = (0xff000000 & (temp << 24));
791         crc32_result = t0 | t1 | t2 | t3;
792     }
793 
794     /* Complement */
795     if (complement)
796     {
797         crc32_result = ~crc32_result;
798     }
799 
800     return (crc32_result);
801 }
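/*
 * Usage sketch (illustrative only): calc_crc32() requires a nonzero buffer
 * length that is a multiple of 8 bytes and otherwise returns the seed
 * unchanged. A typical invocation might be:
 *
 *     uint8_t  buf[64];   // length must be a multiple of 8
 *     uint32_t crc;
 *
 *     crc = calc_crc32(buf, sizeof(buf), 0xffffffff, 1);
 *
 * with 0xffffffff as a typical seed and complement enabled so the final
 * value is bit-inverted. The polynomial above (0x1edc6f41) is the
 * CRC-32C (Castagnoli) polynomial.
 */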
802 
803 int
804 bxe_test_bit(int                    nr,
805              volatile unsigned long *addr)
806 {
807     return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
808 }
809 
810 void
811 bxe_set_bit(unsigned int           nr,
812             volatile unsigned long *addr)
813 {
814     atomic_set_acq_long(addr, (1 << nr));
815 }
816 
817 void
818 bxe_clear_bit(int                    nr,
819               volatile unsigned long *addr)
820 {
821     atomic_clear_acq_long(addr, (1 << nr));
822 }
823 
824 int
825 bxe_test_and_set_bit(int                    nr,
826                        volatile unsigned long *addr)
827 {
828     unsigned long x;
829     nr = (1 << nr);
830     do {
831         x = *addr;
832     } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
833     // if (x & nr) bit_was_set; else bit_was_not_set;
834     return (x & nr);
835 }
836 
837 int
838 bxe_test_and_clear_bit(int                    nr,
839                        volatile unsigned long *addr)
840 {
841     unsigned long x;
842     nr = (1 << nr);
843     do {
844         x = *addr;
845     } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
846     // if (x & nr) bit_was_set; else bit_was_not_set;
847     return (x & nr);
848 }
849 
850 int
851 bxe_cmpxchg(volatile int *addr,
852             int          old,
853             int          new)
854 {
855     int x;
856     do {
857         x = *addr;
858     } while (atomic_cmpset_acq_int(addr, old, new) == 0);
859     return (x);
860 }
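/*
 * Illustrative sketch: the wrappers above give Linux-style atomic bit and
 * cmpxchg semantics on top of FreeBSD's atomic(9) primitives. A common
 * pattern is a one-shot flag guarded by test-and-set, e.g.:
 *
 *     if (bxe_test_and_set_bit(SOME_FLAG_BIT, &sc->some_flags) == 0) {
 *         // bit was previously clear; this caller won the race and
 *         // performs the one-time work
 *     }
 *
 * SOME_FLAG_BIT and sc->some_flags are placeholders; the driver's real flag
 * words are defined elsewhere.
 */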
861 
862 /*
863  * Get DMA memory from the OS.
864  *
865  * Validates that the OS has provided DMA buffers in response to a
866  * bus_dmamap_load call and saves the physical address of those buffers.
867  * When the callback is used, bus_dmamap_load() itself returns 0, so any
868  * mapping failure is recorded in the bxe_dma structure (paddr and nseg
869  * are cleared) for the caller to detect.
870  *
871  * Returns:
872  *   Nothing.
873  */
874 static void
875 bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
876 {
877     struct bxe_dma *dma = arg;
878 
879     if (error) {
880         dma->paddr = 0;
881         dma->nseg  = 0;
882         BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
883     } else {
884         dma->paddr = segs->ds_addr;
885         dma->nseg  = nseg;
886     }
887 }
888 
889 /*
890  * Allocate a block of memory and map it for DMA. No partial completions
891  * allowed and release any resources acquired if we can't acquire all
892  * resources.
893  *
894  * Returns:
895  *   0 = Success, !0 = Failure
896  */
897 int
898 bxe_dma_alloc(struct bxe_softc *sc,
899               bus_size_t       size,
900               struct bxe_dma   *dma,
901               const char       *msg)
902 {
903     int rc;
904 
905     if (dma->size > 0) {
906         BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
907               (unsigned long)dma->size);
908         return (1);
909     }
910 
911     memset(dma, 0, sizeof(*dma)); /* sanity */
912     dma->sc   = sc;
913     dma->size = size;
914     snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
915 
916     rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
917                             BCM_PAGE_SIZE,      /* alignment */
918                             0,                  /* boundary limit */
919                             BUS_SPACE_MAXADDR,  /* restricted low */
920                             BUS_SPACE_MAXADDR,  /* restricted hi */
921                             NULL,               /* addr filter() */
922                             NULL,               /* addr filter() arg */
923                             size,               /* max map size */
924                             1,                  /* num discontinuous */
925                             size,               /* max seg size */
926                             BUS_DMA_ALLOCNOW,   /* flags */
927                             NULL,               /* lock() */
928                             NULL,               /* lock() arg */
929                             &dma->tag);         /* returned dma tag */
930     if (rc != 0) {
931         BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
932         memset(dma, 0, sizeof(*dma));
933         return (1);
934     }
935 
936     rc = bus_dmamem_alloc(dma->tag,
937                           (void **)&dma->vaddr,
938                           (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
939                           &dma->map);
940     if (rc != 0) {
941         BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
942         bus_dma_tag_destroy(dma->tag);
943         memset(dma, 0, sizeof(*dma));
944         return (1);
945     }
946 
947     rc = bus_dmamap_load(dma->tag,
948                          dma->map,
949                          dma->vaddr,
950                          size,
951                          bxe_dma_map_addr, /* BLOGD in here */
952                          dma,
953                          BUS_DMA_NOWAIT);
954     if (rc != 0) {
955         BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
956         bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
957         bus_dma_tag_destroy(dma->tag);
958         memset(dma, 0, sizeof(*dma));
959         return (1);
960     }
961 
962     return (0);
963 }
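/*
 * Usage sketch (illustrative): a DMA block is typically allocated once at
 * attach/load time and torn down with bxe_dma_free() below, which reverses
 * the load/alloc/tag-create steps above. For example:
 *
 *     struct bxe_dma dma;
 *
 *     memset(&dma, 0, sizeof(dma));
 *     if (bxe_dma_alloc(sc, 0x1000, &dma, "example block") != 0) {
 *         return (ENOMEM);
 *     }
 *     // dma.vaddr is the KVA, dma.paddr the bus address for the device
 *     ...
 *     bxe_dma_free(sc, &dma);
 *
 * The field names mirror the structure used in this file; the size and tag
 * string are arbitrary examples.
 */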
964 
965 void
966 bxe_dma_free(struct bxe_softc *sc,
967              struct bxe_dma   *dma)
968 {
969     if (dma->size > 0) {
970         DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
971 
972         bus_dmamap_sync(dma->tag, dma->map,
973                         (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
974         bus_dmamap_unload(dma->tag, dma->map);
975         bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
976         bus_dma_tag_destroy(dma->tag);
977     }
978 
979     memset(dma, 0, sizeof(*dma));
980 }
981 
982 /*
983  * These indirect read and write routines are only used during init.
984  * The locking is handled by the MCP.
985  */
986 
987 void
988 bxe_reg_wr_ind(struct bxe_softc *sc,
989                uint32_t         addr,
990                uint32_t         val)
991 {
992     pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
993     pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
994     pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
995 }
996 
997 uint32_t
998 bxe_reg_rd_ind(struct bxe_softc *sc,
999                uint32_t         addr)
1000 {
1001     uint32_t val;
1002 
1003     pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
1004     val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
1005     pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
1006 
1007     return (val);
1008 }
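/*
 * Illustrative note: the two routines above implement indirect GRC access
 * through the PCI config window (PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA), so
 * they work even before the BAR-mapped register window is usable. A
 * read-modify-write of a register by its GRC address might look like:
 *
 *     uint32_t val;
 *
 *     val  = bxe_reg_rd_ind(sc, some_grc_addr);
 *     val |= some_bit;
 *     bxe_reg_wr_ind(sc, some_grc_addr, val);
 *
 * 'some_grc_addr' and 'some_bit' are placeholders for real register
 * definitions; as noted above, these helpers are intended only for init
 * paths where the MCP serializes access.
 */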
1009 
1010 static int
1011 bxe_acquire_hw_lock(struct bxe_softc *sc,
1012                     uint32_t         resource)
1013 {
1014     uint32_t lock_status;
1015     uint32_t resource_bit = (1 << resource);
1016     int func = SC_FUNC(sc);
1017     uint32_t hw_lock_control_reg;
1018     int cnt;
1019 
1020     /* validate the resource is within range */
1021     if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1022         BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1023             " resource_bit 0x%x\n", resource, resource_bit);
1024         return (-1);
1025     }
1026 
1027     if (func <= 5) {
1028         hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1029     } else {
1030         hw_lock_control_reg =
1031                 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1032     }
1033 
1034     /* validate the resource is not already taken */
1035     lock_status = REG_RD(sc, hw_lock_control_reg);
1036     if (lock_status & resource_bit) {
1037         BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1038               resource, lock_status, resource_bit);
1039         return (-1);
1040     }
1041 
1042     /* try every 5ms for 5 seconds */
1043     for (cnt = 0; cnt < 1000; cnt++) {
1044         REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1045         lock_status = REG_RD(sc, hw_lock_control_reg);
1046         if (lock_status & resource_bit) {
1047             return (0);
1048         }
1049         DELAY(5000);
1050     }
1051 
1052     BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1053         resource, resource_bit);
1054     return (-1);
1055 }
1056 
1057 static int
1058 bxe_release_hw_lock(struct bxe_softc *sc,
1059                     uint32_t         resource)
1060 {
1061     uint32_t lock_status;
1062     uint32_t resource_bit = (1 << resource);
1063     int func = SC_FUNC(sc);
1064     uint32_t hw_lock_control_reg;
1065 
1066     /* validate the resource is within range */
1067     if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1068         BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1069             " resource_bit 0x%x\n", resource, resource_bit);
1070         return (-1);
1071     }
1072 
1073     if (func <= 5) {
1074         hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1075     } else {
1076         hw_lock_control_reg =
1077                 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1078     }
1079 
1080     /* validate the resource is currently taken */
1081     lock_status = REG_RD(sc, hw_lock_control_reg);
1082     if (!(lock_status & resource_bit)) {
1083         BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1084               resource, lock_status, resource_bit);
1085         return (-1);
1086     }
1087 
1088     REG_WR(sc, hw_lock_control_reg, resource_bit);
1089     return (0);
1090 }
1091 static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1092 {
1093 	BXE_PHY_LOCK(sc);
1094 	bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1095 }
1096 
1097 static void bxe_release_phy_lock(struct bxe_softc *sc)
1098 {
1099 	bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1100 	BXE_PHY_UNLOCK(sc);
1101 }
1102 /*
1103  * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1104  * had we done things the other way around, if two pfs from the same port
1105  * would attempt to access nvram at the same time, we could run into a
1106  * scenario such as:
1107  * pf A takes the port lock.
1108  * pf B succeeds in taking the same lock since they are from the same port.
1109  * pf A takes the per pf misc lock. Performs eeprom access.
1110  * pf A finishes. Unlocks the per pf misc lock.
1111  * pf B takes the lock and proceeds to perform its own access.
1112  * pf A unlocks the per port lock, while pf B is still working (!).
1113  * mcp takes the per port lock and corrupts pf B's access (and/or has its own
1114  * access corrupted by pf B).
1115  */
1116 static int
1117 bxe_acquire_nvram_lock(struct bxe_softc *sc)
1118 {
1119     int port = SC_PORT(sc);
1120     int count, i;
1121     uint32_t val = 0;
1122 
1123     /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1124     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1125 
1126     /* adjust timeout for emulation/FPGA */
1127     count = NVRAM_TIMEOUT_COUNT;
1128     if (CHIP_REV_IS_SLOW(sc)) {
1129         count *= 100;
1130     }
1131 
1132     /* request access to nvram interface */
1133     REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1134            (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1135 
1136     for (i = 0; i < count*10; i++) {
1137         val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1138         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1139             break;
1140         }
1141 
1142         DELAY(5);
1143     }
1144 
1145     if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1146         BLOGE(sc, "Cannot get access to nvram interface "
1147             "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1148             port, val);
1149         return (-1);
1150     }
1151 
1152     return (0);
1153 }
1154 
1155 static int
1156 bxe_release_nvram_lock(struct bxe_softc *sc)
1157 {
1158     int port = SC_PORT(sc);
1159     int count, i;
1160     uint32_t val = 0;
1161 
1162     /* adjust timeout for emulation/FPGA */
1163     count = NVRAM_TIMEOUT_COUNT;
1164     if (CHIP_REV_IS_SLOW(sc)) {
1165         count *= 100;
1166     }
1167 
1168     /* relinquish nvram interface */
1169     REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1170            (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1171 
1172     for (i = 0; i < count*10; i++) {
1173         val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1174         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1175             break;
1176         }
1177 
1178         DELAY(5);
1179     }
1180 
1181     if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1182         BLOGE(sc, "Cannot free access to nvram interface "
1183             "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1184             port, val);
1185         return (-1);
1186     }
1187 
1188     /* release HW lock: protect against other PFs in PF Direct Assignment */
1189     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1190 
1191     return (0);
1192 }
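/*
 * Illustrative sketch: callers bracket NVRAM activity with the acquire/
 * release pair above plus the access-enable helpers below, as
 * bxe_nvram_read()/bxe_nvram_write() do:
 *
 *     if (bxe_acquire_nvram_lock(sc) != 0)
 *         return (-1);
 *     bxe_enable_nvram_access(sc);
 *     // ... dword-granular reads/writes ...
 *     bxe_disable_nvram_access(sc);
 *     bxe_release_nvram_lock(sc);
 *
 * The HW lock is held for the full sequence so other PFs cannot interleave,
 * per the lock-ordering comment above.
 */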
1193 
1194 static void
1195 bxe_enable_nvram_access(struct bxe_softc *sc)
1196 {
1197     uint32_t val;
1198 
1199     val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1200 
1201     /* enable both bits, even on read */
1202     REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1203            (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1204 }
1205 
1206 static void
1207 bxe_disable_nvram_access(struct bxe_softc *sc)
1208 {
1209     uint32_t val;
1210 
1211     val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1212 
1213     /* disable both bits, even after read */
1214     REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1215            (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1216                     MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1217 }
1218 
1219 static int
1220 bxe_nvram_read_dword(struct bxe_softc *sc,
1221                      uint32_t         offset,
1222                      uint32_t         *ret_val,
1223                      uint32_t         cmd_flags)
1224 {
1225     int count, i, rc;
1226     uint32_t val;
1227 
1228     /* build the command word */
1229     cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1230 
1231     /* need to clear DONE bit separately */
1232     REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1233 
1234     /* address of the NVRAM to read from */
1235     REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1236            (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1237 
1238     /* issue a read command */
1239     REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1240 
1241     /* adjust timeout for emulation/FPGA */
1242     count = NVRAM_TIMEOUT_COUNT;
1243     if (CHIP_REV_IS_SLOW(sc)) {
1244         count *= 100;
1245     }
1246 
1247     /* wait for completion */
1248     *ret_val = 0;
1249     rc = -1;
1250     for (i = 0; i < count; i++) {
1251         DELAY(5);
1252         val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1253 
1254         if (val & MCPR_NVM_COMMAND_DONE) {
1255             val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1256             /* we read nvram data in cpu order,
1257              * but the caller sees it as an array of bytes;
1258              * converting to big-endian will do the work
1259              */
1260             *ret_val = htobe32(val);
1261             rc = 0;
1262             break;
1263         }
1264     }
1265 
1266     if (rc == -1) {
1267         BLOGE(sc, "nvram read timeout expired "
1268             "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1269             offset, cmd_flags, val);
1270     }
1271 
1272     return (rc);
1273 }
1274 
1275 static int
1276 bxe_nvram_read(struct bxe_softc *sc,
1277                uint32_t         offset,
1278                uint8_t          *ret_buf,
1279                int              buf_size)
1280 {
1281     uint32_t cmd_flags;
1282     uint32_t val;
1283     int rc;
1284 
1285     if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1286         BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1287               offset, buf_size);
1288         return (-1);
1289     }
1290 
1291     if ((offset + buf_size) > sc->devinfo.flash_size) {
1292         BLOGE(sc, "Invalid parameter, "
1293                   "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1294               offset, buf_size, sc->devinfo.flash_size);
1295         return (-1);
1296     }
1297 
1298     /* request access to nvram interface */
1299     rc = bxe_acquire_nvram_lock(sc);
1300     if (rc) {
1301         return (rc);
1302     }
1303 
1304     /* enable access to nvram interface */
1305     bxe_enable_nvram_access(sc);
1306 
1307     /* read the first word(s) */
1308     cmd_flags = MCPR_NVM_COMMAND_FIRST;
1309     while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1310         rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1311         memcpy(ret_buf, &val, 4);
1312 
1313         /* advance to the next dword */
1314         offset += sizeof(uint32_t);
1315         ret_buf += sizeof(uint32_t);
1316         buf_size -= sizeof(uint32_t);
1317         cmd_flags = 0;
1318     }
1319 
1320     if (rc == 0) {
1321         cmd_flags |= MCPR_NVM_COMMAND_LAST;
1322         rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1323         memcpy(ret_buf, &val, 4);
1324     }
1325 
1326     /* disable access to nvram interface */
1327     bxe_disable_nvram_access(sc);
1328     bxe_release_nvram_lock(sc);
1329 
1330     return (rc);
1331 }
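/*
 * Usage sketch (illustrative): offsets and sizes must be dword-aligned,
 * nonzero, and within the reported flash size. For example, reading 16
 * bytes from a hypothetical offset:
 *
 *     uint8_t buf[16];
 *
 *     if (bxe_nvram_read(sc, 0x100, buf, sizeof(buf)) != 0) {
 *         // handle read failure
 *     }
 *
 * The offset 0x100 is only an example; real callers derive offsets from the
 * shmem/NVRAM layout definitions.
 */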
1332 
1333 static int
1334 bxe_nvram_write_dword(struct bxe_softc *sc,
1335                       uint32_t         offset,
1336                       uint32_t         val,
1337                       uint32_t         cmd_flags)
1338 {
1339     int count, i, rc;
1340 
1341     /* build the command word */
1342     cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1343 
1344     /* need to clear DONE bit separately */
1345     REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1346 
1347     /* write the data */
1348     REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1349 
1350     /* address of the NVRAM to write to */
1351     REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1352            (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1353 
1354     /* issue the write command */
1355     REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1356 
1357     /* adjust timeout for emulation/FPGA */
1358     count = NVRAM_TIMEOUT_COUNT;
1359     if (CHIP_REV_IS_SLOW(sc)) {
1360         count *= 100;
1361     }
1362 
1363     /* wait for completion */
1364     rc = -1;
1365     for (i = 0; i < count; i++) {
1366         DELAY(5);
1367         val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1368         if (val & MCPR_NVM_COMMAND_DONE) {
1369             rc = 0;
1370             break;
1371         }
1372     }
1373 
1374     if (rc == -1) {
1375         BLOGE(sc, "nvram write timeout expired "
1376             "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1377             offset, cmd_flags, val);
1378     }
1379 
1380     return (rc);
1381 }
1382 
1383 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
1384 
1385 static int
1386 bxe_nvram_write1(struct bxe_softc *sc,
1387                  uint32_t         offset,
1388                  uint8_t          *data_buf,
1389                  int              buf_size)
1390 {
1391     uint32_t cmd_flags;
1392     uint32_t align_offset;
1393     uint32_t val;
1394     int rc;
1395 
1396     if ((offset + buf_size) > sc->devinfo.flash_size) {
1397         BLOGE(sc, "Invalid parameter, "
1398                   "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1399               offset, buf_size, sc->devinfo.flash_size);
1400         return (-1);
1401     }
1402 
1403     /* request access to nvram interface */
1404     rc = bxe_acquire_nvram_lock(sc);
1405     if (rc) {
1406         return (rc);
1407     }
1408 
1409     /* enable access to nvram interface */
1410     bxe_enable_nvram_access(sc);
1411 
1412     cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1413     align_offset = (offset & ~0x03);
1414     rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1415 
1416     if (rc == 0) {
1417         val &= ~(0xff << BYTE_OFFSET(offset));
1418         val |= (*data_buf << BYTE_OFFSET(offset));
1419 
1420         /* nvram data is returned as an array of bytes;
1421          * convert it back to cpu order
1422          */
1423         val = be32toh(val);
1424 
1425         rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1426     }
1427 
1428     /* disable access to nvram interface */
1429     bxe_disable_nvram_access(sc);
1430     bxe_release_nvram_lock(sc);
1431 
1432     return (rc);
1433 }
1434 
1435 static int
1436 bxe_nvram_write(struct bxe_softc *sc,
1437                 uint32_t         offset,
1438                 uint8_t          *data_buf,
1439                 int              buf_size)
1440 {
1441     uint32_t cmd_flags;
1442     uint32_t val;
1443     uint32_t written_so_far;
1444     int rc;
1445 
1446     if (buf_size == 1) {
1447         return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1448     }
1449 
1450     if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1451         BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1452               offset, buf_size);
1453         return (-1);
1454     }
1455 
1456     if (buf_size == 0) {
1457         return (0); /* nothing to do */
1458     }
1459 
1460     if ((offset + buf_size) > sc->devinfo.flash_size) {
1461         BLOGE(sc, "Invalid parameter, "
1462                   "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1463               offset, buf_size, sc->devinfo.flash_size);
1464         return (-1);
1465     }
1466 
1467     /* request access to nvram interface */
1468     rc = bxe_acquire_nvram_lock(sc);
1469     if (rc) {
1470         return (rc);
1471     }
1472 
1473     /* enable access to nvram interface */
1474     bxe_enable_nvram_access(sc);
1475 
1476     written_so_far = 0;
1477     cmd_flags = MCPR_NVM_COMMAND_FIRST;
1478     while ((written_so_far < buf_size) && (rc == 0)) {
1479         if (written_so_far == (buf_size - sizeof(uint32_t))) {
1480             cmd_flags |= MCPR_NVM_COMMAND_LAST;
1481         } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1482             cmd_flags |= MCPR_NVM_COMMAND_LAST;
1483         } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1484             cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1485         }
1486 
1487         memcpy(&val, data_buf, 4);
1488 
1489         rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1490 
1491         /* advance to the next dword */
1492         offset += sizeof(uint32_t);
1493         data_buf += sizeof(uint32_t);
1494         written_so_far += sizeof(uint32_t);
1495         cmd_flags = 0;
1496     }
1497 
1498     /* disable access to nvram interface */
1499     bxe_disable_nvram_access(sc);
1500     bxe_release_nvram_lock(sc);
1501 
1502     return (rc);
1503 }
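
/*
 * The dword loop above brackets each NVRAM page with FIRST/LAST command
 * flags. As a sketch (assuming a 256 byte NVRAM_PAGE_SIZE, which is typical
 * for these flash parts but not guaranteed here), a 16-byte write starting
 * 8 bytes before a page boundary issues:
 *
 *     dword 0: FIRST    start of the request
 *     dword 1: LAST     (offset + 4) lands on a page boundary
 *     dword 2: FIRST    offset is now page aligned, new page begins
 *     dword 3: LAST     last dword of the buffer
 */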
1504 
1505 /* copy command into DMAE command memory and set DMAE command Go */
1506 void
1507 bxe_post_dmae(struct bxe_softc    *sc,
1508               struct dmae_cmd *dmae,
1509               int                 idx)
1510 {
1511     uint32_t cmd_offset;
1512     int i;
1513 
1514     cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1515     for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1516         REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1517     }
1518 
1519     REG_WR(sc, dmae_reg_go_c[idx], 1);
1520 }
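
/*
 * Each command slot in the DMAE command memory is sizeof(struct dmae_cmd)
 * bytes wide; the command is copied out 32 bits at a time and the matching
 * per-channel "go" register is then written to start the transfer.
 */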
1521 
1522 uint32_t
1523 bxe_dmae_opcode_add_comp(uint32_t opcode,
1524                          uint8_t  comp_type)
1525 {
1526     return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1527                       DMAE_CMD_C_TYPE_ENABLE));
1528 }
1529 
1530 uint32_t
1531 bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1532 {
1533     return (opcode & ~DMAE_CMD_SRC_RESET);
1534 }
1535 
1536 uint32_t
1537 bxe_dmae_opcode(struct bxe_softc *sc,
1538                 uint8_t          src_type,
1539                 uint8_t          dst_type,
1540                 uint8_t          with_comp,
1541                 uint8_t          comp_type)
1542 {
1543     uint32_t opcode = 0;
1544 
1545     opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1546                (dst_type << DMAE_CMD_DST_SHIFT));
1547 
1548     opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1549 
1550     opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1551 
1552     opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1553                (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1554 
1555     opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1556 
1557 #ifdef __BIG_ENDIAN
1558     opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1559 #else
1560     opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1561 #endif
1562 
1563     if (with_comp) {
1564         opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1565     }
1566 
1567     return (opcode);
1568 }
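
/*
 * A typical caller builds the opcode once per command. For example, the
 * read path further below asks for a GRC -> PCI transfer that completes by
 * writing DMAE_COMP_VAL to a host (PCI) address:
 *
 *     opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
 *                              TRUE, DMAE_COMP_PCI);
 *
 * which is exactly what bxe_prep_dmae_with_comp() does.
 */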
1569 
1570 static void
1571 bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1572                         struct dmae_cmd *dmae,
1573                         uint8_t             src_type,
1574                         uint8_t             dst_type)
1575 {
1576     memset(dmae, 0, sizeof(struct dmae_cmd));
1577 
1578     /* set the opcode */
1579     dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1580                                    TRUE, DMAE_COMP_PCI);
1581 
1582     /* fill in the completion parameters */
1583     dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1584     dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1585     dmae->comp_val     = DMAE_COMP_VAL;
1586 }
1587 
1588 /* issue a DMAE command over the init channel and wait for completion */
1589 static int
1590 bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1591                          struct dmae_cmd *dmae)
1592 {
1593     uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1594     int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1595 
1596     BXE_DMAE_LOCK(sc);
1597 
1598     /* reset completion */
1599     *wb_comp = 0;
1600 
1601     /* post the command on the channel used for initializations */
1602     bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1603 
1604     /* wait for completion */
1605     DELAY(5);
1606 
1607     while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1608         if (!timeout ||
1609             (sc->recovery_state != BXE_RECOVERY_DONE &&
1610              sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1611             BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1612                 *wb_comp, sc->recovery_state);
1613             BXE_DMAE_UNLOCK(sc);
1614             return (DMAE_TIMEOUT);
1615         }
1616 
1617         timeout--;
1618         DELAY(50);
1619     }
1620 
1621     if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1622         BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1623                 *wb_comp, sc->recovery_state);
1624         BXE_DMAE_UNLOCK(sc);
1625         return (DMAE_PCI_ERROR);
1626     }
1627 
1628     BXE_DMAE_UNLOCK(sc);
1629     return (0);
1630 }
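
/*
 * The polling budget above works out to roughly 200 ms on real silicon
 * (4000 iterations at 50 us each) and about 20 seconds on slow emulation
 * platforms (400000 iterations), after the initial 5 us settling delay.
 * A command that has not completed within that window returns DMAE_TIMEOUT.
 */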
1631 
1632 void
1633 bxe_read_dmae(struct bxe_softc *sc,
1634               uint32_t         src_addr,
1635               uint32_t         len32)
1636 {
1637     struct dmae_cmd dmae;
1638     uint32_t *data;
1639     int i, rc;
1640 
1641     DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1642 
1643     if (!sc->dmae_ready) {
1644         data = BXE_SP(sc, wb_data[0]);
1645 
1646         for (i = 0; i < len32; i++) {
1647             data[i] = (CHIP_IS_E1(sc)) ?
1648                           bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1649                           REG_RD(sc, (src_addr + (i * 4)));
1650         }
1651 
1652         return;
1653     }
1654 
1655     /* set opcode and fixed command fields */
1656     bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1657 
1658     /* fill in addresses and len */
1659     dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1660     dmae.src_addr_hi = 0;
1661     dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1662     dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1663     dmae.len         = len32;
1664 
1665     /* issue the command and wait for completion */
1666     if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1667         bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1668     }
1669 }
1670 
1671 void
1672 bxe_write_dmae(struct bxe_softc *sc,
1673                bus_addr_t       dma_addr,
1674                uint32_t         dst_addr,
1675                uint32_t         len32)
1676 {
1677     struct dmae_cmd dmae;
1678     int rc;
1679 
1680     if (!sc->dmae_ready) {
1681         DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1682 
1683         if (CHIP_IS_E1(sc)) {
1684             ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1685         } else {
1686             ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1687         }
1688 
1689         return;
1690     }
1691 
1692     /* set opcode and fixed command fields */
1693     bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1694 
1695     /* fill in addresses and len */
1696     dmae.src_addr_lo = U64_LO(dma_addr);
1697     dmae.src_addr_hi = U64_HI(dma_addr);
1698     dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1699     dmae.dst_addr_hi = 0;
1700     dmae.len         = len32;
1701 
1702     /* issue the command and wait for completion */
1703     if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1704         bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1705     }
1706 }
1707 
1708 void
1709 bxe_write_dmae_phys_len(struct bxe_softc *sc,
1710                         bus_addr_t       phys_addr,
1711                         uint32_t         addr,
1712                         uint32_t         len)
1713 {
1714     int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1715     int offset = 0;
1716 
1717     while (len > dmae_wr_max) {
1718         bxe_write_dmae(sc,
1719                        (phys_addr + offset), /* src DMA address */
1720                        (addr + offset),      /* dst GRC address */
1721                        dmae_wr_max);
1722         offset += (dmae_wr_max * 4);
1723         len -= dmae_wr_max;
1724     }
1725 
1726     bxe_write_dmae(sc,
1727                    (phys_addr + offset), /* src DMA address */
1728                    (addr + offset),      /* dst GRC address */
1729                    len);
1730 }
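
/*
 * The loop above splits a large transfer into DMAE-sized chunks. If, for
 * illustration, DMAE_LEN32_WR_MAX(sc) were 0x400 dwords (the real limit is
 * chip dependent), a 0x500-dword write would become:
 *
 *     bxe_write_dmae(sc, phys_addr,          addr,          0x400);
 *     bxe_write_dmae(sc, phys_addr + 0x1000, addr + 0x1000, 0x100);
 *
 * Note that the byte offset advances by (dwords written * 4) while the
 * length argument stays in dwords.
 */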
1731 
1732 void
1733 bxe_set_ctx_validation(struct bxe_softc   *sc,
1734                        struct eth_context *cxt,
1735                        uint32_t           cid)
1736 {
1737     /* ustorm cxt validation */
1738     cxt->ustorm_ag_context.cdu_usage =
1739         CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1740             CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1741     /* xcontext validation */
1742     cxt->xstorm_ag_context.cdu_reserved =
1743         CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1744             CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1745 }
1746 
1747 static void
1748 bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1749                             uint8_t          port,
1750                             uint8_t          fw_sb_id,
1751                             uint8_t          sb_index,
1752                             uint8_t          ticks)
1753 {
1754     uint32_t addr =
1755         (BAR_CSTRORM_INTMEM +
1756          CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1757 
1758     REG_WR8(sc, addr, ticks);
1759 
1760     BLOGD(sc, DBG_LOAD,
1761           "port %d fw_sb_id %d sb_index %d ticks %d\n",
1762           port, fw_sb_id, sb_index, ticks);
1763 }
1764 
1765 static void
1766 bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1767                             uint8_t          port,
1768                             uint16_t         fw_sb_id,
1769                             uint8_t          sb_index,
1770                             uint8_t          disable)
1771 {
1772     uint32_t enable_flag =
1773         (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1774     uint32_t addr =
1775         (BAR_CSTRORM_INTMEM +
1776          CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1777     uint8_t flags;
1778 
1779     /* clear and set */
1780     flags = REG_RD8(sc, addr);
1781     flags &= ~HC_INDEX_DATA_HC_ENABLED;
1782     flags |= enable_flag;
1783     REG_WR8(sc, addr, flags);
1784 
1785     BLOGD(sc, DBG_LOAD,
1786           "port %d fw_sb_id %d sb_index %d disable %d\n",
1787           port, fw_sb_id, sb_index, disable);
1788 }
1789 
1790 void
1791 bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1792                              uint8_t          fw_sb_id,
1793                              uint8_t          sb_index,
1794                              uint8_t          disable,
1795                              uint16_t         usec)
1796 {
1797     int port = SC_PORT(sc);
1798     uint8_t ticks = (usec / 4); /* XXX ??? */
1799 
1800     bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1801 
1802     disable = (disable) ? 1 : ((usec) ? 0 : 1);
1803     bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1804 }
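
/*
 * For example, a requested interval of 100 us is programmed as 25 ticks;
 * the divide by 4 above suggests a 4 us timer granularity in the firmware,
 * though that is not spelled out here (hence the XXX). Passing usec == 0
 * forces the index into the disabled state even if 'disable' was not set
 * by the caller.
 */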
1805 
1806 void
1807 elink_cb_udelay(struct bxe_softc *sc,
1808                 uint32_t         usecs)
1809 {
1810     DELAY(usecs);
1811 }
1812 
1813 uint32_t
1814 elink_cb_reg_read(struct bxe_softc *sc,
1815                   uint32_t         reg_addr)
1816 {
1817     return (REG_RD(sc, reg_addr));
1818 }
1819 
1820 void
1821 elink_cb_reg_write(struct bxe_softc *sc,
1822                    uint32_t         reg_addr,
1823                    uint32_t         val)
1824 {
1825     REG_WR(sc, reg_addr, val);
1826 }
1827 
1828 void
1829 elink_cb_reg_wb_write(struct bxe_softc *sc,
1830                       uint32_t         offset,
1831                       uint32_t         *wb_write,
1832                       uint16_t         len)
1833 {
1834     REG_WR_DMAE(sc, offset, wb_write, len);
1835 }
1836 
1837 void
1838 elink_cb_reg_wb_read(struct bxe_softc *sc,
1839                      uint32_t         offset,
1840                      uint32_t         *wb_write,
1841                      uint16_t         len)
1842 {
1843     REG_RD_DMAE(sc, offset, wb_write, len);
1844 }
1845 
1846 uint8_t
1847 elink_cb_path_id(struct bxe_softc *sc)
1848 {
1849     return (SC_PATH(sc));
1850 }
1851 
1852 void
1853 elink_cb_event_log(struct bxe_softc     *sc,
1854                    const elink_log_id_t elink_log_id,
1855                    ...)
1856 {
1857     /* XXX */
1858     BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1859 }
1860 
1861 static int
1862 bxe_set_spio(struct bxe_softc *sc,
1863              int              spio,
1864              uint32_t         mode)
1865 {
1866     uint32_t spio_reg;
1867 
1868     /* Only 2 SPIOs are configurable */
1869     if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1870         BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1871         return (-1);
1872     }
1873 
1874     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1875 
1876     /* read SPIO and mask except the float bits */
1877     spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1878 
1879     switch (mode) {
1880     case MISC_SPIO_OUTPUT_LOW:
1881         BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1882         /* clear FLOAT and set CLR */
1883         spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1884         spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1885         break;
1886 
1887     case MISC_SPIO_OUTPUT_HIGH:
1888         BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1889         /* clear FLOAT and set SET */
1890         spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1891         spio_reg |=  (spio << MISC_SPIO_SET_POS);
1892         break;
1893 
1894     case MISC_SPIO_INPUT_HI_Z:
1895         BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1896         /* set FLOAT */
1897         spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1898         break;
1899 
1900     default:
1901         break;
1902     }
1903 
1904     REG_WR(sc, MISC_REG_SPIO, spio_reg);
1905     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1906 
1907     return (0);
1908 }
1909 
1910 static int
1911 bxe_gpio_read(struct bxe_softc *sc,
1912               int              gpio_num,
1913               uint8_t          port)
1914 {
1915     /* The GPIO should be swapped if swap register is set and active */
1916     int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1917                       REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1918     int gpio_shift = (gpio_num +
1919                       (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1920     uint32_t gpio_mask = (1 << gpio_shift);
1921     uint32_t gpio_reg;
1922 
1923     if (gpio_num > MISC_REGISTERS_GPIO_3) {
1924         BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1925             " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1926             gpio_mask);
1927         return (-1);
1928     }
1929 
1930     /* read GPIO value */
1931     gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1932 
1933     /* get the requested pin value */
1934     return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1935 }
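
/*
 * The swap handling above is shared by all of the GPIO accessors: gpio_port
 * is the caller's port XORed with 1 only when both NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE read non-zero, and pins that end up on port 1 are
 * offset by MISC_REGISTERS_GPIO_PORT_SHIFT within the register.
 */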
1936 
1937 static int
1938 bxe_gpio_write(struct bxe_softc *sc,
1939                int              gpio_num,
1940                uint32_t         mode,
1941                uint8_t          port)
1942 {
1943     /* The GPIO should be swapped if swap register is set and active */
1944     int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1945                       REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1946     int gpio_shift = (gpio_num +
1947                       (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1948     uint32_t gpio_mask = (1 << gpio_shift);
1949     uint32_t gpio_reg;
1950 
1951     if (gpio_num > MISC_REGISTERS_GPIO_3) {
1952         BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1953             " gpio_shift %d gpio_mask 0x%x\n",
1954             gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1955         return (-1);
1956     }
1957 
1958     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1959 
1960     /* read GPIO and mask except the float bits */
1961     gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1962 
1963     switch (mode) {
1964     case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1965         BLOGD(sc, DBG_PHY,
1966               "Set GPIO %d (shift %d) -> output low\n",
1967               gpio_num, gpio_shift);
1968         /* clear FLOAT and set CLR */
1969         gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1970         gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1971         break;
1972 
1973     case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1974         BLOGD(sc, DBG_PHY,
1975               "Set GPIO %d (shift %d) -> output high\n",
1976               gpio_num, gpio_shift);
1977         /* clear FLOAT and set SET */
1978         gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1979         gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1980         break;
1981 
1982     case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1983         BLOGD(sc, DBG_PHY,
1984               "Set GPIO %d (shift %d) -> input\n",
1985               gpio_num, gpio_shift);
1986         /* set FLOAT */
1987         gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1988         break;
1989 
1990     default:
1991         break;
1992     }
1993 
1994     REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1995     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1996 
1997     return (0);
1998 }
1999 
2000 static int
2001 bxe_gpio_mult_write(struct bxe_softc *sc,
2002                     uint8_t          pins,
2003                     uint32_t         mode)
2004 {
2005     uint32_t gpio_reg;
2006 
2007     /* any port swapping should be handled by caller */
2008 
2009     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2010 
2011     /* read GPIO and clear the FLOAT/CLR/SET bits for the given pins */
2012     gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2013     gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2014     gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2015     gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2016 
2017     switch (mode) {
2018     case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2019         BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2020         /* set CLR */
2021         gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2022         break;
2023 
2024     case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2025         BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2026         /* set SET */
2027         gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2028         break;
2029 
2030     case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2031         BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2032         /* set FLOAT */
2033         gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2034         break;
2035 
2036     default:
2037         BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2038             " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2039         bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2040         return (-1);
2041     }
2042 
2043     REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2044     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2045 
2046     return (0);
2047 }
2048 
2049 static int
2050 bxe_gpio_int_write(struct bxe_softc *sc,
2051                    int              gpio_num,
2052                    uint32_t         mode,
2053                    uint8_t          port)
2054 {
2055     /* The GPIO should be swapped if swap register is set and active */
2056     int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2057                       REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2058     int gpio_shift = (gpio_num +
2059                       (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2060     uint32_t gpio_mask = (1 << gpio_shift);
2061     uint32_t gpio_reg;
2062 
2063     if (gpio_num > MISC_REGISTERS_GPIO_3) {
2064         BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2065             " gpio_shift %d gpio_mask 0x%x\n",
2066             gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2067         return (-1);
2068     }
2069 
2070     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2071 
2072     /* read GPIO int */
2073     gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2074 
2075     switch (mode) {
2076     case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2077         BLOGD(sc, DBG_PHY,
2078               "Clear GPIO INT %d (shift %d) -> output low\n",
2079               gpio_num, gpio_shift);
2080         /* clear SET and set CLR */
2081         gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2082         gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2083         break;
2084 
2085     case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2086         BLOGD(sc, DBG_PHY,
2087               "Set GPIO INT %d (shift %d) -> output high\n",
2088               gpio_num, gpio_shift);
2089         /* clear CLR and set SET */
2090         gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2091         gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2092         break;
2093 
2094     default:
2095         break;
2096     }
2097 
2098     REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2099     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2100 
2101     return (0);
2102 }
2103 
2104 uint32_t
2105 elink_cb_gpio_read(struct bxe_softc *sc,
2106                    uint16_t         gpio_num,
2107                    uint8_t          port)
2108 {
2109     return (bxe_gpio_read(sc, gpio_num, port));
2110 }
2111 
2112 uint8_t
2113 elink_cb_gpio_write(struct bxe_softc *sc,
2114                     uint16_t         gpio_num,
2115                     uint8_t          mode, /* 0=low 1=high */
2116                     uint8_t          port)
2117 {
2118     return (bxe_gpio_write(sc, gpio_num, mode, port));
2119 }
2120 
2121 uint8_t
2122 elink_cb_gpio_mult_write(struct bxe_softc *sc,
2123                          uint8_t          pins,
2124                          uint8_t          mode) /* 0=low 1=high */
2125 {
2126     return (bxe_gpio_mult_write(sc, pins, mode));
2127 }
2128 
2129 uint8_t
2130 elink_cb_gpio_int_write(struct bxe_softc *sc,
2131                         uint16_t         gpio_num,
2132                         uint8_t          mode, /* 0=low 1=high */
2133                         uint8_t          port)
2134 {
2135     return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2136 }
2137 
2138 void
2139 elink_cb_notify_link_changed(struct bxe_softc *sc)
2140 {
2141     REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2142                 (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2143 }
2144 
2145 /* send the MCP a request, block until there is a reply */
2146 uint32_t
2147 elink_cb_fw_command(struct bxe_softc *sc,
2148                     uint32_t         command,
2149                     uint32_t         param)
2150 {
2151     int mb_idx = SC_FW_MB_IDX(sc);
2152     uint32_t seq;
2153     uint32_t rc = 0;
2154     uint32_t cnt = 1;
2155     uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2156 
2157     BXE_FWMB_LOCK(sc);
2158 
2159     seq = ++sc->fw_seq;
2160     SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2161     SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2162 
2163     BLOGD(sc, DBG_PHY,
2164           "wrote command 0x%08x to FW MB param 0x%08x\n",
2165           (command | seq), param);
2166 
2167     /* Let the FW do its magic. Give it up to 5 seconds... */
2168     do {
2169         DELAY(delay * 1000);
2170         rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2171     } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2172 
2173     BLOGD(sc, DBG_PHY,
2174           "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2175           cnt*delay, rc, seq);
2176 
2177     /* is this a reply to our command? */
2178     if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2179         rc &= FW_MSG_CODE_MASK;
2180     } else {
2181         /* Ruh-roh! */
2182         BLOGE(sc, "FW failed to respond!\n");
2183         // XXX bxe_fw_dump(sc);
2184         rc = 0;
2185     }
2186 
2187     BXE_FWMB_UNLOCK(sc);
2188     return (rc);
2189 }
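
/*
 * The poll above matches the "up to 5 seconds" note: up to 500 iterations
 * at 10 ms each on real hardware (100 ms per iteration on slow emulation
 * platforms). If the sequence number in the firmware mailbox never matches,
 * rc is forced to 0 so the caller sees an empty response code.
 */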
2190 
2191 static uint32_t
2192 bxe_fw_command(struct bxe_softc *sc,
2193                uint32_t         command,
2194                uint32_t         param)
2195 {
2196     return (elink_cb_fw_command(sc, command, param));
2197 }
2198 
2199 static void
2200 __storm_memset_dma_mapping(struct bxe_softc *sc,
2201                            uint32_t         addr,
2202                            bus_addr_t       mapping)
2203 {
2204     REG_WR(sc, addr, U64_LO(mapping));
2205     REG_WR(sc, (addr + 4), U64_HI(mapping));
2206 }
2207 
2208 static void
2209 storm_memset_spq_addr(struct bxe_softc *sc,
2210                       bus_addr_t       mapping,
2211                       uint16_t         abs_fid)
2212 {
2213     uint32_t addr = (XSEM_REG_FAST_MEMORY +
2214                      XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2215     __storm_memset_dma_mapping(sc, addr, mapping);
2216 }
2217 
2218 static void
2219 storm_memset_vf_to_pf(struct bxe_softc *sc,
2220                       uint16_t         abs_fid,
2221                       uint16_t         pf_id)
2222 {
2223     REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2224     REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2225     REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2226     REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2227 }
2228 
2229 static void
2230 storm_memset_func_en(struct bxe_softc *sc,
2231                      uint16_t         abs_fid,
2232                      uint8_t          enable)
2233 {
2234     REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2235     REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2236     REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2237     REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2238 }
2239 
2240 static void
2241 storm_memset_eq_data(struct bxe_softc       *sc,
2242                      struct event_ring_data *eq_data,
2243                      uint16_t               pfid)
2244 {
2245     uint32_t addr;
2246     size_t size;
2247 
2248     addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2249     size = sizeof(struct event_ring_data);
2250     ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2251 }
2252 
2253 static void
2254 storm_memset_eq_prod(struct bxe_softc *sc,
2255                      uint16_t         eq_prod,
2256                      uint16_t         pfid)
2257 {
2258     uint32_t addr = (BAR_CSTRORM_INTMEM +
2259                      CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2260     REG_WR16(sc, addr, eq_prod);
2261 }
2262 
2263 /*
2264  * Post a slowpath command.
2265  *
2266  * A slowpath command is used to propagate a configuration change through
2267  * the controller in a controlled manner, allowing each STORM processor and
2268  * other H/W blocks to phase in the change.  The commands sent on the
2269  * slowpath are referred to as ramrods.  Depending on the ramrod used the
2270  * completion of the ramrod will occur in different ways.  Here's a
2271  * breakdown of ramrods and how they complete:
2272  *
2273  * RAMROD_CMD_ID_ETH_PORT_SETUP
2274  *   Used to setup the leading connection on a port.  Completes on the
2275  *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2276  *
2277  * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2278  *   Used to setup an additional connection on a port.  Completes on the
2279  *   RCQ of the multi-queue/RSS connection being initialized.
2280  *
2281  * RAMROD_CMD_ID_ETH_STAT_QUERY
2282  *   Used to force the storm processors to update the statistics database
2283  *   in host memory.  This ramrod is sent on the leading connection CID and
2284  *   completes as an index increment of the CSTORM on the default status
2285  *   block.
2286  *
2287  * RAMROD_CMD_ID_ETH_UPDATE
2288  *   Used to update the state of the leading connection, usually to update
2289  *   the RSS indirection table.  Completes on the RCQ of the leading
2290  *   connection. (Not currently used under FreeBSD until OS support becomes
2291  *   available.)
2292  *
2293  * RAMROD_CMD_ID_ETH_HALT
2294  *   Used when tearing down a connection prior to driver unload.  Completes
2295  *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2296  *   use this on the leading connection.
2297  *
2298  * RAMROD_CMD_ID_ETH_SET_MAC
2299  *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2300  *   the RCQ of the leading connection.
2301  *
2302  * RAMROD_CMD_ID_ETH_CFC_DEL
2303  *   Used when tearing down a connection prior to driver unload.  Completes
2304  *   on the RCQ of the leading connection (since the current connection
2305  *   has been completely removed from controller memory).
2306  *
2307  * RAMROD_CMD_ID_ETH_PORT_DEL
2308  *   Used to tear down the leading connection prior to driver unload,
2309  *   typically fp[0].  Completes as an index increment of the CSTORM on the
2310  *   default status block.
2311  *
2312  * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2313  *   Used for connection offload.  Completes on the RCQ of the multi-queue
2314  *   RSS connection that is being offloaded.  (Not currently used under
2315  *   FreeBSD.)
2316  *
2317  * There can only be one command pending per function.
2318  *
2319  * Returns:
2320  *   0 = Success, !0 = Failure.
2321  */
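
/*
 * As a sketch of how a ramrod is posted (the command, CID, and type shown
 * here are placeholders; real callers pass whatever their specific ramrod
 * requires), the physical address of the command's data buffer is split
 * into high/low halves and handed to bxe_sp_post() below:
 *
 *     rc = bxe_sp_post(sc, command, cid,
 *                      U64_HI(rdata_mapping), U64_LO(rdata_mapping),
 *                      cmd_type);
 */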
2322 
2323 /* must be called under the spq lock */
2324 static inline
2325 struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2326 {
2327     struct eth_spe *next_spe = sc->spq_prod_bd;
2328 
2329     if (sc->spq_prod_bd == sc->spq_last_bd) {
2330         /* wrap back to the first eth_spq */
2331         sc->spq_prod_bd = sc->spq;
2332         sc->spq_prod_idx = 0;
2333     } else {
2334         sc->spq_prod_bd++;
2335         sc->spq_prod_idx++;
2336     }
2337 
2338     return (next_spe);
2339 }
2340 
2341 /* must be called under the spq lock */
2342 static inline
2343 void bxe_sp_prod_update(struct bxe_softc *sc)
2344 {
2345     int func = SC_FUNC(sc);
2346 
2347     /*
2348      * Make sure that BD data is updated before writing the producer.
2349      * BD data is written to the memory, the producer is read from the
2350      * memory, thus we need a full memory barrier to ensure the ordering.
2351      */
2352     mb();
2353 
2354     REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2355              sc->spq_prod_idx);
2356 
2357     bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2358                       BUS_SPACE_BARRIER_WRITE);
2359 }
2360 
2361 /**
2362  * bxe_is_contextless_ramrod - check if the current command ends on EQ
2363  *
2364  * @cmd:      command to check
2365  * @cmd_type: command type
2366  */
2367 static inline
2368 int bxe_is_contextless_ramrod(int cmd,
2369                               int cmd_type)
2370 {
2371     if ((cmd_type == NONE_CONNECTION_TYPE) ||
2372         (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2373         (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2374         (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2375         (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2376         (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2377         (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2378         return (TRUE);
2379     } else {
2380         return (FALSE);
2381     }
2382 }
2383 
2384 /**
2385  * bxe_sp_post - place a single command on an SP ring
2386  *
2387  * @sc:         driver handle
2388  * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2389  * @cid:        SW CID the command is related to
2390  * @data_hi:    command private data address (high 32 bits)
2391  * @data_lo:    command private data address (low 32 bits)
2392  * @cmd_type:   command type (e.g. NONE, ETH)
2393  *
2394  * SP data is handled as if it's always an address pair, thus data fields are
2395  * not swapped to little endian in upper functions. Instead this function swaps
2396  * data as if it's two uint32 fields.
2397  */
2398 int
2399 bxe_sp_post(struct bxe_softc *sc,
2400             int              command,
2401             int              cid,
2402             uint32_t         data_hi,
2403             uint32_t         data_lo,
2404             int              cmd_type)
2405 {
2406     struct eth_spe *spe;
2407     uint16_t type;
2408     int common;
2409 
2410     common = bxe_is_contextless_ramrod(command, cmd_type);
2411 
2412     BXE_SP_LOCK(sc);
2413 
2414     if (common) {
2415         if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2416             BLOGE(sc, "EQ ring is full!\n");
2417             BXE_SP_UNLOCK(sc);
2418             return (-1);
2419         }
2420     } else {
2421         if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2422             BLOGE(sc, "SPQ ring is full!\n");
2423             BXE_SP_UNLOCK(sc);
2424             return (-1);
2425         }
2426     }
2427 
2428     spe = bxe_sp_get_next(sc);
2429 
2430     /* CID needs port number to be encoded in it */
2431     spe->hdr.conn_and_cmd_data =
2432         htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2433 
2434     type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2435 
2436     /* TBD: Check if it works for VFs */
2437     type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2438              SPE_HDR_T_FUNCTION_ID);
2439 
2440     spe->hdr.type = htole16(type);
2441 
2442     spe->data.update_data_addr.hi = htole32(data_hi);
2443     spe->data.update_data_addr.lo = htole32(data_lo);
2444 
2445     /*
2446      * It's ok if the actual decrement is issued towards the memory
2447      * somewhere between the lock and unlock. Thus no further explicit
2448      * memory barrier is needed.
2449      */
2450     if (common) {
2451         atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2452     } else {
2453         atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2454     }
2455 
2456     BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2457     BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2458           BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2459     BLOGD(sc, DBG_SP,
2460           "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2461           sc->spq_prod_idx,
2462           (uint32_t)U64_HI(sc->spq_dma.paddr),
2463           (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2464           command,
2465           common,
2466           HW_CID(sc, cid),
2467           data_hi,
2468           data_lo,
2469           type,
2470           atomic_load_acq_long(&sc->cq_spq_left),
2471           atomic_load_acq_long(&sc->eq_spq_left));
2472 
2473     bxe_sp_prod_update(sc);
2474 
2475     BXE_SP_UNLOCK(sc);
2476     return (0);
2477 }
2478 
2479 /**
2480  * bxe_debug_print_ind_table - prints the indirection table configuration.
2481  *
2482  * @sc: driver handle
2483  * @p:  pointer to rss configuration
2484  */
2485 
2486 /*
2487  * FreeBSD Device probe function.
2488  *
2489  * Compares the device found to the driver's list of supported devices and
2490  * reports back to the bsd loader whether this is the right driver for the device.
2491  * This is the driver entry function called from the "kldload" command.
2492  *
2493  * Returns:
2494  *   BUS_PROBE_DEFAULT on success, positive value on failure.
2495  */
2496 static int
2497 bxe_probe(device_t dev)
2498 {
2499     struct bxe_device_type *t;
2500     char *descbuf;
2501     uint16_t did, sdid, svid, vid;
2502 
2503     /* Find our device structure */
2504     t = bxe_devs;
2505 
2506     /* Get the data for the device to be probed. */
2507     vid  = pci_get_vendor(dev);
2508     did  = pci_get_device(dev);
2509     svid = pci_get_subvendor(dev);
2510     sdid = pci_get_subdevice(dev);
2511 
2512     /* Look through the list of known devices for a match. */
2513     while (t->bxe_name != NULL) {
2514         if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2515             ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2516             ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2517             descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2518             if (descbuf == NULL)
2519                 return (ENOMEM);
2520 
2521             /* Print out the device identity. */
2522             snprintf(descbuf, BXE_DEVDESC_MAX,
2523                      "%s (%c%d) BXE v:%s", t->bxe_name,
2524                      (((pci_read_config(dev, PCIR_REVID, 4) &
2525                         0xf0) >> 4) + 'A'),
2526                      (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2527                      BXE_DRIVER_VERSION);
2528 
2529             device_set_desc_copy(dev, descbuf);
2530             free(descbuf, M_TEMP);
2531             return (BUS_PROBE_DEFAULT);
2532         }
2533         t++;
2534     }
2535 
2536     return (ENXIO);
2537 }
2538 
2539 static void
2540 bxe_init_mutexes(struct bxe_softc *sc)
2541 {
2542 #ifdef BXE_CORE_LOCK_SX
2543     snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2544              "bxe%d_core_lock", sc->unit);
2545     sx_init(&sc->core_sx, sc->core_sx_name);
2546 #else
2547     snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2548              "bxe%d_core_lock", sc->unit);
2549     mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2550 #endif
2551 
2552     snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2553              "bxe%d_sp_lock", sc->unit);
2554     mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2555 
2556     snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2557              "bxe%d_dmae_lock", sc->unit);
2558     mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2559 
2560     snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2561              "bxe%d_phy_lock", sc->unit);
2562     mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2563 
2564     snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2565              "bxe%d_fwmb_lock", sc->unit);
2566     mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2567 
2568     snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2569              "bxe%d_print_lock", sc->unit);
2570     mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2571 
2572     snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2573              "bxe%d_stats_lock", sc->unit);
2574     mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2575 
2576     snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2577              "bxe%d_mcast_lock", sc->unit);
2578     mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2579 }
2580 
2581 static void
2582 bxe_release_mutexes(struct bxe_softc *sc)
2583 {
2584 #ifdef BXE_CORE_LOCK_SX
2585     sx_destroy(&sc->core_sx);
2586 #else
2587     if (mtx_initialized(&sc->core_mtx)) {
2588         mtx_destroy(&sc->core_mtx);
2589     }
2590 #endif
2591 
2592     if (mtx_initialized(&sc->sp_mtx)) {
2593         mtx_destroy(&sc->sp_mtx);
2594     }
2595 
2596     if (mtx_initialized(&sc->dmae_mtx)) {
2597         mtx_destroy(&sc->dmae_mtx);
2598     }
2599 
2600     if (mtx_initialized(&sc->port.phy_mtx)) {
2601         mtx_destroy(&sc->port.phy_mtx);
2602     }
2603 
2604     if (mtx_initialized(&sc->fwmb_mtx)) {
2605         mtx_destroy(&sc->fwmb_mtx);
2606     }
2607 
2608     if (mtx_initialized(&sc->print_mtx)) {
2609         mtx_destroy(&sc->print_mtx);
2610     }
2611 
2612     if (mtx_initialized(&sc->stats_mtx)) {
2613         mtx_destroy(&sc->stats_mtx);
2614     }
2615 
2616     if (mtx_initialized(&sc->mcast_mtx)) {
2617         mtx_destroy(&sc->mcast_mtx);
2618     }
2619 }
2620 
2621 static void
2622 bxe_tx_disable(struct bxe_softc* sc)
2623 {
2624     if_t ifp = sc->ifp;
2625 
2626     /* tell the stack the driver is stopped and TX queue is full */
2627     if (ifp !=  NULL) {
2628         if_setdrvflags(ifp, 0);
2629     }
2630 }
2631 
2632 static void
2633 bxe_drv_pulse(struct bxe_softc *sc)
2634 {
2635     SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2636              sc->fw_drv_pulse_wr_seq);
2637 }
2638 
2639 static inline uint16_t
2640 bxe_tx_avail(struct bxe_softc *sc,
2641              struct bxe_fastpath *fp)
2642 {
2643     int16_t  used;
2644     uint16_t prod;
2645     uint16_t cons;
2646 
2647     prod = fp->tx_bd_prod;
2648     cons = fp->tx_bd_cons;
2649 
2650     used = SUB_S16(prod, cons);
2651 
2652     return (int16_t)(sc->tx_ring_size) - used;
2653 }
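
/*
 * SUB_S16() yields a signed 16-bit difference, so the producer/consumer
 * arithmetic above survives index wrap-around. For instance (illustrative
 * numbers), prod = 0x0005 and cons = 0xfffb give used = 10, so the space
 * reported is tx_ring_size - 10.
 */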
2654 
2655 static inline int
2656 bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2657 {
2658     uint16_t hw_cons;
2659 
2660     mb(); /* status block fields can change */
2661     hw_cons = le16toh(*fp->tx_cons_sb);
2662     return (hw_cons != fp->tx_pkt_cons);
2663 }
2664 
2665 static inline uint8_t
2666 bxe_has_tx_work(struct bxe_fastpath *fp)
2667 {
2668     /* expand this for multi-cos if ever supported */
2669     return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2670 }
2671 
2672 static inline int
2673 bxe_has_rx_work(struct bxe_fastpath *fp)
2674 {
2675     uint16_t rx_cq_cons_sb;
2676 
2677     mb(); /* status block fields can change */
2678     rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2679     if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2680         rx_cq_cons_sb++;
2681     return (fp->rx_cq_cons != rx_cq_cons_sb);
2682 }
2683 
2684 static void
2685 bxe_sp_event(struct bxe_softc    *sc,
2686              struct bxe_fastpath *fp,
2687              union eth_rx_cqe    *rr_cqe)
2688 {
2689     int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2690     int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2691     enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2692     struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2693 
2694     BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2695           fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2696 
2697     switch (command) {
2698     case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2699         BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2700         drv_cmd = ECORE_Q_CMD_UPDATE;
2701         break;
2702 
2703     case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2704         BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2705         drv_cmd = ECORE_Q_CMD_SETUP;
2706         break;
2707 
2708     case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2709         BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2710         drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2711         break;
2712 
2713     case (RAMROD_CMD_ID_ETH_HALT):
2714         BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2715         drv_cmd = ECORE_Q_CMD_HALT;
2716         break;
2717 
2718     case (RAMROD_CMD_ID_ETH_TERMINATE):
2719         BLOGD(sc, DBG_SP, "got MULTI[%d] teminate ramrod\n", cid);
2720         drv_cmd = ECORE_Q_CMD_TERMINATE;
2721         break;
2722 
2723     case (RAMROD_CMD_ID_ETH_EMPTY):
2724         BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2725         drv_cmd = ECORE_Q_CMD_EMPTY;
2726         break;
2727 
2728     default:
2729         BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2730               command, fp->index);
2731         return;
2732     }
2733 
2734     if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2735         q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2736         /*
2737          * q_obj->complete_cmd() failure means that this was
2738          * an unexpected completion.
2739          *
2740          * In this case we don't want to increase the sc->spq_left
2741          * because apparently we haven't sent this command in the first
2742          * place.
2743          */
2744         // bxe_panic(sc, ("Unexpected SP completion\n"));
2745         return;
2746     }
2747 
2748     atomic_add_acq_long(&sc->cq_spq_left, 1);
2749 
2750     BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2751           atomic_load_acq_long(&sc->cq_spq_left));
2752 }
2753 
2754 /*
2755  * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2756  * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2757  * the current aggregation queue as in-progress.
2758  */
2759 static void
2760 bxe_tpa_start(struct bxe_softc            *sc,
2761               struct bxe_fastpath         *fp,
2762               uint16_t                    queue,
2763               uint16_t                    cons,
2764               uint16_t                    prod,
2765               struct eth_fast_path_rx_cqe *cqe)
2766 {
2767     struct bxe_sw_rx_bd tmp_bd;
2768     struct bxe_sw_rx_bd *rx_buf;
2769     struct eth_rx_bd *rx_bd;
2770     int max_agg_queues __diagused;
2771     struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2772     uint16_t index;
2773 
2774     BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2775                        "cons=%d prod=%d\n",
2776           fp->index, queue, cons, prod);
2777 
2778     max_agg_queues = MAX_AGG_QS(sc);
2779 
2780     KASSERT((queue < max_agg_queues),
2781             ("fp[%02d] invalid aggr queue (%d >= %d)!",
2782              fp->index, queue, max_agg_queues));
2783 
2784     KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2785             ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2786              fp->index, queue));
2787 
2788     /* copy the existing mbuf and mapping from the TPA pool */
2789     tmp_bd = tpa_info->bd;
2790 
2791     if (tmp_bd.m == NULL) {
2792         uint32_t *tmp;
2793 
2794         tmp = (uint32_t *)cqe;
2795 
2796         BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
2797               fp->index, queue, cons, prod);
2798         BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2799             *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2800 
2801         /* XXX Error handling? */
2802         return;
2803     }
2804 
2805     /* change the TPA queue to the start state */
2806     tpa_info->state            = BXE_TPA_STATE_START;
2807     tpa_info->placement_offset = cqe->placement_offset;
2808     tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2809     tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2810     tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2811 
2812     fp->rx_tpa_queue_used |= (1 << queue);
2813 
2814     /*
2815      * If all the buffer descriptors are filled with mbufs then fill in
2816      * the current consumer index with a new BD. Else if a maximum Rx
2817      * buffer limit is imposed then fill in the next producer index.
2818      */
2819     index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2820                 prod : cons;
2821 
2822     /* move the received mbuf and mapping to TPA pool */
2823     tpa_info->bd = fp->rx_mbuf_chain[cons];
2824 
2825     /* release any existing RX BD mbuf mappings */
2826     if (cons != index) {
2827         rx_buf = &fp->rx_mbuf_chain[cons];
2828 
2829         if (rx_buf->m_map != NULL) {
2830             bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2831                             BUS_DMASYNC_POSTREAD);
2832             bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2833         }
2834 
2835         /*
2836          * We get here when the maximum number of rx buffers is less than
2837          * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2838          * it out here without concern of a memory leak.
2839          */
2840         fp->rx_mbuf_chain[cons].m = NULL;
2841     }
2842 
2843     /* update the Rx SW BD with the mbuf info from the TPA pool */
2844     fp->rx_mbuf_chain[index] = tmp_bd;
2845 
2846     /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2847     rx_bd = &fp->rx_chain[index];
2848     rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2849     rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2850 }
2851 
2852 /*
2853  * When a TPA aggregation is completed, loop through the individual mbufs
2854  * of the aggregation, combining them into a single mbuf which will be sent
2855  * up the stack. Refill all freed SGEs with mbufs as we go along.
2856  */
2857 static int
2858 bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2859                    struct bxe_fastpath       *fp,
2860                    struct bxe_sw_tpa_info    *tpa_info,
2861                    uint16_t                  queue,
2862                    uint16_t                  pages,
2863                    struct mbuf               *m,
2864                    struct eth_end_agg_rx_cqe *cqe,
2865                    uint16_t                  cqe_idx)
2866 {
2867     struct mbuf *m_frag;
2868     uint32_t frag_len, frag_size, i;
2869     uint16_t sge_idx;
2870     int rc = 0;
2871     int j;
2872 
2873     frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2874 
2875     BLOGD(sc, DBG_LRO,
2876           "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2877           fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2878 
2879     /* make sure the aggregated frame is not too big to handle */
2880     if (pages > 8 * PAGES_PER_SGE) {
2881 
2882         uint32_t *tmp = (uint32_t *)cqe;
2883 
2884         BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2885                   "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2886               fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2887               tpa_info->len_on_bd, frag_size);
2888 
2889         BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2890             *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2891 
2892         bxe_panic(sc, ("sge page count error\n"));
2893         return (EINVAL);
2894     }
2895 
2896     /*
2897      * Scan through the scatter gather list pulling individual mbufs into a
2898      * single mbuf for the host stack.
2899      */
2900     for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2901         sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2902 
2903         /*
2904          * Firmware gives the indices of the SGE as if the ring is an array
2905          * (meaning that the "next" element will consume 2 indices).
2906          */
2907         frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2908 
2909         BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2910                            "sge_idx=%d frag_size=%d frag_len=%d\n",
2911               fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2912 
2913         m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2914 
2915         /* allocate a new mbuf for the SGE */
2916         rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2917         if (rc) {
2918             /* Leave all remaining SGEs in the ring! */
2919             return (rc);
2920         }
2921 
2922         /* update the fragment length */
2923         m_frag->m_len = frag_len;
2924 
2925         /* concatenate the fragment to the head mbuf */
2926         m_cat(m, m_frag);
2927         fp->eth_q_stats.mbuf_alloc_sge--;
2928 
2929         /* update the TPA mbuf size and remaining fragment size */
2930         m->m_pkthdr.len += frag_len;
2931         frag_size -= frag_len;
2932     }
2933 
2934     BLOGD(sc, DBG_LRO,
2935           "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2936           fp->index, queue, frag_size);
2937 
2938     return (rc);
2939 }
2940 
2941 static inline void
2942 bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2943 {
2944     int i, j;
2945 
2946     for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2947         int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2948 
2949         for (j = 0; j < 2; j++) {
2950             BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2951             idx--;
2952         }
2953     }
2954 }
2955 
2956 static inline void
2957 bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2958 {
2959     /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2960     memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2961 
2962     /*
2963      * Clear the last two indices in each page; these are the indices that
2964      * correspond to the "next" element, hence will never be indicated and
2965      * should be removed from the calculations.
2966      */
2967     bxe_clear_sge_mask_next_elems(fp);
2968 }
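
/*
 * The mask keeps one bit per SGE, grouped into 64-bit elements; a set bit
 * means the SGE has not been consumed yet. The last two indices of every
 * page are the "next page" pointer entries and are never reported by the
 * firmware, so they are pre-cleared here (and re-cleared after every
 * producer update) so that an element can still drop to zero once all of
 * its real SGEs have completed.
 */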
2969 
2970 static inline void
2971 bxe_update_last_max_sge(struct bxe_fastpath *fp,
2972                         uint16_t            idx)
2973 {
2974     uint16_t last_max = fp->last_max_sge;
2975 
2976     if (SUB_S16(idx, last_max) > 0) {
2977         fp->last_max_sge = idx;
2978     }
2979 }
2980 
2981 static inline void
2982 bxe_update_sge_prod(struct bxe_softc          *sc,
2983                     struct bxe_fastpath       *fp,
2984                     uint16_t                  sge_len,
2985                     union eth_sgl_or_raw_data *cqe)
2986 {
2987     uint16_t last_max, last_elem, first_elem;
2988     uint16_t delta = 0;
2989     uint16_t i;
2990 
2991     if (!sge_len) {
2992         return;
2993     }
2994 
2995     /* first mark all used pages */
2996     for (i = 0; i < sge_len; i++) {
2997         BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2998                             RX_SGE(le16toh(cqe->sgl[i])));
2999     }
3000 
3001     BLOGD(sc, DBG_LRO,
3002           "fp[%02d] fp_cqe->sgl[%d] = %d\n",
3003           fp->index, sge_len - 1,
3004           le16toh(cqe->sgl[sge_len - 1]));
3005 
3006     /* assume that the last SGE index is the biggest */
3007     bxe_update_last_max_sge(fp,
3008                             le16toh(cqe->sgl[sge_len - 1]));
3009 
3010     last_max = RX_SGE(fp->last_max_sge);
3011     last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3012     first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3013 
3014     /* if ring is not full */
3015     if (last_elem + 1 != first_elem) {
3016         last_elem++;
3017     }
3018 
3019     /* now update the prod */
3020     for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3021         if (__predict_true(fp->sge_mask[i])) {
3022             break;
3023         }
3024 
3025         fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3026         delta += BIT_VEC64_ELEM_SZ;
3027     }
3028 
3029     if (delta > 0) {
3030         fp->rx_sge_prod += delta;
3031         /* clear page-end entries */
3032         bxe_clear_sge_mask_next_elems(fp);
3033     }
3034 
3035     BLOGD(sc, DBG_LRO,
3036           "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3037           fp->index, fp->last_max_sge, fp->rx_sge_prod);
3038 }
3039 
3040 /*
3041  * The aggregation on the current TPA queue has completed. Pull the individual
3042  * mbuf fragments together into a single mbuf, perform all necessary checksum
3043  * calculations, and send the resulting mbuf to the stack.
3044  */
3045 static void
3046 bxe_tpa_stop(struct bxe_softc          *sc,
3047              struct bxe_fastpath       *fp,
3048              struct bxe_sw_tpa_info    *tpa_info,
3049              uint16_t                  queue,
3050              uint16_t                  pages,
3051              struct eth_end_agg_rx_cqe *cqe,
3052              uint16_t                  cqe_idx)
3053 {
3054     if_t ifp = sc->ifp;
3055     struct mbuf *m;
3056     int rc = 0;
3057 
3058     BLOGD(sc, DBG_LRO,
3059           "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3060           fp->index, queue, tpa_info->placement_offset,
3061           le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3062 
3063     m = tpa_info->bd.m;
3064 
3065     /* allocate a replacement before modifying existing mbuf */
3066     rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3067     if (rc) {
3068         /* drop the frame and log an error */
3069         fp->eth_q_stats.rx_soft_errors++;
3070         goto bxe_tpa_stop_exit;
3071     }
3072 
3073     /* we have a replacement, fixup the current mbuf */
3074     m_adj(m, tpa_info->placement_offset);
3075     m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3076 
3077     /* mark the checksums valid (taken care of by the firmware) */
3078     fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3079     fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3080     m->m_pkthdr.csum_data = 0xffff;
3081     m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3082                                CSUM_IP_VALID   |
3083                                CSUM_DATA_VALID |
3084                                CSUM_PSEUDO_HDR);
3085 
3086     /* aggregate all of the SGEs into a single mbuf */
3087     rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3088     if (rc) {
3089         /* drop the packet and log an error */
3090         fp->eth_q_stats.rx_soft_errors++;
3091         m_freem(m);
3092     } else {
3093         if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3094             m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3095             m->m_flags |= M_VLANTAG;
3096         }
3097 
3098         /* assign the packet to this interface */
3099         if_setrcvif(m, ifp);
3100 
3101         /* specify what RSS queue was used for this flow */
3102         m->m_pkthdr.flowid = fp->index;
3103         BXE_SET_FLOWID(m);
3104 
3105         if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3106         fp->eth_q_stats.rx_tpa_pkts++;
3107 
3108         /* pass the frame to the stack */
3109         if_input(ifp, m);
3110     }
3111 
3112     /* we passed an mbuf up the stack or dropped the frame */
3113     fp->eth_q_stats.mbuf_alloc_tpa--;
3114 
3115 bxe_tpa_stop_exit:
3116 
3117     fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3118     fp->rx_tpa_queue_used &= ~(1 << queue);
3119 }
3120 
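/*
 * Attach the SGE page fragments of a non-TPA frame that spilled beyond the
 * first BD onto the head mbuf. Each consumed SGE is replaced with a fresh
 * mbuf before its fragment is concatenated, and the SGE producer is advanced
 * afterwards. Returns 0 on success or an mbuf allocation error.
 */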
3121 static uint8_t
3122 bxe_service_rxsgl(
3123                  struct bxe_fastpath *fp,
3124                  uint16_t len,
3125                  uint16_t lenonbd,
3126                  struct mbuf *m,
3127                  struct eth_fast_path_rx_cqe *cqe_fp)
3128 {
3129     struct mbuf *m_frag;
3130     uint16_t frags, frag_len;
3131     uint16_t sge_idx = 0;
3132     uint16_t j;
3133     uint8_t i, rc = 0;
3134     uint32_t frag_size;
3135 
3136     /* adjust the mbuf */
3137     m->m_len = lenonbd;
3138 
3139     frag_size = len - lenonbd;
3140     frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3141 
3142     for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3143         sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3144 
3145         m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3146         frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3147         m_frag->m_len = frag_len;
3148 
3149         /* allocate a new mbuf for the SGE */
3150         rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3151         if (rc) {
3152             /* Leave all remaining SGEs in the ring! */
3153             return (rc);
3154         }
3155         fp->eth_q_stats.mbuf_alloc_sge--;
3156 
3157         /* concatenate the fragment to the head mbuf */
3158         m_cat(m, m_frag);
3159 
3160         frag_size -= frag_len;
3161     }
3162 
3163     bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3164 
3165     return rc;
3166 }
3167 
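/*
 * Service the RX completion queue for a single fastpath. Slowpath CQEs are
 * handed to bxe_sp_event(), TPA start/stop CQEs drive the aggregation state,
 * and regular fastpath CQEs have their mbufs replaced, fixed up (checksum,
 * VLAN and RSS info) and passed to the stack. Returns nonzero if completions
 * remain unprocessed (budget reached or an allocation failure).
 */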
3168 static uint8_t
3169 bxe_rxeof(struct bxe_softc    *sc,
3170           struct bxe_fastpath *fp)
3171 {
3172     if_t ifp = sc->ifp;
3173     uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3174     uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3175     int rx_pkts = 0;
3176     int rc = 0;
3177 
3178     BXE_FP_RX_LOCK(fp);
3179 
3180     /* CQ "next element" is of the size of the regular element */
3181     hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3182     if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3183         hw_cq_cons++;
3184     }
3185 
3186     bd_cons = fp->rx_bd_cons;
3187     bd_prod = fp->rx_bd_prod;
3188     bd_prod_fw = bd_prod;
3189     sw_cq_cons = fp->rx_cq_cons;
3190     sw_cq_prod = fp->rx_cq_prod;
3191 
3192     /*
3193      * Memory barrier necessary as speculative reads of the rx
3194      * buffer can be ahead of the index in the status block
3195      */
3196     rmb();
3197 
3198     BLOGD(sc, DBG_RX,
3199           "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3200           fp->index, hw_cq_cons, sw_cq_cons);
3201 
3202     while (sw_cq_cons != hw_cq_cons) {
3203         struct bxe_sw_rx_bd *rx_buf = NULL;
3204         union eth_rx_cqe *cqe;
3205         struct eth_fast_path_rx_cqe *cqe_fp;
3206         uint8_t cqe_fp_flags;
3207         enum eth_rx_cqe_type cqe_fp_type;
3208         uint16_t len, lenonbd,  pad;
3209         struct mbuf *m = NULL;
3210 
3211         comp_ring_cons = RCQ(sw_cq_cons);
3212         bd_prod = RX_BD(bd_prod);
3213         bd_cons = RX_BD(bd_cons);
3214 
3215         cqe          = &fp->rcq_chain[comp_ring_cons];
3216         cqe_fp       = &cqe->fast_path_cqe;
3217         cqe_fp_flags = cqe_fp->type_error_flags;
3218         cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3219 
3220         BLOGD(sc, DBG_RX,
3221               "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3222               "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3223               "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3224               fp->index,
3225               hw_cq_cons,
3226               sw_cq_cons,
3227               bd_prod,
3228               bd_cons,
3229               CQE_TYPE(cqe_fp_flags),
3230               cqe_fp_flags,
3231               cqe_fp->status_flags,
3232               le32toh(cqe_fp->rss_hash_result),
3233               le16toh(cqe_fp->vlan_tag),
3234               le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3235               le16toh(cqe_fp->len_on_bd));
3236 
3237         /* is this a slowpath msg? */
3238         if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3239             bxe_sp_event(sc, fp, cqe);
3240             goto next_cqe;
3241         }
3242 
3243         rx_buf = &fp->rx_mbuf_chain[bd_cons];
3244 
3245         if (!CQE_TYPE_FAST(cqe_fp_type)) {
3246             struct bxe_sw_tpa_info *tpa_info;
3247             uint16_t frag_size, pages;
3248             uint8_t queue;
3249 
3250             if (CQE_TYPE_START(cqe_fp_type)) {
3251                 bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3252                               bd_cons, bd_prod, cqe_fp);
3253                 m = NULL; /* packet not ready yet */
3254                 goto next_rx;
3255             }
3256 
3257             KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3258                     ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3259 
3260             queue = cqe->end_agg_cqe.queue_index;
3261             tpa_info = &fp->rx_tpa_info[queue];
3262 
3263             BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3264                   fp->index, queue);
3265 
3266             frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3267                          tpa_info->len_on_bd);
3268             pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3269 
3270             bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3271                          &cqe->end_agg_cqe, comp_ring_cons);
3272 
3273             bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3274 
3275             goto next_cqe;
3276         }
3277 
3278         /* non TPA */
3279 
3280         /* is this an error packet? */
3281         if (__predict_false(cqe_fp_flags &
3282                             ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3283             BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3284             fp->eth_q_stats.rx_soft_errors++;
3285             goto next_rx;
3286         }
3287 
3288         len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3289         lenonbd = le16toh(cqe_fp->len_on_bd);
3290         pad = cqe_fp->placement_offset;
3291 
3292         m = rx_buf->m;
3293 
3294         if (__predict_false(m == NULL)) {
3295             BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3296                   bd_cons, fp->index);
3297             goto next_rx;
3298         }
3299 
3300         /* XXX double copy if packet length under a threshold */
3301 
3302         /*
3303          * If all the buffer descriptors are filled with mbufs then fill in
3304          * the current consumer index with a new BD. Else if a maximum Rx
3305          * buffer limit is imposed then fill in the next producer index.
3306          */
3307         rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3308                                   (sc->max_rx_bufs != RX_BD_USABLE) ?
3309                                       bd_prod : bd_cons);
3310         if (rc != 0) {
3311 
3312             /* we simply reuse the received mbuf and don't post it to the stack */
3313             m = NULL;
3314 
3315             BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3316                   fp->index, rc);
3317             fp->eth_q_stats.rx_soft_errors++;
3318 
3319             if (sc->max_rx_bufs != RX_BD_USABLE) {
3320                 /* copy this consumer index to the producer index */
3321                 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3322                        sizeof(struct bxe_sw_rx_bd));
3323                 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3324             }
3325 
3326             goto next_rx;
3327         }
3328 
3329         /* current mbuf was detached from the bd */
3330         fp->eth_q_stats.mbuf_alloc_rx--;
3331 
3332         /* we allocated a replacement mbuf, fixup the current one */
3333         m_adj(m, pad);
3334         m->m_pkthdr.len = m->m_len = len;
3335 
3336         if ((len > 60) && (len > lenonbd)) {
3337             fp->eth_q_stats.rx_bxe_service_rxsgl++;
3338             rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3339             if (rc)
3340                 break;
3341             fp->eth_q_stats.rx_jumbo_sge_pkts++;
3342         } else if (lenonbd < len) {
3343             fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3344         }
3345 
3346         /* assign the packet to this interface */
3347         if_setrcvif(m, ifp);
3348 
3349         /* assume no hardware checksum has been completed */
3350         m->m_pkthdr.csum_flags = 0;
3351 
3352         /* validate checksum if offload enabled */
3353         if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3354             /* check for a valid IP frame */
3355             if (!(cqe->fast_path_cqe.status_flags &
3356                   ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3357                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3358                 if (__predict_false(cqe_fp_flags &
3359                                     ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3360                     fp->eth_q_stats.rx_hw_csum_errors++;
3361                 } else {
3362                     fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3363                     m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3364                 }
3365             }
3366 
3367             /* check for a valid TCP/UDP frame */
3368             if (!(cqe->fast_path_cqe.status_flags &
3369                   ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3370                 if (__predict_false(cqe_fp_flags &
3371                                     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3372                     fp->eth_q_stats.rx_hw_csum_errors++;
3373                 } else {
3374                     fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3375                     m->m_pkthdr.csum_data = 0xFFFF;
3376                     m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3377                                                CSUM_PSEUDO_HDR);
3378                 }
3379             }
3380         }
3381 
3382         /* if there is a VLAN tag then flag that info */
3383         if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3384             m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3385             m->m_flags |= M_VLANTAG;
3386         }
3387 
3388         /* specify what RSS queue was used for this flow */
3389         m->m_pkthdr.flowid = fp->index;
3390         BXE_SET_FLOWID(m);
3391 
3392 next_rx:
3393 
3394         bd_cons    = RX_BD_NEXT(bd_cons);
3395         bd_prod    = RX_BD_NEXT(bd_prod);
3396         bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3397 
3398         /* pass the frame to the stack */
3399         if (__predict_true(m != NULL)) {
3400             if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3401             rx_pkts++;
3402             if_input(ifp, m);
3403         }
3404 
3405 next_cqe:
3406 
3407         sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3408         sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3409 
3410         /* limit spinning on the queue */
3411         if (rc != 0)
3412             break;
3413 
3414         if (rx_pkts == sc->rx_budget) {
3415             fp->eth_q_stats.rx_budget_reached++;
3416             break;
3417         }
3418     } /* while work to do */
3419 
3420     fp->rx_bd_cons = bd_cons;
3421     fp->rx_bd_prod = bd_prod_fw;
3422     fp->rx_cq_cons = sw_cq_cons;
3423     fp->rx_cq_prod = sw_cq_prod;
3424 
3425     /* Update producers */
3426     bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3427 
3428     fp->eth_q_stats.rx_pkts += rx_pkts;
3429     fp->eth_q_stats.rx_calls++;
3430 
3431     BXE_FP_RX_UNLOCK(fp);
3432 
3433     return (sw_cq_cons != hw_cq_cons);
3434 }
3435 
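/*
 * Unmap and free the mbuf of a completed TX packet and return the new TX BD
 * consumer index derived from the packet's first BD and its BD count.
 */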
3436 static uint16_t
3437 bxe_free_tx_pkt(struct bxe_softc    *sc,
3438                 struct bxe_fastpath *fp,
3439                 uint16_t            idx)
3440 {
3441     struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3442     struct eth_tx_start_bd *tx_start_bd;
3443     uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3444     uint16_t new_cons;
3445     int nbd;
3446 
3447     /* unmap the mbuf from non-paged memory */
3448     bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3449 
3450     tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3451     nbd = le16toh(tx_start_bd->nbd) - 1;
3452 
3453     new_cons = (tx_buf->first_bd + nbd);
3454 
3455     /* free the mbuf */
3456     if (__predict_true(tx_buf->m != NULL)) {
3457         m_freem(tx_buf->m);
3458         fp->eth_q_stats.mbuf_alloc_tx--;
3459     } else {
3460         fp->eth_q_stats.tx_chain_lost_mbuf++;
3461     }
3462 
3463     tx_buf->m = NULL;
3464     tx_buf->first_bd = 0;
3465 
3466     return (new_cons);
3467 }
3468 
3469 /* transmit timeout watchdog */
3470 static int
3471 bxe_watchdog(struct bxe_softc    *sc,
3472              struct bxe_fastpath *fp)
3473 {
3474     BXE_FP_TX_LOCK(fp);
3475 
3476     if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3477         BXE_FP_TX_UNLOCK(fp);
3478         return (0);
3479     }
3480 
3481     BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3482 
3483     BXE_FP_TX_UNLOCK(fp);
3484     BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK);
3485     taskqueue_enqueue_timeout(taskqueue_thread,
3486         &sc->sp_err_timeout_task, hz/10);
3487 
3488     return (-1);
3489 }
3490 
3491 /* processes transmit completions */
3492 static uint8_t
3493 bxe_txeof(struct bxe_softc    *sc,
3494           struct bxe_fastpath *fp)
3495 {
3496     if_t ifp = sc->ifp;
3497     uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3498     uint16_t tx_bd_avail;
3499 
3500     BXE_FP_TX_LOCK_ASSERT(fp);
3501 
3502     bd_cons = fp->tx_bd_cons;
3503     hw_cons = le16toh(*fp->tx_cons_sb);
3504     sw_cons = fp->tx_pkt_cons;
3505 
3506     while (sw_cons != hw_cons) {
3507         pkt_cons = TX_BD(sw_cons);
3508 
3509         BLOGD(sc, DBG_TX,
3510               "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3511               fp->index, hw_cons, sw_cons, pkt_cons);
3512 
3513         bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3514 
3515         sw_cons++;
3516     }
3517 
3518     fp->tx_pkt_cons = sw_cons;
3519     fp->tx_bd_cons  = bd_cons;
3520 
3521     BLOGD(sc, DBG_TX,
3522           "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3523           fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3524 
3525     mb();
3526 
3527     tx_bd_avail = bxe_tx_avail(sc, fp);
3528 
3529     if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3530         if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3531     } else {
3532         if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3533     }
3534 
3535     if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3536         /* reset the watchdog timer if there are pending transmits */
3537         fp->watchdog_timer = BXE_TX_TIMEOUT;
3538         return (TRUE);
3539     } else {
3540         /* clear watchdog when there are no pending transmits */
3541         fp->watchdog_timer = 0;
3542         return (FALSE);
3543     }
3544 }
3545 
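/*
 * Wait for every TX fastpath to complete its pending transmits, reclaiming
 * completions as we go; panic if a queue fails to drain within ~1 second.
 */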
3546 static void
3547 bxe_drain_tx_queues(struct bxe_softc *sc)
3548 {
3549     struct bxe_fastpath *fp;
3550     int i, count;
3551 
3552     /* wait until all TX fastpath tasks have completed */
3553     for (i = 0; i < sc->num_queues; i++) {
3554         fp = &sc->fp[i];
3555 
3556         count = 1000;
3557 
3558         while (bxe_has_tx_work(fp)) {
3559 
3560             BXE_FP_TX_LOCK(fp);
3561             bxe_txeof(sc, fp);
3562             BXE_FP_TX_UNLOCK(fp);
3563 
3564             if (count == 0) {
3565                 BLOGE(sc, "Timeout waiting for fp[%d] "
3566                           "transmits to complete!\n", i);
3567                 bxe_panic(sc, ("tx drain failure\n"));
3568                 return;
3569             }
3570 
3571             count--;
3572             DELAY(1000);
3573             rmb();
3574         }
3575     }
3576 
3577     return;
3578 }
3579 
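/*
 * Delete all MAC addresses of the given type from the supplied vlan/mac
 * object, optionally blocking until the ramrod completes.
 */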
3580 static int
3581 bxe_del_all_macs(struct bxe_softc          *sc,
3582                  struct ecore_vlan_mac_obj *mac_obj,
3583                  int                       mac_type,
3584                  uint8_t                   wait_for_comp)
3585 {
3586     unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3587     int rc;
3588 
3589     /* wait for completion of requested */
3590     if (wait_for_comp) {
3591         bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3592     }
3593 
3594     /* Set the mac type of addresses we want to clear */
3595     bxe_set_bit(mac_type, &vlan_mac_flags);
3596 
3597     rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3598     if (rc < 0) {
3599         BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3600             rc, mac_type, wait_for_comp);
3601     }
3602 
3603     return (rc);
3604 }
3605 
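/*
 * Translate the driver rx_mode (NONE/NORMAL/ALLMULTI/PROMISC) into the ecore
 * RX and TX (internal switching) accept flag sets used by the rx_mode
 * ramrod. Returns -1 for an unknown mode.
 */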
3606 static int
3607 bxe_fill_accept_flags(struct bxe_softc *sc,
3608                       uint32_t         rx_mode,
3609                       unsigned long    *rx_accept_flags,
3610                       unsigned long    *tx_accept_flags)
3611 {
3612     /* Clear the flags first */
3613     *rx_accept_flags = 0;
3614     *tx_accept_flags = 0;
3615 
3616     switch (rx_mode) {
3617     case BXE_RX_MODE_NONE:
3618         /*
3619          * 'drop all' supersedes any accept flags that may have been
3620          * passed to the function.
3621          */
3622         break;
3623 
3624     case BXE_RX_MODE_NORMAL:
3625         bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3626         bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3627         bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3628 
3629         /* internal switching mode */
3630         bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3631         bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3632         bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3633 
3634         break;
3635 
3636     case BXE_RX_MODE_ALLMULTI:
3637         bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3638         bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3639         bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3640 
3641         /* internal switching mode */
3642         bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3643         bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3644         bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3645 
3646         break;
3647 
3648     case BXE_RX_MODE_PROMISC:
3649         /*
3650          * According to the definition of SI mode, an interface in promisc mode
3651          * should receive matched and unmatched (in resolution of port)
3652          * unicast packets.
3653          */
3654         bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3655         bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3656         bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3657         bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3658 
3659         /* internal switching mode */
3660         bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3661         bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3662 
3663         if (IS_MF_SI(sc)) {
3664             bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3665         } else {
3666             bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3667         }
3668 
3669         break;
3670 
3671     default:
3672         BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3673         return (-1);
3674     }
3675 
3676     /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3677     if (rx_mode != BXE_RX_MODE_NONE) {
3678         bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3679         bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3680     }
3681 
3682     return (0);
3683 }
3684 
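/*
 * Build the rx_mode ramrod parameters for the given client id and submit the
 * command through ecore_config_rx_mode().
 */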
3685 static int
3686 bxe_set_q_rx_mode(struct bxe_softc *sc,
3687                   uint8_t          cl_id,
3688                   unsigned long    rx_mode_flags,
3689                   unsigned long    rx_accept_flags,
3690                   unsigned long    tx_accept_flags,
3691                   unsigned long    ramrod_flags)
3692 {
3693     struct ecore_rx_mode_ramrod_params ramrod_param;
3694     int rc;
3695 
3696     memset(&ramrod_param, 0, sizeof(ramrod_param));
3697 
3698     /* Prepare ramrod parameters */
3699     ramrod_param.cid = 0;
3700     ramrod_param.cl_id = cl_id;
3701     ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3702     ramrod_param.func_id = SC_FUNC(sc);
3703 
3704     ramrod_param.pstate = &sc->sp_state;
3705     ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3706 
3707     ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3708     ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3709 
3710     bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3711 
3712     ramrod_param.ramrod_flags = ramrod_flags;
3713     ramrod_param.rx_mode_flags = rx_mode_flags;
3714 
3715     ramrod_param.rx_accept_flags = rx_accept_flags;
3716     ramrod_param.tx_accept_flags = tx_accept_flags;
3717 
3718     rc = ecore_config_rx_mode(sc, &ramrod_param);
3719     if (rc < 0) {
3720         BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3721             "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3722             "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3723             (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3724             (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3725         return (rc);
3726     }
3727 
3728     return (0);
3729 }
3730 
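/*
 * Apply the current sc->rx_mode setting to the chip: convert it into accept
 * flags and issue the rx_mode ramrod using the leading queue's client id.
 */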
3731 static int
3732 bxe_set_storm_rx_mode(struct bxe_softc *sc)
3733 {
3734     unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3735     unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3736     int rc;
3737 
3738     rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3739                                &tx_accept_flags);
3740     if (rc) {
3741         return (rc);
3742     }
3743 
3744     bxe_set_bit(RAMROD_RX, &ramrod_flags);
3745     bxe_set_bit(RAMROD_TX, &ramrod_flags);
3746 
3747     /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3748     return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3749                               rx_accept_flags, tx_accept_flags,
3750                               ramrod_flags));
3751 }
3752 
3753 /* returns the "mcp load_code" according to global load_count array */
3754 static int
3755 bxe_nic_load_no_mcp(struct bxe_softc *sc)
3756 {
3757     int path = SC_PATH(sc);
3758     int port = SC_PORT(sc);
3759 
3760     BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3761           path, load_count[path][0], load_count[path][1],
3762           load_count[path][2]);
3763     load_count[path][0]++;
3764     load_count[path][1 + port]++;
3765     BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3766           path, load_count[path][0], load_count[path][1],
3767           load_count[path][2]);
3768     if (load_count[path][0] == 1) {
3769         return (FW_MSG_CODE_DRV_LOAD_COMMON);
3770     } else if (load_count[path][1 + port] == 1) {
3771         return (FW_MSG_CODE_DRV_LOAD_PORT);
3772     } else {
3773         return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3774     }
3775 }
3776 
3777 /* returns the "mcp load_code" according to global load_count array */
3778 static int
3779 bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3780 {
3781     int port = SC_PORT(sc);
3782     int path = SC_PATH(sc);
3783 
3784     BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3785           path, load_count[path][0], load_count[path][1],
3786           load_count[path][2]);
3787     load_count[path][0]--;
3788     load_count[path][1 + port]--;
3789     BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3790           path, load_count[path][0], load_count[path][1],
3791           load_count[path][2]);
3792     if (load_count[path][0] == 0) {
3793         return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3794     } else if (load_count[path][1 + port] == 0) {
3795         return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3796     } else {
3797         return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3798     }
3799 }
3800 
3801 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3802 static uint32_t
3803 bxe_send_unload_req(struct bxe_softc *sc,
3804                     int              unload_mode)
3805 {
3806     uint32_t reset_code = 0;
3807 
3808     /* Select the UNLOAD request mode */
3809     if (unload_mode == UNLOAD_NORMAL) {
3810         reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3811     } else {
3812         reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3813     }
3814 
3815     /* Send the request to the MCP */
3816     if (!BXE_NOMCP(sc)) {
3817         reset_code = bxe_fw_command(sc, reset_code, 0);
3818     } else {
3819         reset_code = bxe_nic_unload_no_mcp(sc);
3820     }
3821 
3822     return (reset_code);
3823 }
3824 
3825 /* send UNLOAD_DONE command to the MCP */
3826 static void
3827 bxe_send_unload_done(struct bxe_softc *sc,
3828                      uint8_t          keep_link)
3829 {
3830     uint32_t reset_param =
3831         keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3832 
3833     /* Report UNLOAD_DONE to MCP */
3834     if (!BXE_NOMCP(sc)) {
3835         bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3836     }
3837 }
3838 
3839 static int
3840 bxe_func_wait_started(struct bxe_softc *sc)
3841 {
3842     int tout = 50;
3843 
3844     if (!sc->port.pmf) {
3845         return (0);
3846     }
3847 
3848     /*
3849      * (assumption: No Attention from MCP at this stage)
3850      * PMF probably in the middle of TX disable/enable transaction
3851      * 1. Sync ISR for default SB
3852      * 2. Sync SP queue - this guarantees us that attention handling started
3853      * 3. Wait until the TX disable/enable transaction completes
3854      *
3855      * 1+2 guarantee that if DCBX attention was scheduled it already changed
3856      * pending bit of transaction from STARTED-->TX_STOPPED, if we already
3857      * received completion for the transaction the state is TX_STOPPED.
3858      * State will return to STARTED after completion of TX_STOPPED-->STARTED
3859      * transaction.
3860      */
3861 
3862     /* XXX make sure default SB ISR is done */
3863     /* need a way to synchronize an irq (intr_mtx?) */
3864 
3865     /* XXX flush any work queues */
3866 
3867     while (ecore_func_get_state(sc, &sc->func_obj) !=
3868            ECORE_F_STATE_STARTED && tout--) {
3869         DELAY(20000);
3870     }
3871 
3872     if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3873         /*
3874          * Failed to complete the transaction in a "good way"
3875          * Force both transactions with CLR bit.
3876          */
3877         struct ecore_func_state_params func_params = { NULL };
3878 
3879         BLOGE(sc, "Unexpected function state! "
3880                   "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3881 
3882         func_params.f_obj = &sc->func_obj;
3883         bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3884 
3885         /* STARTED-->TX_STOPPED */
3886         func_params.cmd = ECORE_F_CMD_TX_STOP;
3887         ecore_func_state_change(sc, &func_params);
3888 
3889         /* TX_STOPPED-->STARTED */
3890         func_params.cmd = ECORE_F_CMD_TX_START;
3891         return (ecore_func_state_change(sc, &func_params));
3892     }
3893 
3894     return (0);
3895 }
3896 
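/*
 * Stop a fastpath queue by walking the ecore queue state machine: HALT the
 * connection, TERMINATE it, then delete its CFC entry.
 */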
3897 static int
3898 bxe_stop_queue(struct bxe_softc *sc,
3899                int              index)
3900 {
3901     struct bxe_fastpath *fp = &sc->fp[index];
3902     struct ecore_queue_state_params q_params = { NULL };
3903     int rc;
3904 
3905     BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3906 
3907     q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3908     /* We want to wait for completion in this context */
3909     bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3910 
3911     /* Stop the primary connection: */
3912 
3913     /* ...halt the connection */
3914     q_params.cmd = ECORE_Q_CMD_HALT;
3915     rc = ecore_queue_state_change(sc, &q_params);
3916     if (rc) {
3917         return (rc);
3918     }
3919 
3920     /* ...terminate the connection */
3921     q_params.cmd = ECORE_Q_CMD_TERMINATE;
3922     memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3923     q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3924     rc = ecore_queue_state_change(sc, &q_params);
3925     if (rc) {
3926         return (rc);
3927     }
3928 
3929     /* ...delete cfc entry */
3930     q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3931     memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3932     q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3933     return (ecore_queue_state_change(sc, &q_params));
3934 }
3935 
3936 /* wait for the outstanding SP commands */
3937 static inline uint8_t
3938 bxe_wait_sp_comp(struct bxe_softc *sc,
3939                  unsigned long    mask)
3940 {
3941     unsigned long tmp;
3942     int tout = 5000; /* wait for 5 secs tops */
3943 
3944     while (tout--) {
3945         mb();
3946         if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3947             return (TRUE);
3948         }
3949 
3950         DELAY(1000);
3951     }
3952 
3953     mb();
3954 
3955     tmp = atomic_load_acq_long(&sc->sp_state);
3956     if (tmp & mask) {
3957         BLOGE(sc, "Filtering completion timed out: "
3958                   "sp_state 0x%lx, mask 0x%lx\n",
3959               tmp, mask);
3960         return (FALSE);
3961     }
3962 
3963     return (FALSE);
3964 }
3965 
3966 static int
3967 bxe_func_stop(struct bxe_softc *sc)
3968 {
3969     struct ecore_func_state_params func_params = { NULL };
3970     int rc;
3971 
3972     /* prepare parameters for function state transitions */
3973     bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3974     func_params.f_obj = &sc->func_obj;
3975     func_params.cmd = ECORE_F_CMD_STOP;
3976 
3977     /*
3978      * Try to stop the function the 'good way'. If it fails (in case
3979      * of a parity error during bxe_chip_cleanup()) and we are
3980      * not in a debug mode, perform a state transaction in order to
3981      * enable further HW_RESET transaction.
3982      */
3983     rc = ecore_func_state_change(sc, &func_params);
3984     if (rc) {
3985         BLOGE(sc, "FUNC_STOP ramrod failed. "
3986                   "Running a dry transaction (%d)\n", rc);
3987         bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3988         return (ecore_func_state_change(sc, &func_params));
3989     }
3990 
3991     return (0);
3992 }
3993 
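/* reset the chip via the HW_RESET function state transition for the given load code */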
3994 static int
3995 bxe_reset_hw(struct bxe_softc *sc,
3996              uint32_t         load_code)
3997 {
3998     struct ecore_func_state_params func_params = { NULL };
3999 
4000     /* Prepare parameters for function state transitions */
4001     bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4002 
4003     func_params.f_obj = &sc->func_obj;
4004     func_params.cmd = ECORE_F_CMD_HW_RESET;
4005 
4006     func_params.params.hw_init.load_phase = load_code;
4007 
4008     return (ecore_func_state_change(sc, &func_params));
4009 }
4010 
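/*
 * Disable chip interrupt generation (when 'disable_hw' is set). Full
 * synchronization with in-flight ISRs and the slowpath task is still a TODO,
 * as noted by the XXX comments in the body.
 */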
4011 static void
4012 bxe_int_disable_sync(struct bxe_softc *sc,
4013                      int              disable_hw)
4014 {
4015     if (disable_hw) {
4016         /* prevent the HW from sending interrupts */
4017         bxe_int_disable(sc);
4018     }
4019 
4020     /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4021     /* make sure all ISRs are done */
4022 
4023     /* XXX make sure sp_task is not running */
4024     /* cancel and flush work queues */
4025 }
4026 
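/*
 * Orderly chip shutdown: drain the TX queues, remove MAC and multicast
 * filtering, switch RX to "drop all", close every queue, stop the function,
 * then reset the hardware and report UNLOAD_DONE to the MCP.
 */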
4027 static void
4028 bxe_chip_cleanup(struct bxe_softc *sc,
4029                  uint32_t         unload_mode,
4030                  uint8_t          keep_link)
4031 {
4032     int port = SC_PORT(sc);
4033     struct ecore_mcast_ramrod_params rparam = { NULL };
4034     uint32_t reset_code;
4035     int i, rc = 0;
4036 
4037     bxe_drain_tx_queues(sc);
4038 
4039     /* give HW time to discard old tx messages */
4040     DELAY(1000);
4041 
4042     /* Clean all ETH MACs */
4043     rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4044     if (rc < 0) {
4045         BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4046     }
4047 
4048     /* Clean up UC list  */
4049     rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4050     if (rc < 0) {
4051         BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4052     }
4053 
4054     /* Disable LLH */
4055     if (!CHIP_IS_E1(sc)) {
4056         REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4057     }
4058 
4059     /* Set "drop all" to stop Rx */
4060 
4061     /*
4062      * We need to take the BXE_MCAST_LOCK() here in order to prevent
4063      * a race between the completion code and this code.
4064      */
4065     BXE_MCAST_LOCK(sc);
4066 
4067     if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4068         bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4069     } else {
4070         bxe_set_storm_rx_mode(sc);
4071     }
4072 
4073     /* Clean up multicast configuration */
4074     rparam.mcast_obj = &sc->mcast_obj;
4075     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4076     if (rc < 0) {
4077         BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4078     }
4079 
4080     BXE_MCAST_UNLOCK(sc);
4081 
4082     // XXX bxe_iov_chip_cleanup(sc);
4083 
4084     /*
4085      * Send the UNLOAD_REQUEST to the MCP. The returned code indicates
4086      * whether this function should perform a FUNCTION, PORT, or COMMON HW
4087      * reset.
4088      */
4089     reset_code = bxe_send_unload_req(sc, unload_mode);
4090 
4091     /*
4092      * (assumption: No Attention from MCP at this stage)
4093      * PMF probably in the middle of TX disable/enable transaction
4094      */
4095     rc = bxe_func_wait_started(sc);
4096     if (rc) {
4097         BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4098     }
4099 
4100     /*
4101      * Close multi and leading connections
4102      * Completions for ramrods are collected in a synchronous way
4103      */
4104     for (i = 0; i < sc->num_queues; i++) {
4105         if (bxe_stop_queue(sc, i)) {
4106             goto unload_error;
4107         }
4108     }
4109 
4110     /*
4111      * If the SP settings didn't get completed by now then something
4112      * has gone very wrong.
4113      */
4114     if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4115         BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4116     }
4117 
4118 unload_error:
4119 
4120     rc = bxe_func_stop(sc);
4121     if (rc) {
4122         BLOGE(sc, "Function stop failed!(%d)\n", rc);
4123     }
4124 
4125     /* disable HW interrupts */
4126     bxe_int_disable_sync(sc, TRUE);
4127 
4128     /* detach interrupts */
4129     bxe_interrupt_detach(sc);
4130 
4131     /* Reset the chip */
4132     rc = bxe_reset_hw(sc, reset_code);
4133     if (rc) {
4134         BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4135     }
4136 
4137     /* Report UNLOAD_DONE to MCP */
4138     bxe_send_unload_done(sc, keep_link);
4139 }
4140 
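/*
 * Clear the AEU mask bits associated with the "close the gates" mechanism
 * (the PXP/NIG close masks on newer chips, the equivalent attention mask
 * bits on E1).
 */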
4141 static void
4142 bxe_disable_close_the_gate(struct bxe_softc *sc)
4143 {
4144     uint32_t val;
4145     int port = SC_PORT(sc);
4146 
4147     BLOGD(sc, DBG_LOAD,
4148           "Disabling 'close the gates'\n");
4149 
4150     if (CHIP_IS_E1(sc)) {
4151         uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4152                                MISC_REG_AEU_MASK_ATTN_FUNC_0;
4153         val = REG_RD(sc, addr);
4154         val &= ~(0x300);
4155         REG_WR(sc, addr, val);
4156     } else {
4157         val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4158         val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4159                  MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4160         REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4161     }
4162 }
4163 
4164 /*
4165  * Cleans the objects that have internal lists without sending
4166  * ramrods. Should be run when interrupts are disabled.
4167  */
4168 static void
4169 bxe_squeeze_objects(struct bxe_softc *sc)
4170 {
4171     unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4172     struct ecore_mcast_ramrod_params rparam = { NULL };
4173     struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4174     int rc;
4175 
4176     /* Cleanup MACs' object first... */
4177 
4178     /* Wait for completion of requested */
4179     bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4180     /* Perform a dry cleanup */
4181     bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4182 
4183     /* Clean ETH primary MAC */
4184     bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4185     rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4186                              &ramrod_flags);
4187     if (rc != 0) {
4188         BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4189     }
4190 
4191     /* Cleanup UC list */
4192     vlan_mac_flags = 0;
4193     bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4194     rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4195                              &ramrod_flags);
4196     if (rc != 0) {
4197         BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4198     }
4199 
4200     /* Now clean mcast object... */
4201 
4202     rparam.mcast_obj = &sc->mcast_obj;
4203     bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4204 
4205     /* Add a DEL command... */
4206     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4207     if (rc < 0) {
4208         BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4209     }
4210 
4211     /* now wait until all pending commands are cleared */
4212 
4213     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4214     while (rc != 0) {
4215         if (rc < 0) {
4216             BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4217             return;
4218         }
4219 
4220         rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4221     }
4222 }
4223 
4224 /* stop the controller */
4225 static __noinline int
4226 bxe_nic_unload(struct bxe_softc *sc,
4227                uint32_t         unload_mode,
4228                uint8_t          keep_link)
4229 {
4230     uint8_t global = FALSE;
4231     uint32_t val;
4232     int i;
4233 
4234     BXE_CORE_LOCK_ASSERT(sc);
4235 
4236     if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4237 
4238     for (i = 0; i < sc->num_queues; i++) {
4239         struct bxe_fastpath *fp;
4240 
4241         fp = &sc->fp[i];
4242         fp->watchdog_timer = 0;
4243         BXE_FP_TX_LOCK(fp);
4244         BXE_FP_TX_UNLOCK(fp);
4245     }
4246 
4247     BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4248 
4249     /* mark driver as unloaded in shmem2 */
4250     if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4251         val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4252         SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4253                   val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4254     }
4255 
4256     if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4257         (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4258 
4259         if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
4260             /*
4261              * We can get here if the driver has been unloaded
4262              * during parity error recovery and is either waiting for a
4263              * leader to complete or for other functions to unload and
4264              * then ifconfig down has been issued. In this case we want to
4265              * unload and let other functions to complete a recovery
4266              * process.
4267              */
4268             sc->recovery_state = BXE_RECOVERY_DONE;
4269             sc->is_leader = 0;
4270             bxe_release_leader_lock(sc);
4271             mb();
4272             BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4273         }
4274         BLOGE(sc, "Can't unload in closed or error state, recovery_state 0x%x"
4275             " state = 0x%x\n", sc->recovery_state, sc->state);
4276         return (-1);
4277     }
4278 
4279     /*
4280      * Nothing to do during unload if previous bxe_nic_load()
4281      * did not complete successfully - all resources are released.
4282      */
4283     if ((sc->state == BXE_STATE_CLOSED) ||
4284         (sc->state == BXE_STATE_ERROR)) {
4285         return (0);
4286     }
4287 
4288     sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4289     mb();
4290 
4291     /* stop tx */
4292     bxe_tx_disable(sc);
4293 
4294     sc->rx_mode = BXE_RX_MODE_NONE;
4295     /* XXX set rx mode ??? */
4296 
4297     if (IS_PF(sc) && !sc->grcdump_done) {
4298         /* set ALWAYS_ALIVE bit in shmem */
4299         sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4300 
4301         bxe_drv_pulse(sc);
4302 
4303         bxe_stats_handle(sc, STATS_EVENT_STOP);
4304         bxe_save_statistics(sc);
4305     }
4306 
4307     /* wait till consumers catch up with producers in all queues */
4308     bxe_drain_tx_queues(sc);
4309 
4310     /* if VF, indicate to the PF that this function is going down (the PF will
4311      * delete the sp elements and clear the initializations)
4312      */
4313     if (IS_VF(sc)) {
4314         ; /* bxe_vfpf_close_vf(sc); */
4315     } else if (unload_mode != UNLOAD_RECOVERY) {
4316         /* if this is a normal/close unload need to clean up chip */
4317         if (!sc->grcdump_done)
4318             bxe_chip_cleanup(sc, unload_mode, keep_link);
4319     } else {
4320         /* Send the UNLOAD_REQUEST to the MCP */
4321         bxe_send_unload_req(sc, unload_mode);
4322 
4323         /*
4324          * Prevent transactions to host from the functions on the
4325          * engine that doesn't reset global blocks in case of global
4326          * attention once global blocks are reset and gates are opened
4327          * (the engine whose leader will perform the recovery
4328          * last).
4329          */
4330         if (!CHIP_IS_E1x(sc)) {
4331             bxe_pf_disable(sc);
4332         }
4333 
4334         /* disable HW interrupts */
4335         bxe_int_disable_sync(sc, TRUE);
4336 
4337         /* detach interrupts */
4338         bxe_interrupt_detach(sc);
4339 
4340         /* Report UNLOAD_DONE to MCP */
4341         bxe_send_unload_done(sc, FALSE);
4342     }
4343 
4344     /*
4345      * At this stage no more interrupts will arrive so we may safely clean
4346      * the queueable objects here in case they failed to get cleaned so far.
4347      */
4348     if (IS_PF(sc)) {
4349         bxe_squeeze_objects(sc);
4350     }
4351 
4352     /* There should be no more pending SP commands at this stage */
4353     sc->sp_state = 0;
4354 
4355     sc->port.pmf = 0;
4356 
4357     bxe_free_fp_buffers(sc);
4358 
4359     if (IS_PF(sc)) {
4360         bxe_free_mem(sc);
4361     }
4362 
4363     bxe_free_fw_stats_mem(sc);
4364 
4365     sc->state = BXE_STATE_CLOSED;
4366 
4367     /*
4368      * Check if there are pending parity attentions. If there are - set
4369      * RECOVERY_IN_PROGRESS.
4370      */
4371     if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4372         bxe_set_reset_in_progress(sc);
4373 
4374         /* Set RESET_IS_GLOBAL if needed */
4375         if (global) {
4376             bxe_set_reset_global(sc);
4377         }
4378     }
4379 
4380     /*
4381      * The last driver must disable a "close the gate" if there is no
4382      * parity attention or "process kill" pending.
4383      */
4384     if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4385         bxe_reset_is_done(sc, SC_PATH(sc))) {
4386         bxe_disable_close_the_gate(sc);
4387     }
4388 
4389     BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4390 
4391     bxe_link_report(sc);
4392 
4393     return (0);
4394 }
4395 
4396 /*
4397  * Called by the OS to set various media options (i.e. link, speed, etc.) when
4398  * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4399  */
4400 static int
4401 bxe_ifmedia_update(struct ifnet  *ifp)
4402 {
4403     struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4404     struct ifmedia *ifm;
4405 
4406     ifm = &sc->ifmedia;
4407 
4408     /* We only support Ethernet media type. */
4409     if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4410         return (EINVAL);
4411     }
4412 
4413     switch (IFM_SUBTYPE(ifm->ifm_media)) {
4414     case IFM_AUTO:
4415          break;
4416     case IFM_10G_CX4:
4417     case IFM_10G_SR:
4418     case IFM_10G_T:
4419     case IFM_10G_TWINAX:
4420     default:
4421         /* We don't support changing the media type. */
4422         BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4423               IFM_SUBTYPE(ifm->ifm_media));
4424         return (EINVAL);
4425     }
4426 
4427     return (0);
4428 }
4429 
4430 /*
4431  * Called by the OS to get the current media status (i.e. link, speed, etc.).
4432  */
4433 static void
4434 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4435 {
4436     struct bxe_softc *sc = if_getsoftc(ifp);
4437 
4438     /* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..."
4439        line if the IFM_AVALID flag is *NOT* set. So we need to set this
4440        flag unconditionally (irrespective of the administrative
4441        'up/down' state of the interface) to ensure that the line is always
4442        displayed.
4443     */
4444     ifmr->ifm_status = IFM_AVALID;
4445 
4446     /* Setup the default interface info. */
4447     ifmr->ifm_active = IFM_ETHER;
4448 
4449     /* Report link down if the driver isn't running. */
4450     if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4451         ifmr->ifm_active |= IFM_NONE;
4452         BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__);
4453         BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n",
4454                 __func__, sc->link_vars.link_up);
4455         return;
4456     }
4457 
4458 
4459     if (sc->link_vars.link_up) {
4460         ifmr->ifm_status |= IFM_ACTIVE;
4461         ifmr->ifm_active |= IFM_FDX;
4462     } else {
4463         ifmr->ifm_active |= IFM_NONE;
4464         BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n",
4465                 __func__);
4466         return;
4467     }
4468 
4469     ifmr->ifm_active |= sc->media;
4470     return;
4471 }
4472 
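/*
 * Chip taskqueue handler. Currently only CHIP_TQ_REINIT is serviced, which
 * restarts the interface if it is up and running.
 */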
4473 static void
4474 bxe_handle_chip_tq(void *context,
4475                    int  pending)
4476 {
4477     struct bxe_softc *sc = (struct bxe_softc *)context;
4478     long work = atomic_load_acq_long(&sc->chip_tq_flags);
4479 
4480     switch (work)
4481     {
4482 
4483     case CHIP_TQ_REINIT:
4484         if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4485             /* restart the interface */
4486             BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4487             bxe_periodic_stop(sc);
4488             BXE_CORE_LOCK(sc);
4489             bxe_stop_locked(sc);
4490             bxe_init_locked(sc);
4491             BXE_CORE_UNLOCK(sc);
4492         }
4493         break;
4494 
4495     default:
4496         break;
4497     }
4498 }
4499 
4500 /*
4501  * Handles any IOCTL calls from the operating system.
4502  *
4503  * Returns:
4504  *   0 = Success, >0 Failure
4505  */
4506 static int
4507 bxe_ioctl(if_t ifp,
4508           u_long       command,
4509           caddr_t      data)
4510 {
4511     struct bxe_softc *sc = if_getsoftc(ifp);
4512     struct ifreq *ifr = (struct ifreq *)data;
4513     int mask = 0;
4514     int reinit = 0;
4515     int error = 0;
4516 
4517     int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4518     int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4519 
4520     switch (command)
4521     {
4522     case SIOCSIFMTU:
4523         BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4524               ifr->ifr_mtu);
4525 
4526         if (sc->mtu == ifr->ifr_mtu) {
4527             /* nothing to change */
4528             break;
4529         }
4530 
4531         if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4532             BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4533                   ifr->ifr_mtu, mtu_min, mtu_max);
4534             error = EINVAL;
4535             break;
4536         }
4537 
4538         atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4539                              (unsigned long)ifr->ifr_mtu);
4540 	/*
4541         atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4542                               (unsigned long)ifr->ifr_mtu);
4543 	XXX - Not sure why it needs to be atomic
4544 	*/
4545 	if_setmtu(ifp, ifr->ifr_mtu);
4546         reinit = 1;
4547         break;
4548 
4549     case SIOCSIFFLAGS:
4550         /* toggle the interface state up or down */
4551         BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4552 
4553 	BXE_CORE_LOCK(sc);
4554         /* check if the interface is up */
4555         if (if_getflags(ifp) & IFF_UP) {
4556             if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4557                 /* set the receive mode flags */
4558                 bxe_set_rx_mode(sc);
4559             } else if(sc->state != BXE_STATE_DISABLED) {
4560 		bxe_init_locked(sc);
4561             }
4562         } else {
4563             if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4564 		bxe_periodic_stop(sc);
4565 		bxe_stop_locked(sc);
4566             }
4567         }
4568 	BXE_CORE_UNLOCK(sc);
4569 
4570         break;
4571 
4572     case SIOCADDMULTI:
4573     case SIOCDELMULTI:
4574         /* add/delete multicast addresses */
4575         BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4576 
4577         /* check if the interface is up */
4578         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4579             /* set the receive mode flags */
4580 	    BXE_CORE_LOCK(sc);
4581             bxe_set_rx_mode(sc);
4582 	    BXE_CORE_UNLOCK(sc);
4583         }
4584 
4585         break;
4586 
4587     case SIOCSIFCAP:
4588         /* find out which capabilities have changed */
4589         mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4590 
4591         BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4592               mask);
4593 
4594         /* toggle the LRO capabilities enable flag */
4595         if (mask & IFCAP_LRO) {
4596 	    if_togglecapenable(ifp, IFCAP_LRO);
4597             BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4598                   (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4599             reinit = 1;
4600         }
4601 
4602         /* toggle the TXCSUM checksum capabilities enable flag */
4603         if (mask & IFCAP_TXCSUM) {
4604 	    if_togglecapenable(ifp, IFCAP_TXCSUM);
4605             BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4606                   (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4607             if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4608                 if_sethwassistbits(ifp, (CSUM_IP      |
4609                                     CSUM_TCP      |
4610                                     CSUM_UDP      |
4611                                     CSUM_TSO      |
4612                                     CSUM_TCP_IPV6 |
4613                                     CSUM_UDP_IPV6), 0);
4614             } else {
4615 		if_clearhwassist(ifp); /* XXX */
4616             }
4617         }
4618 
4619         /* toggle the RXCSUM checksum capabilities enable flag */
4620         if (mask & IFCAP_RXCSUM) {
4621 	    if_togglecapenable(ifp, IFCAP_RXCSUM);
4622             BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4623                   (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4624             if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4625                 if_sethwassistbits(ifp, (CSUM_IP      |
4626                                     CSUM_TCP      |
4627                                     CSUM_UDP      |
4628                                     CSUM_TSO      |
4629                                     CSUM_TCP_IPV6 |
4630                                     CSUM_UDP_IPV6), 0);
4631             } else {
4632 		if_clearhwassist(ifp); /* XXX */
4633             }
4634         }
4635 
4636         /* toggle TSO4 capabilities enabled flag */
4637         if (mask & IFCAP_TSO4) {
4638             if_togglecapenable(ifp, IFCAP_TSO4);
4639             BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4640                   (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4641         }
4642 
4643         /* toggle TSO6 capabilities enabled flag */
4644         if (mask & IFCAP_TSO6) {
4645 	    if_togglecapenable(ifp, IFCAP_TSO6);
4646             BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4647                   (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4648         }
4649 
4650         /* toggle VLAN_HWTSO capabilities enabled flag */
4651         if (mask & IFCAP_VLAN_HWTSO) {
4652 
4653 	    if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4654             BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4655                   (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4656         }
4657 
4658         /* toggle VLAN_HWCSUM capabilities enabled flag */
4659         if (mask & IFCAP_VLAN_HWCSUM) {
4660             /* XXX investigate this... */
4661             BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4662             error = EINVAL;
4663         }
4664 
4665         /* toggle VLAN_MTU capabilities enable flag */
4666         if (mask & IFCAP_VLAN_MTU) {
4667             /* XXX investigate this... */
4668             BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4669             error = EINVAL;
4670         }
4671 
4672         /* toggle VLAN_HWTAGGING capabilities enabled flag */
4673         if (mask & IFCAP_VLAN_HWTAGGING) {
4674             /* XXX investigate this... */
4675             BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4676             error = EINVAL;
4677         }
4678 
4679         /* toggle VLAN_HWFILTER capabilities enabled flag */
4680         if (mask & IFCAP_VLAN_HWFILTER) {
4681             /* XXX investigate this... */
4682             BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4683             error = EINVAL;
4684         }
4685 
4686         /* XXX not yet...
4687          * IFCAP_WOL_MAGIC
4688          */
4689 
4690         break;
4691 
4692     case SIOCSIFMEDIA:
4693     case SIOCGIFMEDIA:
4694         /* set/get interface media */
4695         BLOGD(sc, DBG_IOCTL,
4696               "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4697               (command & 0xff));
4698         error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4699         break;
4700 
4701     default:
4702         BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4703               (command & 0xff));
4704         error = ether_ioctl(ifp, command, data);
4705         break;
4706     }
4707 
4708     if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4709         BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4710               "Re-initializing hardware from IOCTL change\n");
4711         bxe_periodic_stop(sc);
4712         BXE_CORE_LOCK(sc);
4713         bxe_stop_locked(sc);
4714         bxe_init_locked(sc);
4715         BXE_CORE_UNLOCK(sc);
4716     }
4717 
4718     return (error);
4719 }
4720 
4721 static __noinline void
4722 bxe_dump_mbuf(struct bxe_softc *sc,
4723               struct mbuf      *m,
4724               uint8_t          contents)
4725 {
4726     char * type;
4727     int i = 0;
4728 
4729     if (!(sc->debug & DBG_MBUF)) {
4730         return;
4731     }
4732 
4733     if (m == NULL) {
4734         BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4735         return;
4736     }
4737 
4738     while (m) {
4739 
4740         BLOGD(sc, DBG_MBUF,
4741               "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4742               i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4743 
4744         if (m->m_flags & M_PKTHDR) {
4745              BLOGD(sc, DBG_MBUF,
4746                    "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4747                    i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4748                    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4749         }
4750 
4751         if (m->m_flags & M_EXT) {
4752             switch (m->m_ext.ext_type) {
4753             case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4754             case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4755             case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4756             case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4757             case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4758             case EXT_PACKET:     type = "EXT_PACKET";     break;
4759             case EXT_MBUF:       type = "EXT_MBUF";       break;
4760             case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4761             case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4762             case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4763             case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4764             default:             type = "UNKNOWN";        break;
4765             }
4766 
4767             BLOGD(sc, DBG_MBUF,
4768                   "%02d: - m_ext: %p ext_size=%d type=%s\n",
4769                   i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4770         }
4771 
4772         if (contents) {
4773             bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4774         }
4775 
4776         m = m->m_next;
4777         i++;
4778     }
4779 }
4780 
4781 /*
4782  * Checks that every 10-BD sliding window of a TSO frame covers >= MSS bytes.
4783  * Of the 13 total BDs, 3 are reserved: 1 for the headers BD, 1 for the
4784  * parsing BD, and 1 for the last BD, leaving a 10-BD data window.
4785  * The headers come in a separate BD in FreeBSD, hence 13 - 3 = 10.
4786  * Returns: 0 if OK to send, 1 if the packet needs further defragmentation
4787  */
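/*
 * Worked example (editorial note, hypothetical numbers): with an MSS of
 * 1460 and 13 segments of 512 bytes each, every 10-segment window sums to
 * 5120 >= 1460, so the frame may be sent as-is. Were the segments only
 * 100 bytes, a window would total just 1000 < 1460 and this routine would
 * return 1, telling the caller to defragment the mbuf chain first.
 */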
4788 static int
4789 bxe_chktso_window(struct bxe_softc  *sc,
4790                   int               nsegs,
4791                   bus_dma_segment_t *segs,
4792                   struct mbuf       *m)
4793 {
4794     uint32_t num_wnds, wnd_size, wnd_sum;
4795     int32_t frag_idx, wnd_idx;
4796     unsigned short lso_mss;
4797 
4798     wnd_sum = 0;
4799     wnd_size = 10;
4800     num_wnds = nsegs - wnd_size;
4801     lso_mss = htole16(m->m_pkthdr.tso_segsz);
4802 
4803     /*
4804      * The Eth+IP+TCP headers all reside in the first FreeBSD mbuf, so compute
4805      * the first window's data sum while skipping the first segment, which is
4806      * assumed to hold only those headers.
4807      */
4808     for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4809         wnd_sum += htole16(segs[frag_idx].ds_len);
4810     }
4811 
4812     /* check the first 10 bd window size */
4813     if (wnd_sum < lso_mss) {
4814         return (1);
4815     }
4816 
4817     /* run through the windows */
4818     for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4819         /* subtract the first mbuf->m_len of the last wndw(-header) */
4820         /* subtract the segment that slides out of the previous window */
4821         /* add the next mbuf len to the len of our new window */
4822         wnd_sum += htole16(segs[frag_idx].ds_len);
4823         if (wnd_sum < lso_mss) {
4824             return (1);
4825         }
4826     }
4827 
4828     return (0);
4829 }
4830 
4831 static uint8_t
4832 bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4833                     struct mbuf         *m,
4834                     uint32_t            *parsing_data)
4835 {
4836     struct ether_vlan_header *eh = NULL;
4837     struct ip *ip4 = NULL;
4838     struct ip6_hdr *ip6 = NULL;
4839     caddr_t ip = NULL;
4840     struct tcphdr *th = NULL;
4841     int e_hlen, ip_hlen, l4_off;
4842     uint16_t proto;
4843 
4844     if (m->m_pkthdr.csum_flags == CSUM_IP) {
4845         /* no L4 checksum offload needed */
4846         return (0);
4847     }
4848 
4849     /* get the Ethernet header */
4850     eh = mtod(m, struct ether_vlan_header *);
4851 
4852     /* handle VLAN encapsulation if present */
4853     if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4854         e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4855         proto  = ntohs(eh->evl_proto);
4856     } else {
4857         e_hlen = ETHER_HDR_LEN;
4858         proto  = ntohs(eh->evl_encap_proto);
4859     }
4860 
4861     switch (proto) {
4862     case ETHERTYPE_IP:
4863         /* get the IP header, if mbuf len < 20 then header in next mbuf */
4864         ip4 = (m->m_len < sizeof(struct ip)) ?
4865                   (struct ip *)m->m_next->m_data :
4866                   (struct ip *)(m->m_data + e_hlen);
4867         /* ip_hl is number of 32-bit words */
4868         ip_hlen = (ip4->ip_hl << 2);
4869         ip = (caddr_t)ip4;
4870         break;
4871     case ETHERTYPE_IPV6:
4872         /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4873         ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4874                   (struct ip6_hdr *)m->m_next->m_data :
4875                   (struct ip6_hdr *)(m->m_data + e_hlen);
4876         /* XXX cannot support offload with IPv6 extensions */
4877         ip_hlen = sizeof(struct ip6_hdr);
4878         ip = (caddr_t)ip6;
4879         break;
4880     default:
4881         /* We can't offload in this case... */
4882         /* XXX error stat ??? */
4883         return (0);
4884     }
4885 
4886     /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4887     l4_off = (e_hlen + ip_hlen);
4888 
4889     *parsing_data |=
4890         (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4891          ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
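    /*
     * Editorial note: the _W suffix indicates the field is expressed in
     * 16-bit words, hence the byte offset l4_off is shifted right by one.
     * For example, a 14-byte Ethernet header plus a 20-byte IPv4 header
     * gives l4_off = 34 bytes = 17 words.
     */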
4892 
4893     if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4894                                   CSUM_TSO |
4895                                   CSUM_TCP_IPV6)) {
4896         fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4897         th = (struct tcphdr *)(ip + ip_hlen);
4898         /* th_off is number of 32-bit words */
4899         *parsing_data |= ((th->th_off <<
4900                            ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4901                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4902         return (l4_off + (th->th_off << 2)); /* entire header length */
4903     } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4904                                          CSUM_UDP_IPV6)) {
4905         fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4906         return (l4_off + sizeof(struct udphdr)); /* entire header length */
4907     } else {
4908         /* XXX error stat ??? */
4909         return (0);
4910     }
4911 }
4912 
4913 static uint8_t
4914 bxe_set_pbd_csum(struct bxe_fastpath        *fp,
4915                  struct mbuf                *m,
4916                  struct eth_tx_parse_bd_e1x *pbd)
4917 {
4918     struct ether_vlan_header *eh = NULL;
4919     struct ip *ip4 = NULL;
4920     struct ip6_hdr *ip6 = NULL;
4921     caddr_t ip = NULL;
4922     struct tcphdr *th = NULL;
4923     struct udphdr *uh = NULL;
4924     int e_hlen, ip_hlen;
4925     uint16_t proto;
4926     uint8_t hlen;
4927     uint16_t tmp_csum;
4928     uint32_t *tmp_uh;
4929 
4930     /* get the Ethernet header */
4931     eh = mtod(m, struct ether_vlan_header *);
4932 
4933     /* handle VLAN encapsulation if present */
4934     if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4935         e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4936         proto  = ntohs(eh->evl_proto);
4937     } else {
4938         e_hlen = ETHER_HDR_LEN;
4939         proto  = ntohs(eh->evl_encap_proto);
4940     }
4941 
4942     switch (proto) {
4943     case ETHERTYPE_IP:
4944         /* get the IP header, if mbuf len < 20 then header in next mbuf */
4945         ip4 = (m->m_len < sizeof(struct ip)) ?
4946                   (struct ip *)m->m_next->m_data :
4947                   (struct ip *)(m->m_data + e_hlen);
4948         /* ip_hl is number of 32-bit words */
4949         ip_hlen = (ip4->ip_hl << 1);
4950         ip = (caddr_t)ip4;
4951         break;
4952     case ETHERTYPE_IPV6:
4953         /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4954         ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4955                   (struct ip6_hdr *)m->m_next->m_data :
4956                   (struct ip6_hdr *)(m->m_data + e_hlen);
4957         /* XXX cannot support offload with IPv6 extensions */
4958         ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4959         ip = (caddr_t)ip6;
4960         break;
4961     default:
4962         /* We can't offload in this case... */
4963         /* XXX error stat ??? */
4964         return (0);
4965     }
4966 
4967     hlen = (e_hlen >> 1);
4968 
4969     /* note that rest of global_data is indirectly zeroed here */
4970     if (m->m_flags & M_VLANTAG) {
4971         pbd->global_data =
4972             htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4973     } else {
4974         pbd->global_data = htole16(hlen);
4975     }
4976 
4977     pbd->ip_hlen_w = ip_hlen;
4978 
4979     hlen += pbd->ip_hlen_w;
4980 
4981     /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4982 
4983     if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4984                                   CSUM_TSO |
4985                                   CSUM_TCP_IPV6)) {
4986         th = (struct tcphdr *)(ip + (ip_hlen << 1));
4987         /* th_off is number of 32-bit words */
4988         hlen += (uint16_t)(th->th_off << 1);
4989     } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4990                                          CSUM_UDP_IPV6)) {
4991         uh = (struct udphdr *)(ip + (ip_hlen << 1));
4992         hlen += (sizeof(struct udphdr) / 2);
4993     } else {
4994         /* valid case as only CSUM_IP was set */
4995         return (0);
4996     }
4997 
4998     pbd->total_hlen_w = htole16(hlen);
4999 
5000     if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5001                                   CSUM_TSO |
5002                                   CSUM_TCP_IPV6)) {
5003         fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5004         pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5005     } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5006                                          CSUM_UDP_IPV6)) {
5007         fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5008 
5009         /*
5010          * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5011          * checksums and does not know anything about the UDP header and where
5012          * the checksum field is located. It only knows about TCP. Therefore
5013          * we "lie" to the hardware for outgoing UDP packets w/ checksum
5014          * offload. Since the checksum field offset for TCP is 16 bytes and
5015          * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5016          * bytes less than the start of the UDP header. This allows the
5017          * hardware to write the checksum in the correct spot. But the
5018          * hardware will compute a checksum which includes the last 10 bytes
5019          * of the IP header. To correct this we tweak the stack computed
5020          * pseudo checksum by folding in the calculation of the inverse
5021          * checksum for those final 10 bytes of the IP header. This allows
5022          * the correct checksum to be computed by the hardware.
5023          */
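        /*
         * Illustrative note: the fake "TCP header" handed to the chip
         * starts at (UDP header - 10), so the checksum the chip writes at
         * offset 16 of that fake header lands exactly on uh_sum at offset
         * 6 of the real UDP header. The in_pseudo() below folds out the
         * extra 10 IP-header bytes (4 + 4 + 2) the chip sums as a result.
         */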
5024 
5025         /* set pointer 10 bytes before UDP header */
5026         tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5027 
5028         /* calculate a pseudo header checksum over the first 10 bytes */
5029         tmp_csum = in_pseudo(*tmp_uh,
5030                              *(tmp_uh + 1),
5031                              *(uint16_t *)(tmp_uh + 2));
5032 
5033         pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5034     }
5035 
5036     return (hlen * 2); /* entire header length, number of bytes */
5037 }
5038 
5039 static void
5040 bxe_set_pbd_lso_e2(struct mbuf *m,
5041                    uint32_t    *parsing_data)
5042 {
5043     *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5044                        ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5045                       ETH_TX_PARSE_BD_E2_LSO_MSS);
5046 
5047     /* XXX test for IPv6 with extension header... */
5048 }
5049 
5050 static void
5051 bxe_set_pbd_lso(struct mbuf                *m,
5052                 struct eth_tx_parse_bd_e1x *pbd)
5053 {
5054     struct ether_vlan_header *eh = NULL;
5055     struct ip *ip = NULL;
5056     struct tcphdr *th = NULL;
5057     int e_hlen;
5058 
5059     /* get the Ethernet header */
5060     eh = mtod(m, struct ether_vlan_header *);
5061 
5062     /* handle VLAN encapsulation if present */
5063     e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5064                  (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5065 
5066     /* get the IP and TCP header, with LSO entire header in first mbuf */
5067     /* XXX assuming IPv4 */
5068     ip = (struct ip *)(m->m_data + e_hlen);
5069     th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5070 
5071     pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5072     pbd->tcp_send_seq = ntohl(th->th_seq);
5073     pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
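    /*
     * Editorial note: ((uint32_t *)th)[3] is bytes 12-15 of the TCP header
     * (data offset, flags, window); after ntohl(), shifting right by 16 and
     * masking with 0xff isolates the flags byte (byte 13).
     */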
5074 
5075 #if 1
5076         /* XXX IPv4 */
5077         pbd->ip_id = ntohs(ip->ip_id);
5078         pbd->tcp_pseudo_csum =
5079             ntohs(in_pseudo(ip->ip_src.s_addr,
5080                             ip->ip_dst.s_addr,
5081                             htons(IPPROTO_TCP)));
5082 #else
5083         /* XXX IPv6 */
5084         pbd->tcp_pseudo_csum =
5085             ntohs(in_pseudo(&ip6->ip6_src,
5086                             &ip6->ip6_dst,
5087                             htons(IPPROTO_TCP)));
5088 #endif
5089 
5090     pbd->global_data |=
5091         htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5092 }
5093 
5094 /*
5095  * Encapsulate an mbuf cluster into the tx bd chain and make the memory
5096  * visible to the controller.
5097  *
5098  * If an mbuf is submitted to this routine and cannot be given to the
5099  * controller (e.g. it has too many fragments) then the function may free
5100  * the mbuf and return to the caller.
5101  *
5102  * Returns:
5103  *   0 = Success, !0 = Failure
5104  *   Note the side effect that an mbuf may be freed if it causes a problem.
5105  */
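/*
 * Caller sketch (editorial; mirrors bxe_tx_start_locked() below): on a
 * non-zero return the caller must re-check the mbuf pointer. If it is
 * still non-NULL the frame was not consumed and should be re-queued;
 * otherwise the mbuf has already been freed here.
 */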
5106 static int
5107 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5108 {
5109     bus_dma_segment_t segs[32];
5110     struct mbuf *m0;
5111     struct bxe_sw_tx_bd *tx_buf;
5112     struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5113     struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5114     /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5115     struct eth_tx_bd *tx_data_bd;
5116     struct eth_tx_bd *tx_total_pkt_size_bd;
5117     struct eth_tx_start_bd *tx_start_bd;
5118     uint16_t bd_prod, pkt_prod, total_pkt_size;
5119     uint8_t mac_type;
5120     int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5121     struct bxe_softc *sc;
5122     uint16_t tx_bd_avail;
5123     struct ether_vlan_header *eh;
5124     uint32_t pbd_e2_parsing_data = 0;
5125     uint8_t hlen = 0;
5126     int tmp_bd;
5127     int i;
5128 
5129     sc = fp->sc;
5130 
5131     M_ASSERTPKTHDR(*m_head);
5132 
5133     m0 = *m_head;
5134     rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5135     tx_start_bd = NULL;
5136     tx_data_bd = NULL;
5137     tx_total_pkt_size_bd = NULL;
5138 
5139     /* get the H/W pointer for packets and BDs */
5140     pkt_prod = fp->tx_pkt_prod;
5141     bd_prod = fp->tx_bd_prod;
5142 
5143     mac_type = UNICAST_ADDRESS;
5144 
5145     /* map the mbuf into the next open DMAable memory */
5146     tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5147     error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5148                                     tx_buf->m_map, m0,
5149                                     segs, &nsegs, BUS_DMA_NOWAIT);
5150 
5151     /* mapping errors */
5152     if(__predict_false(error != 0)) {
5153         fp->eth_q_stats.tx_dma_mapping_failure++;
5154         if (error == ENOMEM) {
5155             /* resource issue, try again later */
5156             rc = ENOMEM;
5157         } else if (error == EFBIG) {
5158             /* possibly recoverable with defragmentation */
5159             fp->eth_q_stats.mbuf_defrag_attempts++;
5160             m0 = m_defrag(*m_head, M_NOWAIT);
5161             if (m0 == NULL) {
5162                 fp->eth_q_stats.mbuf_defrag_failures++;
5163                 rc = ENOBUFS;
5164             } else {
5165                 /* defrag successful, try mapping again */
5166                 *m_head = m0;
5167                 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5168                                                 tx_buf->m_map, m0,
5169                                                 segs, &nsegs, BUS_DMA_NOWAIT);
5170                 if (error) {
5171                     fp->eth_q_stats.tx_dma_mapping_failure++;
5172                     rc = error;
5173                 }
5174             }
5175         } else {
5176             /* unknown, unrecoverable mapping error */
5177             BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5178             bxe_dump_mbuf(sc, m0, FALSE);
5179             rc = error;
5180         }
5181 
5182         goto bxe_tx_encap_continue;
5183     }
5184 
5185     tx_bd_avail = bxe_tx_avail(sc, fp);
5186 
5187     /* make sure there is enough room in the send queue */
5188     if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5189         /* Recoverable, try again later. */
5190         fp->eth_q_stats.tx_hw_queue_full++;
5191         bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5192         rc = ENOMEM;
5193         goto bxe_tx_encap_continue;
5194     }
5195 
5196     /* capture the current H/W TX chain high watermark */
5197     if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5198                         (TX_BD_USABLE - tx_bd_avail))) {
5199         fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5200     }
5201 
5202     /* make sure it fits in the packet window */
5203     if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5204         /*
5205          * The mbuf may be too big for the controller to handle. If the frame
5206          * is a TSO frame we'll need to do an additional check.
5207          */
5208         if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5209             if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5210                 goto bxe_tx_encap_continue; /* OK to send */
5211             } else {
5212                 fp->eth_q_stats.tx_window_violation_tso++;
5213             }
5214         } else {
5215             fp->eth_q_stats.tx_window_violation_std++;
5216         }
5217 
5218         /* lets try to defragment this mbuf and remap it */
5219         fp->eth_q_stats.mbuf_defrag_attempts++;
5220         bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5221 
5222         m0 = m_defrag(*m_head, M_NOWAIT);
5223         if (m0 == NULL) {
5224             fp->eth_q_stats.mbuf_defrag_failures++;
5225             /* Ugh, just drop the frame... :( */
5226             rc = ENOBUFS;
5227         } else {
5228             /* defrag successful, try mapping again */
5229             *m_head = m0;
5230             error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5231                                             tx_buf->m_map, m0,
5232                                             segs, &nsegs, BUS_DMA_NOWAIT);
5233             if (error) {
5234                 fp->eth_q_stats.tx_dma_mapping_failure++;
5235                 /* No sense in trying to defrag/copy chain, drop it. :( */
5236                 rc = error;
5237             } else {
5238                /* if the chain is still too long then drop it */
5239                 if(m0->m_pkthdr.csum_flags & CSUM_TSO) {
5240                     /*
5241                      * in case TSO is enabled nsegs should be checked against
5242                      * BXE_TSO_MAX_SEGMENTS
5243                      */
5244                     if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
5245                         bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5246                         fp->eth_q_stats.nsegs_path1_errors++;
5247                         rc = ENODEV;
5248                     }
5249                 } else {
5250                     if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5251                         bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5252                         fp->eth_q_stats.nsegs_path2_errors++;
5253                         rc = ENODEV;
5254                     }
5255                 }
5256             }
5257         }
5258     }
5259 
5260 bxe_tx_encap_continue:
5261 
5262     /* Check for errors */
5263     if (rc) {
5264         if (rc == ENOMEM) {
5265             /* recoverable, try again later */
5266         } else {
5267             fp->eth_q_stats.tx_soft_errors++;
5268             fp->eth_q_stats.mbuf_alloc_tx--;
5269             m_freem(*m_head);
5270             *m_head = NULL;
5271         }
5272 
5273         return (rc);
5274     }
5275 
5276     /* set flag according to packet type (UNICAST_ADDRESS is default) */
5277     if (m0->m_flags & M_BCAST) {
5278         mac_type = BROADCAST_ADDRESS;
5279     } else if (m0->m_flags & M_MCAST) {
5280         mac_type = MULTICAST_ADDRESS;
5281     }
5282 
5283     /* store the mbuf into the mbuf ring */
5284     tx_buf->m        = m0;
5285     tx_buf->first_bd = fp->tx_bd_prod;
5286     tx_buf->flags    = 0;
5287 
5288     /* prepare the first transmit (start) BD for the mbuf */
5289     tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5290 
5291     BLOGD(sc, DBG_TX,
5292           "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5293           pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5294 
5295     tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5296     tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5297     tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5298     total_pkt_size += tx_start_bd->nbytes;
5299     tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5300 
5301     tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5302 
5303     /* all frames have at least Start BD + Parsing BD */
5304     nbds = nsegs + 1;
5305     tx_start_bd->nbd = htole16(nbds);
5306 
5307     if (m0->m_flags & M_VLANTAG) {
5308         tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5309         tx_start_bd->bd_flags.as_bitfield |=
5310             (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5311     } else {
5312         /* vf tx, start bd must hold the ethertype for fw to enforce it */
5313         if (IS_VF(sc)) {
5314             /* map ethernet header to find type and header length */
5315             eh = mtod(m0, struct ether_vlan_header *);
5316             tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5317         } else {
5318             /* used by FW for packet accounting */
5319             tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5320         }
5321     }
5322 
5323     /*
5324      * add a parsing BD from the chain. The parsing BD is always added even
5325      * though it is only used for TSO and checksum offload
5326      */
5327     bd_prod = TX_BD_NEXT(bd_prod);
5328 
5329     if (m0->m_pkthdr.csum_flags) {
5330         if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5331             fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5332             tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5333         }
5334 
5335         if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5336             tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5337                                                   ETH_TX_BD_FLAGS_L4_CSUM);
5338         } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5339             tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5340                                                   ETH_TX_BD_FLAGS_IS_UDP |
5341                                                   ETH_TX_BD_FLAGS_L4_CSUM);
5342         } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5343                    (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5344             tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5345         } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5346             tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5347                                                   ETH_TX_BD_FLAGS_IS_UDP);
5348         }
5349     }
5350 
5351     if (!CHIP_IS_E1x(sc)) {
5352         pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5353         memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5354 
5355         if (m0->m_pkthdr.csum_flags) {
5356             hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5357         }
5358 
5359         SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5360                  mac_type);
5361     } else {
5362         uint16_t global_data = 0;
5363 
5364         pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5365         memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5366 
5367         if (m0->m_pkthdr.csum_flags) {
5368             hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5369         }
5370 
5371         SET_FLAG(global_data,
5372                  ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5373         pbd_e1x->global_data |= htole16(global_data);
5374     }
5375 
5376     /* setup the parsing BD with TSO specific info */
5377     if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5378         fp->eth_q_stats.tx_ofld_frames_lso++;
5379         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5380 
5381         if (__predict_false(tx_start_bd->nbytes > hlen)) {
5382             fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5383 
5384             /* split the first BD into header/data making the fw job easy */
5385             nbds++;
5386             tx_start_bd->nbd = htole16(nbds);
5387             tx_start_bd->nbytes = htole16(hlen);
5388 
5389             bd_prod = TX_BD_NEXT(bd_prod);
5390 
5391             /* new transmit BD after the tx_parse_bd */
5392             tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5393             tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5394             tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5395             tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5396             if (tx_total_pkt_size_bd == NULL) {
5397                 tx_total_pkt_size_bd = tx_data_bd;
5398             }
5399 
5400             BLOGD(sc, DBG_TX,
5401                   "TSO split header size is %d (%x:%x) nbds %d\n",
5402                   le16toh(tx_start_bd->nbytes),
5403                   le32toh(tx_start_bd->addr_hi),
5404                   le32toh(tx_start_bd->addr_lo),
5405                   nbds);
5406         }
5407 
5408         if (!CHIP_IS_E1x(sc)) {
5409             bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5410         } else {
5411             bxe_set_pbd_lso(m0, pbd_e1x);
5412         }
5413     }
5414 
5415     if (pbd_e2_parsing_data) {
5416         pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5417     }
5418 
5419     /* prepare remaining BDs, start tx bd contains first seg/frag */
5420     for (i = 1; i < nsegs ; i++) {
5421         bd_prod = TX_BD_NEXT(bd_prod);
5422         tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5423         tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5424         tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5425         tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5426         if (tx_total_pkt_size_bd == NULL) {
5427             tx_total_pkt_size_bd = tx_data_bd;
5428         }
5429         total_pkt_size += tx_data_bd->nbytes;
5430     }
5431 
5432     BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5433 
5434     if (tx_total_pkt_size_bd != NULL) {
5435         tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5436     }
5437 
5438     if (__predict_false(sc->debug & DBG_TX)) {
5439         tmp_bd = tx_buf->first_bd;
5440         for (i = 0; i < nbds; i++)
5441         {
5442             if (i == 0) {
5443                 BLOGD(sc, DBG_TX,
5444                       "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5445                       "bd_flags=0x%x hdr_nbds=%d\n",
5446                       tx_start_bd,
5447                       tmp_bd,
5448                       le16toh(tx_start_bd->nbd),
5449                       le16toh(tx_start_bd->vlan_or_ethertype),
5450                       tx_start_bd->bd_flags.as_bitfield,
5451                       (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5452             } else if (i == 1) {
5453                 if (pbd_e1x) {
5454                     BLOGD(sc, DBG_TX,
5455                           "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5456                           "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5457                           "tcp_seq=%u total_hlen_w=%u\n",
5458                           pbd_e1x,
5459                           tmp_bd,
5460                           pbd_e1x->global_data,
5461                           pbd_e1x->ip_hlen_w,
5462                           pbd_e1x->ip_id,
5463                           pbd_e1x->lso_mss,
5464                           pbd_e1x->tcp_flags,
5465                           pbd_e1x->tcp_pseudo_csum,
5466                           pbd_e1x->tcp_send_seq,
5467                           le16toh(pbd_e1x->total_hlen_w));
5468                 } else { /* if (pbd_e2) */
5469                     BLOGD(sc, DBG_TX,
5470                           "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5471                           "src=%02x:%02x:%02x parsing_data=0x%x\n",
5472                           pbd_e2,
5473                           tmp_bd,
5474                           pbd_e2->data.mac_addr.dst_hi,
5475                           pbd_e2->data.mac_addr.dst_mid,
5476                           pbd_e2->data.mac_addr.dst_lo,
5477                           pbd_e2->data.mac_addr.src_hi,
5478                           pbd_e2->data.mac_addr.src_mid,
5479                           pbd_e2->data.mac_addr.src_lo,
5480                           pbd_e2->parsing_data);
5481                 }
5482             }
5483 
5484             if (i != 1) { /* skip the parse bd as it doesn't hold data */
5485                 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5486                 BLOGD(sc, DBG_TX,
5487                       "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5488                       tx_data_bd,
5489                       tmp_bd,
5490                       le16toh(tx_data_bd->nbytes),
5491                       le32toh(tx_data_bd->addr_hi),
5492                       le32toh(tx_data_bd->addr_lo));
5493             }
5494 
5495             tmp_bd = TX_BD_NEXT(tmp_bd);
5496         }
5497     }
5498 
5499     BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5500 
5501     /* update TX BD producer index value for next TX */
5502     bd_prod = TX_BD_NEXT(bd_prod);
5503 
5504     /*
5505      * If the chain of tx_bd's describing this frame is adjacent to or spans
5506      * an eth_tx_next_bd element then we need to increment the nbds value.
5507      */
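    /*
     * Editorial illustration: each BD page ends in a link element that
     * carries no frame data. If TX_BD_IDX(bd_prod) is already smaller than
     * nbds, the BDs for this frame wrapped past such a page boundary, so
     * the link element is counted in nbds as well.
     */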
5508     if (TX_BD_IDX(bd_prod) < nbds) {
5509         nbds++;
5510     }
5511 
5512     /* don't allow reordering of writes for nbd and packets */
5513     mb();
5514 
5515     fp->tx_db.data.prod += nbds;
5516 
5517     /* producer points to the next free tx_bd at this point */
5518     fp->tx_pkt_prod++;
5519     fp->tx_bd_prod = bd_prod;
5520 
5521     DOORBELL(sc, fp->index, fp->tx_db.raw);
5522 
5523     fp->eth_q_stats.tx_pkts++;
5524 
5525     /* Prevent speculative reads from getting ahead of the status block. */
5526     bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5527                       0, 0, BUS_SPACE_BARRIER_READ);
5528 
5529     /* Prevent speculative reads from getting ahead of the doorbell. */
5530     bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5531                       0, 0, BUS_SPACE_BARRIER_READ);
5532 
5533     return (0);
5534 }
5535 
5536 static void
5537 bxe_tx_start_locked(struct bxe_softc *sc,
5538                     if_t ifp,
5539                     struct bxe_fastpath *fp)
5540 {
5541     struct mbuf *m = NULL;
5542     int tx_count = 0;
5543     uint16_t tx_bd_avail;
5544 
5545     BXE_FP_TX_LOCK_ASSERT(fp);
5546 
5547     /* keep adding entries while there are frames to send */
5548     while (!if_sendq_empty(ifp)) {
5549 
5550         /*
5551          * check for any frames to send
5552          * dequeue can still be NULL even if queue is not empty
5553          */
5554         m = if_dequeue(ifp);
5555         if (__predict_false(m == NULL)) {
5556             break;
5557         }
5558 
5559         /* the mbuf now belongs to us */
5560         fp->eth_q_stats.mbuf_alloc_tx++;
5561 
5562         /*
5563          * Put the frame into the transmit ring. If we don't have room,
5564          * place the mbuf back at the head of the TX queue, set the
5565          * OACTIVE flag, and wait for the NIC to drain the chain.
5566          */
5567         if (__predict_false(bxe_tx_encap(fp, &m))) {
5568             fp->eth_q_stats.tx_encap_failures++;
5569             if (m != NULL) {
5570                 /* mark the TX queue as full and return the frame */
5571                 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5572                 if_sendq_prepend(ifp, m);
5573                 fp->eth_q_stats.mbuf_alloc_tx--;
5574                 fp->eth_q_stats.tx_queue_xoff++;
5575             }
5576 
5577             /* stop looking for more work */
5578             break;
5579         }
5580 
5581         /* the frame was enqueued successfully */
5582         tx_count++;
5583 
5584         /* send a copy of the frame to any BPF listeners. */
5585         if_etherbpfmtap(ifp, m);
5586 
5587         tx_bd_avail = bxe_tx_avail(sc, fp);
5588 
5589         /* handle any completions if we're running low */
5590         if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5591             /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5592             bxe_txeof(sc, fp);
5593             if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5594                 break;
5595             }
5596         }
5597     }
5598 
5599     /* all TX packets were dequeued and/or the tx ring is full */
5600     if (tx_count > 0) {
5601         /* reset the TX watchdog timeout timer */
5602         fp->watchdog_timer = BXE_TX_TIMEOUT;
5603     }
5604 }
5605 
5606 /* Legacy (non-RSS) dispatch routine */
5607 static void
5608 bxe_tx_start(if_t ifp)
5609 {
5610     struct bxe_softc *sc;
5611     struct bxe_fastpath *fp;
5612 
5613     sc = if_getsoftc(ifp);
5614 
5615     if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5616         BLOGW(sc, "Interface not running, ignoring transmit request\n");
5617         return;
5618     }
5619 
5620     if (!sc->link_vars.link_up) {
5621         BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5622         return;
5623     }
5624 
5625     fp = &sc->fp[0];
5626 
5627     if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5628         fp->eth_q_stats.tx_queue_full_return++;
5629         return;
5630     }
5631 
5632     BXE_FP_TX_LOCK(fp);
5633     bxe_tx_start_locked(sc, ifp, fp);
5634     BXE_FP_TX_UNLOCK(fp);
5635 }
5636 
5637 static int
5638 bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5639                        if_t                ifp,
5640                        struct bxe_fastpath *fp,
5641                        struct mbuf         *m)
5642 {
5643     struct buf_ring *tx_br = fp->tx_br;
5644     struct mbuf *next;
5645     int depth, rc, tx_count;
5646     uint16_t tx_bd_avail;
5647 
5648     rc = tx_count = 0;
5649 
5650     BXE_FP_TX_LOCK_ASSERT(fp);
5651 
5652     if (sc->state != BXE_STATE_OPEN)  {
5653         fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5654         return ENETDOWN;
5655     }
5656 
5657     if (!tx_br) {
5658         BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5659         return (EINVAL);
5660     }
5661 
5662     if (m != NULL) {
5663         rc = drbr_enqueue(ifp, tx_br, m);
5664         if (rc != 0) {
5665             fp->eth_q_stats.tx_soft_errors++;
5666             goto bxe_tx_mq_start_locked_exit;
5667         }
5668     }
5669 
5670     if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5671         fp->eth_q_stats.tx_request_link_down_failures++;
5672         goto bxe_tx_mq_start_locked_exit;
5673     }
5674 
5675     /* fetch the depth of the driver queue */
5676     depth = drbr_inuse(ifp, tx_br);
5677     if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5678         fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5679     }
5680 
5681     /* keep adding entries while there are frames to send */
5682     while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5683         /* handle any completions if we're running low */
5684         tx_bd_avail = bxe_tx_avail(sc, fp);
5685         if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5686             /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5687             bxe_txeof(sc, fp);
5688             tx_bd_avail = bxe_tx_avail(sc, fp);
5689             if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5690                 fp->eth_q_stats.bd_avail_too_less_failures++;
5691                 m_freem(next);
5692                 drbr_advance(ifp, tx_br);
5693                 rc = ENOBUFS;
5694                 break;
5695             }
5696         }
5697 
5698         /* the mbuf now belongs to us */
5699         fp->eth_q_stats.mbuf_alloc_tx++;
5700 
5701         /*
5702          * Put the frame into the transmit ring. If we don't have room,
5703          * place the mbuf back at the head of the TX queue, set the
5704          * OACTIVE flag, and wait for the NIC to drain the chain.
5705          */
5706         rc = bxe_tx_encap(fp, &next);
5707         if (__predict_false(rc != 0)) {
5708             fp->eth_q_stats.tx_encap_failures++;
5709             if (next != NULL) {
5710                 /* mark the TX queue as full and save the frame */
5711                 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5712                 drbr_putback(ifp, tx_br, next);
5713                 fp->eth_q_stats.mbuf_alloc_tx--;
5714                 fp->eth_q_stats.tx_frames_deferred++;
5715             } else
5716                 drbr_advance(ifp, tx_br);
5717 
5718             /* stop looking for more work */
5719             break;
5720         }
5721 
5722         /* the transmit frame was enqueued successfully */
5723         tx_count++;
5724 
5725         /* send a copy of the frame to any BPF listeners */
5726         if_etherbpfmtap(ifp, next);
5727 
5728         drbr_advance(ifp, tx_br);
5729     }
5730 
5731     /* all TX packets were dequeued and/or the tx ring is full */
5732     if (tx_count > 0) {
5733         /* reset the TX watchdog timeout timer */
5734         fp->watchdog_timer = BXE_TX_TIMEOUT;
5735     }
5736 
5737 bxe_tx_mq_start_locked_exit:
5738     /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5739     if (!drbr_empty(ifp, tx_br)) {
5740         fp->eth_q_stats.tx_mq_not_empty++;
5741         taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5742     }
5743 
5744     return (rc);
5745 }
5746 
5747 static void
5748 bxe_tx_mq_start_deferred(void *arg,
5749                          int pending)
5750 {
5751     struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5752     struct bxe_softc *sc = fp->sc;
5753     if_t ifp = sc->ifp;
5754 
5755     BXE_FP_TX_LOCK(fp);
5756     bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5757     BXE_FP_TX_UNLOCK(fp);
5758 }
5759 
5760 /* Multiqueue (TSS) dispatch routine. */
5761 static int
5762 bxe_tx_mq_start(struct ifnet *ifp,
5763                 struct mbuf  *m)
5764 {
5765     struct bxe_softc *sc = if_getsoftc(ifp);
5766     struct bxe_fastpath *fp;
5767     int fp_index, rc;
5768 
5769     fp_index = 0; /* default is the first queue */
5770 
5771     /* check if flowid is set */
5772 
5773     if (BXE_VALID_FLOWID(m))
5774         fp_index = (m->m_pkthdr.flowid % sc->num_queues);
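    /*
     * Editorial example: with num_queues = 4, an RSS flowid of 7 selects
     * fp[3]; frames without a valid flowid fall back to fp[0].
     */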
5775 
5776     fp = &sc->fp[fp_index];
5777 
5778     if (sc->state != BXE_STATE_OPEN)  {
5779         fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5780         return ENETDOWN;
5781     }
5782 
5783     if (BXE_FP_TX_TRYLOCK(fp)) {
5784         rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5785         BXE_FP_TX_UNLOCK(fp);
5786     } else {
5787         rc = drbr_enqueue(ifp, fp->tx_br, m);
5788         taskqueue_enqueue(fp->tq, &fp->tx_task);
5789     }
5790 
5791     return (rc);
5792 }
5793 
5794 static void
5795 bxe_mq_flush(struct ifnet *ifp)
5796 {
5797     struct bxe_softc *sc = if_getsoftc(ifp);
5798     struct bxe_fastpath *fp;
5799     struct mbuf *m;
5800     int i;
5801 
5802     for (i = 0; i < sc->num_queues; i++) {
5803         fp = &sc->fp[i];
5804 
5805         if (fp->state != BXE_FP_STATE_IRQ) {
5806             BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5807                   fp->index, fp->state);
5808             continue;
5809         }
5810 
5811         if (fp->tx_br != NULL) {
5812             BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5813             BXE_FP_TX_LOCK(fp);
5814             while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5815                 m_freem(m);
5816             }
5817             BXE_FP_TX_UNLOCK(fp);
5818         }
5819     }
5820 
5821     if_qflush(ifp);
5822 }
5823 
5824 static uint16_t
5825 bxe_cid_ilt_lines(struct bxe_softc *sc)
5826 {
5827     if (IS_SRIOV(sc)) {
5828         return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5829     }
5830     return (L2_ILT_LINES(sc));
5831 }
5832 
5833 static void
5834 bxe_ilt_set_info(struct bxe_softc *sc)
5835 {
5836     struct ilt_client_info *ilt_client;
5837     struct ecore_ilt *ilt = sc->ilt;
5838     uint16_t line = 0;
5839 
5840     ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5841     BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5842 
5843     /* CDU */
5844     ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5845     ilt_client->client_num = ILT_CLIENT_CDU;
5846     ilt_client->page_size = CDU_ILT_PAGE_SZ;
5847     ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5848     ilt_client->start = line;
5849     line += bxe_cid_ilt_lines(sc);
5850 
5851     if (CNIC_SUPPORT(sc)) {
5852         line += CNIC_ILT_LINES;
5853     }
5854 
5855     ilt_client->end = (line - 1);
5856 
5857     BLOGD(sc, DBG_LOAD,
5858           "ilt client[CDU]: start %d, end %d, "
5859           "psz 0x%x, flags 0x%x, hw psz %d\n",
5860           ilt_client->start, ilt_client->end,
5861           ilt_client->page_size,
5862           ilt_client->flags,
5863           ilog2(ilt_client->page_size >> 12));
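    /*
     * Editorial note: "hw psz" is log2 of the page size in 4KB units,
     * i.e. ilog2(page_size >> 12); a 32KB page logs as 3, for example.
     */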
5864 
5865     /* QM */
5866     if (QM_INIT(sc->qm_cid_count)) {
5867         ilt_client = &ilt->clients[ILT_CLIENT_QM];
5868         ilt_client->client_num = ILT_CLIENT_QM;
5869         ilt_client->page_size = QM_ILT_PAGE_SZ;
5870         ilt_client->flags = 0;
5871         ilt_client->start = line;
5872 
5873         /* 4 bytes for each cid */
5874         line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5875                              QM_ILT_PAGE_SZ);
5876 
5877         ilt_client->end = (line - 1);
5878 
5879         BLOGD(sc, DBG_LOAD,
5880               "ilt client[QM]: start %d, end %d, "
5881               "psz 0x%x, flags 0x%x, hw psz %d\n",
5882               ilt_client->start, ilt_client->end,
5883               ilt_client->page_size, ilt_client->flags,
5884               ilog2(ilt_client->page_size >> 12));
5885     }
5886 
5887     if (CNIC_SUPPORT(sc)) {
5888         /* SRC */
5889         ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5890         ilt_client->client_num = ILT_CLIENT_SRC;
5891         ilt_client->page_size = SRC_ILT_PAGE_SZ;
5892         ilt_client->flags = 0;
5893         ilt_client->start = line;
5894         line += SRC_ILT_LINES;
5895         ilt_client->end = (line - 1);
5896 
5897         BLOGD(sc, DBG_LOAD,
5898               "ilt client[SRC]: start %d, end %d, "
5899               "psz 0x%x, flags 0x%x, hw psz %d\n",
5900               ilt_client->start, ilt_client->end,
5901               ilt_client->page_size, ilt_client->flags,
5902               ilog2(ilt_client->page_size >> 12));
5903 
5904         /* TM */
5905         ilt_client = &ilt->clients[ILT_CLIENT_TM];
5906         ilt_client->client_num = ILT_CLIENT_TM;
5907         ilt_client->page_size = TM_ILT_PAGE_SZ;
5908         ilt_client->flags = 0;
5909         ilt_client->start = line;
5910         line += TM_ILT_LINES;
5911         ilt_client->end = (line - 1);
5912 
5913         BLOGD(sc, DBG_LOAD,
5914               "ilt client[TM]: start %d, end %d, "
5915               "psz 0x%x, flags 0x%x, hw psz %d\n",
5916               ilt_client->start, ilt_client->end,
5917               ilt_client->page_size, ilt_client->flags,
5918               ilog2(ilt_client->page_size >> 12));
5919     }
5920 
5921     KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5922 }
5923 
5924 static void
5925 bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5926 {
5927     int i;
5928     uint32_t rx_buf_size;
5929 
5930     rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5931 
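    /*
     * Editorial note: the loop below picks the smallest standard cluster
     * (a 2KB MCLBYTES cluster or a page-sized MJUMPAGESIZE cluster) that
     * holds the whole frame; a 1500-byte MTU, for instance, typically
     * still fits a 2KB cluster. Frames larger than a single page are
     * capped at one cluster/page per buffer.
     */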
5932     for (i = 0; i < sc->num_queues; i++) {
5933         if (rx_buf_size <= MCLBYTES) {
5934             sc->fp[i].rx_buf_size = rx_buf_size;
5935             sc->fp[i].mbuf_alloc_size = MCLBYTES;
5936         } else if (rx_buf_size <= MJUMPAGESIZE) {
5937             sc->fp[i].rx_buf_size = rx_buf_size;
5938             sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5939         } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
5940             sc->fp[i].rx_buf_size = MCLBYTES;
5941             sc->fp[i].mbuf_alloc_size = MCLBYTES;
5942         } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
5943             sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5944             sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5945         } else {
5946             sc->fp[i].rx_buf_size = MCLBYTES;
5947             sc->fp[i].mbuf_alloc_size = MCLBYTES;
5948         }
5949     }
5950 }
5951 
5952 static int
5953 bxe_alloc_ilt_mem(struct bxe_softc *sc)
5954 {
5955     int rc = 0;
5956 
5957     if ((sc->ilt =
5958          (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5959                                     M_BXE_ILT,
5960                                     (M_NOWAIT | M_ZERO))) == NULL) {
5961         rc = 1;
5962     }
5963 
5964     return (rc);
5965 }
5966 
5967 static int
5968 bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5969 {
5970     int rc = 0;
5971 
5972     if ((sc->ilt->lines =
5973          (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5974                                     M_BXE_ILT,
5975                                     (M_NOWAIT | M_ZERO))) == NULL) {
5976         rc = 1;
5977     }
5978 
5979     return (rc);
5980 }
5981 
5982 static void
5983 bxe_free_ilt_mem(struct bxe_softc *sc)
5984 {
5985     if (sc->ilt != NULL) {
5986         free(sc->ilt, M_BXE_ILT);
5987         sc->ilt = NULL;
5988     }
5989 }
5990 
5991 static void
5992 bxe_free_ilt_lines_mem(struct bxe_softc *sc)
5993 {
5994     if (sc->ilt->lines != NULL) {
5995         free(sc->ilt->lines, M_BXE_ILT);
5996         sc->ilt->lines = NULL;
5997     }
5998 }
5999 
6000 static void
6001 bxe_free_mem(struct bxe_softc *sc)
6002 {
6003     int i;
6004 
6005     for (i = 0; i < L2_ILT_LINES(sc); i++) {
6006         bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6007         sc->context[i].vcxt = NULL;
6008         sc->context[i].size = 0;
6009     }
6010 
6011     ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6012 
6013     bxe_free_ilt_lines_mem(sc);
6014 
6015 }
6016 
6017 static int
6018 bxe_alloc_mem(struct bxe_softc *sc)
6019 {
6020 
6021     int context_size;
6022     int allocated;
6023     int i;
6024 
6025     /*
6026      * Allocate memory for CDU context:
6027      * This memory is allocated separately and not in the generic ILT
6028      * functions because CDU differs in few aspects:
6029      * 1. There can be multiple entities allocating memory for context -
6030      * regular L2, CNIC, and SRIOV drivers. Each separately controls
6031      * its own ILT lines.
6032      * 2. Since CDU page-size is not a single 4KB page (which is the case
6033      * for the other ILT clients), to be efficient we want to support
6034      * allocation of sub-page-size in the last entry.
6035      * 3. Context pointers are used by the driver to pass to FW / update
6036      * the context (for the other ILT clients the pointers are used just to
6037      * free the memory during unload).
6038      */
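    /*
     * Editorial note: the loop below carves context_size into
     * CDU_ILT_PAGE_SZ-sized DMA chunks; per point 2 above, the final chunk
     * may be smaller than a full page (the min() handles that).
     */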
6039     context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6040     for (i = 0, allocated = 0; allocated < context_size; i++) {
6041         sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6042                                   (context_size - allocated));
6043 
6044         if (bxe_dma_alloc(sc, sc->context[i].size,
6045                           &sc->context[i].vcxt_dma,
6046                           "cdu context") != 0) {
6047             bxe_free_mem(sc);
6048             return (-1);
6049         }
6050 
6051         sc->context[i].vcxt =
6052             (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6053 
6054         allocated += sc->context[i].size;
6055     }
6056 
6057     bxe_alloc_ilt_lines_mem(sc);
6058 
6059     BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6060           sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6061     {
6062         for (i = 0; i < 4; i++) {
6063             BLOGD(sc, DBG_LOAD,
6064                   "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6065                   i,
6066                   sc->ilt->clients[i].page_size,
6067                   sc->ilt->clients[i].start,
6068                   sc->ilt->clients[i].end,
6069                   sc->ilt->clients[i].client_num,
6070                   sc->ilt->clients[i].flags);
6071         }
6072     }
6073     if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6074         BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6075         bxe_free_mem(sc);
6076         return (-1);
6077     }
6078 
6079     return (0);
6080 }
6081 
6082 static void
6083 bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6084 {
6085     int i;
6086 
6087     if (fp->rx_mbuf_tag == NULL) {
6088         return;
6089     }
6090 
6091     /* free all mbufs and unload all maps */
6092     for (i = 0; i < RX_BD_TOTAL; i++) {
6093         if (fp->rx_mbuf_chain[i].m_map != NULL) {
6094             bus_dmamap_sync(fp->rx_mbuf_tag,
6095                             fp->rx_mbuf_chain[i].m_map,
6096                             BUS_DMASYNC_POSTREAD);
6097             bus_dmamap_unload(fp->rx_mbuf_tag,
6098                               fp->rx_mbuf_chain[i].m_map);
6099         }
6100 
6101         if (fp->rx_mbuf_chain[i].m != NULL) {
6102             m_freem(fp->rx_mbuf_chain[i].m);
6103             fp->rx_mbuf_chain[i].m = NULL;
6104             fp->eth_q_stats.mbuf_alloc_rx--;
6105         }
6106     }
6107 }
6108 
6109 static void
6110 bxe_free_tpa_pool(struct bxe_fastpath *fp)
6111 {
6112     struct bxe_softc *sc;
6113     int i, max_agg_queues;
6114 
6115     sc = fp->sc;
6116 
6117     if (fp->rx_mbuf_tag == NULL) {
6118         return;
6119     }
6120 
6121     max_agg_queues = MAX_AGG_QS(sc);
6122 
6123     /* release all mbufs and unload all DMA maps in the TPA pool */
6124     for (i = 0; i < max_agg_queues; i++) {
6125         if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6126             bus_dmamap_sync(fp->rx_mbuf_tag,
6127                             fp->rx_tpa_info[i].bd.m_map,
6128                             BUS_DMASYNC_POSTREAD);
6129             bus_dmamap_unload(fp->rx_mbuf_tag,
6130                               fp->rx_tpa_info[i].bd.m_map);
6131         }
6132 
6133         if (fp->rx_tpa_info[i].bd.m != NULL) {
6134             m_freem(fp->rx_tpa_info[i].bd.m);
6135             fp->rx_tpa_info[i].bd.m = NULL;
6136             fp->eth_q_stats.mbuf_alloc_tpa--;
6137         }
6138     }
6139 }
6140 
6141 static void
6142 bxe_free_sge_chain(struct bxe_fastpath *fp)
6143 {
6144     int i;
6145 
6146     if (fp->rx_sge_mbuf_tag == NULL) {
6147         return;
6148     }
6149 
6150     /* free all mbufs and unload all maps */
6151     for (i = 0; i < RX_SGE_TOTAL; i++) {
6152         if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6153             bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6154                             fp->rx_sge_mbuf_chain[i].m_map,
6155                             BUS_DMASYNC_POSTREAD);
6156             bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6157                               fp->rx_sge_mbuf_chain[i].m_map);
6158         }
6159 
6160         if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6161             m_freem(fp->rx_sge_mbuf_chain[i].m);
6162             fp->rx_sge_mbuf_chain[i].m = NULL;
6163             fp->eth_q_stats.mbuf_alloc_sge--;
6164         }
6165     }
6166 }
6167 
6168 static void
6169 bxe_free_fp_buffers(struct bxe_softc *sc)
6170 {
6171     struct bxe_fastpath *fp;
6172     int i;
6173 
6174     for (i = 0; i < sc->num_queues; i++) {
6175         fp = &sc->fp[i];
6176 
6177         if (fp->tx_br != NULL) {
6178             /* just in case bxe_mq_flush() wasn't called */
6179             if (mtx_initialized(&fp->tx_mtx)) {
6180                 struct mbuf *m;
6181 
6182                 BXE_FP_TX_LOCK(fp);
6183                 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6184                     m_freem(m);
6185                 BXE_FP_TX_UNLOCK(fp);
6186             }
6187         }
6188 
6189         /* free all RX buffers */
6190         bxe_free_rx_bd_chain(fp);
6191         bxe_free_tpa_pool(fp);
6192         bxe_free_sge_chain(fp);
6193 
6194         if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6195             BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6196                   fp->eth_q_stats.mbuf_alloc_rx);
6197         }
6198 
6199         if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6200             BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6201                   fp->eth_q_stats.mbuf_alloc_sge);
6202         }
6203 
6204         if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6205             BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n",
6206                   fp->eth_q_stats.mbuf_alloc_tpa);
6207         }
6208 
6209         if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6210             BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6211                   fp->eth_q_stats.mbuf_alloc_tx);
6212         }
6213 
6214         /* XXX verify all mbufs were reclaimed */
6215     }
6216 }
6217 
6218 static int
6219 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6220                      uint16_t            prev_index,
6221                      uint16_t            index)
6222 {
6223     struct bxe_sw_rx_bd *rx_buf;
6224     struct eth_rx_bd *rx_bd;
6225     bus_dma_segment_t segs[1];
6226     bus_dmamap_t map;
6227     struct mbuf *m;
6228     int nsegs, rc;
6229 
6230     rc = 0;
6231 
6232     /* allocate the new RX BD mbuf */
6233     m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6234     if (__predict_false(m == NULL)) {
6235         fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6236         return (ENOBUFS);
6237     }
6238 
6239     fp->eth_q_stats.mbuf_alloc_rx++;
6240 
6241     /* initialize the mbuf buffer length */
6242     m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6243 
6244     /* map the mbuf into non-paged pool */
6245     rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6246                                  fp->rx_mbuf_spare_map,
6247                                  m, segs, &nsegs, BUS_DMA_NOWAIT);
6248     if (__predict_false(rc != 0)) {
6249         fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6250         m_freem(m);
6251         fp->eth_q_stats.mbuf_alloc_rx--;
6252         return (rc);
6253     }
6254 
6255     /* all mbufs must map to a single segment */
6256     KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6257 
6258     /* release any existing RX BD mbuf mappings */
6259 
6260     if (prev_index != index) {
6261         rx_buf = &fp->rx_mbuf_chain[prev_index];
6262 
6263         if (rx_buf->m_map != NULL) {
6264             bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6265                             BUS_DMASYNC_POSTREAD);
6266             bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6267         }
6268 
6269         /*
6270          * We only get here from bxe_rxeof() when the maximum number
6271          * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6272          * holds the mbuf in the prev_index so it's OK to NULL it out
6273          * here without concern of a memory leak.
6274          */
6275         fp->rx_mbuf_chain[prev_index].m = NULL;
6276     }
6277 
6278     rx_buf = &fp->rx_mbuf_chain[index];
6279 
6280     if (rx_buf->m_map != NULL) {
6281         bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6282                         BUS_DMASYNC_POSTREAD);
6283         bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6284     }
6285 
6286     /* save the mbuf and mapping info for a future packet */
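         /*
          * The new mbuf was DMA-loaded into the spare map above; the swap
          * below hands that map to the ring slot and keeps the old, now
          * unloaded map as the next spare. A failed allocation or mapping
          * returns before this point, leaving the existing ring buffer
          * untouched.
          */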
6287     map = (prev_index != index) ?
6288               fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6289     rx_buf->m_map = fp->rx_mbuf_spare_map;
6290     fp->rx_mbuf_spare_map = map;
6291     bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6292                     BUS_DMASYNC_PREREAD);
6293     rx_buf->m = m;
6294 
6295     rx_bd = &fp->rx_chain[index];
6296     rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6297     rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6298 
6299     return (rc);
6300 }
6301 
6302 static int
6303 bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6304                       int                 queue)
6305 {
6306     struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6307     bus_dma_segment_t segs[1];
6308     bus_dmamap_t map;
6309     struct mbuf *m;
6310     int nsegs;
6311     int rc = 0;
6312 
6313     /* allocate the new TPA mbuf */
6314     m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6315     if (__predict_false(m == NULL)) {
6316         fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6317         return (ENOBUFS);
6318     }
6319 
6320     fp->eth_q_stats.mbuf_alloc_tpa++;
6321 
6322     /* initialize the mbuf buffer length */
6323     m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6324 
6325     /* map the mbuf into non-paged pool */
6326     rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6327                                  fp->rx_tpa_info_mbuf_spare_map,
6328                                  m, segs, &nsegs, BUS_DMA_NOWAIT);
6329     if (__predict_false(rc != 0)) {
6330         fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6331         m_free(m);
6332         fp->eth_q_stats.mbuf_alloc_tpa--;
6333         return (rc);
6334     }
6335 
6336     /* all mbufs must map to a single segment */
6337     KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6338 
6339     /* release any existing TPA mbuf mapping */
6340     if (tpa_info->bd.m_map != NULL) {
6341         bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6342                         BUS_DMASYNC_POSTREAD);
6343         bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6344     }
6345 
6346     /* save the mbuf and mapping info for the TPA mbuf */
6347     map = tpa_info->bd.m_map;
6348     tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6349     fp->rx_tpa_info_mbuf_spare_map = map;
6350     bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6351                     BUS_DMASYNC_PREREAD);
6352     tpa_info->bd.m = m;
6353     tpa_info->seg = segs[0];
6354 
6355     return (rc);
6356 }
6357 
6358 /*
6359  * Allocate an mbuf and assign it to the receive scatter gather chain. The
6360  * caller must take care to save a copy of the existing mbuf in the SG mbuf
6361  * chain.
6362  */
6363 static int
6364 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6365                       uint16_t            index)
6366 {
6367     struct bxe_sw_rx_bd *sge_buf;
6368     struct eth_rx_sge *sge;
6369     bus_dma_segment_t segs[1];
6370     bus_dmamap_t map;
6371     struct mbuf *m;
6372     int nsegs;
6373     int rc = 0;
6374 
6375     /* allocate a new SGE mbuf */
6376     m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6377     if (__predict_false(m == NULL)) {
6378         fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6379         return (ENOMEM);
6380     }
6381 
6382     fp->eth_q_stats.mbuf_alloc_sge++;
6383 
6384     /* initialize the mbuf buffer length */
6385     m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6386 
6387     /* map the SGE mbuf into non-paged pool */
6388     rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6389                                  fp->rx_sge_mbuf_spare_map,
6390                                  m, segs, &nsegs, BUS_DMA_NOWAIT);
6391     if (__predict_false(rc != 0)) {
6392         fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6393         m_freem(m);
6394         fp->eth_q_stats.mbuf_alloc_sge--;
6395         return (rc);
6396     }
6397 
6398     /* all mbufs must map to a single segment */
6399     KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6400 
6401     sge_buf = &fp->rx_sge_mbuf_chain[index];
6402 
6403     /* release any existing SGE mbuf mapping */
6404     if (sge_buf->m_map != NULL) {
6405         bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6406                         BUS_DMASYNC_POSTREAD);
6407         bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6408     }
6409 
6410     /* save the mbuf and mapping info for a future packet */
6411     map = sge_buf->m_map;
6412     sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6413     fp->rx_sge_mbuf_spare_map = map;
6414     bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6415                     BUS_DMASYNC_PREREAD);
6416     sge_buf->m = m;
6417 
6418     sge = &fp->rx_sge_chain[index];
6419     sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6420     sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6421 
6422     return (rc);
6423 }
6424 
6425 static __noinline int
6426 bxe_alloc_fp_buffers(struct bxe_softc *sc)
6427 {
6428     struct bxe_fastpath *fp;
6429     int i, j, rc = 0;
6430     int ring_prod, cqe_ring_prod;
6431     int max_agg_queues;
6432 
6433     for (i = 0; i < sc->num_queues; i++) {
6434         fp = &sc->fp[i];
6435 
6436         ring_prod = cqe_ring_prod = 0;
6437         fp->rx_bd_cons = 0;
6438         fp->rx_cq_cons = 0;
6439 
6440         /* allocate buffers for the RX BDs in RX BD chain */
6441         for (j = 0; j < sc->max_rx_bufs; j++) {
6442             rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6443             if (rc != 0) {
6444                 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6445                       i, rc);
6446                 goto bxe_alloc_fp_buffers_error;
6447             }
6448 
6449             ring_prod     = RX_BD_NEXT(ring_prod);
6450             cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6451         }
6452 
6453         fp->rx_bd_prod = ring_prod;
6454         fp->rx_cq_prod = cqe_ring_prod;
6455         fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6456 
6457         max_agg_queues = MAX_AGG_QS(sc);
6458 
6459         fp->tpa_enable = TRUE;
6460 
6461         /* fill the TPA pool */
6462         for (j = 0; j < max_agg_queues; j++) {
6463             rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6464             if (rc != 0) {
6465                 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6466                           i, j);
6467                 fp->tpa_enable = FALSE;
6468                 goto bxe_alloc_fp_buffers_error;
6469             }
6470 
6471             fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6472         }
6473 
6474         if (fp->tpa_enable) {
6475             /* fill the RX SGE chain */
6476             ring_prod = 0;
6477             for (j = 0; j < RX_SGE_USABLE; j++) {
6478                 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6479                 if (rc != 0) {
6480                     BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6481                               i, ring_prod);
6482                     fp->tpa_enable = FALSE;
6483                     ring_prod = 0;
6484                     goto bxe_alloc_fp_buffers_error;
6485                 }
6486 
6487                 ring_prod = RX_SGE_NEXT(ring_prod);
6488             }
6489 
6490             fp->rx_sge_prod = ring_prod;
6491         }
6492     }
6493 
6494     return (0);
6495 
6496 bxe_alloc_fp_buffers_error:
6497 
6498     /* unwind what was already allocated */
6499     bxe_free_rx_bd_chain(fp);
6500     bxe_free_tpa_pool(fp);
6501     bxe_free_sge_chain(fp);
6502 
6503     return (ENOBUFS);
6504 }
6505 
6506 static void
6507 bxe_free_fw_stats_mem(struct bxe_softc *sc)
6508 {
6509     bxe_dma_free(sc, &sc->fw_stats_dma);
6510 
6511     sc->fw_stats_num = 0;
6512 
6513     sc->fw_stats_req_size = 0;
6514     sc->fw_stats_req = NULL;
6515     sc->fw_stats_req_mapping = 0;
6516 
6517     sc->fw_stats_data_size = 0;
6518     sc->fw_stats_data = NULL;
6519     sc->fw_stats_data_mapping = 0;
6520 }
6521 
6522 static int
6523 bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6524 {
6525     uint8_t num_queue_stats;
6526     int num_groups;
6527 
6528     /* number of queues for statistics is number of eth queues */
6529     num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6530 
6531     /*
6532      * Total number of FW statistics requests =
6533      *   1 for port stats + 1 for PF stats + num of queues
6534      */
6535     sc->fw_stats_num = (2 + num_queue_stats);
6536 
6537     /*
6538      * Request is built from stats_query_header and an array of
6539      * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6540      * stats_query_cmd_group, each of which contains STATS_QUERY_CMD_COUNT
6541      * rules. The real number of requests is configured in the
6542      */
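         /* ceiling division, i.e. howmany(sc->fw_stats_num, STATS_QUERY_CMD_COUNT) */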
6543     num_groups =
6544         ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6545          ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6546 
6547     BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6548           sc->fw_stats_num, num_groups);
6549 
6550     sc->fw_stats_req_size =
6551         (sizeof(struct stats_query_header) +
6552          (num_groups * sizeof(struct stats_query_cmd_group)));
6553 
6554     /*
6555      * Data for statistics requests + stats_counter.
6556      * stats_counter holds per-STORM counters that are incremented when
6557      * STORM has finished with the current request. Memory for FCoE
6558      * offloaded statistics is counted anyway, even if they will not be sent.
6559      * VF stats are not accounted for here as the data of VF stats is stored
6560      * in memory allocated by the VF, not here.
6561      */
6562     sc->fw_stats_data_size =
6563         (sizeof(struct stats_counter) +
6564          sizeof(struct per_port_stats) +
6565          sizeof(struct per_pf_stats) +
6566          /* sizeof(struct fcoe_statistics_params) + */
6567          (sizeof(struct per_queue_stats) * num_queue_stats));
6568 
6569     if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6570                       &sc->fw_stats_dma, "fw stats") != 0) {
6571         bxe_free_fw_stats_mem(sc);
6572         return (-1);
6573     }
6574 
6575     /* set up the shortcuts */
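         /*
          * The request block sits at the start of the single DMA allocation;
          * the data block follows it, fw_stats_req_size bytes in.
          */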
6576 
6577     sc->fw_stats_req =
6578         (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6579     sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6580 
6581     sc->fw_stats_data =
6582         (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6583                                      sc->fw_stats_req_size);
6584     sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6585                                  sc->fw_stats_req_size);
6586 
6587     BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6588           (uintmax_t)sc->fw_stats_req_mapping);
6589 
6590     BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6591           (uintmax_t)sc->fw_stats_data_mapping);
6592 
6593     return (0);
6594 }
6595 
6596 /*
6597  * Bits map:
6598  * 0-7  - Engine0 load counter.
6599  * 8-15 - Engine1 load counter.
6600  * 16   - Engine0 RESET_IN_PROGRESS bit.
6601  * 17   - Engine1 RESET_IN_PROGRESS bit.
6602  * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6603  *        function on the engine
6604  * 19   - Engine1 ONE_IS_LOADED.
6605  * 20   - Chip reset flow bit. When set, a non-leader must wait for the leaders
6606  *        of both engines to complete (check both RESET_IN_PROGRESS bits, not
6607  *        just the one belonging to its own engine).
6608  */
6609 #define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6610 #define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6611 #define BXE_PATH0_LOAD_CNT_SHIFT  0
6612 #define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6613 #define BXE_PATH1_LOAD_CNT_SHIFT  8
6614 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6615 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6616 #define BXE_GLOBAL_RESET_BIT      0x00040000
6617 
6618 /* set the GLOBAL_RESET bit, should be run under rtnl lock */
6619 static void
6620 bxe_set_reset_global(struct bxe_softc *sc)
6621 {
6622     uint32_t val;
6623     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6624     val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6625     REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6626     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6627 }
6628 
6629 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6630 static void
6631 bxe_clear_reset_global(struct bxe_softc *sc)
6632 {
6633     uint32_t val;
6634     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6635     val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6636     REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6637     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6638 }
6639 
6640 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6641 static uint8_t
6642 bxe_reset_is_global(struct bxe_softc *sc)
6643 {
6644     uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6645     BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6646     return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6647 }
6648 
6649 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6650 static void
6651 bxe_set_reset_done(struct bxe_softc *sc)
6652 {
6653     uint32_t val;
6654     uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6655                                  BXE_PATH0_RST_IN_PROG_BIT;
6656 
6657     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6658 
6659     val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6660     /* Clear the bit */
6661     val &= ~bit;
6662     REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6663 
6664     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6665 }
6666 
6667 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6668 static void
6669 bxe_set_reset_in_progress(struct bxe_softc *sc)
6670 {
6671     uint32_t val;
6672     uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6673                                  BXE_PATH0_RST_IN_PROG_BIT;
6674 
6675     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6676 
6677     val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6678     /* Set the bit */
6679     val |= bit;
6680     REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6681 
6682     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6683 }
6684 
6685 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6686 static uint8_t
6687 bxe_reset_is_done(struct bxe_softc *sc,
6688                   int              engine)
6689 {
6690     uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6691     uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6692                             BXE_PATH0_RST_IN_PROG_BIT;
6693 
6694     /* return false if bit is set */
6695     return (val & bit) ? FALSE : TRUE;
6696 }
6697 
6698 /* get the load status for an engine, should be run under rtnl lock */
6699 static uint8_t
6700 bxe_get_load_status(struct bxe_softc *sc,
6701                     int              engine)
6702 {
6703     uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6704                              BXE_PATH0_LOAD_CNT_MASK;
6705     uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6706                               BXE_PATH0_LOAD_CNT_SHIFT;
6707     uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6708 
6709     BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6710 
6711     val = ((val & mask) >> shift);
6712 
6713     BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6714 
6715     return (val != 0);
6716 }
6717 
6718 /* set pf load mark */
6719 /* XXX needs to be under rtnl lock */
6720 static void
6721 bxe_set_pf_load(struct bxe_softc *sc)
6722 {
6723     uint32_t val;
6724     uint32_t val1;
6725     uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6726                                   BXE_PATH0_LOAD_CNT_MASK;
6727     uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6728                                    BXE_PATH0_LOAD_CNT_SHIFT;
6729 
6730     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6731 
6732     val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6733     BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6734 
6735     /* get the current counter value */
6736     val1 = ((val & mask) >> shift);
6737 
6738     /* set bit of this PF */
6739     val1 |= (1 << SC_ABS_FUNC(sc));
6740 
6741     /* clear the old value */
6742     val &= ~mask;
6743 
6744     /* set the new one */
6745     val |= ((val1 << shift) & mask);
6746 
6747     REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6748 
6749     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6750 }
6751 
6752 /* clear pf load mark */
6753 /* XXX needs to be under rtnl lock */
6754 static uint8_t
6755 bxe_clear_pf_load(struct bxe_softc *sc)
6756 {
6757     uint32_t val1, val;
6758     uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6759                                   BXE_PATH0_LOAD_CNT_MASK;
6760     uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6761                                    BXE_PATH0_LOAD_CNT_SHIFT;
6762 
6763     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6764     val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6765     BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6766 
6767     /* get the current counter value */
6768     val1 = (val & mask) >> shift;
6769 
6770     /* clear bit of that PF */
6771     val1 &= ~(1 << SC_ABS_FUNC(sc));
6772 
6773     /* clear the old value */
6774     val &= ~mask;
6775 
6776     /* set the new one */
6777     val |= ((val1 << shift) & mask);
6778 
6779     REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6780     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6781     return (val1 != 0);
6782 }
6783 
6784 /* send load request to the MCP and analyze the response */
6785 static int
6786 bxe_nic_load_request(struct bxe_softc *sc,
6787                      uint32_t         *load_code)
6788 {
6789     /* init fw_seq */
6790     sc->fw_seq =
6791         (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6792          DRV_MSG_SEQ_NUMBER_MASK);
6793 
6794     BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6795 
6796     /* get the current FW pulse sequence */
6797     sc->fw_drv_pulse_wr_seq =
6798         (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6799          DRV_PULSE_SEQ_MASK);
6800 
6801     BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6802           sc->fw_drv_pulse_wr_seq);
6803 
6804     /* load request */
6805     (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6806                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6807 
6808     /* if the MCP fails to respond we must abort */
6809     if (!(*load_code)) {
6810         BLOGE(sc, "MCP response failure!\n");
6811         return (-1);
6812     }
6813 
6814     /* if MCP refused then must abort */
6815     if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6816         BLOGE(sc, "MCP refused load request\n");
6817         return (-1);
6818     }
6819 
6820     return (0);
6821 }
6822 
6823 /*
6824  * Check whether another PF has already loaded FW to the chip. In virtualized
6825  * environments a PF from another VM may have already initialized the device,
6826  * including loading the FW.
6827  */
6828 static int
6829 bxe_nic_load_analyze_req(struct bxe_softc *sc,
6830                          uint32_t         load_code)
6831 {
6832     uint32_t my_fw, loaded_fw;
6833 
6834     /* is another pf loaded on this engine? */
6835     if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6836         (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6837         /* build my FW version dword */
6838         my_fw = (BCM_5710_FW_MAJOR_VERSION +
6839                  (BCM_5710_FW_MINOR_VERSION << 8 ) +
6840                  (BCM_5710_FW_REVISION_VERSION << 16) +
6841                  (BCM_5710_FW_ENGINEERING_VERSION << 24));
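             /*
              * The packed dword (major | minor << 8 | rev << 16 | eng << 24)
              * is compared below against the version the already-loaded FW
              * exposes at XSEM_REG_PRAM.
              */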
6842 
6843         /* read loaded FW from chip */
6844         loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6845         BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6846               loaded_fw, my_fw);
6847 
6848         /* abort nic load if version mismatch */
6849         if (my_fw != loaded_fw) {
6850             BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6851                   loaded_fw, my_fw);
6852             return (-1);
6853         }
6854     }
6855 
6856     return (0);
6857 }
6858 
6859 /* mark PMF if applicable */
6860 static void
6861 bxe_nic_load_pmf(struct bxe_softc *sc,
6862                  uint32_t         load_code)
6863 {
6864     uint32_t ncsi_oem_data_addr;
6865 
6866     if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6867         (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6868         (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6869         /*
6870          * Barrier to order the write to sc->port.pmf here against reading
6871          * it from the periodic task.
6872          */
6873         sc->port.pmf = 1;
6874         mb();
6875     } else {
6876         sc->port.pmf = 0;
6877     }
6878 
6879     BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6880 
6881     /* XXX needed? */
6882     if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6883         if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6884             ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6885             if (ncsi_oem_data_addr) {
6886                 REG_WR(sc,
6887                        (ncsi_oem_data_addr +
6888                         offsetof(struct glob_ncsi_oem_data, driver_version)),
6889                        0);
6890             }
6891         }
6892     }
6893 }
6894 
6895 static void
6896 bxe_read_mf_cfg(struct bxe_softc *sc)
6897 {
6898     int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6899     int abs_func;
6900     int vn;
6901 
6902     if (BXE_NOMCP(sc)) {
6903         return; /* what should be the default value in this case? */
6904     }
6905 
6906     /*
6907      * The formula for computing the absolute function number is...
6908      * For 2 port configuration (4 functions per port):
6909      *   abs_func = 2 * vn + SC_PORT + SC_PATH
6910      * For 4 port configuration (2 functions per port):
6911      *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
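          * For example, in 2-port mode (n = 1), vn 2 on port 1 of path 0 gives
          *   abs_func = 2 * 2 + 1 + 0 = 5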
6912      */
6913     for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6914         abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6915         if (abs_func >= E1H_FUNC_MAX) {
6916             break;
6917         }
6918         sc->devinfo.mf_info.mf_config[vn] =
6919             MFCFG_RD(sc, func_mf_config[abs_func].config);
6920     }
6921 
6922     if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6923         FUNC_MF_CFG_FUNC_DISABLED) {
6924         BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6925         sc->flags |= BXE_MF_FUNC_DIS;
6926     } else {
6927         BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6928         sc->flags &= ~BXE_MF_FUNC_DIS;
6929     }
6930 }
6931 
6932 /* acquire split MCP access lock register */
6933 static int bxe_acquire_alr(struct bxe_softc *sc)
6934 {
6935     uint32_t j, val;
6936 
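         /*
          * Set bit 31 of the MCP lock register and poll until the hardware
          * reflects it: up to 1000 attempts with a 5 ms delay between them
          * (roughly a 5 second timeout).
          */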
6937     for (j = 0; j < 1000; j++) {
6938         val = (1UL << 31);
6939         REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6940         val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6941         if (val & (1L << 31))
6942             break;
6943 
6944         DELAY(5000);
6945     }
6946 
6947     if (!(val & (1L << 31))) {
6948         BLOGE(sc, "Cannot acquire MCP access lock register\n");
6949         return (-1);
6950     }
6951 
6952     return (0);
6953 }
6954 
6955 /* release split MCP access lock register */
6956 static void bxe_release_alr(struct bxe_softc *sc)
6957 {
6958     REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6959 }
6960 
6961 static void
6962 bxe_fan_failure(struct bxe_softc *sc)
6963 {
6964     int port = SC_PORT(sc);
6965     uint32_t ext_phy_config;
6966 
6967     /* mark the failure */
6968     ext_phy_config =
6969         SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
6970 
6971     ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6972     ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
6973     SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
6974              ext_phy_config);
6975 
6976     /* log the failure */
6977     BLOGW(sc, "Fan Failure has caused the driver to shutdown "
6978               "the card to prevent permanent damage. "
6979               "Please contact OEM Support for assistance\n");
6980 
6981     /* XXX */
6982 #if 1
6983     bxe_panic(sc, ("Schedule task to handle fan failure\n"));
6984 #else
6985     /*
6986      * Schedule device reset (unload)
6987      * This is due to some boards consuming sufficient power when driver is
6988      * up to overheat if fan fails.
6989      */
6990     bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
6991     schedule_delayed_work(&sc->sp_rtnl_task, 0);
6992 #endif
6993 }
6994 
6995 /* this function is called upon a link interrupt */
6996 static void
6997 bxe_link_attn(struct bxe_softc *sc)
6998 {
6999     uint32_t pause_enabled = 0;
7000     struct host_port_stats *pstats;
7001     int cmng_fns;
7002     struct bxe_fastpath *fp;
7003     int i;
7004 
7005     /* Make sure that we are synced with the current statistics */
7006     bxe_stats_handle(sc, STATS_EVENT_STOP);
7007     BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
7008     elink_link_update(&sc->link_params, &sc->link_vars);
7009 
7010     if (sc->link_vars.link_up) {
7011 
7012         /* dropless flow control */
7013         if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7014             pause_enabled = 0;
7015 
7016             if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7017                 pause_enabled = 1;
7018             }
7019 
7020             REG_WR(sc,
7021                    (BAR_USTRORM_INTMEM +
7022                     USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7023                    pause_enabled);
7024         }
7025 
7026         if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7027             pstats = BXE_SP(sc, port_stats);
7028             /* reset old mac stats */
7029             memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7030         }
7031 
7032         if (sc->state == BXE_STATE_OPEN) {
7033             bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7034 	    /* Restart tx when the link comes back. */
7035 	    FOR_EACH_ETH_QUEUE(sc, i) {
7036 		fp = &sc->fp[i];
7037 		taskqueue_enqueue(fp->tq, &fp->tx_task);
7038 	    }
7039         }
7040 
7041     }
7042 
7043     if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7044         cmng_fns = bxe_get_cmng_fns_mode(sc);
7045 
7046         if (cmng_fns != CMNG_FNS_NONE) {
7047             bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7048             storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7049         } else {
7050             /* rate shaping and fairness are disabled */
7051             BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7052         }
7053     }
7054 
7055     bxe_link_report_locked(sc);
7056 
7057     if (IS_MF(sc)) {
7058         ; // XXX bxe_link_sync_notify(sc);
7059     }
7060 }
7061 
7062 static void
7063 bxe_attn_int_asserted(struct bxe_softc *sc,
7064                       uint32_t         asserted)
7065 {
7066     int port = SC_PORT(sc);
7067     uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7068                                MISC_REG_AEU_MASK_ATTN_FUNC_0;
7069     uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7070                                         NIG_REG_MASK_INTERRUPT_PORT0;
7071     uint32_t aeu_mask;
7072     uint32_t nig_mask = 0;
7073     uint32_t reg_addr;
7074     uint32_t igu_acked;
7075     uint32_t cnt;
7076 
7077     if (sc->attn_state & asserted) {
7078         BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7079     }
7080 
7081     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7082 
7083     aeu_mask = REG_RD(sc, aeu_addr);
7084 
7085     BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7086           aeu_mask, asserted);
7087 
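         /* mask off (disable) only the newly asserted attention lines, bits 0-9 */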
7088     aeu_mask &= ~(asserted & 0x3ff);
7089 
7090     BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7091 
7092     REG_WR(sc, aeu_addr, aeu_mask);
7093 
7094     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7095 
7096     BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7097     sc->attn_state |= asserted;
7098     BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7099 
7100     if (asserted & ATTN_HARD_WIRED_MASK) {
7101         if (asserted & ATTN_NIG_FOR_FUNC) {
7102 
7103 	    bxe_acquire_phy_lock(sc);
7104             /* save nig interrupt mask */
7105             nig_mask = REG_RD(sc, nig_int_mask_addr);
7106 
7107             /* If nig_mask is not set, no need to call the update function */
7108             if (nig_mask) {
7109                 REG_WR(sc, nig_int_mask_addr, 0);
7110 
7111                 bxe_link_attn(sc);
7112             }
7113 
7114             /* handle unicore attn? */
7115         }
7116 
7117         if (asserted & ATTN_SW_TIMER_4_FUNC) {
7118             BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7119         }
7120 
7121         if (asserted & GPIO_2_FUNC) {
7122             BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7123         }
7124 
7125         if (asserted & GPIO_3_FUNC) {
7126             BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7127         }
7128 
7129         if (asserted & GPIO_4_FUNC) {
7130             BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7131         }
7132 
7133         if (port == 0) {
7134             if (asserted & ATTN_GENERAL_ATTN_1) {
7135                 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7136                 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7137             }
7138             if (asserted & ATTN_GENERAL_ATTN_2) {
7139                 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7140                 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7141             }
7142             if (asserted & ATTN_GENERAL_ATTN_3) {
7143                 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7144                 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7145             }
7146         } else {
7147             if (asserted & ATTN_GENERAL_ATTN_4) {
7148                 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7149                 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7150             }
7151             if (asserted & ATTN_GENERAL_ATTN_5) {
7152                 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7153                 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7154             }
7155             if (asserted & ATTN_GENERAL_ATTN_6) {
7156                 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7157                 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7158             }
7159         }
7160     } /* hardwired */
7161 
7162     if (sc->devinfo.int_block == INT_BLOCK_HC) {
7163         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7164     } else {
7165         reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7166     }
7167 
7168     BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7169           asserted,
7170           (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7171     REG_WR(sc, reg_addr, asserted);
7172 
7173     /* now set back the mask */
7174     if (asserted & ATTN_NIG_FOR_FUNC) {
7175         /*
7176          * Verify that IGU ack through BAR was written before restoring
7177          * NIG mask. This loop should exit after 2-3 iterations max.
7178          */
7179         if (sc->devinfo.int_block != INT_BLOCK_HC) {
7180             cnt = 0;
7181 
7182             do {
7183                 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7184             } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7185                      (++cnt < MAX_IGU_ATTN_ACK_TO));
7186 
7187             if (!igu_acked) {
7188                 BLOGE(sc, "Failed to verify IGU ack on time\n");
7189             }
7190 
7191             mb();
7192         }
7193 
7194         REG_WR(sc, nig_int_mask_addr, nig_mask);
7195 
7196 	bxe_release_phy_lock(sc);
7197     }
7198 }
7199 
7200 static void
7201 bxe_print_next_block(struct bxe_softc *sc,
7202                      int              idx,
7203                      const char       *blk)
7204 {
7205     BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7206 }
7207 
7208 static int
7209 bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7210                               uint32_t         sig,
7211                               int              par_num,
7212                               uint8_t          print)
7213 {
7214     uint32_t cur_bit = 0;
7215     int i = 0;
7216 
7217     for (i = 0; sig; i++) {
7218         cur_bit = ((uint32_t)0x1 << i);
7219         if (sig & cur_bit) {
7220             switch (cur_bit) {
7221             case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7222                 if (print)
7223                     bxe_print_next_block(sc, par_num++, "BRB");
7224                 break;
7225             case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7226                 if (print)
7227                     bxe_print_next_block(sc, par_num++, "PARSER");
7228                 break;
7229             case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7230                 if (print)
7231                     bxe_print_next_block(sc, par_num++, "TSDM");
7232                 break;
7233             case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7234                 if (print)
7235                     bxe_print_next_block(sc, par_num++, "SEARCHER");
7236                 break;
7237             case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7238                 if (print)
7239                     bxe_print_next_block(sc, par_num++, "TCM");
7240                 break;
7241             case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7242                 if (print)
7243                     bxe_print_next_block(sc, par_num++, "TSEMI");
7244                 break;
7245             case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7246                 if (print)
7247                     bxe_print_next_block(sc, par_num++, "XPB");
7248                 break;
7249             }
7250 
7251             /* Clear the bit */
7252             sig &= ~cur_bit;
7253         }
7254     }
7255 
7256     return (par_num);
7257 }
7258 
7259 static int
7260 bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7261                               uint32_t         sig,
7262                               int              par_num,
7263                               uint8_t          *global,
7264                               uint8_t          print)
7265 {
7266     int i = 0;
7267     uint32_t cur_bit = 0;
7268     for (i = 0; sig; i++) {
7269         cur_bit = ((uint32_t)0x1 << i);
7270         if (sig & cur_bit) {
7271             switch (cur_bit) {
7272             case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7273                 if (print)
7274                     bxe_print_next_block(sc, par_num++, "PBF");
7275                 break;
7276             case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7277                 if (print)
7278                     bxe_print_next_block(sc, par_num++, "QM");
7279                 break;
7280             case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7281                 if (print)
7282                     bxe_print_next_block(sc, par_num++, "TM");
7283                 break;
7284             case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7285                 if (print)
7286                     bxe_print_next_block(sc, par_num++, "XSDM");
7287                 break;
7288             case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7289                 if (print)
7290                     bxe_print_next_block(sc, par_num++, "XCM");
7291                 break;
7292             case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7293                 if (print)
7294                     bxe_print_next_block(sc, par_num++, "XSEMI");
7295                 break;
7296             case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7297                 if (print)
7298                     bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7299                 break;
7300             case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7301                 if (print)
7302                     bxe_print_next_block(sc, par_num++, "NIG");
7303                 break;
7304             case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7305                 if (print)
7306                     bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7307                 *global = TRUE;
7308                 break;
7309             case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7310                 if (print)
7311                     bxe_print_next_block(sc, par_num++, "DEBUG");
7312                 break;
7313             case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7314                 if (print)
7315                     bxe_print_next_block(sc, par_num++, "USDM");
7316                 break;
7317             case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7318                 if (print)
7319                     bxe_print_next_block(sc, par_num++, "UCM");
7320                 break;
7321             case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7322                 if (print)
7323                     bxe_print_next_block(sc, par_num++, "USEMI");
7324                 break;
7325             case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7326                 if (print)
7327                     bxe_print_next_block(sc, par_num++, "UPB");
7328                 break;
7329             case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7330                 if (print)
7331                     bxe_print_next_block(sc, par_num++, "CSDM");
7332                 break;
7333             case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7334                 if (print)
7335                     bxe_print_next_block(sc, par_num++, "CCM");
7336                 break;
7337             }
7338 
7339             /* Clear the bit */
7340             sig &= ~cur_bit;
7341         }
7342     }
7343 
7344     return (par_num);
7345 }
7346 
7347 static int
7348 bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7349                               uint32_t         sig,
7350                               int              par_num,
7351                               uint8_t          print)
7352 {
7353     uint32_t cur_bit = 0;
7354     int i = 0;
7355 
7356     for (i = 0; sig; i++) {
7357         cur_bit = ((uint32_t)0x1 << i);
7358         if (sig & cur_bit) {
7359             switch (cur_bit) {
7360             case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7361                 if (print)
7362                     bxe_print_next_block(sc, par_num++, "CSEMI");
7363                 break;
7364             case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7365                 if (print)
7366                     bxe_print_next_block(sc, par_num++, "PXP");
7367                 break;
7368             case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7369                 if (print)
7370                     bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7371                 break;
7372             case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7373                 if (print)
7374                     bxe_print_next_block(sc, par_num++, "CFC");
7375                 break;
7376             case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7377                 if (print)
7378                     bxe_print_next_block(sc, par_num++, "CDU");
7379                 break;
7380             case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7381                 if (print)
7382                     bxe_print_next_block(sc, par_num++, "DMAE");
7383                 break;
7384             case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7385                 if (print)
7386                     bxe_print_next_block(sc, par_num++, "IGU");
7387                 break;
7388             case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7389                 if (print)
7390                     bxe_print_next_block(sc, par_num++, "MISC");
7391                 break;
7392             }
7393 
7394             /* Clear the bit */
7395             sig &= ~cur_bit;
7396         }
7397     }
7398 
7399     return (par_num);
7400 }
7401 
7402 static int
7403 bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7404                               uint32_t         sig,
7405                               int              par_num,
7406                               uint8_t          *global,
7407                               uint8_t          print)
7408 {
7409     uint32_t cur_bit = 0;
7410     int i = 0;
7411 
7412     for (i = 0; sig; i++) {
7413         cur_bit = ((uint32_t)0x1 << i);
7414         if (sig & cur_bit) {
7415             switch (cur_bit) {
7416             case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7417                 if (print)
7418                     bxe_print_next_block(sc, par_num++, "MCP ROM");
7419                 *global = TRUE;
7420                 break;
7421             case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7422                 if (print)
7423                     bxe_print_next_block(sc, par_num++,
7424                               "MCP UMP RX");
7425                 *global = TRUE;
7426                 break;
7427             case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7428                 if (print)
7429                     bxe_print_next_block(sc, par_num++,
7430                               "MCP UMP TX");
7431                 *global = TRUE;
7432                 break;
7433             case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7434                 if (print)
7435                     bxe_print_next_block(sc, par_num++,
7436                               "MCP SCPAD");
7437                 *global = TRUE;
7438                 break;
7439             }
7440 
7441             /* Clear the bit */
7442             sig &= ~cur_bit;
7443         }
7444     }
7445 
7446     return (par_num);
7447 }
7448 
7449 static int
7450 bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7451                               uint32_t         sig,
7452                               int              par_num,
7453                               uint8_t          print)
7454 {
7455     uint32_t cur_bit = 0;
7456     int i = 0;
7457 
7458     for (i = 0; sig; i++) {
7459         cur_bit = ((uint32_t)0x1 << i);
7460         if (sig & cur_bit) {
7461             switch (cur_bit) {
7462             case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7463                 if (print)
7464                     bxe_print_next_block(sc, par_num++, "PGLUE_B");
7465                 break;
7466             case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7467                 if (print)
7468                     bxe_print_next_block(sc, par_num++, "ATC");
7469                 break;
7470             }
7471 
7472             /* Clear the bit */
7473             sig &= ~cur_bit;
7474         }
7475     }
7476 
7477     return (par_num);
7478 }
7479 
7480 static uint8_t
7481 bxe_parity_attn(struct bxe_softc *sc,
7482                 uint8_t          *global,
7483                 uint8_t          print,
7484                 uint32_t         *sig)
7485 {
7486     int par_num = 0;
7487 
7488     if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7489         (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7490         (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7491         (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7492         (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7493         BLOGE(sc, "Parity error: HW block parity attention:\n"
7494                   "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7495               (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7496               (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7497               (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7498               (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7499               (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7500 
7501         if (print)
7502             BLOGI(sc, "Parity errors detected in blocks: ");
7503 
7504         par_num =
7505             bxe_check_blocks_with_parity0(sc, sig[0] &
7506                                           HW_PRTY_ASSERT_SET_0,
7507                                           par_num, print);
7508         par_num =
7509             bxe_check_blocks_with_parity1(sc, sig[1] &
7510                                           HW_PRTY_ASSERT_SET_1,
7511                                           par_num, global, print);
7512         par_num =
7513             bxe_check_blocks_with_parity2(sc, sig[2] &
7514                                           HW_PRTY_ASSERT_SET_2,
7515                                           par_num, print);
7516         par_num =
7517             bxe_check_blocks_with_parity3(sc, sig[3] &
7518                                           HW_PRTY_ASSERT_SET_3,
7519                                           par_num, global, print);
7520         par_num =
7521             bxe_check_blocks_with_parity4(sc, sig[4] &
7522                                           HW_PRTY_ASSERT_SET_4,
7523                                           par_num, print);
7524 
7525         if (print)
7526             BLOGI(sc, "\n");
7527 
7528 	if( *global == TRUE ) {
7529                 BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
7530         }
7531 
7532         return (TRUE);
7533     }
7534 
7535     return (FALSE);
7536 }
7537 
7538 static uint8_t
7539 bxe_chk_parity_attn(struct bxe_softc *sc,
7540                     uint8_t          *global,
7541                     uint8_t          print)
7542 {
7543     struct attn_route attn = { {0} };
7544     int port = SC_PORT(sc);
7545 
7546     if(sc->state != BXE_STATE_OPEN)
7547         return FALSE;
7548 
7549     attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7550     attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7551     attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7552     attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7553 
7554     /*
7555      * Since MCP attentions can't be disabled inside the block, we need to
7556      * read AEU registers to see whether they're currently disabled
7557      */
7558     attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7559                                       : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7560                          MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7561                         ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7562 
7563 
7564     if (!CHIP_IS_E1x(sc))
7565         attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7566 
7567     return (bxe_parity_attn(sc, global, print, attn.sig));
7568 }
7569 
7570 static void
7571 bxe_attn_int_deasserted4(struct bxe_softc *sc,
7572                          uint32_t         attn)
7573 {
7574     uint32_t val;
7575     boolean_t err_flg = FALSE;
7576 
7577     if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7578         val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7579         BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7580         err_flg = TRUE;
7581         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7582             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7583         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7584             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7585         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7586             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7587         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7588             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7589         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7590             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7591         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7592             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7593         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7594             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7595         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7596             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7597         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7598             BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7599     }
7600 
7601     if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7602         val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7603         BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7604 	err_flg = TRUE;
7605         if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7606             BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7607         if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7608             BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7609         if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7610             BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7611         if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7612             BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7613         if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7614             BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7615         if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7616             BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7617     }
7618 
7619     if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7620                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7621         BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7622               (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7623                                  AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7624 	err_flg = TRUE;
7625     }
7626     if (err_flg) {
7627 	BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
7628 	taskqueue_enqueue_timeout(taskqueue_thread,
7629 	    &sc->sp_err_timeout_task, hz/10);
7630     }
7631 
7632 }
7633 
7634 static void
7635 bxe_e1h_disable(struct bxe_softc *sc)
7636 {
7637     int port = SC_PORT(sc);
7638 
7639     bxe_tx_disable(sc);
7640 
7641     REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7642 }
7643 
7644 static void
7645 bxe_e1h_enable(struct bxe_softc *sc)
7646 {
7647     int port = SC_PORT(sc);
7648 
7649     REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7650 
7651     // XXX bxe_tx_enable(sc);
7652 }
7653 
7654 /*
7655  * called due to MCP event (on pmf):
7656  *   reread new bandwidth configuration
7657  *   configure FW
7658  *   notify others function about the change
7659  */
7660 static void
7661 bxe_config_mf_bw(struct bxe_softc *sc)
7662 {
7663     if (sc->link_vars.link_up) {
7664         bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7665         // XXX bxe_link_sync_notify(sc);
7666     }
7667 
7668     storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7669 }
7670 
7671 static void
7672 bxe_set_mf_bw(struct bxe_softc *sc)
7673 {
7674     bxe_config_mf_bw(sc);
7675     bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7676 }
7677 
7678 static void
7679 bxe_handle_eee_event(struct bxe_softc *sc)
7680 {
7681     BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7682     bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7683 }
7684 
7685 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7686 
7687 static void
7688 bxe_drv_info_ether_stat(struct bxe_softc *sc)
7689 {
7690     struct eth_stats_info *ether_stat =
7691         &sc->sp->drv_info_to_mcp.ether_stat;
7692 
7693     strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7694             ETH_STAT_INFO_VERSION_LEN);
7695 
7696     /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7697     sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7698                                           DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7699                                           ether_stat->mac_local + MAC_PAD,
7700                                           MAC_PAD, ETH_ALEN);
7701 
7702     ether_stat->mtu_size = sc->mtu;
7703 
7704     ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7705     if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7706         ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7707     }
7708 
7709     // XXX ether_stat->feature_flags |= ???;
7710 
7711     ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7712 
7713     ether_stat->txq_size = sc->tx_ring_size;
7714     ether_stat->rxq_size = sc->rx_ring_size;
7715 }
7716 
7717 static void
7718 bxe_handle_drv_info_req(struct bxe_softc *sc)
7719 {
7720     enum drv_info_opcode op_code;
7721     uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7722 
7723     /* if drv_info version supported by MFW doesn't match - send NACK */
7724     if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7725         bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7726         return;
7727     }
7728 
7729     op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7730                DRV_INFO_CONTROL_OP_CODE_SHIFT);
7731 
7732     memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7733 
7734     switch (op_code) {
7735     case ETH_STATS_OPCODE:
7736         bxe_drv_info_ether_stat(sc);
7737         break;
7738     case FCOE_STATS_OPCODE:
7739     case ISCSI_STATS_OPCODE:
7740     default:
7741         /* if op code isn't supported - send NACK */
7742         bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7743         return;
7744     }
7745 
7746     /*
7747      * If we got drv_info attn from MFW then these fields are defined in
7748      * shmem2 for sure
7749      */
7750     SHMEM2_WR(sc, drv_info_host_addr_lo,
7751               U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7752     SHMEM2_WR(sc, drv_info_host_addr_hi,
7753               U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7754 
7755     bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7756 }
7757 
7758 static void
7759 bxe_dcc_event(struct bxe_softc *sc,
7760               uint32_t         dcc_event)
7761 {
7762     BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7763 
7764     if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7765         /*
7766          * This is the only place besides the function initialization
7767          * where the sc->flags can change so it is done without any
7768          * locks
7769          */
7770         if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7771             BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7772             sc->flags |= BXE_MF_FUNC_DIS;
7773             bxe_e1h_disable(sc);
7774         } else {
7775             BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7776             sc->flags &= ~BXE_MF_FUNC_DIS;
7777             bxe_e1h_enable(sc);
7778         }
7779         dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7780     }
7781 
7782     if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7783         bxe_config_mf_bw(sc);
7784         dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7785     }
7786 
7787     /* Report results to MCP */
7788     if (dcc_event)
7789         bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7790     else
7791         bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7792 }
7793 
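/*
 * This function has just become the PMF (port management function):
 * record that fact, enable the NIG attention bits for this VN, and kick
 * off a PMF statistics event.
 */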
7794 static void
7795 bxe_pmf_update(struct bxe_softc *sc)
7796 {
7797     int port = SC_PORT(sc);
7798     uint32_t val;
7799 
7800     sc->port.pmf = 1;
7801     BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7802 
7803     /*
7804      * We need the mb() to ensure the ordering between the writing to
7805      * sc->port.pmf here and reading it from the bxe_periodic_task().
7806      */
7807     mb();
7808 
7809     /* queue a periodic task */
7810     // XXX schedule task...
7811 
7812     // XXX bxe_dcbx_pmf_update(sc);
7813 
7814     /* enable nig attention */
7815     val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7816     if (sc->devinfo.int_block == INT_BLOCK_HC) {
7817         REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7818         REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7819     } else if (!CHIP_IS_E1x(sc)) {
7820         REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7821         REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7822     }
7823 
7824     bxe_stats_handle(sc, STATS_EVENT_PMF);
7825 }
7826 
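/*
 * Walk the XSTORM/TSTORM/CSTORM/USTORM assert lists in internal memory
 * and log any firmware asserts found. Returns the number of asserts seen.
 */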
7827 static int
7828 bxe_mc_assert(struct bxe_softc *sc)
7829 {
7830     char last_idx;
7831     int i, rc = 0;
7832     uint32_t row0, row1, row2, row3;
7833 
7834     /* XSTORM */
7835     last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7836     if (last_idx)
7837         BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7838 
7839     /* print the asserts */
7840     for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7841 
7842         row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7843         row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7844         row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7845         row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7846 
7847         if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7848             BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7849                   i, row3, row2, row1, row0);
7850             rc++;
7851         } else {
7852             break;
7853         }
7854     }
7855 
7856     /* TSTORM */
7857     last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7858     if (last_idx) {
7859         BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7860     }
7861 
7862     /* print the asserts */
7863     for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7864 
7865         row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7866         row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7867         row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7868         row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7869 
7870         if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7871             BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7872                   i, row3, row2, row1, row0);
7873             rc++;
7874         } else {
7875             break;
7876         }
7877     }
7878 
7879     /* CSTORM */
7880     last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7881     if (last_idx) {
7882         BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7883     }
7884 
7885     /* print the asserts */
7886     for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7887 
7888         row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7889         row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7890         row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7891         row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7892 
7893         if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7894             BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7895                   i, row3, row2, row1, row0);
7896             rc++;
7897         } else {
7898             break;
7899         }
7900     }
7901 
7902     /* USTORM */
7903     last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7904     if (last_idx) {
7905         BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7906     }
7907 
7908     /* print the asserts */
7909     for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7910 
7911         row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7912         row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7913         row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7914         row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7915 
7916         if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7917             BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7918                   i, row3, row2, row1, row0);
7919             rc++;
7920         } else {
7921             break;
7922         }
7923     }
7924 
7925     return (rc);
7926 }
7927 
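/*
 * Handle deasserted attentions routed through the fourth AEU after-invert
 * register: MCP driver events behind the PMF link assert (DCC, MF
 * bandwidth, drv_info, PMF and EEE notifications), MC/MCP asserts, and
 * latched GRC attentions.
 */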
7928 static void
7929 bxe_attn_int_deasserted3(struct bxe_softc *sc,
7930                          uint32_t         attn)
7931 {
7932     int func = SC_FUNC(sc);
7933     uint32_t val;
7934 
7935     if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7936 
7937         if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7938 
7939             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7940             bxe_read_mf_cfg(sc);
7941             sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7942                 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7943             val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7944 
7945             if (val & DRV_STATUS_DCC_EVENT_MASK)
7946                 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7947 
7948             if (val & DRV_STATUS_SET_MF_BW)
7949                 bxe_set_mf_bw(sc);
7950 
7951             if (val & DRV_STATUS_DRV_INFO_REQ)
7952                 bxe_handle_drv_info_req(sc);
7953 
7954             if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7955                 bxe_pmf_update(sc);
7956 
7957             if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7958                 bxe_handle_eee_event(sc);
7959 
7960             if (sc->link_vars.periodic_flags &
7961                 ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7962                 /* sync with link */
7963                 bxe_acquire_phy_lock(sc);
7964                 sc->link_vars.periodic_flags &=
7965                     ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7966                 bxe_release_phy_lock(sc);
7967                 if (IS_MF(sc))
7968                     ; // XXX bxe_link_sync_notify(sc);
7969                 bxe_link_report(sc);
7970             }
7971 
7972             /*
7973              * Always call it here: bxe_link_report() will
7974              * prevent the link indication duplication.
7975              */
7976             bxe_link_status_update(sc);
7977 
7978         } else if (attn & BXE_MC_ASSERT_BITS) {
7979 
7980             BLOGE(sc, "MC assert!\n");
7981             bxe_mc_assert(sc);
7982             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
7983             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
7984             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
7985             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
7986             bxe_int_disable(sc);
7987             BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT);
7988             taskqueue_enqueue_timeout(taskqueue_thread,
7989                 &sc->sp_err_timeout_task, hz/10);
7990 
7991         } else if (attn & BXE_MCP_ASSERT) {
7992 
7993             BLOGE(sc, "MCP assert!\n");
7994             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
7995             BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT);
7996             taskqueue_enqueue_timeout(taskqueue_thread,
7997                 &sc->sp_err_timeout_task, hz/10);
7998             bxe_int_disable(sc); /* avoid repetitive assert alert */
7999 
8000 
8001         } else {
8002             BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8003         }
8004     }
8005 
8006     if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8007         BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8008         if (attn & BXE_GRC_TIMEOUT) {
8009             val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8010             BLOGE(sc, "GRC time-out 0x%08x\n", val);
8011         }
8012         if (attn & BXE_GRC_RSV) {
8013             val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8014             BLOGE(sc, "GRC reserved 0x%08x\n", val);
8015         }
8016         REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8017     }
8018 }
8019 
8020 static void
8021 bxe_attn_int_deasserted2(struct bxe_softc *sc,
8022                          uint32_t         attn)
8023 {
8024     int port = SC_PORT(sc);
8025     int reg_offset;
8026     uint32_t val0, mask0, val1, mask1;
8027     uint32_t val;
8028     boolean_t err_flg = FALSE;
8029 
8030     if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8031         val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8032         BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8033         /* CFC error attention */
8034         if (val & 0x2) {
8035             BLOGE(sc, "FATAL error from CFC\n");
8036             err_flg = TRUE;
8037         }
8038     }
8039 
8040     if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8041         val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8042         BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8043         /* RQ_USDMDP_FIFO_OVERFLOW */
8044         if (val & 0x18000) {
8045             BLOGE(sc, "FATAL error from PXP\n");
8046             err_flg = TRUE;
8047         }
8048 
8049         if (!CHIP_IS_E1x(sc)) {
8050             val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8051             BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8052             err_flg = TRUE;
8053         }
8054     }
8055 
8056 #define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8057 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8058 
8059     if (attn & AEU_PXP2_HW_INT_BIT) {
8060         /* CQ47854 workaround: do not panic on
8061          * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8062          */
8063         if (!CHIP_IS_E1x(sc)) {
8064             mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8065             val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8066             mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8067             val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8068             /*
8069              * If PXP2_EOP_ERROR_BIT is the only bit set in
8070              * STS0 and STS1, clear it.
8071              *
8072              * We may lose additional attentions between
8073              * STS0 and STS_CLR0; in that case the user will not
8074              * be notified about them.
8075              */
8076             if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8077                 !(val1 & mask1))
8078                 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8079 
8080             /* print the register, since no one can restore it */
8081             BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8082 
8083             /*
8084              * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8085              * then notify
8086              */
8087             if (val0 & PXP2_EOP_ERROR_BIT) {
8088                 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8089                 err_flg = TRUE;
8090 
8091                 /*
8092                  * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8093                  * set then clear attention from PXP2 block without panic
8094                  */
8095                 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8096                     ((val1 & mask1) == 0))
8097                     attn &= ~AEU_PXP2_HW_INT_BIT;
8098             }
8099         }
8100     }
8101 
8102     if (attn & HW_INTERRUT_ASSERT_SET_2) {
8103         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8104                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8105 
8106         val = REG_RD(sc, reg_offset);
8107         val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8108         REG_WR(sc, reg_offset, val);
8109 
8110         BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8111               (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8112         err_flg = TRUE;
8113         bxe_panic(sc, ("HW block attention set2\n"));
8114     }
8115     if (err_flg) {
8116         BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
8117         taskqueue_enqueue_timeout(taskqueue_thread,
8118            &sc->sp_err_timeout_task, hz/10);
8119     }
8120 
8121 }
8122 
8123 static void
8124 bxe_attn_int_deasserted1(struct bxe_softc *sc,
8125                          uint32_t         attn)
8126 {
8127     int port = SC_PORT(sc);
8128     int reg_offset;
8129     uint32_t val;
8130     boolean_t err_flg = FALSE;
8131 
8132     if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8133         val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8134         BLOGE(sc, "DB hw attention 0x%08x\n", val);
8135         /* DORQ discard attention */
8136         if (val & 0x2) {
8137             BLOGE(sc, "FATAL error from DORQ\n");
8138             err_flg = TRUE;
8139         }
8140     }
8141 
8142     if (attn & HW_INTERRUT_ASSERT_SET_1) {
8143         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8144                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8145 
8146         val = REG_RD(sc, reg_offset);
8147         val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8148         REG_WR(sc, reg_offset, val);
8149 
8150         BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8151               (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8152         err_flg = TRUE;
8153         bxe_panic(sc, ("HW block attention set1\n"));
8154     }
8155     if (err_flg) {
8156         BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8157         taskqueue_enqueue_timeout(taskqueue_thread,
8158            &sc->sp_err_timeout_task, hz/10);
8159     }
8160 
8161 }
8162 
8163 static void
8164 bxe_attn_int_deasserted0(struct bxe_softc *sc,
8165                          uint32_t         attn)
8166 {
8167     int port = SC_PORT(sc);
8168     int reg_offset;
8169     uint32_t val;
8170 
8171     reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8172                           MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8173 
8174     if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8175         val = REG_RD(sc, reg_offset);
8176         val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8177         REG_WR(sc, reg_offset, val);
8178 
8179         BLOGW(sc, "SPIO5 hw attention\n");
8180 
8181         /* Fan failure attention */
8182         elink_hw_reset_phy(&sc->link_params);
8183         bxe_fan_failure(sc);
8184     }
8185 
8186     if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8187         bxe_acquire_phy_lock(sc);
8188         elink_handle_module_detect_int(&sc->link_params);
8189         bxe_release_phy_lock(sc);
8190     }
8191 
8192     if (attn & HW_INTERRUT_ASSERT_SET_0) {
8193         val = REG_RD(sc, reg_offset);
8194         val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8195         REG_WR(sc, reg_offset, val);
8196 
8197 
8198         BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8199         taskqueue_enqueue_timeout(taskqueue_thread,
8200            &sc->sp_err_timeout_task, hz/10);
8201 
8202         bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8203                        (attn & HW_INTERRUT_ASSERT_SET_0)));
8204     }
8205 }
8206 
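/*
 * Top-level deassertion handler: take the shared HW lock so the MCP or
 * the other port does not handle the same event, read the after-invert
 * attention signals, dispatch each deasserted group to the per-register
 * handlers above, then clear the bits in the HC/IGU and restore the AEU
 * mask.
 */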
8207 static void
8208 bxe_attn_int_deasserted(struct bxe_softc *sc,
8209                         uint32_t         deasserted)
8210 {
8211     struct attn_route attn;
8212     struct attn_route *group_mask;
8213     int port = SC_PORT(sc);
8214     int index;
8215     uint32_t reg_addr;
8216     uint32_t val;
8217     uint32_t aeu_mask;
8218     uint8_t global = FALSE;
8219 
8220     /*
8221      * Need to take HW lock because MCP or other port might also
8222      * try to handle this event.
8223      */
8224     bxe_acquire_alr(sc);
8225 
8226     if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8227         /* XXX
8228          * In case of parity errors don't handle attentions so that
8229          * the other function can also "see" the parity errors.
8230          */
8231         // XXX schedule a recovery task...
8232         /* disable HW interrupts */
8233         bxe_int_disable(sc);
8234         BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY);
8235         taskqueue_enqueue_timeout(taskqueue_thread,
8236            &sc->sp_err_timeout_task, hz/10);
8237         bxe_release_alr(sc);
8238         return;
8239     }
8240 
8241     attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8242     attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8243     attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8244     attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8245     if (!CHIP_IS_E1x(sc)) {
8246         attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8247     } else {
8248         attn.sig[4] = 0;
8249     }
8250 
8251     BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8252           attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8253 
8254     for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8255         if (deasserted & (1 << index)) {
8256             group_mask = &sc->attn_group[index];
8257 
8258             BLOGD(sc, DBG_INTR,
8259                   "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8260                   group_mask->sig[0], group_mask->sig[1],
8261                   group_mask->sig[2], group_mask->sig[3],
8262                   group_mask->sig[4]);
8263 
8264             bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8265             bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8266             bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8267             bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8268             bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8269         }
8270     }
8271 
8272     bxe_release_alr(sc);
8273 
8274     if (sc->devinfo.int_block == INT_BLOCK_HC) {
8275         reg_addr = (HC_REG_COMMAND_REG + port*32 +
8276                     COMMAND_REG_ATTN_BITS_CLR);
8277     } else {
8278         reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8279     }
8280 
8281     val = ~deasserted;
8282     BLOGD(sc, DBG_INTR,
8283           "about to mask 0x%08x at %s addr 0x%08x\n", val,
8284           (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8285     REG_WR(sc, reg_addr, val);
8286 
8287     if (~sc->attn_state & deasserted) {
8288         BLOGE(sc, "IGU error\n");
8289     }
8290 
8291     reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8292                       MISC_REG_AEU_MASK_ATTN_FUNC_0;
8293 
8294     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8295 
8296     aeu_mask = REG_RD(sc, reg_addr);
8297 
8298     BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8299           aeu_mask, deasserted);
8300     aeu_mask |= (deasserted & 0x3ff);
8301     BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8302 
8303     REG_WR(sc, reg_addr, aeu_mask);
8304     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8305 
8306     BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8307     sc->attn_state &= ~deasserted;
8308     BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8309 }
8310 
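/*
 * Derive newly asserted and deasserted attention bits by comparing the
 * attention status block against the driver's cached attention state, and
 * dispatch each set to its handler.
 */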
8311 static void
8312 bxe_attn_int(struct bxe_softc *sc)
8313 {
8314     /* read local copy of bits */
8315     uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8316     uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8317     uint32_t attn_state = sc->attn_state;
8318 
8319     /* look for changed bits */
8320     uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8321     uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
8322 
8323     BLOGD(sc, DBG_INTR,
8324           "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8325           attn_bits, attn_ack, asserted, deasserted);
8326 
8327     if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8328         BLOGE(sc, "BAD attention state\n");
8329     }
8330 
8331     /* handle bits that were raised */
8332     if (asserted) {
8333         bxe_attn_int_asserted(sc, asserted);
8334     }
8335 
8336     if (deasserted) {
8337         bxe_attn_int_deasserted(sc, deasserted);
8338     }
8339 }
8340 
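/*
 * Compare the cached default status block indices against the values most
 * recently written by the chip and return a bitmask telling the caller
 * whether attention and/or slowpath events need servicing.
 */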
8341 static uint16_t
8342 bxe_update_dsb_idx(struct bxe_softc *sc)
8343 {
8344     struct host_sp_status_block *def_sb = sc->def_sb;
8345     uint16_t rc = 0;
8346 
8347     mb(); /* status block is written to by the chip */
8348 
8349     if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8350         sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8351         rc |= BXE_DEF_SB_ATT_IDX;
8352     }
8353 
8354     if (sc->def_idx != def_sb->sp_sb.running_index) {
8355         sc->def_idx = def_sb->sp_sb.running_index;
8356         rc |= BXE_DEF_SB_IDX;
8357     }
8358 
8359     mb();
8360 
8361     return (rc);
8362 }
8363 
8364 static inline struct ecore_queue_sp_obj *
8365 bxe_cid_to_q_obj(struct bxe_softc *sc,
8366                  uint32_t         cid)
8367 {
8368     BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8369     return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8370 }
8371 
8372 static void
8373 bxe_handle_mcast_eqe(struct bxe_softc *sc)
8374 {
8375     struct ecore_mcast_ramrod_params rparam;
8376     int rc;
8377 
8378     memset(&rparam, 0, sizeof(rparam));
8379 
8380     rparam.mcast_obj = &sc->mcast_obj;
8381 
8382     BXE_MCAST_LOCK(sc);
8383 
8384     /* clear pending state for the last command */
8385     sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8386 
8387     /* if there are pending mcast commands - send them */
8388     if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8389         rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8390         if (rc < 0) {
8391             BLOGD(sc, DBG_SP,
8392                 "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8393         }
8394     }
8395 
8396     BXE_MCAST_UNLOCK(sc);
8397 }
8398 
8399 static void
8400 bxe_handle_classification_eqe(struct bxe_softc      *sc,
8401                               union event_ring_elem *elem)
8402 {
8403     unsigned long ramrod_flags = 0;
8404     int rc = 0;
8405     uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8406     struct ecore_vlan_mac_obj *vlan_mac_obj;
8407 
8408     /* always push next commands out, don't wait here */
8409     bit_set(&ramrod_flags, RAMROD_CONT);
8410 
8411     switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8412     case ECORE_FILTER_MAC_PENDING:
8413         BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8414         vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8415         break;
8416 
8417     case ECORE_FILTER_MCAST_PENDING:
8418         BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8419         /*
8420          * This is only relevant for 57710 where multicast MACs are
8421          * configured as unicast MACs using the same ramrod.
8422          */
8423         bxe_handle_mcast_eqe(sc);
8424         return;
8425 
8426     default:
8427         BLOGE(sc, "Unsupported classification command: %d\n",
8428               elem->message.data.eth_event.echo);
8429         return;
8430     }
8431 
8432     rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8433 
8434     if (rc < 0) {
8435         BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8436     } else if (rc > 0) {
8437         BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8438     }
8439 }
8440 
8441 static void
8442 bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8443                        union event_ring_elem *elem)
8444 {
8445     bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8446 
8447     /* send rx_mode command again if was requested */
8448     if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8449                                &sc->sp_state)) {
8450         bxe_set_storm_rx_mode(sc);
8451     }
8452 }
8453 
8454 static void
8455 bxe_update_eq_prod(struct bxe_softc *sc,
8456                    uint16_t         prod)
8457 {
8458     storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8459     wmb(); /* keep prod updates ordered */
8460 }
8461 
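/*
 * Process completed slowpath event queue (EQ) elements: complete ramrods
 * against their queue/function/classification objects, then return the
 * consumed credits to eq_spq_left and update the EQ producer.
 */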
8462 static void
8463 bxe_eq_int(struct bxe_softc *sc)
8464 {
8465     uint16_t hw_cons, sw_cons, sw_prod;
8466     union event_ring_elem *elem;
8467     uint8_t echo;
8468     uint32_t cid;
8469     uint8_t opcode;
8470     int spqe_cnt = 0;
8471     struct ecore_queue_sp_obj *q_obj;
8472     struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8473     struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8474 
8475     hw_cons = le16toh(*sc->eq_cons_sb);
8476 
8477     /*
8478      * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
8479      * When we get to the next page we need to adjust so the loop
8480      * condition below will be met. The next element is the size of a
8481      * regular element, hence we increment by 1.
8482      */
8483     if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8484         hw_cons++;
8485     }
8486 
8487     /*
8488      * This function never runs in parallel with itself for a
8489      * specific sc, so no read memory barrier is needed here.
8490      */
8491     sw_cons = sc->eq_cons;
8492     sw_prod = sc->eq_prod;
8493 
8494     BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8495           hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8496 
8497     for (;
8498          sw_cons != hw_cons;
8499          sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8500 
8501         elem = &sc->eq[EQ_DESC(sw_cons)];
8502 
8503         /* elem CID originates from FW and is actually little-endian */
8504         cid = SW_CID(elem->message.data.cfc_del_event.cid);
8505         opcode = elem->message.opcode;
8506 
8507         /* handle eq element */
8508         switch (opcode) {
8509 
8510         case EVENT_RING_OPCODE_STAT_QUERY:
8511             BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8512                   sc->stats_comp++);
8513             /* nothing to do with stats comp */
8514             goto next_spqe;
8515 
8516         case EVENT_RING_OPCODE_CFC_DEL:
8517             /* handle according to cid range */
8518             /* we may want to verify here that the sc state is HALTING */
8519             BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8520             q_obj = bxe_cid_to_q_obj(sc, cid);
8521             if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8522                 break;
8523             }
8524             goto next_spqe;
8525 
8526         case EVENT_RING_OPCODE_STOP_TRAFFIC:
8527             BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8528             if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8529                 break;
8530             }
8531             // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8532             goto next_spqe;
8533 
8534         case EVENT_RING_OPCODE_START_TRAFFIC:
8535             BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8536             if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8537                 break;
8538             }
8539             // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8540             goto next_spqe;
8541 
8542         case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8543             echo = elem->message.data.function_update_event.echo;
8544             if (echo == SWITCH_UPDATE) {
8545                 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8546                 if (f_obj->complete_cmd(sc, f_obj,
8547                                         ECORE_F_CMD_SWITCH_UPDATE)) {
8548                     break;
8549                 }
8550             }
8551             else {
8552                 BLOGD(sc, DBG_SP,
8553                       "AFEX: ramrod completed FUNCTION_UPDATE\n");
8554             }
8555             goto next_spqe;
8556 
8557         case EVENT_RING_OPCODE_FORWARD_SETUP:
8558             q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8559             if (q_obj->complete_cmd(sc, q_obj,
8560                                     ECORE_Q_CMD_SETUP_TX_ONLY)) {
8561                 break;
8562             }
8563             goto next_spqe;
8564 
8565         case EVENT_RING_OPCODE_FUNCTION_START:
8566             BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8567             if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8568                 break;
8569             }
8570             goto next_spqe;
8571 
8572         case EVENT_RING_OPCODE_FUNCTION_STOP:
8573             BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8574             if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8575                 break;
8576             }
8577             goto next_spqe;
8578         }
8579 
8580         switch (opcode | sc->state) {
8581         case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8582         case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8583             cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8584             BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8585             rss_raw->clear_pending(rss_raw);
8586             break;
8587 
8588         case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8589         case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8590         case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8591         case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8592         case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8593         case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8594             BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8595             bxe_handle_classification_eqe(sc, elem);
8596             break;
8597 
8598         case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8599         case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8600         case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8601             BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8602             bxe_handle_mcast_eqe(sc);
8603             break;
8604 
8605         case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8606         case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8607         case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8608             BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8609             bxe_handle_rx_mode_eqe(sc, elem);
8610             break;
8611 
8612         default:
8613             /* unknown event; log an error and continue */
8614             BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8615                   elem->message.opcode, sc->state);
8616         }
8617 
8618 next_spqe:
8619         spqe_cnt++;
8620     } /* for */
8621 
8622     mb();
8623     atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8624 
8625     sc->eq_cons = sw_cons;
8626     sc->eq_prod = sw_prod;
8627 
8628     /* make sure that above mem writes were issued towards the memory */
8629     wmb();
8630 
8631     /* update producer */
8632     bxe_update_eq_prod(sc, sc->eq_prod);
8633 }
8634 
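/*
 * Slowpath taskqueue handler: determine from the default status block
 * whether attention and/or event queue work is pending, service it, and
 * re-arm the default status block.
 */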
8635 static void
8636 bxe_handle_sp_tq(void *context,
8637                  int  pending)
8638 {
8639     struct bxe_softc *sc = (struct bxe_softc *)context;
8640     uint16_t status;
8641 
8642     BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8643 
8644     /* what work needs to be performed? */
8645     status = bxe_update_dsb_idx(sc);
8646 
8647     BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8648 
8649     /* HW attentions */
8650     if (status & BXE_DEF_SB_ATT_IDX) {
8651         BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8652         bxe_attn_int(sc);
8653         status &= ~BXE_DEF_SB_ATT_IDX;
8654     }
8655 
8656     /* SP events: STAT_QUERY and others */
8657     if (status & BXE_DEF_SB_IDX) {
8658         /* handle EQ completions */
8659         BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8660         bxe_eq_int(sc);
8661         bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8662                    le16toh(sc->def_idx), IGU_INT_NOP, 1);
8663         status &= ~BXE_DEF_SB_IDX;
8664     }
8665 
8666     /* if status is non zero then something went wrong */
8667     if (__predict_false(status)) {
8668         BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8669     }
8670 
8671     /* ack status block only if something was actually handled */
8672     bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8673                le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8674 
8675     /*
8676      * Must be called after the EQ processing (since eq leads to sriov
8677      * ramrod completion flows).
8678      * This flow may have been scheduled by the arrival of a ramrod
8679      * completion, or by the sriov code rescheduling itself.
8680      */
8681     // XXX bxe_iov_sp_task(sc);
8682 
8683 }
8684 
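/*
 * Fastpath taskqueue handler: complete pending TX/RX work for the queue,
 * rescheduling itself if more work remains, and re-arm the queue's status
 * block when done.
 */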
8685 static void
8686 bxe_handle_fp_tq(void *context,
8687                  int  pending)
8688 {
8689     struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8690     struct bxe_softc *sc = fp->sc;
8691     uint8_t more_tx = FALSE;
8692     uint8_t more_rx = FALSE;
8693 
8694     BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8695 
8696     /* XXX
8697      * IFF_DRV_RUNNING state can't be checked here since we process
8698      * slowpath events on a client queue during setup. Instead
8699      * we need to add a "process/continue" flag here that the driver
8700      * can use to tell the task here not to do anything.
8701      */
8702 #if 0
8703     if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8704         return;
8705     }
8706 #endif
8707 
8708     /* update the fastpath index */
8709     bxe_update_fp_sb_idx(fp);
8710 
8711     /* XXX add loop here if ever support multiple tx CoS */
8712     /* fp->txdata[cos] */
8713     if (bxe_has_tx_work(fp)) {
8714         BXE_FP_TX_LOCK(fp);
8715         more_tx = bxe_txeof(sc, fp);
8716         BXE_FP_TX_UNLOCK(fp);
8717     }
8718 
8719     if (bxe_has_rx_work(fp)) {
8720         more_rx = bxe_rxeof(sc, fp);
8721     }
8722 
8723     if (more_rx /*|| more_tx*/) {
8724         /* still more work to do */
8725         taskqueue_enqueue(fp->tq, &fp->tq_task);
8726         return;
8727     }
8728 
8729     bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8730                le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8731 }
8732 
8733 static void
8734 bxe_task_fp(struct bxe_fastpath *fp)
8735 {
8736     struct bxe_softc *sc = fp->sc;
8737     uint8_t more_tx = FALSE;
8738     uint8_t more_rx = FALSE;
8739 
8740     BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8741 
8742     /* update the fastpath index */
8743     bxe_update_fp_sb_idx(fp);
8744 
8745     /* XXX add loop here if ever support multiple tx CoS */
8746     /* fp->txdata[cos] */
8747     if (bxe_has_tx_work(fp)) {
8748         BXE_FP_TX_LOCK(fp);
8749         more_tx = bxe_txeof(sc, fp);
8750         BXE_FP_TX_UNLOCK(fp);
8751     }
8752 
8753     if (bxe_has_rx_work(fp)) {
8754         more_rx = bxe_rxeof(sc, fp);
8755     }
8756 
8757     if (more_rx /*|| more_tx*/) {
8758         /* still more work to do; bail out of this ISR and process later */
8759         taskqueue_enqueue(fp->tq, &fp->tq_task);
8760         return;
8761     }
8762 
8763     /*
8764      * Here we write the fastpath index taken before doing any tx or rx work.
8765      * It is quite possible that other hw events occurred up to this point
8766      * and were already processed above. Since we are about to write an
8767      * older fastpath index, another interrupt may arrive in which we do
8768      * no work.
8769      */
8770     bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8771                le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8772 }
8773 
8774 /*
8775  * Legacy interrupt entry point.
8776  *
8777  * Verifies that the controller generated the interrupt and
8778  * then calls a separate routine to handle the various
8779  * interrupt causes: link, RX, and TX.
8780  */
8781 static void
8782 bxe_intr_legacy(void *xsc)
8783 {
8784     struct bxe_softc *sc = (struct bxe_softc *)xsc;
8785     struct bxe_fastpath *fp;
8786     uint16_t status, mask;
8787     int i;
8788 
8789     BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8790 
8791     /*
8792      * 0 for ustorm, 1 for cstorm
8793      * the bits returned from ack_int() are 0-15
8794      * bit 0 = attention status block
8795      * bit 1 = fast path status block
8796      * a mask of 0x2 or more = tx/rx event
8797      * a mask of 1 = slow path event
8798      */
8799 
8800     status = bxe_ack_int(sc);
8801 
8802     /* the interrupt is not for us */
8803     if (__predict_false(status == 0)) {
8804         BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8805         return;
8806     }
8807 
8808     BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8809 
8810     FOR_EACH_ETH_QUEUE(sc, i) {
8811         fp = &sc->fp[i];
8812         mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8813         if (status & mask) {
8814             /* acknowledge and disable further fastpath interrupts */
8815             bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8816             bxe_task_fp(fp);
8817             status &= ~mask;
8818         }
8819     }
8820 
8821     if (__predict_false(status & 0x1)) {
8822         /* acknowledge and disable further slowpath interrupts */
8823         bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8824 
8825         /* schedule slowpath handler */
8826         taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8827 
8828         status &= ~0x1;
8829     }
8830 
8831     if (__predict_false(status)) {
8832         BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8833     }
8834 }
8835 
8836 /* slowpath interrupt entry point */
8837 static void
8838 bxe_intr_sp(void *xsc)
8839 {
8840     struct bxe_softc *sc = (struct bxe_softc *)xsc;
8841 
8842     BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8843 
8844     /* acknowledge and disable further slowpath interrupts */
8845     bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8846 
8847     /* schedule slowpath handler */
8848     taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8849 }
8850 
8851 /* fastpath interrupt entry point */
8852 static void
8853 bxe_intr_fp(void *xfp)
8854 {
8855     struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8856     struct bxe_softc *sc = fp->sc;
8857 
8858     BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8859 
8860     BLOGD(sc, DBG_INTR,
8861           "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8862           curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8863 
8864     /* acknowledge and disable further fastpath interrupts */
8865     bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8866 
8867     bxe_task_fp(fp);
8868 }
8869 
8870 /* Release all interrupts allocated by the driver. */
8871 static void
8872 bxe_interrupt_free(struct bxe_softc *sc)
8873 {
8874     int i;
8875 
8876     switch (sc->interrupt_mode) {
8877     case INTR_MODE_INTX:
8878         BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8879         if (sc->intr[0].resource != NULL) {
8880             bus_release_resource(sc->dev,
8881                                  SYS_RES_IRQ,
8882                                  sc->intr[0].rid,
8883                                  sc->intr[0].resource);
8884         }
8885         break;
8886     case INTR_MODE_MSI:
8887         for (i = 0; i < sc->intr_count; i++) {
8888             BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8889             if (sc->intr[i].resource && sc->intr[i].rid) {
8890                 bus_release_resource(sc->dev,
8891                                      SYS_RES_IRQ,
8892                                      sc->intr[i].rid,
8893                                      sc->intr[i].resource);
8894             }
8895         }
8896         pci_release_msi(sc->dev);
8897         break;
8898     case INTR_MODE_MSIX:
8899         for (i = 0; i < sc->intr_count; i++) {
8900             BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8901             if (sc->intr[i].resource && sc->intr[i].rid) {
8902                 bus_release_resource(sc->dev,
8903                                      SYS_RES_IRQ,
8904                                      sc->intr[i].rid,
8905                                      sc->intr[i].resource);
8906             }
8907         }
8908         pci_release_msi(sc->dev);
8909         break;
8910     default:
8911         /* nothing to do as initial allocation failed */
8912         break;
8913     }
8914 }
8915 
8916 /*
8917  * This function determines and allocates the appropriate
8918  * interrupt based on system capabilities and user request.
8919  *
8920  * The user may force a particular interrupt mode, specify
8921  * the number of receive queues, specify the method for
8922  * distributing received frames to receive queues, or use
8923  * the default settings which will automatically select the
8924  * best supported combination.  In addition, the OS may or
8925  * may not support certain combinations of these settings.
8926  * This routine attempts to reconcile the settings requested
8927  * by the user with the capabilities available from the system
8928  * to select the optimal combination of features.
8929  *
8930  * Returns:
8931  *   0 = Success, !0 = Failure.
8932  */
8933 static int
8934 bxe_interrupt_alloc(struct bxe_softc *sc)
8935 {
8936     int msix_count = 0;
8937     int msi_count = 0;
8938     int num_requested = 0;
8939     int num_allocated = 0;
8940     int rid, i, j;
8941     int rc;
8942 
8943     /* get the number of available MSI/MSI-X interrupts from the OS */
8944     if (sc->interrupt_mode > 0) {
8945         if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8946             msix_count = pci_msix_count(sc->dev);
8947         }
8948 
8949         if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8950             msi_count = pci_msi_count(sc->dev);
8951         }
8952 
8953         BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8954               msi_count, msix_count);
8955     }
8956 
8957     do { /* try allocating MSI-X interrupt resources (at least 2) */
8958         if (sc->interrupt_mode != INTR_MODE_MSIX) {
8959             break;
8960         }
8961 
8962         if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8963             (msix_count < 2)) {
8964             sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8965             break;
8966         }
8967 
8968         /* ask for the necessary number of MSI-X vectors */
8969         num_requested = min((sc->num_queues + 1), msix_count);
8970 
8971         BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8972 
8973         num_allocated = num_requested;
8974         if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8975             BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8976             sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8977             break;
8978         }
8979 
8980         if (num_allocated < 2) { /* possible? */
8981             BLOGE(sc, "MSI-X allocation less than 2!\n");
8982             sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8983             pci_release_msi(sc->dev);
8984             break;
8985         }
8986 
8987         BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8988               num_requested, num_allocated);
8989 
8990         /* best effort so use the number of vectors allocated to us */
8991         sc->intr_count = num_allocated;
8992         sc->num_queues = num_allocated - 1;
8993 
8994         rid = 1; /* initial resource identifier */
8995 
8996         /* allocate the MSI-X vectors */
8997         for (i = 0; i < num_allocated; i++) {
8998             sc->intr[i].rid = (rid + i);
8999 
9000             if ((sc->intr[i].resource =
9001                  bus_alloc_resource_any(sc->dev,
9002                                         SYS_RES_IRQ,
9003                                         &sc->intr[i].rid,
9004                                         RF_ACTIVE)) == NULL) {
9005                 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
9006                       i, (rid + i));
9007 
9008                 for (j = (i - 1); j >= 0; j--) {
9009                     bus_release_resource(sc->dev,
9010                                          SYS_RES_IRQ,
9011                                          sc->intr[j].rid,
9012                                          sc->intr[j].resource);
9013                 }
9014 
9015                 sc->intr_count = 0;
9016                 sc->num_queues = 0;
9017                 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9018                 pci_release_msi(sc->dev);
9019                 break;
9020             }
9021 
9022             BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9023         }
9024     } while (0);
9025 
9026     do { /* try allocating MSI vector resources (at least 2) */
9027         if (sc->interrupt_mode != INTR_MODE_MSI) {
9028             break;
9029         }
9030 
9031         if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9032             (msi_count < 1)) {
9033             sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9034             break;
9035         }
9036 
9037         /* ask for a single MSI vector */
9038         num_requested = 1;
9039 
9040         BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9041 
9042         num_allocated = num_requested;
9043         if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9044             BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9045             sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9046             break;
9047         }
9048 
9049         if (num_allocated != 1) { /* possible? */
9050             BLOGE(sc, "MSI allocation is not 1!\n");
9051             sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9052             pci_release_msi(sc->dev);
9053             break;
9054         }
9055 
9056         BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9057               num_requested, num_allocated);
9058 
9059         /* best effort so use the number of vectors allocated to us */
9060         sc->intr_count = num_allocated;
9061         sc->num_queues = num_allocated;
9062 
9063         rid = 1; /* initial resource identifier */
9064 
9065         sc->intr[0].rid = rid;
9066 
9067         if ((sc->intr[0].resource =
9068              bus_alloc_resource_any(sc->dev,
9069                                     SYS_RES_IRQ,
9070                                     &sc->intr[0].rid,
9071                                     RF_ACTIVE)) == NULL) {
9072             BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9073             sc->intr_count = 0;
9074             sc->num_queues = 0;
9075             sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9076             pci_release_msi(sc->dev);
9077             break;
9078         }
9079 
9080         BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9081     } while (0);
9082 
9083     do { /* try allocating INTx vector resources */
9084         if (sc->interrupt_mode != INTR_MODE_INTX) {
9085             break;
9086         }
9087 
9088         BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9089 
9090         /* only one vector for INTx */
9091         sc->intr_count = 1;
9092         sc->num_queues = 1;
9093 
9094         rid = 0; /* initial resource identifier */
9095 
9096         sc->intr[0].rid = rid;
9097 
9098         if ((sc->intr[0].resource =
9099              bus_alloc_resource_any(sc->dev,
9100                                     SYS_RES_IRQ,
9101                                     &sc->intr[0].rid,
9102                                     (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9103             BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9104             sc->intr_count = 0;
9105             sc->num_queues = 0;
9106             sc->interrupt_mode = -1; /* Failed! */
9107             break;
9108         }
9109 
9110         BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9111     } while (0);
9112 
9113     if (sc->interrupt_mode == -1) {
9114         BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9115         rc = 1;
9116     } else {
9117         BLOGD(sc, DBG_LOAD,
9118               "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9119               sc->interrupt_mode, sc->num_queues);
9120         rc = 0;
9121     }
9122 
9123     return (rc);
9124 }
9125 
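/*
 * Tear down the interrupt handlers and drain/free the per-queue fastpath
 * taskqueues and the slowpath taskqueue.
 */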
9126 static void
9127 bxe_interrupt_detach(struct bxe_softc *sc)
9128 {
9129     struct bxe_fastpath *fp;
9130     int i;
9131 
9132     /* release interrupt resources */
9133     for (i = 0; i < sc->intr_count; i++) {
9134         if (sc->intr[i].resource && sc->intr[i].tag) {
9135             BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9136             bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9137         }
9138     }
9139 
9140     for (i = 0; i < sc->num_queues; i++) {
9141         fp = &sc->fp[i];
9142         if (fp->tq) {
9143             taskqueue_drain(fp->tq, &fp->tq_task);
9144             taskqueue_drain(fp->tq, &fp->tx_task);
9145             while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
9146                 NULL))
9147                 taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
9148         }
9149     }
9150 
9151     for (i = 0; i < sc->num_queues; i++) {
9152         fp = &sc->fp[i];
9153         if (fp->tq != NULL) {
9154             taskqueue_free(fp->tq);
9155             fp->tq = NULL;
9156         }
9157     }
9158 
9159     if (sc->sp_tq) {
9160         taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9161         taskqueue_free(sc->sp_tq);
9162         sc->sp_tq = NULL;
9163     }
9164 }
9165 
9166 /*
9167  * Enables interrupts and attach to the ISR.
9168  *
9169  * When using multiple MSI/MSI-X vectors the first vector
9170  * is used for slowpath operations while all remaining
9171  * vectors are used for fastpath operations.  If only a
9172  * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9173  * ISR must look for both slowpath and fastpath completions.
9174  */
9175 static int
9176 bxe_interrupt_attach(struct bxe_softc *sc)
9177 {
9178     struct bxe_fastpath *fp;
9179     int rc = 0;
9180     int i;
9181 
9182     snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9183              "bxe%d_sp_tq", sc->unit);
9184     TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9185     sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9186                                  taskqueue_thread_enqueue,
9187                                  &sc->sp_tq);
9188     taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9189                             "%s", sc->sp_tq_name);
9190 
9191 
9192     for (i = 0; i < sc->num_queues; i++) {
9193         fp = &sc->fp[i];
9194         snprintf(fp->tq_name, sizeof(fp->tq_name),
9195                  "bxe%d_fp%d_tq", sc->unit, i);
9196         NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9197         TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9198         fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9199                                   taskqueue_thread_enqueue,
9200                                   &fp->tq);
9201         TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9202                           bxe_tx_mq_start_deferred, fp);
9203         taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9204                                 "%s", fp->tq_name);
9205     }
9206 
9207     /* setup interrupt handlers */
9208     if (sc->interrupt_mode == INTR_MODE_MSIX) {
9209         BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9210 
9211         /*
9212          * Setup the interrupt handler. Note that we pass the driver instance
9213          * to the interrupt handler for the slowpath.
9214          */
9215         if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9216                                  (INTR_TYPE_NET | INTR_MPSAFE),
9217                                  NULL, bxe_intr_sp, sc,
9218                                  &sc->intr[0].tag)) != 0) {
9219             BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9220             goto bxe_interrupt_attach_exit;
9221         }
9222 
9223         bus_describe_intr(sc->dev, sc->intr[0].resource,
9224                           sc->intr[0].tag, "sp");
9225 
9226         /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9227 
9228         /* initialize the fastpath vectors (note the first was used for sp) */
9229         for (i = 0; i < sc->num_queues; i++) {
9230             fp = &sc->fp[i];
9231             BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9232 
9233             /*
9234              * Setup the interrupt handler. Note that we pass the
9235              * fastpath context to the interrupt handler in this
9236              * case.
9237              */
9238             if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9239                                      (INTR_TYPE_NET | INTR_MPSAFE),
9240                                      NULL, bxe_intr_fp, fp,
9241                                      &sc->intr[i + 1].tag)) != 0) {
9242                 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9243                       (i + 1), rc);
9244                 goto bxe_interrupt_attach_exit;
9245             }
9246 
9247             bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9248                               sc->intr[i + 1].tag, "fp%02d", i);
9249 
9250             /* bind the fastpath instance to a cpu */
9251             if (sc->num_queues > 1) {
9252                 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9253             }
9254 
9255             fp->state = BXE_FP_STATE_IRQ;
9256         }
9257     } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9258         BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9259 
9260         /*
9261          * Setup the interrupt handler. Note that we pass the
9262          * driver instance to the interrupt handler which
9263          * will handle both the slowpath and fastpath.
9264          */
9265         if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9266                                  (INTR_TYPE_NET | INTR_MPSAFE),
9267                                  NULL, bxe_intr_legacy, sc,
9268                                  &sc->intr[0].tag)) != 0) {
9269             BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9270             goto bxe_interrupt_attach_exit;
9271         }
9272 
9273     } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9274         BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9275 
9276         /*
9277          * Setup the interrupt handler. Note that we pass the
9278          * driver instance to the interrupt handler which
9279          * will handle both the slowpath and fastpath.
9280          */
9281         if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9282                                  (INTR_TYPE_NET | INTR_MPSAFE),
9283                                  NULL, bxe_intr_legacy, sc,
9284                                  &sc->intr[0].tag)) != 0) {
9285             BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9286             goto bxe_interrupt_attach_exit;
9287         }
9288     }
9289 
9290 bxe_interrupt_attach_exit:
9291 
9292     return (rc);
9293 }
9294 
9295 static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9296 static int  bxe_init_hw_common(struct bxe_softc *sc);
9297 static int  bxe_init_hw_port(struct bxe_softc *sc);
9298 static int  bxe_init_hw_func(struct bxe_softc *sc);
9299 static void bxe_reset_common(struct bxe_softc *sc);
9300 static void bxe_reset_port(struct bxe_softc *sc);
9301 static void bxe_reset_func(struct bxe_softc *sc);
9302 static int  bxe_gunzip_init(struct bxe_softc *sc);
9303 static void bxe_gunzip_end(struct bxe_softc *sc);
9304 static int  bxe_init_firmware(struct bxe_softc *sc);
9305 static void bxe_release_firmware(struct bxe_softc *sc);
9306 
9307 static struct
9308 ecore_func_sp_drv_ops bxe_func_sp_drv = {
9309     .init_hw_cmn_chip = bxe_init_hw_common_chip,
9310     .init_hw_cmn      = bxe_init_hw_common,
9311     .init_hw_port     = bxe_init_hw_port,
9312     .init_hw_func     = bxe_init_hw_func,
9313 
9314     .reset_hw_cmn     = bxe_reset_common,
9315     .reset_hw_port    = bxe_reset_port,
9316     .reset_hw_func    = bxe_reset_func,
9317 
9318     .gunzip_init      = bxe_gunzip_init,
9319     .gunzip_end       = bxe_gunzip_end,
9320 
9321     .init_fw          = bxe_init_firmware,
9322     .release_fw       = bxe_release_firmware,
9323 };
9324 
9325 static void
9326 bxe_init_func_obj(struct bxe_softc *sc)
9327 {
9328     sc->dmae_ready = 0;
9329 
9330     ecore_init_func_obj(sc,
9331                         &sc->func_obj,
9332                         BXE_SP(sc, func_rdata),
9333                         BXE_SP_MAPPING(sc, func_rdata),
9334                         BXE_SP(sc, func_afex_rdata),
9335                         BXE_SP_MAPPING(sc, func_afex_rdata),
9336                         &bxe_func_sp_drv);
9337 }
9338 
9339 static int
9340 bxe_init_hw(struct bxe_softc *sc,
9341             uint32_t         load_code)
9342 {
9343     struct ecore_func_state_params func_params = { NULL };
9344     int rc;
9345 
9346     /* prepare the parameters for function state transitions */
9347     bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9348 
9349     func_params.f_obj = &sc->func_obj;
9350     func_params.cmd = ECORE_F_CMD_HW_INIT;
9351 
9352     func_params.params.hw_init.load_phase = load_code;
9353 
9354     /*
9355      * Via a plethora of function pointers, we will eventually reach
9356      * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9357      */
9358     rc = ecore_func_state_change(sc, &func_params);
9359 
9360     return (rc);
9361 }
9362 
9363 static void
9364 bxe_fill(struct bxe_softc *sc,
9365          uint32_t         addr,
9366          int              fill,
9367          uint32_t         len)
9368 {
9369     uint32_t i;
9370 
9371     if (!(len % 4) && !(addr % 4)) {
9372         for (i = 0; i < len; i += 4) {
9373             REG_WR(sc, (addr + i), fill);
9374         }
9375     } else {
9376         for (i = 0; i < len; i++) {
9377             REG_WR8(sc, (addr + i), fill);
9378         }
9379     }
9380 }
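
/*
 * Note on bxe_fill() (descriptive only): the aligned path writes 'fill' as
 * whole 32-bit words while the unaligned path writes it one byte at a time,
 * so a non-zero fill value would not be byte-replicated the way memset()
 * replicates it. The bxe_zero_*_sb() callers below always pass a fill of 0,
 * for which both paths behave identically.
 */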
9381 
9382 /* writes FP SP data to FW - data_size in dwords */
9383 static void
9384 bxe_wr_fp_sb_data(struct bxe_softc *sc,
9385                   int              fw_sb_id,
9386                   uint32_t         *sb_data_p,
9387                   uint32_t         data_size)
9388 {
9389     int index;
9390 
9391     for (index = 0; index < data_size; index++) {
9392         REG_WR(sc,
9393                (BAR_CSTRORM_INTMEM +
9394                 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9395                 (sizeof(uint32_t) * index)),
9396                *(sb_data_p + index));
9397     }
9398 }
9399 
9400 static void
9401 bxe_zero_fp_sb(struct bxe_softc *sc,
9402                int              fw_sb_id)
9403 {
9404     struct hc_status_block_data_e2 sb_data_e2;
9405     struct hc_status_block_data_e1x sb_data_e1x;
9406     uint32_t *sb_data_p;
9407     uint32_t data_size = 0;
9408 
9409     if (!CHIP_IS_E1x(sc)) {
9410         memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9411         sb_data_e2.common.state = SB_DISABLED;
9412         sb_data_e2.common.p_func.vf_valid = FALSE;
9413         sb_data_p = (uint32_t *)&sb_data_e2;
9414         data_size = (sizeof(struct hc_status_block_data_e2) /
9415                      sizeof(uint32_t));
9416     } else {
9417         memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9418         sb_data_e1x.common.state = SB_DISABLED;
9419         sb_data_e1x.common.p_func.vf_valid = FALSE;
9420         sb_data_p = (uint32_t *)&sb_data_e1x;
9421         data_size = (sizeof(struct hc_status_block_data_e1x) /
9422                      sizeof(uint32_t));
9423     }
9424 
9425     bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9426 
9427     bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9428              0, CSTORM_STATUS_BLOCK_SIZE);
9429     bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9430              0, CSTORM_SYNC_BLOCK_SIZE);
9431 }
9432 
9433 static void
9434 bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9435                   struct hc_sp_status_block_data *sp_sb_data)
9436 {
9437     int i;
9438 
9439     for (i = 0;
9440          i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9441          i++) {
9442         REG_WR(sc,
9443                (BAR_CSTRORM_INTMEM +
9444                 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9445                 (i * sizeof(uint32_t))),
9446                *((uint32_t *)sp_sb_data + i));
9447     }
9448 }
9449 
9450 static void
9451 bxe_zero_sp_sb(struct bxe_softc *sc)
9452 {
9453     struct hc_sp_status_block_data sp_sb_data;
9454 
9455     memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9456 
9457     sp_sb_data.state           = SB_DISABLED;
9458     sp_sb_data.p_func.vf_valid = FALSE;
9459 
9460     bxe_wr_sp_sb_data(sc, &sp_sb_data);
9461 
9462     bxe_fill(sc,
9463              (BAR_CSTRORM_INTMEM +
9464               CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9465               0, CSTORM_SP_STATUS_BLOCK_SIZE);
9466     bxe_fill(sc,
9467              (BAR_CSTRORM_INTMEM +
9468               CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9469               0, CSTORM_SP_SYNC_BLOCK_SIZE);
9470 }
9471 
9472 static void
9473 bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9474                              int                       igu_sb_id,
9475                              int                       igu_seg_id)
9476 {
9477     hc_sm->igu_sb_id      = igu_sb_id;
9478     hc_sm->igu_seg_id     = igu_seg_id;
9479     hc_sm->timer_value    = 0xFF;
9480     hc_sm->time_to_expire = 0xFFFFFFFF;
9481 }
9482 
9483 static void
9484 bxe_map_sb_state_machines(struct hc_index_data *index_data)
9485 {
9486     /* zero out state machine indices */
9487 
9488     /* rx indices */
9489     index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9490 
9491     /* tx indices */
9492     index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9493     index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9494     index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9495     index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9496 
9497     /* map indices */
9498 
9499     /* rx indices */
9500     index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9501         (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9502 
9503     /* tx indices */
9504     index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9505         (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9506     index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9507         (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9508     index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9509         (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9510     index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9511         (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9512 }
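
/*
 * Summary of the mapping above (descriptive only): the RX CQ consumer index
 * is attached to the SM_RX_ID state machine, while the OOO TX CQ and all
 * per-CoS TX CQ consumer indices are attached to SM_TX_ID. The state machine
 * id is carried in the HC_INDEX_DATA_SM_ID field of each index's flags.
 */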
9513 
9514 static void
9515 bxe_init_sb(struct bxe_softc *sc,
9516             bus_addr_t       busaddr,
9517             int              vfid,
9518             uint8_t          vf_valid,
9519             int              fw_sb_id,
9520             int              igu_sb_id)
9521 {
9522     struct hc_status_block_data_e2  sb_data_e2;
9523     struct hc_status_block_data_e1x sb_data_e1x;
9524     struct hc_status_block_sm       *hc_sm_p;
9525     uint32_t *sb_data_p;
9526     int igu_seg_id;
9527     int data_size;
9528 
9529     if (CHIP_INT_MODE_IS_BC(sc)) {
9530         igu_seg_id = HC_SEG_ACCESS_NORM;
9531     } else {
9532         igu_seg_id = IGU_SEG_ACCESS_NORM;
9533     }
9534 
9535     bxe_zero_fp_sb(sc, fw_sb_id);
9536 
9537     if (!CHIP_IS_E1x(sc)) {
9538         memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9539         sb_data_e2.common.state = SB_ENABLED;
9540         sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9541         sb_data_e2.common.p_func.vf_id = vfid;
9542         sb_data_e2.common.p_func.vf_valid = vf_valid;
9543         sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9544         sb_data_e2.common.same_igu_sb_1b = TRUE;
9545         sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9546         sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9547         hc_sm_p = sb_data_e2.common.state_machine;
9548         sb_data_p = (uint32_t *)&sb_data_e2;
9549         data_size = (sizeof(struct hc_status_block_data_e2) /
9550                      sizeof(uint32_t));
9551         bxe_map_sb_state_machines(sb_data_e2.index_data);
9552     } else {
9553         memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9554         sb_data_e1x.common.state = SB_ENABLED;
9555         sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9556         sb_data_e1x.common.p_func.vf_id = 0xff;
9557         sb_data_e1x.common.p_func.vf_valid = FALSE;
9558         sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9559         sb_data_e1x.common.same_igu_sb_1b = TRUE;
9560         sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9561         sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9562         hc_sm_p = sb_data_e1x.common.state_machine;
9563         sb_data_p = (uint32_t *)&sb_data_e1x;
9564         data_size = (sizeof(struct hc_status_block_data_e1x) /
9565                      sizeof(uint32_t));
9566         bxe_map_sb_state_machines(sb_data_e1x.index_data);
9567     }
9568 
9569     bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9570     bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9571 
9572     BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9573 
9574     /* write indices to HW - PCI guarantees endianness of regpairs */
9575     bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9576 }
9577 
9578 static inline uint8_t
9579 bxe_fp_qzone_id(struct bxe_fastpath *fp)
9580 {
9581     if (CHIP_IS_E1x(fp->sc)) {
9582         return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9583     } else {
9584         return (fp->cl_id);
9585     }
9586 }
9587 
9588 static inline uint32_t
9589 bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9590                            struct bxe_fastpath *fp)
9591 {
9592     uint32_t offset = BAR_USTRORM_INTMEM;
9593 
9594     if (!CHIP_IS_E1x(sc)) {
9595         offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9596     } else {
9597         offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9598     }
9599 
9600     return (offset);
9601 }
9602 
9603 static void
9604 bxe_init_eth_fp(struct bxe_softc *sc,
9605                 int              idx)
9606 {
9607     struct bxe_fastpath *fp = &sc->fp[idx];
9608     uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9609     unsigned long q_type = 0;
9610     int cos;
9611 
9612     fp->sc    = sc;
9613     fp->index = idx;
9614 
9615     fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9616     fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9617 
9618     fp->cl_id = (CHIP_IS_E1x(sc)) ?
9619                     (SC_L_ID(sc) + idx) :
9620                     /* want client ID same as IGU SB ID for non-E1 */
9621                     fp->igu_sb_id;
9622     fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9623 
9624     /* setup sb indices */
9625     if (!CHIP_IS_E1x(sc)) {
9626         fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9627         fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9628     } else {
9629         fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9630         fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9631     }
9632 
9633     /* init shortcut */
9634     fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9635 
9636     fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9637 
9638     /*
9639      * XXX If multiple CoS are ever supported then each fastpath structure
9640      * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9641      */
9642     for (cos = 0; cos < sc->max_cos; cos++) {
9643         cids[cos] = idx;
9644     }
9645     fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9646 
9647     /* nothing more for a VF to do */
9648     if (IS_VF(sc)) {
9649         return;
9650     }
9651 
9652     bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9653                 fp->fw_sb_id, fp->igu_sb_id);
9654 
9655     bxe_update_fp_sb_idx(fp);
9656 
9657     /* Configure Queue State object */
9658     bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9659     bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9660 
9661     ecore_init_queue_obj(sc,
9662                          &sc->sp_objs[idx].q_obj,
9663                          fp->cl_id,
9664                          cids,
9665                          sc->max_cos,
9666                          SC_FUNC(sc),
9667                          BXE_SP(sc, q_rdata),
9668                          BXE_SP_MAPPING(sc, q_rdata),
9669                          q_type);
9670 
9671     /* configure classification DBs */
9672     ecore_init_mac_obj(sc,
9673                        &sc->sp_objs[idx].mac_obj,
9674                        fp->cl_id,
9675                        idx,
9676                        SC_FUNC(sc),
9677                        BXE_SP(sc, mac_rdata),
9678                        BXE_SP_MAPPING(sc, mac_rdata),
9679                        ECORE_FILTER_MAC_PENDING,
9680                        &sc->sp_state,
9681                        ECORE_OBJ_TYPE_RX_TX,
9682                        &sc->macs_pool);
9683 
9684     BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9685           idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9686 }
9687 
9688 static inline void
9689 bxe_update_rx_prod(struct bxe_softc    *sc,
9690                    struct bxe_fastpath *fp,
9691                    uint16_t            rx_bd_prod,
9692                    uint16_t            rx_cq_prod,
9693                    uint16_t            rx_sge_prod)
9694 {
9695     struct ustorm_eth_rx_producers rx_prods = { 0 };
9696     uint32_t i;
9697 
9698     /* update producers */
9699     rx_prods.bd_prod  = rx_bd_prod;
9700     rx_prods.cqe_prod = rx_cq_prod;
9701     rx_prods.sge_prod = rx_sge_prod;
9702 
9703     /*
9704      * Make sure that the BD and SGE data is updated before updating the
9705      * producers since FW might read the BD/SGE right after the producer
9706      * is updated.
9707      * This is only applicable for weak-ordered memory model archs such
9708      * as IA-64. The following barrier is also mandatory since the FW
9709      * assumes BDs must have buffers.
9710      */
9711     wmb();
9712 
9713     for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9714         REG_WR(sc,
9715                (fp->ustorm_rx_prods_offset + (i * 4)),
9716                ((uint32_t *)&rx_prods)[i]);
9717     }
9718 
9719     wmb(); /* keep prod updates ordered */
9720 
9721     BLOGD(sc, DBG_RX,
9722           "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9723           fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9724 }
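
/*
 * Note (descriptive only): struct ustorm_eth_rx_producers is pushed to the
 * chip as sizeof(rx_prods) / 4 consecutive 32-bit writes starting at the
 * per-queue offset computed by bxe_rx_ustorm_prods_offset() above.
 */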
9725 
9726 static void
9727 bxe_init_rx_rings(struct bxe_softc *sc)
9728 {
9729     struct bxe_fastpath *fp;
9730     int i;
9731 
9732     for (i = 0; i < sc->num_queues; i++) {
9733         fp = &sc->fp[i];
9734 
9735         fp->rx_bd_cons = 0;
9736 
9737         /*
9738          * Activate the BD ring...
9739          * Warning, this will generate an interrupt (to the TSTORM)
9740          * so this can only be done after the chip is initialized
9741          */
9742         bxe_update_rx_prod(sc, fp,
9743                            fp->rx_bd_prod,
9744                            fp->rx_cq_prod,
9745                            fp->rx_sge_prod);
9746 
9747         if (i != 0) {
9748             continue;
9749         }
9750 
9751         if (CHIP_IS_E1(sc)) {
9752             REG_WR(sc,
9753                    (BAR_USTRORM_INTMEM +
9754                     USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9755                    U64_LO(fp->rcq_dma.paddr));
9756             REG_WR(sc,
9757                    (BAR_USTRORM_INTMEM +
9758                     USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9759                    U64_HI(fp->rcq_dma.paddr));
9760         }
9761     }
9762 }
9763 
9764 static void
9765 bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9766 {
9767     SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9768     fp->tx_db.data.zero_fill1 = 0;
9769     fp->tx_db.data.prod = 0;
9770 
9771     fp->tx_pkt_prod = 0;
9772     fp->tx_pkt_cons = 0;
9773     fp->tx_bd_prod = 0;
9774     fp->tx_bd_cons = 0;
9775     fp->eth_q_stats.tx_pkts = 0;
9776 }
9777 
9778 static inline void
9779 bxe_init_tx_rings(struct bxe_softc *sc)
9780 {
9781     int i;
9782 
9783     for (i = 0; i < sc->num_queues; i++) {
9784         bxe_init_tx_ring_one(&sc->fp[i]);
9785     }
9786 }
9787 
9788 static void
9789 bxe_init_def_sb(struct bxe_softc *sc)
9790 {
9791     struct host_sp_status_block *def_sb = sc->def_sb;
9792     bus_addr_t mapping = sc->def_sb_dma.paddr;
9793     int igu_sp_sb_index;
9794     int igu_seg_id;
9795     int port = SC_PORT(sc);
9796     int func = SC_FUNC(sc);
9797     int reg_offset, reg_offset_en5;
9798     uint64_t section;
9799     int index, sindex;
9800     struct hc_sp_status_block_data sp_sb_data;
9801 
9802     memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9803 
9804     if (CHIP_INT_MODE_IS_BC(sc)) {
9805         igu_sp_sb_index = DEF_SB_IGU_ID;
9806         igu_seg_id = HC_SEG_ACCESS_DEF;
9807     } else {
9808         igu_sp_sb_index = sc->igu_dsb_id;
9809         igu_seg_id = IGU_SEG_ACCESS_DEF;
9810     }
9811 
9812     /* attentions */
9813     section = ((uint64_t)mapping +
9814                offsetof(struct host_sp_status_block, atten_status_block));
9815     def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9816     sc->attn_state = 0;
9817 
9818     reg_offset = (port) ?
9819                      MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9820                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9821     reg_offset_en5 = (port) ?
9822                          MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9823                          MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9824 
9825     for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9826         /* take care of sig[0]..sig[4] */
9827         for (sindex = 0; sindex < 4; sindex++) {
9828             sc->attn_group[index].sig[sindex] =
9829                 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9830         }
9831 
9832         if (!CHIP_IS_E1x(sc)) {
9833             /*
9834              * enable5 is separate from the rest of the registers,
9835              * and the address skip is 4 and not 16 between the
9836              * different groups
9837              */
9838             sc->attn_group[index].sig[4] =
9839                 REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9840         } else {
9841             sc->attn_group[index].sig[4] = 0;
9842         }
9843     }
9844 
9845     if (sc->devinfo.int_block == INT_BLOCK_HC) {
9846         reg_offset = (port) ?
9847                          HC_REG_ATTN_MSG1_ADDR_L :
9848                          HC_REG_ATTN_MSG0_ADDR_L;
9849         REG_WR(sc, reg_offset, U64_LO(section));
9850         REG_WR(sc, (reg_offset + 4), U64_HI(section));
9851     } else if (!CHIP_IS_E1x(sc)) {
9852         REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9853         REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9854     }
9855 
9856     section = ((uint64_t)mapping +
9857                offsetof(struct host_sp_status_block, sp_sb));
9858 
9859     bxe_zero_sp_sb(sc);
9860 
9861     /* PCI guarantees endianness of the regpair */
9862     sp_sb_data.state           = SB_ENABLED;
9863     sp_sb_data.host_sb_addr.lo = U64_LO(section);
9864     sp_sb_data.host_sb_addr.hi = U64_HI(section);
9865     sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9866     sp_sb_data.igu_seg_id      = igu_seg_id;
9867     sp_sb_data.p_func.pf_id    = func;
9868     sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9869     sp_sb_data.p_func.vf_id    = 0xff;
9870 
9871     bxe_wr_sp_sb_data(sc, &sp_sb_data);
9872 
9873     bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9874 }
9875 
9876 static void
9877 bxe_init_sp_ring(struct bxe_softc *sc)
9878 {
9879     atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9880     sc->spq_prod_idx = 0;
9881     sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9882     sc->spq_prod_bd = sc->spq;
9883     sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9884 }
9885 
9886 static void
9887 bxe_init_eq_ring(struct bxe_softc *sc)
9888 {
9889     union event_ring_elem *elem;
9890     int i;
9891 
9892     for (i = 1; i <= NUM_EQ_PAGES; i++) {
9893         elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9894 
9895         elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9896                                                  BCM_PAGE_SIZE *
9897                                                  (i % NUM_EQ_PAGES)));
9898         elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9899                                                  BCM_PAGE_SIZE *
9900                                                  (i % NUM_EQ_PAGES)));
9901     }
9902 
9903     sc->eq_cons    = 0;
9904     sc->eq_prod    = NUM_EQ_DESC;
9905     sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9906 
9907     atomic_store_rel_long(&sc->eq_spq_left,
9908                           (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9909                                NUM_EQ_DESC) - 1));
9910 }
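
/*
 * Page-chaining sketch (illustrative only), assuming NUM_EQ_PAGES == 2:
 * the loop above plants a "next page" pointer in the last descriptor of
 * each EQ page, so
 *   page 0, elem EQ_DESC_CNT_PAGE - 1  ->  eq_dma.paddr + 1 * BCM_PAGE_SIZE
 *   page 1, elem EQ_DESC_CNT_PAGE - 1  ->  eq_dma.paddr + 0 * BCM_PAGE_SIZE
 * i.e. the chain wraps back to the first page after the last one.
 */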
9911 
9912 static void
9913 bxe_init_internal_common(struct bxe_softc *sc)
9914 {
9915     int i;
9916 
9917     /*
9918      * Zero this manually as its initialization is currently missing
9919      * in the initTool.
9920      */
9921     for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9922         REG_WR(sc,
9923                (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9924                0);
9925     }
9926 
9927     if (!CHIP_IS_E1x(sc)) {
9928         REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9929                 CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9930     }
9931 }
9932 
9933 static void
9934 bxe_init_internal(struct bxe_softc *sc,
9935                   uint32_t         load_code)
9936 {
9937     switch (load_code) {
9938     case FW_MSG_CODE_DRV_LOAD_COMMON:
9939     case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9940         bxe_init_internal_common(sc);
9941         /* no break */
9942 
9943     case FW_MSG_CODE_DRV_LOAD_PORT:
9944         /* nothing to do */
9945         /* no break */
9946 
9947     case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9948         /* internal memory per function is initialized inside bxe_pf_init */
9949         break;
9950 
9951     default:
9952         BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9953         break;
9954     }
9955 }
9956 
9957 static void
9958 storm_memset_func_cfg(struct bxe_softc                         *sc,
9959                       struct tstorm_eth_function_common_config *tcfg,
9960                       uint16_t                                  abs_fid)
9961 {
9962     uint32_t addr;
9963     size_t size;
9964 
9965     addr = (BAR_TSTRORM_INTMEM +
9966             TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9967     size = sizeof(struct tstorm_eth_function_common_config);
9968     ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9969 }
9970 
9971 static void
9972 bxe_func_init(struct bxe_softc            *sc,
9973               struct bxe_func_init_params *p)
9974 {
9975     struct tstorm_eth_function_common_config tcfg = { 0 };
9976 
9977     if (CHIP_IS_E1x(sc)) {
9978         storm_memset_func_cfg(sc, &tcfg, p->func_id);
9979     }
9980 
9981     /* Enable the function in the FW */
9982     storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9983     storm_memset_func_en(sc, p->func_id, 1);
9984 
9985     /* spq */
9986     if (p->func_flgs & FUNC_FLG_SPQ) {
9987         storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9988         REG_WR(sc,
9989                (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9990                p->spq_prod);
9991     }
9992 }
9993 
9994 /*
9995  * Computes the per-VN min rates, which are needed for further
9996  * normalizing of the min_rates, and stores them in
9997  * input->vnic_min_rate[].
9998  * The fairness algorithm is deactivated when ETS is enabled or when
9999  * all of the min rates are zero.
10000  * Otherwise, any visible VN whose configured min rate is zero is
10001  * assigned DEF_MIN_RATE instead, and hidden VNs always get a min
10002  * rate of 0.
10003  */
10004 static void
10005 bxe_calc_vn_min(struct bxe_softc       *sc,
10006                 struct cmng_init_input *input)
10007 {
10008     uint32_t vn_cfg;
10009     uint32_t vn_min_rate;
10010     int all_zero = 1;
10011     int vn;
10012 
10013     for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10014         vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10015         vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10016                         FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10017 
10018         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10019             /* skip hidden VNs */
10020             vn_min_rate = 0;
10021         } else if (!vn_min_rate) {
10022             /* If min rate is zero - set it to 100 */
10023             vn_min_rate = DEF_MIN_RATE;
10024         } else {
10025             all_zero = 0;
10026         }
10027 
10028         input->vnic_min_rate[vn] = vn_min_rate;
10029     }
10030 
10031     /* if ETS or all min rates are zeros - disable fairness */
10032     if (BXE_IS_ETS_ENABLED(sc)) {
10033         input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10034         BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10035     } else if (all_zero) {
10036         input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10037         BLOGD(sc, DBG_LOAD,
10038               "Fariness disabled (all MIN values are zeroes)\n");
10039     } else {
10040         input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10041     }
10042 }
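
/*
 * Worked example (illustrative only): a VN whose mf_config min BW field
 * decodes to 25 gets vn_min_rate = 25 * 100 = 2500. A hidden VN gets 0,
 * and a visible VN whose min BW field is 0 gets DEF_MIN_RATE instead so
 * that it still participates when fairness remains enabled.
 */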
10043 
10044 static inline uint16_t
10045 bxe_extract_max_cfg(struct bxe_softc *sc,
10046                     uint32_t         mf_cfg)
10047 {
10048     uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10049                         FUNC_MF_CFG_MAX_BW_SHIFT);
10050 
10051     if (!max_cfg) {
10052         BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10053         max_cfg = 100;
10054     }
10055 
10056     return (max_cfg);
10057 }
10058 
10059 static void
10060 bxe_calc_vn_max(struct bxe_softc       *sc,
10061                 int                    vn,
10062                 struct cmng_init_input *input)
10063 {
10064     uint16_t vn_max_rate;
10065     uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10066     uint32_t max_cfg;
10067 
10068     if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10069         vn_max_rate = 0;
10070     } else {
10071         max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10072 
10073         if (IS_MF_SI(sc)) {
10074             /* max_cfg is a percentage of the link speed */
10075             vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10076         } else { /* SD modes */
10077             /* max_cfg is absolute in 100Mb units */
10078             vn_max_rate = (max_cfg * 100);
10079         }
10080     }
10081 
10082     BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10083 
10084     input->vnic_max_rate[vn] = vn_max_rate;
10085 }
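
/*
 * Worked example (illustrative only) with max_cfg == 25:
 *   IS_MF_SI, 1000 Mbps link:       vn_max_rate = (1000 * 25) / 100 = 250
 *   SD modes (link speed not used): vn_max_rate = 25 * 100          = 2500
 * i.e. in SI mode max_cfg is a percentage of the link speed, while in SD
 * modes it is an absolute bandwidth in 100 Mbps units.
 */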
10086 
10087 static void
10088 bxe_cmng_fns_init(struct bxe_softc *sc,
10089                   uint8_t          read_cfg,
10090                   uint8_t          cmng_type)
10091 {
10092     struct cmng_init_input input;
10093     int vn;
10094 
10095     memset(&input, 0, sizeof(struct cmng_init_input));
10096 
10097     input.port_rate = sc->link_vars.line_speed;
10098 
10099     if (cmng_type == CMNG_FNS_MINMAX) {
10100         /* read mf conf from shmem */
10101         if (read_cfg) {
10102             bxe_read_mf_cfg(sc);
10103         }
10104 
10105         /* get VN min rate and enable fairness if not 0 */
10106         bxe_calc_vn_min(sc, &input);
10107 
10108         /* get VN max rate */
10109         if (sc->port.pmf) {
10110             for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10111                 bxe_calc_vn_max(sc, vn, &input);
10112             }
10113         }
10114 
10115         /* always enable rate shaping and fairness */
10116         input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10117 
10118         ecore_init_cmng(&input, &sc->cmng);
10119         return;
10120     }
10121 
10122     /* rate shaping and fairness are disabled */
10123     BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10124 }
10125 
10126 static int
10127 bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10128 {
10129     if (CHIP_REV_IS_SLOW(sc)) {
10130         return (CMNG_FNS_NONE);
10131     }
10132 
10133     if (IS_MF(sc)) {
10134         return (CMNG_FNS_MINMAX);
10135     }
10136 
10137     return (CMNG_FNS_NONE);
10138 }
10139 
10140 static void
10141 storm_memset_cmng(struct bxe_softc *sc,
10142                   struct cmng_init *cmng,
10143                   uint8_t          port)
10144 {
10145     int vn;
10146     int func;
10147     uint32_t addr;
10148     size_t size;
10149 
10150     addr = (BAR_XSTRORM_INTMEM +
10151             XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10152     size = sizeof(struct cmng_struct_per_port);
10153     ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10154 
10155     for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10156         func = func_by_vn(sc, vn);
10157 
10158         addr = (BAR_XSTRORM_INTMEM +
10159                 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10160         size = sizeof(struct rate_shaping_vars_per_vn);
10161         ecore_storm_memset_struct(sc, addr, size,
10162                                   (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10163 
10164         addr = (BAR_XSTRORM_INTMEM +
10165                 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10166         size = sizeof(struct fairness_vars_per_vn);
10167         ecore_storm_memset_struct(sc, addr, size,
10168                                   (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10169     }
10170 }
10171 
10172 static void
10173 bxe_pf_init(struct bxe_softc *sc)
10174 {
10175     struct bxe_func_init_params func_init = { 0 };
10176     struct event_ring_data eq_data = { { 0 } };
10177     uint16_t flags;
10178 
10179     if (!CHIP_IS_E1x(sc)) {
10180         /* reset IGU PF statistics: MSIX + ATTN */
10181         /* PF */
10182         REG_WR(sc,
10183                (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10184                 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10185                 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10186                0);
10187         /* ATTN */
10188         REG_WR(sc,
10189                (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10190                 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10191                 (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10192                 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10193                0);
10194     }
10195 
10196     /* function setup flags */
10197     flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10198 
10199     /*
10200      * This flag is relevant for E1x only.
10201      * E2 doesn't have a TPA configuration at the function level.
10202      */
10203     flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10204 
10205     func_init.func_flgs = flags;
10206     func_init.pf_id     = SC_FUNC(sc);
10207     func_init.func_id   = SC_FUNC(sc);
10208     func_init.spq_map   = sc->spq_dma.paddr;
10209     func_init.spq_prod  = sc->spq_prod_idx;
10210 
10211     bxe_func_init(sc, &func_init);
10212 
10213     memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10214 
10215     /*
10216      * Congestion management values depend on the link rate.
10217      * There is no active link so initial link rate is set to 10Gbps.
10218      * When the link comes up the congestion management values are
10219      * re-calculated according to the actual link rate.
10220      */
10221     sc->link_vars.line_speed = SPEED_10000;
10222     bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10223 
10224     /* Only the PMF sets the HW */
10225     if (sc->port.pmf) {
10226         storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10227     }
10228 
10229     /* init Event Queue - PCI bus guarantees correct endianness */
10230     eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10231     eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10232     eq_data.producer     = sc->eq_prod;
10233     eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10234     eq_data.sb_id        = DEF_SB_ID;
10235     storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10236 }
10237 
10238 static void
10239 bxe_hc_int_enable(struct bxe_softc *sc)
10240 {
10241     int port = SC_PORT(sc);
10242     uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10243     uint32_t val = REG_RD(sc, addr);
10244     uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10245     uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10246                            (sc->intr_count == 1)) ? TRUE : FALSE;
10247     uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10248 
10249     if (msix) {
10250         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10251                  HC_CONFIG_0_REG_INT_LINE_EN_0);
10252         val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10253                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10254         if (single_msix) {
10255             val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10256         }
10257     } else if (msi) {
10258         val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10259         val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10260                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10261                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10262     } else {
10263         val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10264                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10265                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10266                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10267 
10268         if (!CHIP_IS_E1(sc)) {
10269             BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10270                   val, port, addr);
10271 
10272             REG_WR(sc, addr, val);
10273 
10274             val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10275         }
10276     }
10277 
10278     if (CHIP_IS_E1(sc)) {
10279         REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10280     }
10281 
10282     BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10283           val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10284 
10285     REG_WR(sc, addr, val);
10286 
10287     /* ensure that HC_CONFIG is written before leading/trailing edge config */
10288     mb();
10289 
10290     if (!CHIP_IS_E1(sc)) {
10291         /* init leading/trailing edge */
10292         if (IS_MF(sc)) {
10293             val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10294             if (sc->port.pmf) {
10295                 /* enable nig and gpio3 attention */
10296                 val |= 0x1100;
10297             }
10298         } else {
10299             val = 0xffff;
10300         }
10301 
10302         REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10303         REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10304     }
10305 
10306     /* make sure that interrupts are indeed enabled from here on */
10307     mb();
10308 }
10309 
10310 static void
10311 bxe_igu_int_enable(struct bxe_softc *sc)
10312 {
10313     uint32_t val;
10314     uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10315     uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10316                            (sc->intr_count == 1)) ? TRUE : FALSE;
10317     uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10318 
10319     val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10320 
10321     if (msix) {
10322         val &= ~(IGU_PF_CONF_INT_LINE_EN |
10323                  IGU_PF_CONF_SINGLE_ISR_EN);
10324         val |= (IGU_PF_CONF_MSI_MSIX_EN |
10325                 IGU_PF_CONF_ATTN_BIT_EN);
10326         if (single_msix) {
10327             val |= IGU_PF_CONF_SINGLE_ISR_EN;
10328         }
10329     } else if (msi) {
10330         val &= ~IGU_PF_CONF_INT_LINE_EN;
10331         val |= (IGU_PF_CONF_MSI_MSIX_EN |
10332                 IGU_PF_CONF_ATTN_BIT_EN |
10333                 IGU_PF_CONF_SINGLE_ISR_EN);
10334     } else {
10335         val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10336         val |= (IGU_PF_CONF_INT_LINE_EN |
10337                 IGU_PF_CONF_ATTN_BIT_EN |
10338                 IGU_PF_CONF_SINGLE_ISR_EN);
10339     }
10340 
10341     /* clean previous status - need to configure igu prior to ack */
10342     if ((!msix) || single_msix) {
10343         REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10344         bxe_ack_int(sc);
10345     }
10346 
10347     val |= IGU_PF_CONF_FUNC_EN;
10348 
10349     BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10350           val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10351 
10352     REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10353 
10354     mb();
10355 
10356     /* init leading/trailing edge */
10357     if (IS_MF(sc)) {
10358         val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10359         if (sc->port.pmf) {
10360             /* enable nig and gpio3 attention */
10361             val |= 0x1100;
10362         }
10363     } else {
10364         val = 0xffff;
10365     }
10366 
10367     REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10368     REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10369 
10370     /* make sure that interrupts are indeed enabled from here on */
10371     mb();
10372 }
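
/*
 * Worked example (illustrative only) for the leading/trailing edge masks
 * programmed by the two interrupt enable routines above: in MF mode with
 * SC_VN(sc) == 2,
 *   val = 0xee0f | (1 << (2 + 4)) = 0xee4f
 * and when this function is the PMF the NIG/GPIO3 attention bits are added:
 *   val |= 0x1100  ->  0xff4f
 * In non-MF mode the mask is simply 0xffff.
 */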
10373 
10374 static void
10375 bxe_int_enable(struct bxe_softc *sc)
10376 {
10377     if (sc->devinfo.int_block == INT_BLOCK_HC) {
10378         bxe_hc_int_enable(sc);
10379     } else {
10380         bxe_igu_int_enable(sc);
10381     }
10382 }
10383 
10384 static void
10385 bxe_hc_int_disable(struct bxe_softc *sc)
10386 {
10387     int port = SC_PORT(sc);
10388     uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10389     uint32_t val = REG_RD(sc, addr);
10390 
10391     /*
10392      * In E1 we must use only PCI configuration space to disable MSI/MSIX
10393      * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the
10394      * HC block.
10395      */
10396     if (CHIP_IS_E1(sc)) {
10397         /*
10398          * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask
10399          * register to keep the HC from sending interrupts after we exit.
10400          */
10401         REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10402 
10403         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10404                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
10405                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10406     } else {
10407         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10408                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10409                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
10410                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10411     }
10412 
10413     BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10414 
10415     /* flush all outstanding writes */
10416     mb();
10417 
10418     REG_WR(sc, addr, val);
10419     if (REG_RD(sc, addr) != val) {
10420         BLOGE(sc, "proper val not read from HC IGU!\n");
10421     }
10422 }
10423 
10424 static void
10425 bxe_igu_int_disable(struct bxe_softc *sc)
10426 {
10427     uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10428 
10429     val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10430              IGU_PF_CONF_INT_LINE_EN |
10431              IGU_PF_CONF_ATTN_BIT_EN);
10432 
10433     BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10434 
10435     /* flush all outstanding writes */
10436     mb();
10437 
10438     REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10439     if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10440         BLOGE(sc, "proper val not read from IGU!\n");
10441     }
10442 }
10443 
10444 static void
10445 bxe_int_disable(struct bxe_softc *sc)
10446 {
10447     if (sc->devinfo.int_block == INT_BLOCK_HC) {
10448         bxe_hc_int_disable(sc);
10449     } else {
10450         bxe_igu_int_disable(sc);
10451     }
10452 }
10453 
10454 static void
10455 bxe_nic_init(struct bxe_softc *sc,
10456              int              load_code)
10457 {
10458     int i;
10459 
10460     for (i = 0; i < sc->num_queues; i++) {
10461         bxe_init_eth_fp(sc, i);
10462     }
10463 
10464     rmb(); /* ensure status block indices were read */
10465 
10466     bxe_init_rx_rings(sc);
10467     bxe_init_tx_rings(sc);
10468 
10469     if (IS_VF(sc)) {
10470         return;
10471     }
10472 
10473     /* initialize MOD_ABS interrupts */
10474     elink_init_mod_abs_int(sc, &sc->link_vars,
10475                            sc->devinfo.chip_id,
10476                            sc->devinfo.shmem_base,
10477                            sc->devinfo.shmem2_base,
10478                            SC_PORT(sc));
10479 
10480     bxe_init_def_sb(sc);
10481     bxe_update_dsb_idx(sc);
10482     bxe_init_sp_ring(sc);
10483     bxe_init_eq_ring(sc);
10484     bxe_init_internal(sc, load_code);
10485     bxe_pf_init(sc);
10486     bxe_stats_init(sc);
10487 
10488     /* flush all before enabling interrupts */
10489     mb();
10490 
10491     bxe_int_enable(sc);
10492 
10493     /* check for SPIO5 */
10494     bxe_attn_int_deasserted0(sc,
10495                              REG_RD(sc,
10496                                     (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10497                                      SC_PORT(sc)*4)) &
10498                              AEU_INPUTS_ATTN_BITS_SPIO5);
10499 }
10500 
10501 static inline void
10502 bxe_init_objs(struct bxe_softc *sc)
10503 {
10504     /* mcast rules must be added to tx if tx switching is enabled */
10505     ecore_obj_type o_type =
10506         (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10507                                          ECORE_OBJ_TYPE_RX;
10508 
10509     /* RX_MODE controlling object */
10510     ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10511 
10512     /* multicast configuration controlling object */
10513     ecore_init_mcast_obj(sc,
10514                          &sc->mcast_obj,
10515                          sc->fp[0].cl_id,
10516                          sc->fp[0].index,
10517                          SC_FUNC(sc),
10518                          SC_FUNC(sc),
10519                          BXE_SP(sc, mcast_rdata),
10520                          BXE_SP_MAPPING(sc, mcast_rdata),
10521                          ECORE_FILTER_MCAST_PENDING,
10522                          &sc->sp_state,
10523                          o_type);
10524 
10525     /* Setup CAM credit pools */
10526     ecore_init_mac_credit_pool(sc,
10527                                &sc->macs_pool,
10528                                SC_FUNC(sc),
10529                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10530                                                  VNICS_PER_PATH(sc));
10531 
10532     ecore_init_vlan_credit_pool(sc,
10533                                 &sc->vlans_pool,
10534                                 SC_ABS_FUNC(sc) >> 1,
10535                                 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10536                                                   VNICS_PER_PATH(sc));
10537 
10538     /* RSS configuration object */
10539     ecore_init_rss_config_obj(sc,
10540                               &sc->rss_conf_obj,
10541                               sc->fp[0].cl_id,
10542                               sc->fp[0].index,
10543                               SC_FUNC(sc),
10544                               SC_FUNC(sc),
10545                               BXE_SP(sc, rss_rdata),
10546                               BXE_SP_MAPPING(sc, rss_rdata),
10547                               ECORE_FILTER_RSS_CONF_PENDING,
10548                               &sc->sp_state, ECORE_OBJ_TYPE_RX);
10549 }
10550 
10551 /*
10552  * Initialize the function. This must be called before sending CLIENT_SETUP
10553  * for the first client.
10554  */
10555 static inline int
10556 bxe_func_start(struct bxe_softc *sc)
10557 {
10558     struct ecore_func_state_params func_params = { NULL };
10559     struct ecore_func_start_params *start_params = &func_params.params.start;
10560 
10561     /* Prepare parameters for function state transitions */
10562     bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10563 
10564     func_params.f_obj = &sc->func_obj;
10565     func_params.cmd = ECORE_F_CMD_START;
10566 
10567     /* Function parameters */
10568     start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10569     start_params->sd_vlan_tag = OVLAN(sc);
10570 
10571     if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10572         start_params->network_cos_mode = STATIC_COS;
10573     } else { /* CHIP_IS_E1X */
10574         start_params->network_cos_mode = FW_WRR;
10575     }
10576 
10577     //start_params->gre_tunnel_mode = 0;
10578     //start_params->gre_tunnel_rss  = 0;
10579 
10580     return (ecore_func_state_change(sc, &func_params));
10581 }
10582 
10583 static int
10584 bxe_set_power_state(struct bxe_softc *sc,
10585                     uint8_t          state)
10586 {
10587     uint16_t pmcsr;
10588 
10589     /* If there is no power capability, silently succeed */
10590     if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10591         BLOGW(sc, "No power capability\n");
10592         return (0);
10593     }
10594 
10595     pmcsr = pci_read_config(sc->dev,
10596                             (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10597                             2);
10598 
10599     switch (state) {
10600     case PCI_PM_D0:
10601         pci_write_config(sc->dev,
10602                          (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10603                          ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10604 
10605         if (pmcsr & PCIM_PSTAT_DMASK) {
10606             /* delay required during transition out of D3hot */
10607             DELAY(20000);
10608         }
10609 
10610         break;
10611 
10612     case PCI_PM_D3hot:
10613         /* XXX if there are other clients above don't shut down the power */
10614 
10615         /* don't shut down the power for emulation and FPGA */
10616         if (CHIP_REV_IS_SLOW(sc)) {
10617             return (0);
10618         }
10619 
10620         pmcsr &= ~PCIM_PSTAT_DMASK;
10621         pmcsr |= PCIM_PSTAT_D3;
10622 
10623         if (sc->wol) {
10624             pmcsr |= PCIM_PSTAT_PMEENABLE;
10625         }
10626 
10627         pci_write_config(sc->dev,
10628                          (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10629                          pmcsr, 4);
10630 
10631         /*
10632          * No more memory access after this point until device is brought back
10633          * to D0 state.
10634          */
10635         break;
10636 
10637     default:
10638         BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10639             state, pmcsr);
10640         return (-1);
10641     }
10642 
10643     return (0);
10644 }
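
/*
 * Note (descriptive only): only PCI_PM_D0 and PCI_PM_D3hot are handled
 * above; any other state logs an error and returns -1. In the D3hot case
 * sc->wol decides whether PME generation is left enabled, and emulation/FPGA
 * parts are never powered down.
 */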
10645 
10646 
10647 /* returns TRUE if the lock was successfully acquired */
10648 static uint8_t
10649 bxe_trylock_hw_lock(struct bxe_softc *sc,
10650                     uint32_t         resource)
10651 {
10652     uint32_t lock_status;
10653     uint32_t resource_bit = (1 << resource);
10654     int func = SC_FUNC(sc);
10655     uint32_t hw_lock_control_reg;
10656 
10657     BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10658 
10659     /* Validating that the resource is within range */
10660     if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10661         BLOGD(sc, DBG_LOAD,
10662               "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10663               resource, HW_LOCK_MAX_RESOURCE_VALUE);
10664         return (FALSE);
10665     }
10666 
10667     if (func <= 5) {
10668         hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10669     } else {
10670         hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10671     }
10672 
10673     /* try to acquire the lock */
10674     REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10675     lock_status = REG_RD(sc, hw_lock_control_reg);
10676     if (lock_status & resource_bit) {
10677         return (TRUE);
10678     }
10679 
10680     BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10681         "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10682         lock_status, resource_bit);
10683 
10684     return (FALSE);
10685 }
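
/*
 * Usage sketch (illustrative only): the recovery-leader helpers below call
 * this as
 *   bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc))
 * which resolves to HW_LOCK_RESOURCE_RECOVERY_LEADER_0 or _1 depending on
 * the engine (path) this function belongs to.
 */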
10686 
10687 /*
10688  * Get the recovery leader resource id according to the engine this function
10689  * belongs to. Currently only 2 engines are supported.
10690  */
10691 static int
10692 bxe_get_leader_lock_resource(struct bxe_softc *sc)
10693 {
10694     if (SC_PATH(sc)) {
10695         return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10696     } else {
10697         return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10698     }
10699 }
10700 
10701 /* try to acquire a leader lock for current engine */
10702 static uint8_t
10703 bxe_trylock_leader_lock(struct bxe_softc *sc)
10704 {
10705     return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10706 }
10707 
10708 static int
10709 bxe_release_leader_lock(struct bxe_softc *sc)
10710 {
10711     return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10712 }
10713 
10714 /* close gates #2, #3 and #4 */
10715 static void
10716 bxe_set_234_gates(struct bxe_softc *sc,
10717                   uint8_t          close)
10718 {
10719     uint32_t val;
10720 
10721     /* gates #2 and #4a are closed/opened for "not E1" only */
10722     if (!CHIP_IS_E1(sc)) {
10723         /* #4 */
10724         REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10725         /* #2 */
10726         REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10727     }
10728 
10729     /* #3 */
10730     if (CHIP_IS_E1x(sc)) {
10731         /* prevent interrupts from HC on both ports */
10732         val = REG_RD(sc, HC_REG_CONFIG_1);
10733         REG_WR(sc, HC_REG_CONFIG_1,
10734                (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10735                (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10736 
10737         val = REG_RD(sc, HC_REG_CONFIG_0);
10738         REG_WR(sc, HC_REG_CONFIG_0,
10739                (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10740                (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10741     } else {
10742         /* Prevent incoming interrupts in IGU */
10743         val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10744 
10745         REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10746                (!close) ?
10747                (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10748                (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10749     }
10750 
10751     BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10752           close ? "closing" : "opening");
10753 
10754     wmb();
10755 }
10756 
10757 /* poll for the pending writes bit; it should get cleared in no more than 1s */
10758 static int
10759 bxe_er_poll_igu_vq(struct bxe_softc *sc)
10760 {
10761     uint32_t cnt = 1000;
10762     uint32_t pend_bits = 0;
10763 
10764     do {
10765         pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10766 
10767         if (pend_bits == 0) {
10768             break;
10769         }
10770 
10771         DELAY(1000);
10772     } while (--cnt > 0);
10773 
10774     if (cnt == 0) {
10775         BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10776         return (-1);
10777     }
10778 
10779     return (0);
10780 }
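
/*
 * Timing note (illustrative): the poll loop above performs at most 1000
 * reads with a 1000 usec delay between them, i.e. roughly one second in
 * total, matching the "no more than 1s" expectation stated above.
 */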
10781 
10782 #define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10783 
10784 static void
10785 bxe_clp_reset_prep(struct bxe_softc *sc,
10786                    uint32_t         *magic_val)
10787 {
10788     /* Do some magic... */
10789     uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10790     *magic_val = val & SHARED_MF_CLP_MAGIC;
10791     MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10792 }
10793 
10794 /* restore the value of the 'magic' bit */
10795 static void
10796 bxe_clp_reset_done(struct bxe_softc *sc,
10797                    uint32_t         magic_val)
10798 {
10799     /* Restore the 'magic' bit value... */
10800     uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10801     MFCFG_WR(sc, shared_mf_config.clp_mb,
10802               (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10803 }
10804 
10805 /* prepare for MCP reset, takes care of CLP configurations */
10806 static void
10807 bxe_reset_mcp_prep(struct bxe_softc *sc,
10808                    uint32_t         *magic_val)
10809 {
10810     uint32_t shmem;
10811     uint32_t validity_offset;
10812 
10813     /* set `magic' bit in order to save MF config */
10814     if (!CHIP_IS_E1(sc)) {
10815         bxe_clp_reset_prep(sc, magic_val);
10816     }
10817 
10818     /* get shmem offset */
10819     shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10820     validity_offset =
10821         offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10822 
10823     /* Clear validity map flags */
10824     if (shmem > 0) {
10825         REG_WR(sc, shmem + validity_offset, 0);
10826     }
10827 }
10828 
10829 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10830 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
10831 
10832 static void
10833 bxe_mcp_wait_one(struct bxe_softc *sc)
10834 {
10835     /* special handling for emulation and FPGA (10 times longer) */
10836     if (CHIP_REV_IS_SLOW(sc)) {
10837         DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10838     } else {
10839         DELAY((MCP_ONE_TIMEOUT) * 1000);
10840     }
10841 }
10842 
10843 /* initialize shmem_base and wait for the validity signature to appear */
10844 static int
10845 bxe_init_shmem(struct bxe_softc *sc)
10846 {
10847     int cnt = 0;
10848     uint32_t val = 0;
10849 
10850     do {
10851         sc->devinfo.shmem_base     =
10852         sc->link_params.shmem_base =
10853             REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10854 
10855         if (sc->devinfo.shmem_base) {
10856             val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10857             if (val & SHR_MEM_VALIDITY_MB)
10858                 return (0);
10859         }
10860 
10861         bxe_mcp_wait_one(sc);
10862 
10863     } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10864 
10865     BLOGE(sc, "BAD MCP validity signature\n");
10866 
10867     return (-1);
10868 }
10869 
10870 static int
10871 bxe_reset_mcp_comp(struct bxe_softc *sc,
10872                    uint32_t         magic_val)
10873 {
10874     int rc = bxe_init_shmem(sc);
10875 
10876     /* Restore the `magic' bit value */
10877     if (!CHIP_IS_E1(sc)) {
10878         bxe_clp_reset_done(sc, magic_val);
10879     }
10880 
10881     return (rc);
10882 }
10883 
10884 static void
10885 bxe_pxp_prep(struct bxe_softc *sc)
10886 {
10887     if (!CHIP_IS_E1(sc)) {
10888         REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10889         REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10890         wmb();
10891     }
10892 }
10893 
10894 /*
10895  * Reset the whole chip except for:
10896  *      - PCIE core
10897  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10898  *      - IGU
10899  *      - MISC (including AEU)
10900  *      - GRC
10901  *      - RBCN, RBCP
10902  */
10903 static void
10904 bxe_process_kill_chip_reset(struct bxe_softc *sc,
10905                             uint8_t          global)
10906 {
10907     uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10908     uint32_t global_bits2, stay_reset2;
10909 
10910     /*
10911      * Bits that have to be set in reset_mask2 if we want to reset 'global'
10912      * (per chip) blocks.
10913      */
10914     global_bits2 =
10915         MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10916         MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10917 
10918     /*
10919      * Don't reset the following blocks.
10920      * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10921      *            reset, as in 4 port device they might still be owned
10922      *            reset, as in a 4-port device they might still be owned
10923      */
10924     not_reset_mask1 =
10925         MISC_REGISTERS_RESET_REG_1_RST_HC |
10926         MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10927         MISC_REGISTERS_RESET_REG_1_RST_PXP;
10928 
10929     not_reset_mask2 =
10930         MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10931         MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10932         MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10933         MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10934         MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10935         MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10936         MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10937         MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10938         MISC_REGISTERS_RESET_REG_2_RST_ATC |
10939         MISC_REGISTERS_RESET_REG_2_PGLC |
10940         MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10941         MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10942         MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10943         MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10944         MISC_REGISTERS_RESET_REG_2_UMAC0 |
10945         MISC_REGISTERS_RESET_REG_2_UMAC1;
10946 
10947     /*
10948      * Keep the following blocks in reset:
10949      *  - all xxMACs are handled by the elink code.
10950      */
10951     stay_reset2 =
10952         MISC_REGISTERS_RESET_REG_2_XMAC |
10953         MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10954 
10955     /* Full reset masks according to the chip */
10956     reset_mask1 = 0xffffffff;
10957 
10958     if (CHIP_IS_E1(sc))
10959         reset_mask2 = 0xffff;
10960     else if (CHIP_IS_E1H(sc))
10961         reset_mask2 = 0x1ffff;
10962     else if (CHIP_IS_E2(sc))
10963         reset_mask2 = 0xfffff;
10964     else /* CHIP_IS_E3 */
10965         reset_mask2 = 0x3ffffff;
10966 
10967     /* Don't reset global blocks unless we need to */
10968     if (!global)
10969         reset_mask2 &= ~global_bits2;
10970 
10971     /*
10972      * In case of attention in the QM, we need to reset PXP
10973      * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10974      * because otherwise QM reset would release 'close the gates' shortly
10975      * before resetting the PXP, then the PSWRQ would send a write
10976      * request to PGLUE. Then when PXP is reset, PGLUE would try to
10977      * read the payload data from PSWWR, but PSWWR would not
10978      * respond. The write queue in PGLUE would get stuck and DMAE commands
10979      * would not return. Therefore it's important to reset the second
10980      * reset register (containing the
10981      * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10982      * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10983      * bit).
10984      */
10985     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10986            reset_mask2 & (~not_reset_mask2));
10987 
10988     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10989            reset_mask1 & (~not_reset_mask1));
10990 
10991     mb();
10992     wmb();
10993 
10994     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10995            reset_mask2 & (~stay_reset2));
10996 
10997     mb();
10998     wmb();
10999 
11000     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
11001     wmb();
11002 }
11003 
11004 static int
11005 bxe_process_kill(struct bxe_softc *sc,
11006                  uint8_t          global)
11007 {
11008     int cnt = 1000;
11009     uint32_t val = 0;
11010     uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
11011     uint32_t tags_63_32 = 0;
11012 
11013     /* Empty the Tetris buffer, wait for 1s */
11014     do {
11015         sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
11016         blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11017         port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11018         port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11019         pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11020         if (CHIP_IS_E3(sc)) {
11021             tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11022         }
11023 
11024         if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11025             ((port_is_idle_0 & 0x1) == 0x1) &&
11026             ((port_is_idle_1 & 0x1) == 0x1) &&
11027             (pgl_exp_rom2 == 0xffffffff) &&
11028             (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11029             break;
11030         DELAY(1000);
11031     } while (cnt-- > 0);
11032 
11033     if (cnt <= 0) {
11034         BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11035                   "are still outstanding read requests after 1s! "
11036                   "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11037                   "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11038               sr_cnt, blk_cnt, port_is_idle_0,
11039               port_is_idle_1, pgl_exp_rom2);
11040         return (-1);
11041     }
11042 
11043     mb();
11044 
11045     /* Close gates #2, #3 and #4 */
11046     bxe_set_234_gates(sc, TRUE);
11047 
11048     /* Poll for IGU VQs for 57712 and newer chips */
11049     if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11050         return (-1);
11051     }
11052 
11053     /* XXX indicate that "process kill" is in progress to MCP */
11054 
11055     /* clear "unprepared" bit */
11056     REG_WR(sc, MISC_REG_UNPREPARED, 0);
11057     mb();
11058 
11059     /* Make sure all is written to the chip before the reset */
11060     wmb();
11061 
11062     /*
11063      * Wait for 1ms to empty GLUE and PCI-E core queues,
11064      * PSWHST, GRC and PSWRD Tetris buffer.
11065      */
11066     DELAY(1000);
11067 
11068     /* Prepare for chip reset: */
11069     /* MCP */
11070     if (global) {
11071         bxe_reset_mcp_prep(sc, &val);
11072     }
11073 
11074     /* PXP */
11075     bxe_pxp_prep(sc);
11076     mb();
11077 
11078     /* reset the chip */
11079     bxe_process_kill_chip_reset(sc, global);
11080     mb();
11081 
11082     /* clear errors in PGB */
11083     if (!CHIP_IS_E1(sc))
11084         REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11085 
11086     /* Recover after reset: */
11087     /* MCP */
11088     if (global && bxe_reset_mcp_comp(sc, val)) {
11089         return (-1);
11090     }
11091 
11092     /* XXX add resetting the NO_MCP mode DB here */
11093 
11094     /* Open the gates #2, #3 and #4 */
11095     bxe_set_234_gates(sc, FALSE);
11096 
11097     /* XXX
11098      * IGU/AEU preparation bring back the AEU/IGU to a reset state
11099      * re-enable attentions
11100      */
11101 
11102     return (0);
11103 }
11104 
11105 static int
11106 bxe_leader_reset(struct bxe_softc *sc)
11107 {
11108     int rc = 0;
11109     uint8_t global = bxe_reset_is_global(sc);
11110     uint32_t load_code;
11111 
11112     /*
11113      * If we are not going to reset the MCP, load a "fake" driver to reset
11114      * the HW while the driver is the owner of the HW.
11115      */
11116     if (!global && !BXE_NOMCP(sc)) {
11117         load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11118                                    DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11119         if (!load_code) {
11120             BLOGE(sc, "MCP response failure, aborting\n");
11121             rc = -1;
11122             goto exit_leader_reset;
11123         }
11124 
11125         if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11126             (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11127             BLOGE(sc, "MCP unexpected response, aborting\n");
11128             rc = -1;
11129             goto exit_leader_reset2;
11130         }
11131 
11132         load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11133         if (!load_code) {
11134             BLOGE(sc, "MCP response failure, aborting\n");
11135             rc = -1;
11136             goto exit_leader_reset2;
11137         }
11138     }
11139 
11140     /* try to recover after the failure */
11141     if (bxe_process_kill(sc, global)) {
11142         BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11143         rc = -1;
11144         goto exit_leader_reset2;
11145     }
11146 
11147     /*
11148      * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11149      * state.
11150      */
11151     bxe_set_reset_done(sc);
11152     if (global) {
11153         bxe_clear_reset_global(sc);
11154     }
11155 
11156 exit_leader_reset2:
11157 
11158     /* unload "fake driver" if it was loaded */
11159     if (!global && !BXE_NOMCP(sc)) {
11160         bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11161         bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11162     }
11163 
11164 exit_leader_reset:
11165 
11166     sc->is_leader = 0;
11167     bxe_release_leader_lock(sc);
11168 
11169     mb();
11170     return (rc);
11171 }
11172 
11173 /*
11174  * prepare INIT transition, parameters configured:
11175  *   - HC configuration
11176  *   - Queue's CDU context
11177  */
11178 static void
11179 bxe_pf_q_prep_init(struct bxe_softc               *sc,
11180                    struct bxe_fastpath            *fp,
11181                    struct ecore_queue_init_params *init_params)
11182 {
11183     uint8_t cos;
11184     int cxt_index, cxt_offset;
11185 
11186     bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11187     bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11188 
11189     bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11190     bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11191 
11192     /* HC rate */
11193     init_params->rx.hc_rate =
11194         sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11195     init_params->tx.hc_rate =
11196         sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
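    /*
     * Worked example with illustrative values (assuming hc_rx_ticks and
     * hc_tx_ticks are coalescing periods in microseconds): hc_rx_ticks == 25
     * gives hc_rate = 1000000 / 25 = 40000 status block updates per second
     * at most; a ticks value of 0 simply yields an hc_rate of 0.
     */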
11197 
11198     /* FW SB ID */
11199     init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11200 
11201     /* CQ index among the SB indices */
11202     init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11203     init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11204 
11205     /* set maximum number of COSs supported by this queue */
11206     init_params->max_cos = sc->max_cos;
11207 
11208     BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11209           fp->index, init_params->max_cos);
11210 
11211     /* set the context pointers queue object */
11212     for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11213         /* XXX change index/cid here if ever support multiple tx CoS */
11214         /* fp->txdata[cos]->cid */
11215         cxt_index = fp->index / ILT_PAGE_CIDS;
11216         cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11217         init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11218     }
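    /*
     * Illustrative arithmetic for the cxt_index/cxt_offset calculation above
     * (hypothetical page size): if ILT_PAGE_CIDS were 16 and fp->index were
     * 21, then cxt_index = 21 / 16 = 1 and cxt_offset = 21 - (1 * 16) = 5,
     * i.e. the 6th context entry of the 2nd ILT page.
     */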
11219 }
11220 
11221 /* set flags that are common to both the Tx-only and the normal connections */
11222 static unsigned long
11223 bxe_get_common_flags(struct bxe_softc    *sc,
11224                      struct bxe_fastpath *fp,
11225                      uint8_t             zero_stats)
11226 {
11227     unsigned long flags = 0;
11228 
11229     /* PF driver will always initialize the Queue to an ACTIVE state */
11230     bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11231 
11232     /*
11233      * tx only connections collect statistics (on the same index as the
11234      * parent connection). The statistics are zeroed when the parent
11235      * connection is initialized.
11236      */
11237 
11238     bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11239     if (zero_stats) {
11240         bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11241     }
11242 
11243     /*
11244      * tx only connections can support tx-switching, though their
11245      * CoS-ness doesn't survive the loopback
11246      */
11247     if (sc->flags & BXE_TX_SWITCHING) {
11248         bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11249     }
11250 
11251     bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11252 
11253     return (flags);
11254 }
11255 
11256 static unsigned long
11257 bxe_get_q_flags(struct bxe_softc    *sc,
11258                 struct bxe_fastpath *fp,
11259                 uint8_t             leading)
11260 {
11261     unsigned long flags = 0;
11262 
11263     if (IS_MF_SD(sc)) {
11264         bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11265     }
11266 
11267     if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11268         bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11269         bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11270     }
11271 
11272     if (leading) {
11273         bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11274         bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11275     }
11276 
11277     bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11278 
11279     /* merge with common flags */
11280     return (flags | bxe_get_common_flags(sc, fp, TRUE));
11281 }
11282 
11283 static void
11284 bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11285                       struct bxe_fastpath               *fp,
11286                       struct ecore_general_setup_params *gen_init,
11287                       uint8_t                           cos)
11288 {
11289     gen_init->stat_id = bxe_stats_id(fp);
11290     gen_init->spcl_id = fp->cl_id;
11291     gen_init->mtu = sc->mtu;
11292     gen_init->cos = cos;
11293 }
11294 
11295 static void
11296 bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11297                  struct bxe_fastpath           *fp,
11298                  struct rxq_pause_params       *pause,
11299                  struct ecore_rxq_setup_params *rxq_init)
11300 {
11301     uint8_t max_sge = 0;
11302     uint16_t sge_sz = 0;
11303     uint16_t tpa_agg_size = 0;
11304 
11305     pause->sge_th_lo = SGE_TH_LO(sc);
11306     pause->sge_th_hi = SGE_TH_HI(sc);
11307 
11308     /* validate the SGE ring has enough entries to cross the high threshold */
11309     if (sc->dropless_fc &&
11310             (pause->sge_th_hi + FW_PREFETCH_CNT) >
11311             (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11312         BLOGW(sc, "sge ring threshold limit\n");
11313     }
11314 
11315     /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11316     tpa_agg_size = (2 * sc->mtu);
11317     if (tpa_agg_size < sc->max_aggregation_size) {
11318         tpa_agg_size = sc->max_aggregation_size;
11319     }
11320 
11321     max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11322     max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11323                    (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
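    /*
     * Illustrative arithmetic (assuming a 4KB SGE page and PAGES_PER_SGE ==
     * 1): with an MTU of 9000, SGE_PAGE_ALIGN(9000) is 12288, so max_sge =
     * 12288 >> 12 = 3; the subsequent rounding to a PAGES_PER_SGE boundary
     * then leaves max_sge at 3.
     */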
11324     sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11325 
11326     /* pause - not for e1 */
11327     if (!CHIP_IS_E1(sc)) {
11328         pause->bd_th_lo = BD_TH_LO(sc);
11329         pause->bd_th_hi = BD_TH_HI(sc);
11330 
11331         pause->rcq_th_lo = RCQ_TH_LO(sc);
11332         pause->rcq_th_hi = RCQ_TH_HI(sc);
11333 
11334         /* validate rings have enough entries to cross high thresholds */
11335         if (sc->dropless_fc &&
11336             pause->bd_th_hi + FW_PREFETCH_CNT >
11337             sc->rx_ring_size) {
11338             BLOGW(sc, "rx bd ring threshold limit\n");
11339         }
11340 
11341         if (sc->dropless_fc &&
11342             pause->rcq_th_hi + FW_PREFETCH_CNT >
11343             RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11344             BLOGW(sc, "rcq ring threshold limit\n");
11345         }
11346 
11347         pause->pri_map = 1;
11348     }
11349 
11350     /* rxq setup */
11351     rxq_init->dscr_map   = fp->rx_dma.paddr;
11352     rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11353     rxq_init->rcq_map    = fp->rcq_dma.paddr;
11354     rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11355 
11356     /*
11357      * This should be the maximum number of data bytes that may be
11358      * placed on the BD (not including padding).
11359      */
11360     rxq_init->buf_sz = (fp->rx_buf_size -
11361                         IP_HEADER_ALIGNMENT_PADDING);
11362 
11363     rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11364     rxq_init->tpa_agg_sz      = tpa_agg_size;
11365     rxq_init->sge_buf_sz      = sge_sz;
11366     rxq_init->max_sges_pkt    = max_sge;
11367     rxq_init->rss_engine_id   = SC_FUNC(sc);
11368     rxq_init->mcast_engine_id = SC_FUNC(sc);
11369 
11370     /*
11371      * Maximum number of simultaneous TPA aggregations for this Queue.
11372      * For PF Clients it should be the maximum available number.
11373      * VF driver(s) may want to define it to a smaller value.
11374      */
11375     rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11376 
11377     rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11378     rxq_init->fw_sb_id = fp->fw_sb_id;
11379 
11380     rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11381 
11382     /*
11383      * configure silent vlan removal
11384      * if multi function mode is afex, then mask default vlan
11385      */
11386     if (IS_MF_AFEX(sc)) {
11387         rxq_init->silent_removal_value =
11388             sc->devinfo.mf_info.afex_def_vlan_tag;
11389         rxq_init->silent_removal_mask = EVL_VLID_MASK;
11390     }
11391 }
11392 
11393 static void
11394 bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11395                  struct bxe_fastpath           *fp,
11396                  struct ecore_txq_setup_params *txq_init,
11397                  uint8_t                       cos)
11398 {
11399     /*
11400      * XXX If multiple CoS is ever supported then each fastpath structure
11401      * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11402      * fp->txdata[cos]->tx_dma.paddr;
11403      */
11404     txq_init->dscr_map     = fp->tx_dma.paddr;
11405     txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11406     txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11407     txq_init->fw_sb_id     = fp->fw_sb_id;
11408 
11409     /*
11410      * set the TSS leading client id for TX classification to the
11411      * leading RSS client id
11412      */
11413     txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11414 }
11415 
11416 /*
11417  * This function performs 2 steps in a queue state machine:
11418  *   1) RESET->INIT
11419  *   2) INIT->SETUP
11420  */
11421 static int
11422 bxe_setup_queue(struct bxe_softc    *sc,
11423                 struct bxe_fastpath *fp,
11424                 uint8_t             leading)
11425 {
11426     struct ecore_queue_state_params q_params = { NULL };
11427     struct ecore_queue_setup_params *setup_params =
11428                         &q_params.params.setup;
11429     int rc;
11430 
11431     BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11432 
11433     bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11434 
11435     q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11436 
11437     /* we want to wait for completion in this context */
11438     bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11439 
11440     /* prepare the INIT parameters */
11441     bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11442 
11443     /* Set the command */
11444     q_params.cmd = ECORE_Q_CMD_INIT;
11445 
11446     /* Change the state to INIT */
11447     rc = ecore_queue_state_change(sc, &q_params);
11448     if (rc) {
11449         BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11450         return (rc);
11451     }
11452 
11453     BLOGD(sc, DBG_LOAD, "init complete\n");
11454 
11455     /* now move the Queue to the SETUP state */
11456     memset(setup_params, 0, sizeof(*setup_params));
11457 
11458     /* set Queue flags */
11459     setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11460 
11461     /* set general SETUP parameters */
11462     bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11463                           FIRST_TX_COS_INDEX);
11464 
11465     bxe_pf_rx_q_prep(sc, fp,
11466                      &setup_params->pause_params,
11467                      &setup_params->rxq_params);
11468 
11469     bxe_pf_tx_q_prep(sc, fp,
11470                      &setup_params->txq_params,
11471                      FIRST_TX_COS_INDEX);
11472 
11473     /* Set the command */
11474     q_params.cmd = ECORE_Q_CMD_SETUP;
11475 
11476     /* change the state to SETUP */
11477     rc = ecore_queue_state_change(sc, &q_params);
11478     if (rc) {
11479         BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11480         return (rc);
11481     }
11482 
11483     return (rc);
11484 }
11485 
11486 static int
11487 bxe_setup_leading(struct bxe_softc *sc)
11488 {
11489     return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11490 }
11491 
11492 static int
11493 bxe_config_rss_pf(struct bxe_softc            *sc,
11494                   struct ecore_rss_config_obj *rss_obj,
11495                   uint8_t                     config_hash)
11496 {
11497     struct ecore_config_rss_params params = { NULL };
11498     int i;
11499 
11500     /*
11501      * Although RSS is meaningless when there is a single HW queue, we
11502      * still need it enabled in order to have the HW Rx hash generated.
11503      */
11504 
11505     params.rss_obj = rss_obj;
11506 
11507     bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11508 
11509     bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11510 
11511     /* RSS configuration */
11512     bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11513     bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11514     bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11515     bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11516     if (rss_obj->udp_rss_v4) {
11517         bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11518     }
11519     if (rss_obj->udp_rss_v6) {
11520         bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11521     }
11522 
11523     /* Hash bits */
11524     params.rss_result_mask = MULTI_MASK;
11525 
11526     memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11527 
11528     if (config_hash) {
11529         /* RSS keys */
11530         for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11531             params.rss_key[i] = arc4random();
11532         }
11533 
11534         bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11535     }
11536 
11537     return (ecore_config_rss(sc, &params));
11538 }
11539 
11540 static int
11541 bxe_config_rss_eth(struct bxe_softc *sc,
11542                    uint8_t          config_hash)
11543 {
11544     return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11545 }
11546 
11547 static int
11548 bxe_init_rss_pf(struct bxe_softc *sc)
11549 {
11550     uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11551     int i;
11552 
11553     /*
11554      * Prepare the initial contents of the indirection table if
11555      * RSS is enabled
11556      */
11557     for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11558         sc->rss_conf_obj.ind_table[i] =
11559             (sc->fp->cl_id + (i % num_eth_queues));
11560     }
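    /*
     * Illustrative result (hypothetical values): with num_eth_queues == 4
     * and a base cl_id of 16, the indirection table becomes
     * { 16, 17, 18, 19, 16, 17, ... }, spreading RSS buckets round-robin
     * across the ethernet queues.
     */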
11561 
11562     if (sc->udp_rss) {
11563         sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11564     }
11565 
11566     /*
11567      * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11568      * per-port, so if explicit configuration is needed, do it only
11569      * for a PMF.
11570      *
11571      * For 57712 and newer it's a per-function configuration.
11572      */
11573     return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11574 }
11575 
11576 static int
11577 bxe_set_mac_one(struct bxe_softc          *sc,
11578                 uint8_t                   *mac,
11579                 struct ecore_vlan_mac_obj *obj,
11580                 uint8_t                   set,
11581                 int                       mac_type,
11582                 unsigned long             *ramrod_flags)
11583 {
11584     struct ecore_vlan_mac_ramrod_params ramrod_param;
11585     int rc;
11586 
11587     memset(&ramrod_param, 0, sizeof(ramrod_param));
11588 
11589     /* fill in general parameters */
11590     ramrod_param.vlan_mac_obj = obj;
11591     ramrod_param.ramrod_flags = *ramrod_flags;
11592 
11593     /* fill a user request section if needed */
11594     if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11595         memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11596 
11597         bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11598 
11599         /* Set the command: ADD or DEL */
11600         ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11601                                             ECORE_VLAN_MAC_DEL;
11602     }
11603 
11604     rc = ecore_config_vlan_mac(sc, &ramrod_param);
11605 
11606     if (rc == ECORE_EXISTS) {
11607         BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11608         /* do not treat adding same MAC as error */
11609         rc = 0;
11610     } else if (rc < 0) {
11611         BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11612     }
11613 
11614     return (rc);
11615 }
11616 
11617 static int
11618 bxe_set_eth_mac(struct bxe_softc *sc,
11619                 uint8_t          set)
11620 {
11621     unsigned long ramrod_flags = 0;
11622 
11623     BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11624 
11625     bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11626 
11627     /* Eth MAC is set on RSS leading client (fp[0]) */
11628     return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11629                             &sc->sp_objs->mac_obj,
11630                             set, ECORE_ETH_MAC, &ramrod_flags));
11631 }
11632 
11633 static int
11634 bxe_get_cur_phy_idx(struct bxe_softc *sc)
11635 {
11636     uint32_t sel_phy_idx = 0;
11637 
11638     if (sc->link_params.num_phys <= 1) {
11639         return (ELINK_INT_PHY);
11640     }
11641 
11642     if (sc->link_vars.link_up) {
11643         sel_phy_idx = ELINK_EXT_PHY1;
11644         /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11645         if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11646             (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11647              ELINK_SUPPORTED_FIBRE))
11648             sel_phy_idx = ELINK_EXT_PHY2;
11649     } else {
11650         switch (elink_phy_selection(&sc->link_params)) {
11651         case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11652         case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11653         case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11654                sel_phy_idx = ELINK_EXT_PHY1;
11655                break;
11656         case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11657         case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11658                sel_phy_idx = ELINK_EXT_PHY2;
11659                break;
11660         }
11661     }
11662 
11663     return (sel_phy_idx);
11664 }
11665 
11666 static int
11667 bxe_get_link_cfg_idx(struct bxe_softc *sc)
11668 {
11669     uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11670 
11671     /*
11672      * The selected (activated) PHY is always the one after swapping (in
11673      * case PHY swapping is enabled), so when swapping is enabled we need
11674      * to reverse the configuration.
11675      */
11676 
11677     if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11678         if (sel_phy_idx == ELINK_EXT_PHY1)
11679             sel_phy_idx = ELINK_EXT_PHY2;
11680         else if (sel_phy_idx == ELINK_EXT_PHY2)
11681             sel_phy_idx = ELINK_EXT_PHY1;
11682     }
11683 
11684     return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11685 }
11686 
11687 static void
11688 bxe_set_requested_fc(struct bxe_softc *sc)
11689 {
11690     /*
11691      * Initialize link parameters structure variables.
11692      * It is recommended to turn off RX FC for jumbo frames
11693      * for better performance.
11694      */
11695     if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11696         sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11697     } else {
11698         sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11699     }
11700 }
11701 
11702 static void
11703 bxe_calc_fc_adv(struct bxe_softc *sc)
11704 {
11705     uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11706 
11707 
11708     sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11709                                            ADVERTISED_Pause);
11710 
11711     switch (sc->link_vars.ieee_fc &
11712             MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11713 
11714     case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11715         sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11716                                           ADVERTISED_Pause);
11717         break;
11718 
11719     case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11720         sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11721         break;
11722 
11723     default:
11724         break;
11725 
11726     }
11727 }
11728 
11729 static uint16_t
11730 bxe_get_mf_speed(struct bxe_softc *sc)
11731 {
11732     uint16_t line_speed = sc->link_vars.line_speed;
11733     if (IS_MF(sc)) {
11734         uint16_t maxCfg =
11735             bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11736 
11737         /* calculate the current MAX line speed limit for the MF devices */
11738         if (IS_MF_SI(sc)) {
11739             line_speed = (line_speed * maxCfg) / 100;
11740         } else { /* SD mode */
11741             uint16_t vn_max_rate = maxCfg * 100;
11742 
11743             if (vn_max_rate < line_speed) {
11744                 line_speed = vn_max_rate;
11745             }
11746         }
11747     }
11748 
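    /*
     * Illustrative arithmetic (hypothetical maxCfg): on a 10000 Mbps link
     * with maxCfg == 25, SI mode yields (10000 * 25) / 100 = 2500 Mbps,
     * while SD mode caps the speed at vn_max_rate = 25 * 100 = 2500 Mbps.
     */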
11749     return (line_speed);
11750 }
11751 
11752 static void
11753 bxe_fill_report_data(struct bxe_softc            *sc,
11754                      struct bxe_link_report_data *data)
11755 {
11756     uint16_t line_speed = bxe_get_mf_speed(sc);
11757 
11758     memset(data, 0, sizeof(*data));
11759 
11760     /* fill the report data with the effective line speed */
11761     data->line_speed = line_speed;
11762 
11763     /* Link is down */
11764     if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11765         bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11766     }
11767 
11768     /* Full DUPLEX */
11769     if (sc->link_vars.duplex == DUPLEX_FULL) {
11770         bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11771     }
11772 
11773     /* Rx Flow Control is ON */
11774     if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11775         bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11776     }
11777 
11778     /* Tx Flow Control is ON */
11779     if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11780         bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11781     }
11782 }
11783 
11784 /* report link status to OS, should be called under phy_lock */
11785 static void
11786 bxe_link_report_locked(struct bxe_softc *sc)
11787 {
11788     struct bxe_link_report_data cur_data;
11789 
11790     /* reread mf_cfg */
11791     if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11792         bxe_read_mf_cfg(sc);
11793     }
11794 
11795     /* Read the current link report info */
11796     bxe_fill_report_data(sc, &cur_data);
11797 
11798     /* Don't report link down or exactly the same link status twice */
11799     if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11800         (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11801                       &sc->last_reported_link.link_report_flags) &&
11802          bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11803                       &cur_data.link_report_flags))) {
11804         return;
11805     }
11806 
11807     ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
11808                    cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11809     sc->link_cnt++;
11810 
11811     ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11812     /* report new link params and remember the state for the next time */
11813     memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11814 
11815     if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11816                      &cur_data.link_report_flags)) {
11817         if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11818     } else {
11819         const char *duplex;
11820         const char *flow;
11821 
11822         if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11823                                    &cur_data.link_report_flags)) {
11824             duplex = "full";
11825             ELINK_DEBUG_P0(sc, "link set to full duplex\n");
11826         } else {
11827             duplex = "half";
11828             ELINK_DEBUG_P0(sc, "link set to half duplex\n");
11829         }
11830 
11831         /*
11832          * Handle the FC at the end so that only these flags can possibly
11833          * be set. This way we can easily check whether any flow
11834          * control is enabled at all.
11835          */
11836         if (cur_data.link_report_flags) {
11837             if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11838                              &cur_data.link_report_flags) &&
11839                 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11840                              &cur_data.link_report_flags)) {
11841                 flow = "ON - receive & transmit";
11842             } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11843                                     &cur_data.link_report_flags) &&
11844                        !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11845                                      &cur_data.link_report_flags)) {
11846                 flow = "ON - receive";
11847             } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11848                                      &cur_data.link_report_flags) &&
11849                        bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11850                                     &cur_data.link_report_flags)) {
11851                 flow = "ON - transmit";
11852             } else {
11853                 flow = "none"; /* possible? */
11854             }
11855         } else {
11856             flow = "none";
11857         }
11858 
11859         if_link_state_change(sc->ifp, LINK_STATE_UP);
11860         BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11861               cur_data.line_speed, duplex, flow);
11862     }
11863 }
11864 
11865 static void
11866 bxe_link_report(struct bxe_softc *sc)
11867 {
11868     bxe_acquire_phy_lock(sc);
11869     bxe_link_report_locked(sc);
11870     bxe_release_phy_lock(sc);
11871 }
11872 
11873 static void
11874 bxe_link_status_update(struct bxe_softc *sc)
11875 {
11876     if (sc->state != BXE_STATE_OPEN) {
11877         return;
11878     }
11879 
11880     if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11881         elink_link_status_update(&sc->link_params, &sc->link_vars);
11882     } else {
11883         sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11884                                   ELINK_SUPPORTED_10baseT_Full |
11885                                   ELINK_SUPPORTED_100baseT_Half |
11886                                   ELINK_SUPPORTED_100baseT_Full |
11887                                   ELINK_SUPPORTED_1000baseT_Full |
11888                                   ELINK_SUPPORTED_2500baseX_Full |
11889                                   ELINK_SUPPORTED_10000baseT_Full |
11890                                   ELINK_SUPPORTED_TP |
11891                                   ELINK_SUPPORTED_FIBRE |
11892                                   ELINK_SUPPORTED_Autoneg |
11893                                   ELINK_SUPPORTED_Pause |
11894                                   ELINK_SUPPORTED_Asym_Pause);
11895         sc->port.advertising[0] = sc->port.supported[0];
11896 
11897         sc->link_params.sc                = sc;
11898         sc->link_params.port              = SC_PORT(sc);
11899         sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11900         sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11901         sc->link_params.req_line_speed[0] = SPEED_10000;
11902         sc->link_params.speed_cap_mask[0] = 0x7f0000;
11903         sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11904 
11905         if (CHIP_REV_IS_FPGA(sc)) {
11906             sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11907             sc->link_vars.line_speed  = ELINK_SPEED_1000;
11908             sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11909                                          LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11910         } else {
11911             sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11912             sc->link_vars.line_speed  = ELINK_SPEED_10000;
11913             sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11914                                          LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11915         }
11916 
11917         sc->link_vars.link_up = 1;
11918 
11919         sc->link_vars.duplex    = DUPLEX_FULL;
11920         sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11921 
11922         if (IS_PF(sc)) {
11923             REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11924             bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11925             bxe_link_report(sc);
11926         }
11927     }
11928 
11929     if (IS_PF(sc)) {
11930         if (sc->link_vars.link_up) {
11931             bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11932         } else {
11933             bxe_stats_handle(sc, STATS_EVENT_STOP);
11934         }
11935         bxe_link_report(sc);
11936     } else {
11937         bxe_link_report(sc);
11938         bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11939     }
11940 }
11941 
11942 static int
11943 bxe_initial_phy_init(struct bxe_softc *sc,
11944                      int              load_mode)
11945 {
11946     int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11947     uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11948     struct elink_params *lp = &sc->link_params;
11949 
11950     bxe_set_requested_fc(sc);
11951 
11952     if (CHIP_REV_IS_SLOW(sc)) {
11953         uint32_t bond = CHIP_BOND_ID(sc);
11954         uint32_t feat = 0;
11955 
11956         if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11957             feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11958         } else if (bond & 0x4) {
11959             if (CHIP_IS_E3(sc)) {
11960                 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11961             } else {
11962                 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11963             }
11964         } else if (bond & 0x8) {
11965             if (CHIP_IS_E3(sc)) {
11966                 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11967             } else {
11968                 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11969             }
11970         }
11971 
11972         /* disable EMAC for E3 and above */
11973         if (bond & 0x2) {
11974             feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11975         }
11976 
11977         sc->link_params.feature_config_flags |= feat;
11978     }
11979 
11980     bxe_acquire_phy_lock(sc);
11981 
11982     if (load_mode == LOAD_DIAG) {
11983         lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11984         /* Prefer doing PHY loopback at 10G speed, if possible */
11985         if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11986             if (lp->speed_cap_mask[cfg_idx] &
11987                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11988                 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11989             } else {
11990                 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11991             }
11992         }
11993     }
11994 
11995     if (load_mode == LOAD_LOOPBACK_EXT) {
11996         lp->loopback_mode = ELINK_LOOPBACK_EXT;
11997     }
11998 
11999     rc = elink_phy_init(&sc->link_params, &sc->link_vars);
12000 
12001     bxe_release_phy_lock(sc);
12002 
12003     bxe_calc_fc_adv(sc);
12004 
12005     if (sc->link_vars.link_up) {
12006         bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12007         bxe_link_report(sc);
12008     }
12009 
12010     if (!CHIP_REV_IS_SLOW(sc)) {
12011         bxe_periodic_start(sc);
12012     }
12013 
12014     sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12015     return (rc);
12016 }
12017 
12018 static u_int
12019 bxe_push_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12020 {
12021     struct ecore_mcast_list_elem *mc_mac = arg;
12022 
12023     mc_mac += cnt;
12024     mc_mac->mac = (uint8_t *)LLADDR(sdl);
12025 
12026     return (1);
12027 }
12028 
12029 static int
12030 bxe_init_mcast_macs_list(struct bxe_softc                 *sc,
12031                          struct ecore_mcast_ramrod_params *p)
12032 {
12033     if_t ifp = sc->ifp;
12034     int mc_count;
12035     struct ecore_mcast_list_elem *mc_mac;
12036 
12037     ECORE_LIST_INIT(&p->mcast_list);
12038     p->mcast_list_len = 0;
12039 
12040     /* XXXGL: multicast count may change later */
12041     mc_count = if_llmaddr_count(ifp);
12042 
12043     if (!mc_count) {
12044         return (0);
12045     }
12046 
12047     mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12048                     (M_NOWAIT | M_ZERO));
12049     if (!mc_mac) {
12050         BLOGE(sc, "Failed to allocate temp mcast list\n");
12051         return (-1);
12052     }
12053     bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12054     if_foreach_llmaddr(ifp, bxe_push_maddr, mc_mac);
12055 
12056     for (int i = 0; i < mc_count; i ++) {
12057         ECORE_LIST_PUSH_TAIL(&mc_mac[i].link, &p->mcast_list);
12058         BLOGD(sc, DBG_LOAD,
12059               "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n",
12060               mc_mac[i].mac[0], mc_mac[i].mac[1], mc_mac[i].mac[2],
12061               mc_mac[i].mac[3], mc_mac[i].mac[4], mc_mac[i].mac[5],
12062               mc_count);
12063     }
12064 
12065     p->mcast_list_len = mc_count;
12066 
12067     return (0);
12068 }
12069 
12070 static void
12071 bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12072 {
12073     struct ecore_mcast_list_elem *mc_mac =
12074         ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12075                                struct ecore_mcast_list_elem,
12076                                link);
12077 
12078     if (mc_mac) {
12079         /* only a single free as all mc_macs are in the same heap array */
12080         free(mc_mac, M_DEVBUF);
12081     }
12082 }
12083 static int
12084 bxe_set_mc_list(struct bxe_softc *sc)
12085 {
12086     struct ecore_mcast_ramrod_params rparam = { NULL };
12087     int rc = 0;
12088 
12089     rparam.mcast_obj = &sc->mcast_obj;
12090 
12091     BXE_MCAST_LOCK(sc);
12092 
12093     /* first, clear all configured multicast MACs */
12094     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12095     if (rc < 0) {
12096         BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12097         /* Manual backport parts of FreeBSD upstream r284470. */
12098         BXE_MCAST_UNLOCK(sc);
12099         return (rc);
12100     }
12101 
12102     /* configure a new MACs list */
12103     rc = bxe_init_mcast_macs_list(sc, &rparam);
12104     if (rc) {
12105         BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12106         BXE_MCAST_UNLOCK(sc);
12107         return (rc);
12108     }
12109 
12110     /* Now add the new MACs */
12111     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12112     if (rc < 0) {
12113         BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12114     }
12115 
12116     bxe_free_mcast_macs_list(&rparam);
12117 
12118     BXE_MCAST_UNLOCK(sc);
12119 
12120     return (rc);
12121 }
12122 
12123 struct bxe_set_addr_ctx {
12124    struct bxe_softc *sc;
12125    unsigned long ramrod_flags;
12126    int rc;
12127 };
12128 
12129 static u_int
12130 bxe_set_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12131 {
12132     struct bxe_set_addr_ctx *ctx = arg;
12133     struct ecore_vlan_mac_obj *mac_obj = &ctx->sc->sp_objs->mac_obj;
12134     int rc;
12135 
12136     if (ctx->rc < 0)
12137 	return (0);
12138 
12139     rc = bxe_set_mac_one(ctx->sc, (uint8_t *)LLADDR(sdl), mac_obj, TRUE,
12140                          ECORE_UC_LIST_MAC, &ctx->ramrod_flags);
12141 
12142     /* do not treat adding same MAC as an error */
12143     if (rc == -EEXIST)
12144 	BLOGD(ctx->sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12145     else if (rc < 0) {
12146             BLOGE(ctx->sc, "Failed to schedule ADD operations (%d)\n", rc);
12147             ctx->rc = rc;
12148     }
12149 
12150     return (1);
12151 }
12152 
12153 static int
12154 bxe_set_uc_list(struct bxe_softc *sc)
12155 {
12156     if_t ifp = sc->ifp;
12157     struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12158     struct bxe_set_addr_ctx ctx = { sc, 0, 0 };
12159     int rc;
12160 
12161     /* first schedule a cleanup of the old configuration */
12162     rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12163     if (rc < 0) {
12164         BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12165         return (rc);
12166     }
12167 
12168     if_foreach_lladdr(ifp, bxe_set_addr, &ctx);
12169     if (ctx.rc < 0)
12170 	return (ctx.rc);
12171 
12172     /* Execute the pending commands */
12173     bit_set(&ctx.ramrod_flags, RAMROD_CONT);
12174     return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12175                             ECORE_UC_LIST_MAC, &ctx.ramrod_flags));
12176 }
12177 
12178 static void
12179 bxe_set_rx_mode(struct bxe_softc *sc)
12180 {
12181     if_t ifp = sc->ifp;
12182     uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12183 
12184     if (sc->state != BXE_STATE_OPEN) {
12185         BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12186         return;
12187     }
12188 
12189     BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12190 
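    /*
     * Receive filtering policy implemented below: IFF_PROMISC selects
     * promiscuous mode; IFF_ALLMULTI (or more than BXE_MAX_MULTICAST
     * addresses on E1 devices) selects all-multicast; otherwise the
     * multicast and unicast MAC lists are programmed, falling back to
     * all-multicast or promiscuous mode respectively if that fails.
     */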
12191     if (if_getflags(ifp) & IFF_PROMISC) {
12192         rx_mode = BXE_RX_MODE_PROMISC;
12193     } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12194                ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12195                 CHIP_IS_E1(sc))) {
12196         rx_mode = BXE_RX_MODE_ALLMULTI;
12197     } else {
12198         if (IS_PF(sc)) {
12199             /* some multicasts */
12200             if (bxe_set_mc_list(sc) < 0) {
12201                 rx_mode = BXE_RX_MODE_ALLMULTI;
12202             }
12203             if (bxe_set_uc_list(sc) < 0) {
12204                 rx_mode = BXE_RX_MODE_PROMISC;
12205             }
12206         }
12207     }
12208 
12209     sc->rx_mode = rx_mode;
12210 
12211     /* schedule the rx_mode command */
12212     if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12213         BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12214         bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12215         return;
12216     }
12217 
12218     if (IS_PF(sc)) {
12219         bxe_set_storm_rx_mode(sc);
12220     }
12221 }
12222 
12223 
12224 /* update flags in shmem */
12225 static void
12226 bxe_update_drv_flags(struct bxe_softc *sc,
12227                      uint32_t         flags,
12228                      uint32_t         set)
12229 {
12230     uint32_t drv_flags;
12231 
12232     if (SHMEM2_HAS(sc, drv_flags)) {
12233         bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12234         drv_flags = SHMEM2_RD(sc, drv_flags);
12235 
12236         if (set) {
12237             SET_FLAGS(drv_flags, flags);
12238         } else {
12239             RESET_FLAGS(drv_flags, flags);
12240         }
12241 
12242         SHMEM2_WR(sc, drv_flags, drv_flags);
12243         BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12244 
12245         bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12246     }
12247 }
12248 
12249 /* periodic timer callout routine, only runs when the interface is up */
12250 
12251 static void
12252 bxe_periodic_callout_func(void *xsc)
12253 {
12254     struct bxe_softc *sc = (struct bxe_softc *)xsc;
12255     int i;
12256 
12257     if (!BXE_CORE_TRYLOCK(sc)) {
12258         /* just bail and try again next time */
12259 
12260         if ((sc->state == BXE_STATE_OPEN) &&
12261             (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12262             /* schedule the next periodic callout */
12263             callout_reset(&sc->periodic_callout, hz,
12264                           bxe_periodic_callout_func, sc);
12265         }
12266 
12267         return;
12268     }
12269 
12270     if ((sc->state != BXE_STATE_OPEN) ||
12271         (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12272         BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12273         BXE_CORE_UNLOCK(sc);
12274         return;
12275     }
12276 
12277 
12278     /* Check for TX timeouts on any fastpath. */
12279     FOR_EACH_QUEUE(sc, i) {
12280         if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12281             /* Ruh-Roh, chip was reset! */
12282             break;
12283         }
12284     }
12285 
12286     if (!CHIP_REV_IS_SLOW(sc)) {
12287         /*
12288          * This barrier is needed to ensure the ordering between the writing
12289          * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12290          * the reading here.
12291          */
12292         mb();
12293         if (sc->port.pmf) {
12294             bxe_acquire_phy_lock(sc);
12295             elink_period_func(&sc->link_params, &sc->link_vars);
12296             bxe_release_phy_lock(sc);
12297         }
12298     }
12299 
12300     if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12301         int mb_idx = SC_FW_MB_IDX(sc);
12302         uint32_t drv_pulse;
12303         uint32_t mcp_pulse;
12304 
12305         ++sc->fw_drv_pulse_wr_seq;
12306         sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12307 
12308         drv_pulse = sc->fw_drv_pulse_wr_seq;
12309         bxe_drv_pulse(sc);
12310 
12311         mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12312                      MCP_PULSE_SEQ_MASK);
12313 
12314         /*
12315          * The delta between driver pulse and mcp response should
12316          * be 1 (before mcp response) or 0 (after mcp response).
12317          */
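        /*
         * Illustrative example (hypothetical sequence values): with
         * drv_pulse == 0x0012, an mcp_pulse of 0x0012 (delta 0) or 0x0011
         * (delta 1) is healthy; anything else means a heartbeat was lost
         * and is logged below. MCP_PULSE_SEQ_MASK handles sequence number
         * wrap-around.
         */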
12318         if ((drv_pulse != mcp_pulse) &&
12319             (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12320             /* someone lost a heartbeat... */
12321             BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12322                   drv_pulse, mcp_pulse);
12323         }
12324     }
12325 
12326     /* state is BXE_STATE_OPEN */
12327     bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12328 
12329     BXE_CORE_UNLOCK(sc);
12330 
12331     if ((sc->state == BXE_STATE_OPEN) &&
12332         (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12333         /* schedule the next periodic callout */
12334         callout_reset(&sc->periodic_callout, hz,
12335                       bxe_periodic_callout_func, sc);
12336     }
12337 }
12338 
12339 static void
12340 bxe_periodic_start(struct bxe_softc *sc)
12341 {
12342     atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12343     callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12344 }
12345 
12346 static void
12347 bxe_periodic_stop(struct bxe_softc *sc)
12348 {
12349     atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12350     callout_drain(&sc->periodic_callout);
12351 }
12352 
12353 void
12354 bxe_parity_recover(struct bxe_softc *sc)
12355 {
12356     uint8_t global = FALSE;
12357     uint32_t error_recovered, error_unrecovered;
12358     bool is_parity;
12359 
12360 
12361     if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12362         (sc->state == BXE_STATE_ERROR)) {
12363         BLOGE(sc, "RECOVERY failed, "
12364             "stack notified driver is NOT running! "
12365             "Please reboot/power cycle the system.\n");
12366         return;
12367     }
12368 
12369     while (1) {
12370         BLOGD(sc, DBG_SP,
12371            "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n",
12372             __func__, sc, sc->state, sc->recovery_state, sc->error_status);
12373 
12374         switch(sc->recovery_state) {
12375 
12376         case BXE_RECOVERY_INIT:
12377             is_parity = bxe_chk_parity_attn(sc, &global, FALSE);
12378 
12379             if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ||
12380                 (sc->error_status & BXE_ERR_MCP_ASSERT) ||
12381                 (sc->error_status & BXE_ERR_GLOBAL)) {
12382 
12383                 BXE_CORE_LOCK(sc);
12384                 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12385                     bxe_periodic_stop(sc);
12386                 }
12387                 bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
12388                 sc->state = BXE_STATE_ERROR;
12389                 sc->recovery_state = BXE_RECOVERY_FAILED;
12390                 BLOGE(sc, " No Recovery tried for error 0x%x"
12391                     " stack notified driver is NOT running!"
12392                     " Please reboot/power cycle the system.\n",
12393                     sc->error_status);
12394                 BXE_CORE_UNLOCK(sc);
12395                 return;
12396             }
12397 
12398 
12399             /* Try to get a LEADER_LOCK HW lock */
12400             if (bxe_trylock_leader_lock(sc)) {
12401 
12402                 bxe_set_reset_in_progress(sc);
12403                 /*
12404                  * Check if there is a global attention and if
12405                  * there was a global attention, set the global
12406                  * reset bit.
12407                  */
12408                 if (global) {
12409                     bxe_set_reset_global(sc);
12410                 }
12411                 sc->is_leader = 1;
12412             }
12413 
12414             /* Stop the periodic callout if the interface is still marked as running */
12415 
12416             if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12417                 bxe_periodic_stop(sc);
12418             }
12419 
12420             BXE_CORE_LOCK(sc);
12421             bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
12422             sc->recovery_state = BXE_RECOVERY_WAIT;
12423             BXE_CORE_UNLOCK(sc);
12424 
12425             /*
12426              * Ensure "is_leader", MCP command sequence and
12427              * "recovery_state" update values are seen on other
12428              * CPUs.
12429              */
12430             mb();
12431             break;
12432         case BXE_RECOVERY_WAIT:
12433 
12434             if (sc->is_leader) {
12435                 int other_engine = SC_PATH(sc) ? 0 : 1;
12436                 bool other_load_status =
12437                     bxe_get_load_status(sc, other_engine);
12438                 bool load_status =
12439                     bxe_get_load_status(sc, SC_PATH(sc));
12440                 global = bxe_reset_is_global(sc);
12441 
12442                 /*
12443                  * In case of a parity in a global block, let
12444                  * the first leader that performs a
12445                  * leader_reset() reset the global blocks in
12446                  * order to clear global attentions. Otherwise
12447                  * the gates will remain closed for that
12448                  * engine.
12449                  */
12450                 if (load_status ||
12451                     (global && other_load_status)) {
12452                     /*
12453                      * Wait until all other functions get
12454                      * down.
12455                      */
12456                     taskqueue_enqueue_timeout(taskqueue_thread,
12457                         &sc->sp_err_timeout_task, hz/10);
12458                     return;
12459                 } else {
12460                     /*
12461                      * If all other functions got down
12462                      * try to bring the chip back to
12463                      * normal. In any case it's an exit
12464                      * point for a leader.
12465                      */
12466                     if (bxe_leader_reset(sc)) {
12467                         BLOGE(sc, "RECOVERY failed, "
12468                             "stack notified driver is NOT running!\n");
12469                         sc->recovery_state = BXE_RECOVERY_FAILED;
12470                         sc->state = BXE_STATE_ERROR;
12471                         mb();
12472                         return;
12473                     }
12474 
12475                     /*
12476                      * If we are here, it means that the
12477                      * leader has succeeded and doesn't
12478                      * want to be a leader any more. Try
12479                      * to continue as a non-leader.
12480                      */
12481                     break;
12482                 }
12483 
12484             } else { /* non-leader */
12485                 if (!bxe_reset_is_done(sc, SC_PATH(sc))) {
12486                     /*
12487                      * Try to get the LEADER_LOCK HW lock,
12488                      * since a former leader may have been
12489                      * unloaded by the user or may have
12490                      * released leadership for another
12491                      * reason.
12492                      */
12493                     if (bxe_trylock_leader_lock(sc)) {
12494                         /*
12495                          * I'm a leader now! Restart a
12496                          * switch case.
12497                          */
12498                         sc->is_leader = 1;
12499                         break;
12500                     }
12501 
12502                     taskqueue_enqueue_timeout(taskqueue_thread,
12503                         &sc->sp_err_timeout_task, hz/10);
12504                     return;
12505 
12506                 } else {
12507                     /*
12508                      * If there was a global attention, wait
12509                      * for it to be cleared.
12510                      */
12511                     if (bxe_reset_is_global(sc)) {
12512                         taskqueue_enqueue_timeout(taskqueue_thread,
12513                             &sc->sp_err_timeout_task, hz/10);
12514                         return;
12515                      }
12516 
12517                      error_recovered =
12518                          sc->eth_stats.recoverable_error;
12519                      error_unrecovered =
12520                          sc->eth_stats.unrecoverable_error;
12521                      BXE_CORE_LOCK(sc);
12522                      sc->recovery_state =
12523                          BXE_RECOVERY_NIC_LOADING;
12524                      if (bxe_nic_load(sc, LOAD_NORMAL)) {
12525                          error_unrecovered++;
12526                          sc->recovery_state = BXE_RECOVERY_FAILED;
12527                          sc->state = BXE_STATE_ERROR;
12528                          BLOGE(sc, "Recovery is NOT successful, "
12529                             "state=0x%x recovery_state=0x%x error=%x\n",
12530                             sc->state, sc->recovery_state, sc->error_status);
12531                          sc->error_status = 0;
12532                      } else {
12533                          sc->recovery_state =
12534                              BXE_RECOVERY_DONE;
12535                          error_recovered++;
12536                          BLOGI(sc, "Recovery is successful from errors %x,"
12537                             " state=0x%x"
12538                             " recovery_state=0x%x\n", sc->error_status,
12539                             sc->state, sc->recovery_state);
12540                          mb();
12541                      }
12542                      sc->error_status = 0;
12543                      BXE_CORE_UNLOCK(sc);
12544                      sc->eth_stats.recoverable_error =
12545                          error_recovered;
12546                      sc->eth_stats.unrecoverable_error =
12547                          error_unrecovered;
12548 
12549                      return;
12550                 }
12551             }
12552         default:
12553             return;
12554         }
12555     }
12556 }
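
/*
 * Entry point for asynchronous error handling. If an error is latched in
 * sc->error_status, disable interrupts (when the device is open), report
 * link down to the stack and kick off the recovery state machine above.
 * A recovery already in the WAIT state is left alone.
 */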
12557 void
12558 bxe_handle_error(struct bxe_softc *sc)
12559 {
12560 
12561     if (sc->recovery_state == BXE_RECOVERY_WAIT) {
12562         return;
12563     }
12564     if (sc->error_status) {
12565         if (sc->state == BXE_STATE_OPEN) {
12566             bxe_int_disable(sc);
12567         }
12568         if (sc->link_vars.link_up) {
12569             if_link_state_change(sc->ifp, LINK_STATE_DOWN);
12570         }
12571         sc->recovery_state = BXE_RECOVERY_INIT;
12572         BLOGI(sc, "bxe%d: Recovery started errors 0x%x recovery state 0x%x\n",
12573             sc->unit, sc->error_status, sc->recovery_state);
12574         bxe_parity_recover(sc);
12575     }
12576 }
12577 
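/*
 * Timeout task used to re-drive error recovery. The recovery paths above
 * re-queue sp_err_timeout_task while waiting for other functions or for a
 * global reset to clear; this handler optionally takes a GRC dump and then
 * re-enters the recovery state machine.
 */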
12578 static void
12579 bxe_sp_err_timeout_task(void *arg, int pending)
12580 {
12581 
12582     struct bxe_softc *sc = (struct bxe_softc *)arg;
12583 
12584     BLOGD(sc, DBG_SP,
12585         "%s state = 0x%x rec state=0x%x error_status=%x\n",
12586         __func__, sc->state, sc->recovery_state, sc->error_status);
12587 
12588     if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12589        (sc->state == BXE_STATE_ERROR)) {
12590         return;
12591     }
12592     /* take a GRC dump now if one was requested and an error is pending */
12593     if ((sc->error_status) && (sc->trigger_grcdump)) {
12594         bxe_grc_dump(sc);
12595     }
12596     if (sc->recovery_state != BXE_RECOVERY_DONE) {
12597         bxe_handle_error(sc);
12598         bxe_parity_recover(sc);
12599     } else if (sc->error_status) {
12600         bxe_handle_error(sc);
12601     }
12602 
12603     return;
12604 }
12605 
12606 /* start the controller */
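/*
 * Rough bring-up sequence (PF path): allocate fastpath buffers and firmware
 * statistics memory, negotiate the load with the MCP (or fall back when no
 * MCP is present), initialize the HW and the per-function objects, attach
 * interrupts, bring up the leading and non-default queues, configure RSS,
 * the MAC and the Rx filter, and finally start the periodic callout. Any
 * failure unwinds through the bxe_nic_load_error* labels below.
 */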
12607 static __noinline int
12608 bxe_nic_load(struct bxe_softc *sc,
12609              int              load_mode)
12610 {
12611     uint32_t val;
12612     int load_code = 0;
12613     int i, rc = 0;
12614 
12615     BXE_CORE_LOCK_ASSERT(sc);
12616 
12617     BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12618 
12619     sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12620 
12621     if (IS_PF(sc)) {
12622         /* must be called before memory allocation and HW init */
12623         bxe_ilt_set_info(sc);
12624     }
12625 
12626     sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12627 
12628     bxe_set_fp_rx_buf_size(sc);
12629 
12630     if (bxe_alloc_fp_buffers(sc) != 0) {
12631         BLOGE(sc, "Failed to allocate fastpath memory\n");
12632         sc->state = BXE_STATE_CLOSED;
12633         rc = ENOMEM;
12634         goto bxe_nic_load_error0;
12635     }
12636 
12637     if (bxe_alloc_mem(sc) != 0) {
12638         sc->state = BXE_STATE_CLOSED;
12639         rc = ENOMEM;
12640         goto bxe_nic_load_error0;
12641     }
12642 
12643     if (bxe_alloc_fw_stats_mem(sc) != 0) {
12644         sc->state = BXE_STATE_CLOSED;
12645         rc = ENOMEM;
12646         goto bxe_nic_load_error0;
12647     }
12648 
12649     if (IS_PF(sc)) {
12650         /* set pf load just before approaching the MCP */
12651         bxe_set_pf_load(sc);
12652 
12653         /* if MCP exists send load request and analyze response */
12654         if (!BXE_NOMCP(sc)) {
12655             /* attempt to load pf */
12656             if (bxe_nic_load_request(sc, &load_code) != 0) {
12657                 sc->state = BXE_STATE_CLOSED;
12658                 rc = ENXIO;
12659                 goto bxe_nic_load_error1;
12660             }
12661 
12662             /* what did the MCP say? */
12663             if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12664                 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12665                 sc->state = BXE_STATE_CLOSED;
12666                 rc = ENXIO;
12667                 goto bxe_nic_load_error2;
12668             }
12669         } else {
12670             BLOGI(sc, "Device has no MCP!\n");
12671             load_code = bxe_nic_load_no_mcp(sc);
12672         }
12673 
12674         /* mark PMF if applicable */
12675         bxe_nic_load_pmf(sc, load_code);
12676 
12677         /* Init Function state controlling object */
12678         bxe_init_func_obj(sc);
12679 
12680         /* Initialize HW */
12681         if (bxe_init_hw(sc, load_code) != 0) {
12682             BLOGE(sc, "HW init failed\n");
12683             bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12684             sc->state = BXE_STATE_CLOSED;
12685             rc = ENXIO;
12686             goto bxe_nic_load_error2;
12687         }
12688     }
12689 
12690     /* set ALWAYS_ALIVE bit in shmem */
12691     sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12692     bxe_drv_pulse(sc);
12693     sc->flags |= BXE_NO_PULSE;
12694 
12695     /* attach interrupts */
12696     if (bxe_interrupt_attach(sc) != 0) {
12697         sc->state = BXE_STATE_CLOSED;
12698         rc = ENXIO;
12699         goto bxe_nic_load_error2;
12700     }
12701 
12702     bxe_nic_init(sc, load_code);
12703 
12704     /* Init per-function objects */
12705     if (IS_PF(sc)) {
12706         bxe_init_objs(sc);
12707         // XXX bxe_iov_nic_init(sc);
12708 
12709         /* set AFEX default VLAN tag to an invalid value */
12710         sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12711         // XXX bxe_nic_load_afex_dcc(sc, load_code);
12712 
12713         sc->state = BXE_STATE_OPENING_WAITING_PORT;
12714         rc = bxe_func_start(sc);
12715         if (rc) {
12716             BLOGE(sc, "Function start failed! rc = %d\n", rc);
12717             bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12718             sc->state = BXE_STATE_ERROR;
12719             goto bxe_nic_load_error3;
12720         }
12721 
12722         /* send LOAD_DONE command to MCP */
12723         if (!BXE_NOMCP(sc)) {
12724             load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12725             if (!load_code) {
12726                 BLOGE(sc, "MCP response failure, aborting\n");
12727                 sc->state = BXE_STATE_ERROR;
12728                 rc = ENXIO;
12729                 goto bxe_nic_load_error3;
12730             }
12731         }
12732 
12733         rc = bxe_setup_leading(sc);
12734         if (rc) {
12735             BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12736             sc->state = BXE_STATE_ERROR;
12737             goto bxe_nic_load_error3;
12738         }
12739 
12740         FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12741             rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12742             if (rc) {
12743                 BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12744                 sc->state = BXE_STATE_ERROR;
12745                 goto bxe_nic_load_error3;
12746             }
12747         }
12748 
12749         rc = bxe_init_rss_pf(sc);
12750         if (rc) {
12751             BLOGE(sc, "PF RSS init failed\n");
12752             sc->state = BXE_STATE_ERROR;
12753             goto bxe_nic_load_error3;
12754         }
12755     }
12756     /* XXX VF */
12757 
12758     /* now that the clients are configured we are ready to work */
12759     sc->state = BXE_STATE_OPEN;
12760 
12761     /* Configure a ucast MAC */
12762     if (IS_PF(sc)) {
12763         rc = bxe_set_eth_mac(sc, TRUE);
12764     }
12765     if (rc) {
12766         BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12767         sc->state = BXE_STATE_ERROR;
12768         goto bxe_nic_load_error3;
12769     }
12770 
12771     if (sc->port.pmf) {
12772         rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12773         if (rc) {
12774             sc->state = BXE_STATE_ERROR;
12775             goto bxe_nic_load_error3;
12776         }
12777     }
12778 
12779     sc->link_params.feature_config_flags &=
12780         ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12781 
12782     /* start fast path */
12783 
12784     /* Initialize Rx filter */
12785     bxe_set_rx_mode(sc);
12786 
12787     /* start the Tx */
12788     switch (/* XXX load_mode */LOAD_OPEN) {
12789     case LOAD_NORMAL:
12790     case LOAD_OPEN:
12791         break;
12792 
12793     case LOAD_DIAG:
12794     case LOAD_LOOPBACK_EXT:
12795         sc->state = BXE_STATE_DIAG;
12796         break;
12797 
12798     default:
12799         break;
12800     }
12801 
12802     if (sc->port.pmf) {
12803         bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12804     } else {
12805         bxe_link_status_update(sc);
12806     }
12807 
12808     /* start the periodic timer callout */
12809     bxe_periodic_start(sc);
12810 
12811     if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12812         /* mark driver is loaded in shmem2 */
12813         val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12814         SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12815                   (val |
12816                    DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12817                    DRV_FLAGS_CAPABILITIES_LOADED_L2));
12818     }
12819 
12820     /* wait for all pending SP commands to complete */
12821     if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12822         BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12823         bxe_periodic_stop(sc);
12824         bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12825         return (ENXIO);
12826     }
12827 
12828     /* Tell the stack the driver is running! */
12829     if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12830 
12831     BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12832 
12833     return (0);
12834 
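/*
 * Error unwind: each label below releases what was set up before the
 * corresponding failure point (interrupts, the MCP load request, the PF
 * load count, and finally the fastpath and firmware statistics memory).
 */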
12835 bxe_nic_load_error3:
12836 
12837     if (IS_PF(sc)) {
12838         bxe_int_disable_sync(sc, 1);
12839 
12840         /* clean out queued objects */
12841         bxe_squeeze_objects(sc);
12842     }
12843 
12844     bxe_interrupt_detach(sc);
12845 
12846 bxe_nic_load_error2:
12847 
12848     if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12849         bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12850         bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12851     }
12852 
12853     sc->port.pmf = 0;
12854 
12855 bxe_nic_load_error1:
12856 
12857     /* clear pf_load status, as it was already set */
12858     if (IS_PF(sc)) {
12859         bxe_clear_pf_load(sc);
12860     }
12861 
12862 bxe_nic_load_error0:
12863 
12864     bxe_free_fw_stats_mem(sc);
12865     bxe_free_fp_buffers(sc);
12866     bxe_free_mem(sc);
12867 
12868     return (rc);
12869 }
12870 
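/*
 * Core-locked init path. Refuses to run after a failed recovery, powers the
 * device to D0 and, for a PF, completes any parity recovery left pending by
 * a previous unload before calling bxe_nic_load(sc, LOAD_OPEN).
 */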
12871 static int
12872 bxe_init_locked(struct bxe_softc *sc)
12873 {
12874     int other_engine = SC_PATH(sc) ? 0 : 1;
12875     uint8_t other_load_status, load_status;
12876     uint8_t global = FALSE;
12877     int rc;
12878 
12879     BXE_CORE_LOCK_ASSERT(sc);
12880 
12881     /* check if the driver is already running */
12882     if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12883         BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12884         return (0);
12885     }
12886 
12887     if ((sc->state == BXE_STATE_ERROR) &&
12888         (sc->recovery_state == BXE_RECOVERY_FAILED)) {
12889         BLOGE(sc, "Initialization not done, "
12890                   "as previous recovery failed. "
12891                   "Reboot/Power-cycle the system\n");
12892         return (ENXIO);
12893     }
12894 
12895 
12896     bxe_set_power_state(sc, PCI_PM_D0);
12897 
12898     /*
12899      * If parity occurred during the unload, then attentions and/or
12900      * RECOVERY_IN_PROGRESS may still be set. If so we want the first function
12901      * loaded on the current engine to complete the recovery. Parity recovery
12902      * is only relevant for PF driver.
12903      */
12904     if (IS_PF(sc)) {
12905         other_load_status = bxe_get_load_status(sc, other_engine);
12906         load_status = bxe_get_load_status(sc, SC_PATH(sc));
12907 
12908         if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12909             bxe_chk_parity_attn(sc, &global, TRUE)) {
12910             do {
12911                 /*
12912                  * If there are attentions and they are in global blocks, set
12913                  * the GLOBAL_RESET bit regardless whether it will be this
12914                  * function that will complete the recovery or not.
12915                  */
12916                 if (global) {
12917                     bxe_set_reset_global(sc);
12918                 }
12919 
12920                 /*
12921                  * Only the first function on the current engine should try
12922                  * to recover in open. In case of attentions in global blocks
12923                  * only the first in the chip should try to recover.
12924                  */
12925                 if ((!load_status && (!global || !other_load_status)) &&
12926                     bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12927                     BLOGI(sc, "Recovered during init\n");
12928                     break;
12929                 }
12930 
12931                 /* recovery has failed... */
12932                 bxe_set_power_state(sc, PCI_PM_D3hot);
12933                 sc->recovery_state = BXE_RECOVERY_FAILED;
12934 
12935                 BLOGE(sc, "Recovery flow hasn't properly "
12936                           "completed yet, try again later. "
12937                           "If you still see this message after a "
12938                           "few retries then power cycle is required.\n");
12939 
12940                 rc = ENXIO;
12941                 goto bxe_init_locked_done;
12942             } while (0);
12943         }
12944     }
12945 
12946     sc->recovery_state = BXE_RECOVERY_DONE;
12947 
12948     rc = bxe_nic_load(sc, LOAD_OPEN);
12949 
12950 bxe_init_locked_done:
12951 
12952     if (rc) {
12953         /* Tell the stack the driver is NOT running! */
12954         BLOGE(sc, "Initialization failed, "
12955                   "stack notified driver is NOT running!\n");
12956         if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12957     }
12958 
12959     return (rc);
12960 }
12961 
12962 static int
12963 bxe_stop_locked(struct bxe_softc *sc)
12964 {
12965     BXE_CORE_LOCK_ASSERT(sc);
12966     return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12967 }
12968 
12969 /*
12970  * Handles controller initialization when called from an unlocked routine.
12971  * ifconfig calls this function.
12972  *
12973  * Returns:
12974  *   void
12975  */
12976 static void
12977 bxe_init(void *xsc)
12978 {
12979     struct bxe_softc *sc = (struct bxe_softc *)xsc;
12980 
12981     BXE_CORE_LOCK(sc);
12982     bxe_init_locked(sc);
12983     BXE_CORE_UNLOCK(sc);
12984 }
12985 
12986 static int
12987 bxe_init_ifnet(struct bxe_softc *sc)
12988 {
12989     if_t ifp;
12990     int capabilities;
12991 
12992     /* ifconfig entrypoint for media type/status reporting */
12993     ifmedia_init(&sc->ifmedia, IFM_IMASK,
12994                  bxe_ifmedia_update,
12995                  bxe_ifmedia_status);
12996 
12997     /* set the default interface values */
12998     ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12999     ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
13000     ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
13001 
13002     sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
13003     BLOGI(sc, "IFMEDIA flags: 0x%x\n", sc->ifmedia.ifm_media);
13004 
13005     /* allocate the ifnet structure */
13006     if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
13007         BLOGE(sc, "Interface allocation failed!\n");
13008         return (ENXIO);
13009     }
13010 
13011     if_setsoftc(ifp, sc);
13012     if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
13013     if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
13014     if_setioctlfn(ifp, bxe_ioctl);
13015     if_setstartfn(ifp, bxe_tx_start);
13016     if_setgetcounterfn(ifp, bxe_get_counter);
13017     if_settransmitfn(ifp, bxe_tx_mq_start);
13018     if_setqflushfn(ifp, bxe_mq_flush);
13019     if_setinitfn(ifp, bxe_init);
13020     if_setmtu(ifp, sc->mtu);
13021     if_sethwassist(ifp, (CSUM_IP      |
13022                         CSUM_TCP      |
13023                         CSUM_UDP      |
13024                         CSUM_TSO      |
13025                         CSUM_TCP_IPV6 |
13026                         CSUM_UDP_IPV6));
13027 
13028     capabilities =
13029         (IFCAP_VLAN_MTU       |
13030          IFCAP_VLAN_HWTAGGING |
13031          IFCAP_VLAN_HWTSO     |
13032          IFCAP_VLAN_HWFILTER  |
13033          IFCAP_VLAN_HWCSUM    |
13034          IFCAP_HWCSUM         |
13035          IFCAP_JUMBO_MTU      |
13036          IFCAP_LRO            |
13037          IFCAP_TSO4           |
13038          IFCAP_TSO6           |
13039          IFCAP_WOL_MAGIC);
13040     if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
13041     if_setcapenable(ifp, if_getcapabilities(ifp));
13042     if_setbaudrate(ifp, IF_Gbps(10));
13043 /* XXX */
13044     if_setsendqlen(ifp, sc->tx_ring_size);
13045     if_setsendqready(ifp);
13046 /* XXX */
13047 
13048     sc->ifp = ifp;
13049 
13050     /* attach to the Ethernet interface list */
13051     ether_ifattach(ifp, sc->link_params.mac_addr);
13052 
13053     /* Attach driver debugnet methods. */
13054     DEBUGNET_SET(ifp, bxe);
13055 
13056     return (0);
13057 }
13058 
13059 static void
13060 bxe_deallocate_bars(struct bxe_softc *sc)
13061 {
13062     int i;
13063 
13064     for (i = 0; i < MAX_BARS; i++) {
13065         if (sc->bar[i].resource != NULL) {
13066             bus_release_resource(sc->dev,
13067                                  SYS_RES_MEMORY,
13068                                  sc->bar[i].rid,
13069                                  sc->bar[i].resource);
13070             BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
13071                   i, PCIR_BAR(i));
13072         }
13073     }
13074 }
13075 
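/*
 * Map the device's memory BARs (0, 2 and 4) and cache the bus tag, bus
 * handle and kernel virtual address of each mapping. BAR0 is mapped
 * shareable; the loop stops at the first BAR that fails to map.
 */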
13076 static int
13077 bxe_allocate_bars(struct bxe_softc *sc)
13078 {
13079     u_int flags;
13080     int i;
13081 
13082     memset(sc->bar, 0, sizeof(sc->bar));
13083 
13084     for (i = 0; i < MAX_BARS; i++) {
13085 
13086         /* memory resources reside at BARs 0, 2, 4 */
13087         /* Run `pciconf -lb` to see mappings */
13088         if ((i != 0) && (i != 2) && (i != 4)) {
13089             continue;
13090         }
13091 
13092         sc->bar[i].rid = PCIR_BAR(i);
13093 
13094         flags = RF_ACTIVE;
13095         if (i == 0) {
13096             flags |= RF_SHAREABLE;
13097         }
13098 
13099         if ((sc->bar[i].resource =
13100              bus_alloc_resource_any(sc->dev,
13101                                     SYS_RES_MEMORY,
13102                                     &sc->bar[i].rid,
13103                                     flags)) == NULL) {
13104             return (ENXIO); /* report the failed BAR mapping to the caller */
13105         }
13106 
13107         sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
13108         sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
13109         sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
13110 
13111         BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
13112               i, PCIR_BAR(i),
13113               rman_get_start(sc->bar[i].resource),
13114               rman_get_end(sc->bar[i].resource),
13115               rman_get_size(sc->bar[i].resource),
13116               (uintmax_t)sc->bar[i].kva);
13117     }
13118 
13119     return (0);
13120 }
13121 
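/*
 * Derive the function numbers from the ME register. As a worked example
 * (values assumed purely for illustration): in 4-port mode a function with
 * pfunc_rel=1 on path_id=1 yields pfunc_abs = (1 << 1) | 1 = 3, while in
 * 2-port mode the same values yield pfunc_abs = 1 | 1 = 1.
 */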
13122 static void
13123 bxe_get_function_num(struct bxe_softc *sc)
13124 {
13125     uint32_t val = 0;
13126 
13127     /*
13128      * Read the ME register to get the function number. The ME register
13129      * holds the relative-function number and absolute-function number. The
13130      * absolute-function number appears only in E2 and above. Before that
13131      * these bits always contained zero, therefore we cannot blindly use them.
13132      */
13133 
13134     val = REG_RD(sc, BAR_ME_REGISTER);
13135 
13136     sc->pfunc_rel =
13137         (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
13138     sc->path_id =
13139         (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
13140 
13141     if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13142         sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
13143     } else {
13144         sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
13145     }
13146 
13147     BLOGD(sc, DBG_LOAD,
13148           "Relative function %d, Absolute function %d, Path %d\n",
13149           sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
13150 }
13151 
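/*
 * Return the shmem offset of the multi-function configuration block. The
 * default (pre-57712) location is the end of the per-function mailboxes;
 * 57712 and later devices advertise the address explicitly in shmem2
 * (mf_cfg_addr), which is used instead when present and valid.
 */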
13152 static uint32_t
13153 bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
13154 {
13155     uint32_t shmem2_size;
13156     uint32_t offset;
13157     uint32_t mf_cfg_offset_value;
13158 
13159     /* Non 57712 */
13160     offset = (SHMEM_RD(sc, func_mb) +
13161               (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
13162 
13163     /* 57712 plus */
13164     if (sc->devinfo.shmem2_base != 0) {
13165         shmem2_size = SHMEM2_RD(sc, size);
13166         if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
13167             mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
13168             if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
13169                 offset = mf_cfg_offset_value;
13170             }
13171         }
13172     }
13173 
13174     return (offset);
13175 }
13176 
13177 static uint32_t
13178 bxe_pcie_capability_read(struct bxe_softc *sc,
13179                          int    reg,
13180                          int    width)
13181 {
13182     int pcie_reg;
13183 
13184     /* ensure PCIe capability is enabled */
13185     if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
13186         if (pcie_reg != 0) {
13187             BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
13188             return (pci_read_config(sc->dev, (pcie_reg + reg), width));
13189         }
13190     }
13191 
13192     BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
13193 
13194     return (0);
13195 }
13196 
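/*
 * Read the Transactions Pending bit from the PCIe Device Status register.
 * A non-zero result means the device still reports outstanding non-posted
 * requests, e.g. while pending transactions are draining after a reset.
 */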
13197 static uint8_t
13198 bxe_is_pcie_pending(struct bxe_softc *sc)
13199 {
13200     return (bxe_pcie_capability_read(sc, PCIER_DEVICE_STA, 2) &
13201             PCIEM_STA_TRANSACTION_PND);
13202 }
13203 
13204 /*
13205  * Walk the PCI capabilities list for the device to find what features are
13206  * supported. These capabilities may be enabled/disabled by firmware so it's
13207  * best to walk the list rather than make assumptions.
13208  */
13209 static void
13210 bxe_probe_pci_caps(struct bxe_softc *sc)
13211 {
13212     uint16_t link_status;
13213     int reg;
13214 
13215     /* check if PCI Power Management is enabled */
13216     if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
13217         if (reg != 0) {
13218             BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
13219 
13220             sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
13221             sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
13222         }
13223     }
13224 
13225     link_status = bxe_pcie_capability_read(sc, PCIER_LINK_STA, 2);
13226 
13227     /* handle PCIe 2.0 workarounds for 57710 */
13228     if (CHIP_IS_E1(sc)) {
13229         /* workaround for 57710 errata E4_57710_27462 */
13230         sc->devinfo.pcie_link_speed =
13231             (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
13232 
13233         /* workaround for 57710 errata E4_57710_27488 */
13234         sc->devinfo.pcie_link_width =
13235             ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13236         if (sc->devinfo.pcie_link_speed > 1) {
13237             sc->devinfo.pcie_link_width =
13238                 ((link_status & PCIEM_LINK_STA_WIDTH) >> 4) >> 1;
13239         }
13240     } else {
13241         sc->devinfo.pcie_link_speed =
13242             (link_status & PCIEM_LINK_STA_SPEED);
13243         sc->devinfo.pcie_link_width =
13244             ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13245     }
13246 
13247     BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
13248           sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
13249 
13250     sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13251     sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13252 
13253     /* check if MSI capability is enabled */
13254     if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13255         if (reg != 0) {
13256             BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13257 
13258             sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13259             sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13260         }
13261     }
13262 
13263     /* check if MSI-X capability is enabled */
13264     if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13265         if (reg != 0) {
13266             BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13267 
13268             sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13269             sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13270         }
13271     }
13272 }
13273 
13274 static int
13275 bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13276 {
13277     struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13278     uint32_t val;
13279 
13280     /* get the outer vlan if we're in switch-dependent mode */
13281 
13282     val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13283     mf_info->ext_id = (uint16_t)val;
13284 
13285     mf_info->multi_vnics_mode = 1;
13286 
13287     if (!VALID_OVLAN(mf_info->ext_id)) {
13288         BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13289         return (1);
13290     }
13291 
13292     /* get the capabilities */
13293     if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13294         FUNC_MF_CFG_PROTOCOL_ISCSI) {
13295         mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13296     } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13297                FUNC_MF_CFG_PROTOCOL_FCOE) {
13298         mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13299     } else {
13300         mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13301     }
13302 
13303     mf_info->vnics_per_port =
13304         (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13305 
13306     return (0);
13307 }
13308 
13309 static uint32_t
13310 bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13311 {
13312     uint32_t retval = 0;
13313     uint32_t val;
13314 
13315     val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13316 
13317     if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13318         if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13319             retval |= MF_PROTO_SUPPORT_ETHERNET;
13320         }
13321         if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13322             retval |= MF_PROTO_SUPPORT_ISCSI;
13323         }
13324         if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13325             retval |= MF_PROTO_SUPPORT_FCOE;
13326         }
13327     }
13328 
13329     return (retval);
13330 }
13331 
13332 static int
13333 bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13334 {
13335     struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13336     uint32_t val;
13337 
13338     /*
13339      * There is no outer vlan if we're in switch-independent mode.
13340      * If the mac is valid then assume multi-function.
13341      */
13342 
13343     val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13344 
13345     mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13346 
13347     mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13348 
13349     mf_info->vnics_per_port =
13350         (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13351 
13352     return (0);
13353 }
13354 
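/*
 * NIV/AFEX mode: extract the VIF id, default VLAN, allowed COS priorities,
 * default COS, VLAN mode and MBA enable from the per-function e1hov_tag,
 * config and afex_config words of the MF configuration block.
 */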
13355 static int
13356 bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13357 {
13358     struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13359     uint32_t e1hov_tag;
13360     uint32_t func_config;
13361     uint32_t niv_config;
13362 
13363     mf_info->multi_vnics_mode = 1;
13364 
13365     e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13366     func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13367     niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13368 
13369     mf_info->ext_id =
13370         (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13371                    FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13372 
13373     mf_info->default_vlan =
13374         (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13375                    FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13376 
13377     mf_info->niv_allowed_priorities =
13378         (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13379                   FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13380 
13381     mf_info->niv_default_cos =
13382         (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13383                   FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13384 
13385     mf_info->afex_vlan_mode =
13386         ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13387          FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13388 
13389     mf_info->niv_mba_enabled =
13390         ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13391          FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13392 
13393     mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13394 
13395     mf_info->vnics_per_port =
13396         (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13397 
13398     return (0);
13399 }
13400 
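/*
 * Sanity check the multi-function configuration read from shmem: the
 * enumerated function must not be hidden, the vnic count must be consistent
 * with multi_vnics_mode, and in switch-dependent mode every non-hidden
 * function needs a valid outer VLAN that is unique on its port.
 */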
13401 static int
13402 bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13403 {
13404     struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13405     uint32_t mf_cfg1;
13406     uint32_t mf_cfg2;
13407     uint32_t ovlan1;
13408     uint32_t ovlan2;
13409     uint8_t i, j;
13410 
13411     BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13412           SC_PORT(sc));
13413     BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13414           mf_info->mf_config[SC_VN(sc)]);
13415     BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13416           mf_info->multi_vnics_mode);
13417     BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13418           mf_info->vnics_per_port);
13419     BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13420           mf_info->ext_id);
13421     BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13422           mf_info->min_bw[0], mf_info->min_bw[1],
13423           mf_info->min_bw[2], mf_info->min_bw[3]);
13424     BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13425           mf_info->max_bw[0], mf_info->max_bw[1],
13426           mf_info->max_bw[2], mf_info->max_bw[3]);
13427     BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13428           sc->mac_addr_str);
13429 
13430     /* various MF mode sanity checks... */
13431 
13432     if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13433         BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13434               SC_PORT(sc));
13435         return (1);
13436     }
13437 
13438     if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13439         BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13440               mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13441         return (1);
13442     }
13443 
13444     if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13445         /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13446         if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13447             BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13448                   SC_VN(sc), OVLAN(sc));
13449             return (1);
13450         }
13451 
13452         if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13453             BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13454                   mf_info->multi_vnics_mode, OVLAN(sc));
13455             return (1);
13456         }
13457 
13458         /*
13459          * Verify all functions are either MF or SF mode. If MF, make
13460          * sure that all non-hidden functions have a valid ovlan. If SF,
13461          * make sure that all non-hidden functions have an invalid ovlan.
13462          */
13463         FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13464             mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13465             ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13466             if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13467                 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13468                  ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13469                 BLOGE(sc, "mf_mode=SD function %d MF config "
13470                           "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13471                       i, mf_info->multi_vnics_mode, ovlan1);
13472                 return (1);
13473             }
13474         }
13475 
13476         /* Verify all funcs on the same port each have a different ovlan. */
13477         FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13478             mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13479             ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13480             /* iterate from the next function on the port to the max func */
13481             for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13482                 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13483                 ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13484                 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13485                     VALID_OVLAN(ovlan1) &&
13486                     !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13487                     VALID_OVLAN(ovlan2) &&
13488                     (ovlan1 == ovlan2)) {
13489                     BLOGE(sc, "mf_mode=SD functions %d and %d "
13490                               "have the same ovlan (%d)\n",
13491                           i, j, ovlan1);
13492                     return (1);
13493                 }
13494             }
13495         }
13496     } /* MULTI_FUNCTION_SD */
13497 
13498     return (0);
13499 }
13500 
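/*
 * Determine the multi-function mode (switch-dependent, switch-independent,
 * AFEX or single-function) from the shared feature configuration, read the
 * mode-specific parameters, collect the per-vnic min/max bandwidth values
 * and finally validate the whole configuration.
 */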
13501 static int
13502 bxe_get_mf_cfg_info(struct bxe_softc *sc)
13503 {
13504     struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13505     uint32_t val, mac_upper;
13506     uint8_t i, vnic;
13507 
13508     /* initialize mf_info defaults */
13509     mf_info->vnics_per_port   = 1;
13510     mf_info->multi_vnics_mode = FALSE;
13511     mf_info->path_has_ovlan   = FALSE;
13512     mf_info->mf_mode          = SINGLE_FUNCTION;
13513 
13514     if (!CHIP_IS_MF_CAP(sc)) {
13515         return (0);
13516     }
13517 
13518     if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13519         BLOGE(sc, "Invalid mf_cfg_base!\n");
13520         return (1);
13521     }
13522 
13523     /* get the MF mode (switch dependent / independent / single-function) */
13524 
13525     val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13526 
13527     switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13528     {
13529     case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13530 
13531         mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13532 
13533         /* check for legal upper mac bytes */
13534         if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13535             mf_info->mf_mode = MULTI_FUNCTION_SI;
13536         } else {
13537             BLOGE(sc, "Invalid config for Switch Independent mode\n");
13538         }
13539 
13540         break;
13541 
13542     case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13543     case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13544 
13545         /* get outer vlan configuration */
13546         val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13547 
13548         if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13549             FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13550             mf_info->mf_mode = MULTI_FUNCTION_SD;
13551         } else {
13552             BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13553         }
13554 
13555         break;
13556 
13557     case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13558 
13559         /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13560         return (0);
13561 
13562     case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13563 
13564         /*
13565          * Mark MF mode as NIV if MCP version includes NPAR-SD support
13566          * and the MAC address is valid.
13567          */
13568         mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13569 
13570         if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13571             (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13572             mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13573         } else {
13574             BLOGE(sc, "Invalid config for AFEX mode\n");
13575         }
13576 
13577         break;
13578 
13579     default:
13580 
13581         BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13582               (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13583 
13584         return (1);
13585     }
13586 
13587     /* set path mf_mode (which could be different than function mf_mode) */
13588     if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13589         mf_info->path_has_ovlan = TRUE;
13590     } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13591         /*
13592          * Decide on the path multi-vnics mode. If we're not in MF mode and
13593          * are in 4-port mode, it is enough to check vnic-0 of the other port
13594          * on the same path.
13595          */
13596         if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13597             uint8_t other_port = !(PORT_ID(sc) & 1);
13598             uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13599 
13600             val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13601 
13602             mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13603         }
13604     }
13605 
13606     if (mf_info->mf_mode == SINGLE_FUNCTION) {
13607         /* invalid MF config */
13608         if (SC_VN(sc) >= 1) {
13609             BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13610             return (1);
13611         }
13612 
13613         return (0);
13614     }
13615 
13616     /* get the MF configuration */
13617     mf_info->mf_config[SC_VN(sc)] =
13618         MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13619 
13620     switch(mf_info->mf_mode)
13621     {
13622     case MULTI_FUNCTION_SD:
13623 
13624         bxe_get_shmem_mf_cfg_info_sd(sc);
13625         break;
13626 
13627     case MULTI_FUNCTION_SI:
13628 
13629         bxe_get_shmem_mf_cfg_info_si(sc);
13630         break;
13631 
13632     case MULTI_FUNCTION_AFEX:
13633 
13634         bxe_get_shmem_mf_cfg_info_niv(sc);
13635         break;
13636 
13637     default:
13638 
13639         BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13640               mf_info->mf_mode);
13641         return (1);
13642     }
13643 
13644     /* get the congestion management parameters */
13645 
13646     vnic = 0;
13647     FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13648         /* get min/max bw */
13649         val = MFCFG_RD(sc, func_mf_config[i].config);
13650         mf_info->min_bw[vnic] =
13651             ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13652         mf_info->max_bw[vnic] =
13653             ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13654         vnic++;
13655     }
13656 
13657     return (bxe_check_valid_mf_cfg(sc));
13658 }
13659 
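/*
 * Pull the per-port configuration out of shmem: hardware config words, LED
 * mode, speed capability masks, lane and link configuration, external PHY
 * info, the multi-function configuration and, finally, the MAC address
 * (taken from the MF config in MF mode, otherwise from the port config).
 */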
13660 static int
13661 bxe_get_shmem_info(struct bxe_softc *sc)
13662 {
13663     int port;
13664     uint32_t mac_hi, mac_lo, val;
13665 
13666     port = SC_PORT(sc);
13667     mac_hi = mac_lo = 0;
13668 
13669     sc->link_params.sc   = sc;
13670     sc->link_params.port = port;
13671 
13672     /* get the hardware config info */
13673     sc->devinfo.hw_config =
13674         SHMEM_RD(sc, dev_info.shared_hw_config.config);
13675     sc->devinfo.hw_config2 =
13676         SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13677 
13678     sc->link_params.hw_led_mode =
13679         ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13680          SHARED_HW_CFG_LED_MODE_SHIFT);
13681 
13682     /* get the port feature config */
13683     sc->port.config =
13684         SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13685 
13686     /* get the link params */
13687     sc->link_params.speed_cap_mask[0] =
13688         SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13689     sc->link_params.speed_cap_mask[1] =
13690         SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13691 
13692     /* get the lane config */
13693     sc->link_params.lane_config =
13694         SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13695 
13696     /* get the link config */
13697     val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13698     sc->port.link_config[ELINK_INT_PHY] = val;
13699     sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13700     sc->port.link_config[ELINK_EXT_PHY1] =
13701         SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13702 
13703     /* get the override preemphasis flag and enable it or turn it off */
13704     val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13705     if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13706         sc->link_params.feature_config_flags |=
13707             ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13708     } else {
13709         sc->link_params.feature_config_flags &=
13710             ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13711     }
13712 
13713     /* get the initial value of the link params */
13714     sc->link_params.multi_phy_config =
13715         SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13716 
13717     /* get external phy info */
13718     sc->port.ext_phy_config =
13719         SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13720 
13721     /* get the multifunction configuration */
13722     bxe_get_mf_cfg_info(sc);
13723 
13724     /* get the mac address */
13725     if (IS_MF(sc)) {
13726         mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13727         mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13728     } else {
13729         mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13730         mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13731     }
13732 
13733     if ((mac_lo == 0) && (mac_hi == 0)) {
13734         *sc->mac_addr_str = 0;
13735         BLOGE(sc, "No Ethernet address programmed!\n");
13736     } else {
13737         sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13738         sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13739         sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13740         sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13741         sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13742         sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13743         snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13744                  "%02x:%02x:%02x:%02x:%02x:%02x",
13745                  sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13746                  sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13747                  sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13748         BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13749     }
13750 
13751     return (0);
13752 }
13753 
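/*
 * Validate the loader tunables, falling back to safe defaults for any
 * out-of-range value, and copy the results into the softc. The queue count
 * is additionally capped by MAX_RSS_CHAINS and the number of CPUs, and is
 * forced to 1 in INTx mode.
 */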
13754 static void
13755 bxe_get_tunable_params(struct bxe_softc *sc)
13756 {
13757     /* sanity checks */
13758 
13759     if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13760         (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13761         (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13762         BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13763         bxe_interrupt_mode = INTR_MODE_MSIX;
13764     }
13765 
13766     if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13767         BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13768         bxe_queue_count = 0;
13769     }
13770 
13771     if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13772         if (bxe_max_rx_bufs == 0) {
13773             bxe_max_rx_bufs = RX_BD_USABLE;
13774         } else {
13775             BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13776             bxe_max_rx_bufs = 2048;
13777         }
13778     }
13779 
13780     if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13781         BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13782         bxe_hc_rx_ticks = 25;
13783     }
13784 
13785     if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13786         BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13787         bxe_hc_tx_ticks = 50;
13788     }
13789 
13790     if (bxe_max_aggregation_size == 0) {
13791         bxe_max_aggregation_size = TPA_AGG_SIZE;
13792     }
13793 
13794     if (bxe_max_aggregation_size > 0xffff) {
13795         BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13796               bxe_max_aggregation_size);
13797         bxe_max_aggregation_size = TPA_AGG_SIZE;
13798     }
13799 
13800     if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13801         BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13802         bxe_mrrs = -1;
13803     }
13804 
13805     if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13806         BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13807         bxe_autogreeen = 0;
13808     }
13809 
13810     if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13811         BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13812         bxe_udp_rss = 0;
13813     }
13814 
13815     /* pull in user settings */
13816 
13817     sc->interrupt_mode       = bxe_interrupt_mode;
13818     sc->max_rx_bufs          = bxe_max_rx_bufs;
13819     sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13820     sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13821     sc->max_aggregation_size = bxe_max_aggregation_size;
13822     sc->mrrs                 = bxe_mrrs;
13823     sc->autogreeen           = bxe_autogreeen;
13824     sc->udp_rss              = bxe_udp_rss;
13825 
13826     if (bxe_interrupt_mode == INTR_MODE_INTX) {
13827         sc->num_queues = 1;
13828     } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13829         sc->num_queues =
13830             min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13831                 MAX_RSS_CHAINS);
13832         if (sc->num_queues > mp_ncpus) {
13833             sc->num_queues = mp_ncpus;
13834         }
13835     }
13836 
13837     BLOGD(sc, DBG_LOAD,
13838           "User Config: "
13839           "debug=0x%lx "
13840           "interrupt_mode=%d "
13841           "queue_count=%d "
13842           "hc_rx_ticks=%d "
13843           "hc_tx_ticks=%d "
13844           "rx_budget=%d "
13845           "max_aggregation_size=%d "
13846           "mrrs=%d "
13847           "autogreeen=%d "
13848           "udp_rss=%d\n",
13849           bxe_debug,
13850           sc->interrupt_mode,
13851           sc->num_queues,
13852           sc->hc_rx_ticks,
13853           sc->hc_tx_ticks,
13854           bxe_rx_budget,
13855           sc->max_aggregation_size,
13856           sc->mrrs,
13857           sc->autogreeen,
13858           sc->udp_rss);
13859 }
13860 
13861 static int
13862 bxe_media_detect(struct bxe_softc *sc)
13863 {
13864     int port_type;
13865     uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13866 
13867     switch (sc->link_params.phy[phy_idx].media_type) {
13868     case ELINK_ETH_PHY_SFPP_10G_FIBER:
13869     case ELINK_ETH_PHY_XFP_FIBER:
13870         BLOGI(sc, "Found 10Gb Fiber media.\n");
13871         sc->media = IFM_10G_SR;
13872         port_type = PORT_FIBRE;
13873         break;
13874     case ELINK_ETH_PHY_SFP_1G_FIBER:
13875         BLOGI(sc, "Found 1Gb Fiber media.\n");
13876         sc->media = IFM_1000_SX;
13877         port_type = PORT_FIBRE;
13878         break;
13879     case ELINK_ETH_PHY_KR:
13880     case ELINK_ETH_PHY_CX4:
13881         BLOGI(sc, "Found 10GBase-CX4 media.\n");
13882         sc->media = IFM_10G_CX4;
13883         port_type = PORT_FIBRE;
13884         break;
13885     case ELINK_ETH_PHY_DA_TWINAX:
13886         BLOGI(sc, "Found 10Gb Twinax media.\n");
13887         sc->media = IFM_10G_TWINAX;
13888         port_type = PORT_DA;
13889         break;
13890     case ELINK_ETH_PHY_BASE_T:
13891         if (sc->link_params.speed_cap_mask[0] &
13892             PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13893             BLOGI(sc, "Found 10GBase-T media.\n");
13894             sc->media = IFM_10G_T;
13895             port_type = PORT_TP;
13896         } else {
13897             BLOGI(sc, "Found 1000Base-T media.\n");
13898             sc->media = IFM_1000_T;
13899             port_type = PORT_TP;
13900         }
13901         break;
13902     case ELINK_ETH_PHY_NOT_PRESENT:
13903         BLOGI(sc, "Media not present.\n");
13904         sc->media = 0;
13905         port_type = PORT_OTHER;
13906         break;
13907     case ELINK_ETH_PHY_UNSPECIFIED:
13908     default:
13909         BLOGI(sc, "Unknown media!\n");
13910         sc->media = 0;
13911         port_type = PORT_OTHER;
13912         break;
13913     }
13914     return port_type;
13915 }
13916 
13917 #define GET_FIELD(value, fname)                     \
13918     (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13919 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13920 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
13921 
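/*
 * Discover this PF's status blocks. In backward-compatible interrupt mode
 * the layout is fixed and computed directly; otherwise the IGU CAM is
 * scanned for valid entries whose FID matches this PF, recording the
 * default status block (vector 0), the first non-default status block and
 * the total count (the minimum of the CAM count and the value advertised
 * in PCI is used).
 */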
13922 static int
13923 bxe_get_igu_cam_info(struct bxe_softc *sc)
13924 {
13925     int pfid = SC_FUNC(sc);
13926     int igu_sb_id;
13927     uint32_t val;
13928     uint8_t fid, igu_sb_cnt = 0;
13929 
13930     sc->igu_base_sb = 0xff;
13931 
13932     if (CHIP_INT_MODE_IS_BC(sc)) {
13933         int vn = SC_VN(sc);
13934         igu_sb_cnt = sc->igu_sb_cnt;
13935         sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13936                            FP_SB_MAX_E1x);
13937         sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13938                           (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13939         return (0);
13940     }
13941 
13942     /* IGU in normal mode - read CAM */
13943     for (igu_sb_id = 0;
13944          igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13945          igu_sb_id++) {
13946         val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13947         if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13948             continue;
13949         }
13950         fid = IGU_FID(val);
13951         if ((fid & IGU_FID_ENCODE_IS_PF)) {
13952             if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13953                 continue;
13954             }
13955             if (IGU_VEC(val) == 0) {
13956                 /* default status block */
13957                 sc->igu_dsb_id = igu_sb_id;
13958             } else {
13959                 if (sc->igu_base_sb == 0xff) {
13960                     sc->igu_base_sb = igu_sb_id;
13961                 }
13962                 igu_sb_cnt++;
13963             }
13964         }
13965     }
13966 
13967     /*
13968      * Due to the new PF resource allocation by MFW T7.4 and above, the number
13969      * of CAM entries may not be equal to the value advertised in PCI. The
13970      * driver should use the minimum of the two as the actual status block
13971      * count.
13972      */
13973     sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13974 
13975     if (igu_sb_cnt == 0) {
13976         BLOGE(sc, "CAM configuration error\n");
13977         return (-1);
13978     }
13979 
13980     return (0);
13981 }
13982 
13983 /*
13984  * Gather information from the PCI config space, the device itself, shmem,
13985  * and user input.
13986  */
13987 static int
13988 bxe_get_device_info(struct bxe_softc *sc)
13989 {
13990     uint32_t val;
13991     int rc;
13992 
13993     /* Get the data for the device */
13994     sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13995     sc->devinfo.device_id    = pci_get_device(sc->dev);
13996     sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13997     sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13998 
13999     /* get the chip revision (chip metal comes from pci config space) */
14000     sc->devinfo.chip_id     =
14001     sc->link_params.chip_id =
14002         (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
14003          ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
14004          (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
14005          ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
14006 
14007     /* force 57811 according to MISC register */
14008     if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
14009         if (CHIP_IS_57810(sc)) {
14010             sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
14011                                    (sc->devinfo.chip_id & 0x0000ffff));
14012         } else if (CHIP_IS_57810_MF(sc)) {
14013             sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
14014                                    (sc->devinfo.chip_id & 0x0000ffff));
14015         }
14016         sc->devinfo.chip_id |= 0x1;
14017     }
14018 
14019     BLOGD(sc, DBG_LOAD,
14020           "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
14021           sc->devinfo.chip_id,
14022           ((sc->devinfo.chip_id >> 16) & 0xffff),
14023           ((sc->devinfo.chip_id >> 12) & 0xf),
14024           ((sc->devinfo.chip_id >>  4) & 0xff),
14025           ((sc->devinfo.chip_id >>  0) & 0xf));
14026 
14027     val = (REG_RD(sc, 0x2874) & 0x55);
14028     if ((sc->devinfo.chip_id & 0x1) ||
14029         (CHIP_IS_E1(sc) && val) ||
14030         (CHIP_IS_E1H(sc) && (val == 0x55))) {
14031         sc->flags |= BXE_ONE_PORT_FLAG;
14032         BLOGD(sc, DBG_LOAD, "single port device\n");
14033     }
14034 
14035     /* set the doorbell size */
14036     sc->doorbell_size = (1 << BXE_DB_SHIFT);
14037 
14038     /* determine whether the device is in 2 port or 4 port mode */
14039     sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1H */
14040     if (CHIP_IS_E2E3(sc)) {
14041         /*
14042          * Read port4mode_en_ovwr[0]:
14043          *   If 1, four port mode is in port4mode_en_ovwr[1].
14044          *   If 0, four port mode is in port4mode_en[0].
14045          */
14046         val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
14047         if (val & 1) {
14048             val = ((val >> 1) & 1);
14049         } else {
14050             val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
14051         }
14052 
14053         sc->devinfo.chip_port_mode =
14054             (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
14055 
14056         BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
14057     }
14058 
14059     /* get the function and path info for the device */
14060     bxe_get_function_num(sc);
14061 
14062     /* get the shared memory base address */
14063     sc->devinfo.shmem_base     =
14064     sc->link_params.shmem_base =
14065         REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
14066     sc->devinfo.shmem2_base =
14067         REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
14068                                   MISC_REG_GENERIC_CR_0));
14069 
14070     BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
14071           sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
14072 
14073     if (!sc->devinfo.shmem_base) {
14074         /* this should ONLY prevent upcoming shmem reads */
14075         BLOGI(sc, "MCP not active\n");
14076         sc->flags |= BXE_NO_MCP_FLAG;
14077         return (0);
14078     }
14079 
14080     /* make sure the shared memory contents are valid */
14081     val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
14082     if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
14083         (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
14084         BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
14085         return (0);
14086     }
14087     BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
14088 
14089     /* get the bootcode version */
14090     sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
14091     snprintf(sc->devinfo.bc_ver_str,
14092              sizeof(sc->devinfo.bc_ver_str),
14093              "%d.%d.%d",
14094              ((sc->devinfo.bc_ver >> 24) & 0xff),
14095              ((sc->devinfo.bc_ver >> 16) & 0xff),
14096              ((sc->devinfo.bc_ver >>  8) & 0xff));
14097     BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
14098 
14099     /* get the bootcode shmem address */
14100     sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
14101     BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
14102 
14103     /* clean indirect addresses as they're not used */
14104     pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
14105     if (IS_PF(sc)) {
14106         REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
14107         REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
14108         REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
14109         REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
14110         if (CHIP_IS_E1x(sc)) {
14111             REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
14112             REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
14113             REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
14114             REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
14115         }
14116 
14117         /*
14118          * Enable internal target-read (in case we are probed after PF
14119          * FLR). Must be done prior to any BAR read access. Only for
14120          * 57712 and up
14121          */
14122         if (!CHIP_IS_E1x(sc)) {
14123             REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
14124         }
14125     }
14126 
14127     /* get the nvram size */
14128     val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
14129     sc->devinfo.flash_size =
14130         (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
14131     BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
14132 
14133     /* get PCI capabilities */
14134     bxe_probe_pci_caps(sc);
14135 
14136     bxe_set_power_state(sc, PCI_PM_D0);
14137 
14138     /* get various configuration parameters from shmem */
14139     bxe_get_shmem_info(sc);
14140 
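          /*
           * Derive the initial IGU status block count from the MSI-X table
           * size field in PCI config space; fall back to a single status
           * block when no MSI-X capability is present.
           */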
14141     if (sc->devinfo.pcie_msix_cap_reg != 0) {
14142         val = pci_read_config(sc->dev,
14143                               (sc->devinfo.pcie_msix_cap_reg +
14144                                PCIR_MSIX_CTRL),
14145                               2);
14146         sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
14147     } else {
14148         sc->igu_sb_cnt = 1;
14149     }
14150 
14151     sc->igu_base_addr = BAR_IGU_INTMEM;
14152 
14153     /* initialize IGU parameters */
14154     if (CHIP_IS_E1x(sc)) {
14155         sc->devinfo.int_block = INT_BLOCK_HC;
14156         sc->igu_dsb_id = DEF_SB_IGU_ID;
14157         sc->igu_base_sb = 0;
14158     } else {
14159         sc->devinfo.int_block = INT_BLOCK_IGU;
14160 
14161         /* do not allow device reset during IGU info processing */
14162         bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14163 
14164         val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
14165 
14166         if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14167             int tout = 5000;
14168 
14169             BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
14170 
14171             val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
14172             REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
14173             REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
14174 
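                  /* wait up to 5 seconds (5000 x 1 ms) for the IGU memories to leave reset */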
14175             while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14176                 tout--;
14177                 DELAY(1000);
14178             }
14179 
14180             if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14181                 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
14182                 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14183                 return (-1);
14184             }
14185         }
14186 
14187         if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14188             BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
14189             sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
14190         } else {
14191             BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
14192         }
14193 
14194         rc = bxe_get_igu_cam_info(sc);
14195 
14196         bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14197 
14198         if (rc) {
14199             return (rc);
14200         }
14201     }
14202 
14203     /*
14204      * Get base FW non-default (fast path) status block ID. This value is
14205      * used to initialize the fw_sb_id saved on the fp/queue structure to
14206      * determine the id used by the FW.
14207      */
14208     if (CHIP_IS_E1x(sc)) {
14209         sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
14210     } else {
14211         /*
14212          * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
14213          * the same queue are indicated on the same IGU SB). So we prefer
14214          * FW and IGU SBs to be the same value.
14215          */
14216         sc->base_fw_ndsb = sc->igu_base_sb;
14217     }
14218 
14219     BLOGD(sc, DBG_LOAD,
14220           "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
14221           sc->igu_dsb_id, sc->igu_base_sb,
14222           sc->igu_sb_cnt, sc->base_fw_ndsb);
14223 
14224     elink_phy_probe(&sc->link_params);
14225 
14226     return (0);
14227 }
14228 
14229 static void
14230 bxe_link_settings_supported(struct bxe_softc *sc,
14231                             uint32_t         switch_cfg)
14232 {
14233     uint32_t cfg_size = 0;
14234     uint32_t idx;
14235     uint8_t port = SC_PORT(sc);
14236 
14237     /* aggregation of supported attributes of all external phys */
14238     sc->port.supported[0] = 0;
14239     sc->port.supported[1] = 0;
14240 
14241     switch (sc->link_params.num_phys) {
14242     case 1:
14243         sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
14244         cfg_size = 1;
14245         break;
14246     case 2:
14247         sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
14248         cfg_size = 1;
14249         break;
14250     case 3:
14251         if (sc->link_params.multi_phy_config &
14252             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14253             sc->port.supported[1] =
14254                 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14255             sc->port.supported[0] =
14256                 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14257         } else {
14258             sc->port.supported[0] =
14259                 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14260             sc->port.supported[1] =
14261                 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14262         }
14263         cfg_size = 2;
14264         break;
14265     }
14266 
14267     if (!(sc->port.supported[0] || sc->port.supported[1])) {
14268         BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14269               SHMEM_RD(sc,
14270                        dev_info.port_hw_config[port].external_phy_config),
14271               SHMEM_RD(sc,
14272                        dev_info.port_hw_config[port].external_phy_config2));
14273         return;
14274     }
14275 
14276     if (CHIP_IS_E3(sc))
14277         sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14278     else {
14279         switch (switch_cfg) {
14280         case ELINK_SWITCH_CFG_1G:
14281             sc->port.phy_addr =
14282                 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14283             break;
14284         case ELINK_SWITCH_CFG_10G:
14285             sc->port.phy_addr =
14286                 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14287             break;
14288         default:
14289             BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14290                   sc->port.link_config[0]);
14291             return;
14292         }
14293     }
14294 
14295     BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14296 
14297     /* mask what we support according to speed_cap_mask per configuration */
14298     for (idx = 0; idx < cfg_size; idx++) {
14299         if (!(sc->link_params.speed_cap_mask[idx] &
14300               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14301             sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14302         }
14303 
14304         if (!(sc->link_params.speed_cap_mask[idx] &
14305               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14306             sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14307         }
14308 
14309         if (!(sc->link_params.speed_cap_mask[idx] &
14310               PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14311             sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14312         }
14313 
14314         if (!(sc->link_params.speed_cap_mask[idx] &
14315               PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14316             sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14317         }
14318 
14319         if (!(sc->link_params.speed_cap_mask[idx] &
14320               PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14321             sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14322         }
14323 
14324         if (!(sc->link_params.speed_cap_mask[idx] &
14325               PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14326             sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14327         }
14328 
14329         if (!(sc->link_params.speed_cap_mask[idx] &
14330               PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14331             sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14332         }
14333 
14334         if (!(sc->link_params.speed_cap_mask[idx] &
14335               PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14336             sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14337         }
14338     }
14339 
14340     BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14341           sc->port.supported[0], sc->port.supported[1]);
14342     ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
14343                    sc->port.supported[0], sc->port.supported[1]);
14344 }
14345 
14346 static void
14347 bxe_link_settings_requested(struct bxe_softc *sc)
14348 {
14349     uint32_t link_config;
14350     uint32_t idx;
14351     uint32_t cfg_size = 0;
14352 
14353     sc->port.advertising[0] = 0;
14354     sc->port.advertising[1] = 0;
14355 
14356     switch (sc->link_params.num_phys) {
14357     case 1:
14358     case 2:
14359         cfg_size = 1;
14360         break;
14361     case 3:
14362         cfg_size = 2;
14363         break;
14364     }
14365 
14366     for (idx = 0; idx < cfg_size; idx++) {
14367         sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14368         link_config = sc->port.link_config[idx];
14369 
14370         switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14371         case PORT_FEATURE_LINK_SPEED_AUTO:
14372             if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14373                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14374                 sc->port.advertising[idx] |= sc->port.supported[idx];
14375                 if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14376                     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14377                     sc->port.advertising[idx] |=
14378                         (ELINK_SUPPORTED_100baseT_Half |
14379                          ELINK_SUPPORTED_100baseT_Full);
14380             } else {
14381                 /* force 10G, no AN */
14382                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14383                 sc->port.advertising[idx] |=
14384                     (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14385                 continue;
14386             }
14387             break;
14388 
14389         case PORT_FEATURE_LINK_SPEED_10M_FULL:
14390             if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14391                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14392                 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14393                                               ADVERTISED_TP);
14394             } else {
14395                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14396                           "speed_cap_mask=0x%08x\n",
14397                       link_config, sc->link_params.speed_cap_mask[idx]);
14398                 return;
14399             }
14400             break;
14401 
14402         case PORT_FEATURE_LINK_SPEED_10M_HALF:
14403             if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14404                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14405                 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14406                 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14407                                               ADVERTISED_TP);
14408                 ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
14409                                sc->link_params.req_duplex[idx]);
14410             } else {
14411                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14412                           "speed_cap_mask=0x%08x\n",
14413                       link_config, sc->link_params.speed_cap_mask[idx]);
14414                 return;
14415             }
14416             break;
14417 
14418         case PORT_FEATURE_LINK_SPEED_100M_FULL:
14419             if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14420                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14421                 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14422                                               ADVERTISED_TP);
14423             } else {
14424                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14425                           "speed_cap_mask=0x%08x\n",
14426                       link_config, sc->link_params.speed_cap_mask[idx]);
14427                 return;
14428             }
14429             break;
14430 
14431         case PORT_FEATURE_LINK_SPEED_100M_HALF:
14432             if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14433                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14434                 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14435                 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14436                                               ADVERTISED_TP);
14437             } else {
14438                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14439                           "speed_cap_mask=0x%08x\n",
14440                       link_config, sc->link_params.speed_cap_mask[idx]);
14441                 return;
14442             }
14443             break;
14444 
14445         case PORT_FEATURE_LINK_SPEED_1G:
14446             if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14447                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14448                 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14449                                               ADVERTISED_TP);
14450             } else {
14451                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14452                           "speed_cap_mask=0x%08x\n",
14453                       link_config, sc->link_params.speed_cap_mask[idx]);
14454                 return;
14455             }
14456             break;
14457 
14458         case PORT_FEATURE_LINK_SPEED_2_5G:
14459             if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14460                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14461                 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14462                                               ADVERTISED_TP);
14463             } else {
14464                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14465                           "speed_cap_mask=0x%08x\n",
14466                       link_config, sc->link_params.speed_cap_mask[idx]);
14467                 return;
14468             }
14469             break;
14470 
14471         case PORT_FEATURE_LINK_SPEED_10G_CX4:
14472             if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14473                 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14474                 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14475                                               ADVERTISED_FIBRE);
14476             } else {
14477                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14478                           "speed_cap_mask=0x%08x\n",
14479                       link_config, sc->link_params.speed_cap_mask[idx]);
14480                 return;
14481             }
14482             break;
14483 
14484         case PORT_FEATURE_LINK_SPEED_20G:
14485             sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14486             break;
14487 
14488         default:
14489             BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14490                       "speed_cap_mask=0x%08x\n",
14491                   link_config, sc->link_params.speed_cap_mask[idx]);
14492             sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14493             sc->port.advertising[idx] = sc->port.supported[idx];
14494             break;
14495         }
14496 
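              /*
               * Flow control comes from the NVRAM link config. If it is AUTO
               * but the PHY cannot autonegotiate, force it off; otherwise
               * defer to bxe_set_requested_fc().
               */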
14497         sc->link_params.req_flow_ctrl[idx] =
14498             (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14499 
14500         if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14501             if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14502                 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14503             } else {
14504                 bxe_set_requested_fc(sc);
14505             }
14506         }
14507 
14508         BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14509                             "req_flow_ctrl=0x%x advertising=0x%x\n",
14510               sc->link_params.req_line_speed[idx],
14511               sc->link_params.req_duplex[idx],
14512               sc->link_params.req_flow_ctrl[idx],
14513               sc->port.advertising[idx]);
14514         ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
14515                            "advertising=0x%x\n",
14516                        sc->link_params.req_line_speed[idx],
14517                        sc->link_params.req_duplex[idx],
14518                        sc->port.advertising[idx]);
14519     }
14520 }
14521 
14522 static void
14523 bxe_get_phy_info(struct bxe_softc *sc)
14524 {
14525     uint8_t port = SC_PORT(sc);
14526     uint32_t config = sc->port.config;
14527     uint32_t eee_mode;
14528 
14529     /* shmem data already read in bxe_get_shmem_info() */
14530 
14531     ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14532                         "link_config0=0x%08x\n",
14533                sc->link_params.lane_config,
14534                sc->link_params.speed_cap_mask[0],
14535                sc->port.link_config[0]);
14536 
14537 
14538     bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14539     bxe_link_settings_requested(sc);
14540 
14541     if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14542         sc->link_params.feature_config_flags |=
14543             ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14544     } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14545         sc->link_params.feature_config_flags &=
14546             ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14547     } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14548         sc->link_params.feature_config_flags |=
14549             ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14550     }
14551 
14552     /* configure link feature according to nvram value */
14553     eee_mode =
14554         (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14555           PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14556          PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14557     if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14558         sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14559                                     ELINK_EEE_MODE_ENABLE_LPI |
14560                                     ELINK_EEE_MODE_OUTPUT_TIME);
14561     } else {
14562         sc->link_params.eee_mode = 0;
14563     }
14564 
14565     /* get the media type */
14566     bxe_media_detect(sc);
14567     ELINK_DEBUG_P1(sc, "detected media type 0x%x\n", sc->media);
14568 }
14569 
14570 static void
14571 bxe_get_params(struct bxe_softc *sc)
14572 {
14573     /* get user tunable params */
14574     bxe_get_tunable_params(sc);
14575 
14576     /* select the RX and TX ring sizes */
14577     sc->tx_ring_size = TX_BD_USABLE;
14578     sc->rx_ring_size = RX_BD_USABLE;
14579 
14580     /* XXX disable WoL */
14581     sc->wol = 0;
14582 }
14583 
14584 static void
14585 bxe_set_modes_bitmap(struct bxe_softc *sc)
14586 {
14587     uint32_t flags = 0;
14588 
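          /*
           * Build the init-mode flag bitmap consumed by the ecore init code:
           * platform (ASIC/FPGA/emulation), port count, chip family, SF/MF
           * mode, and endianness.
           */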
14589     if (CHIP_REV_IS_FPGA(sc)) {
14590         SET_FLAGS(flags, MODE_FPGA);
14591     } else if (CHIP_REV_IS_EMUL(sc)) {
14592         SET_FLAGS(flags, MODE_EMUL);
14593     } else {
14594         SET_FLAGS(flags, MODE_ASIC);
14595     }
14596 
14597     if (CHIP_IS_MODE_4_PORT(sc)) {
14598         SET_FLAGS(flags, MODE_PORT4);
14599     } else {
14600         SET_FLAGS(flags, MODE_PORT2);
14601     }
14602 
14603     if (CHIP_IS_E2(sc)) {
14604         SET_FLAGS(flags, MODE_E2);
14605     } else if (CHIP_IS_E3(sc)) {
14606         SET_FLAGS(flags, MODE_E3);
14607         if (CHIP_REV(sc) == CHIP_REV_Ax) {
14608             SET_FLAGS(flags, MODE_E3_A0);
14609         } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14610             SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14611         }
14612     }
14613 
14614     if (IS_MF(sc)) {
14615         SET_FLAGS(flags, MODE_MF);
14616         switch (sc->devinfo.mf_info.mf_mode) {
14617         case MULTI_FUNCTION_SD:
14618             SET_FLAGS(flags, MODE_MF_SD);
14619             break;
14620         case MULTI_FUNCTION_SI:
14621             SET_FLAGS(flags, MODE_MF_SI);
14622             break;
14623         case MULTI_FUNCTION_AFEX:
14624             SET_FLAGS(flags, MODE_MF_AFEX);
14625             break;
14626         }
14627     } else {
14628         SET_FLAGS(flags, MODE_SF);
14629     }
14630 
14631 #if defined(__LITTLE_ENDIAN)
14632     SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14633 #else /* __BIG_ENDIAN */
14634     SET_FLAGS(flags, MODE_BIG_ENDIAN);
14635 #endif
14636 
14637     INIT_MODE_FLAGS(sc) = flags;
14638 }
14639 
14640 static int
14641 bxe_alloc_hsi_mem(struct bxe_softc *sc)
14642 {
14643     struct bxe_fastpath *fp;
14644     bus_addr_t busaddr;
14645     int max_agg_queues;
14646     int max_segments;
14647     bus_size_t max_size;
14648     bus_size_t max_seg_size;
14649     char buf[32];
14650     int rc;
14651     int i, j;
14652 
14653     /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14654 
14655     /* allocate the parent bus DMA tag */
14656     rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14657                             1,                        /* alignment */
14658                             0,                        /* boundary limit */
14659                             BUS_SPACE_MAXADDR,        /* restricted low */
14660                             BUS_SPACE_MAXADDR,        /* restricted hi */
14661                             NULL,                     /* addr filter() */
14662                             NULL,                     /* addr filter() arg */
14663                             BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14664                             BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14665                             BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14666                             0,                        /* flags */
14667                             NULL,                     /* lock() */
14668                             NULL,                     /* lock() arg */
14669                             &sc->parent_dma_tag);     /* returned dma tag */
14670     if (rc != 0) {
14671         BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14672         return (1);
14673     }
14674 
14675     /************************/
14676     /* DEFAULT STATUS BLOCK */
14677     /************************/
14678 
14679     if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14680                       &sc->def_sb_dma, "default status block") != 0) {
14681         /* XXX */
14682         bus_dma_tag_destroy(sc->parent_dma_tag);
14683         return (1);
14684     }
14685 
14686     sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14687 
14688     /***************/
14689     /* EVENT QUEUE */
14690     /***************/
14691 
14692     if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14693                       &sc->eq_dma, "event queue") != 0) {
14694         /* XXX */
14695         bxe_dma_free(sc, &sc->def_sb_dma);
14696         sc->def_sb = NULL;
14697         bus_dma_tag_destroy(sc->parent_dma_tag);
14698         return (1);
14699     }
14700 
14701     sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
14702 
14703     /*************/
14704     /* SLOW PATH */
14705     /*************/
14706 
14707     if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14708                       &sc->sp_dma, "slow path") != 0) {
14709         /* XXX */
14710         bxe_dma_free(sc, &sc->eq_dma);
14711         sc->eq = NULL;
14712         bxe_dma_free(sc, &sc->def_sb_dma);
14713         sc->def_sb = NULL;
14714         bus_dma_tag_destroy(sc->parent_dma_tag);
14715         return (1);
14716     }
14717 
14718     sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14719 
14720     /*******************/
14721     /* SLOW PATH QUEUE */
14722     /*******************/
14723 
14724     if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14725                       &sc->spq_dma, "slow path queue") != 0) {
14726         /* XXX */
14727         bxe_dma_free(sc, &sc->sp_dma);
14728         sc->sp = NULL;
14729         bxe_dma_free(sc, &sc->eq_dma);
14730         sc->eq = NULL;
14731         bxe_dma_free(sc, &sc->def_sb_dma);
14732         sc->def_sb = NULL;
14733         bus_dma_tag_destroy(sc->parent_dma_tag);
14734         return (1);
14735     }
14736 
14737     sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14738 
14739     /***************************/
14740     /* FW DECOMPRESSION BUFFER */
14741     /***************************/
14742 
14743     if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14744                       "fw decompression buffer") != 0) {
14745         /* XXX */
14746         bxe_dma_free(sc, &sc->spq_dma);
14747         sc->spq = NULL;
14748         bxe_dma_free(sc, &sc->sp_dma);
14749         sc->sp = NULL;
14750         bxe_dma_free(sc, &sc->eq_dma);
14751         sc->eq = NULL;
14752         bxe_dma_free(sc, &sc->def_sb_dma);
14753         sc->def_sb = NULL;
14754         bus_dma_tag_destroy(sc->parent_dma_tag);
14755         return (1);
14756     }
14757 
14758     sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14759 
14760     if ((sc->gz_strm =
14761          malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14762         /* XXX */
14763         bxe_dma_free(sc, &sc->gz_buf_dma);
14764         sc->gz_buf = NULL;
14765         bxe_dma_free(sc, &sc->spq_dma);
14766         sc->spq = NULL;
14767         bxe_dma_free(sc, &sc->sp_dma);
14768         sc->sp = NULL;
14769         bxe_dma_free(sc, &sc->eq_dma);
14770         sc->eq = NULL;
14771         bxe_dma_free(sc, &sc->def_sb_dma);
14772         sc->def_sb = NULL;
14773         bus_dma_tag_destroy(sc->parent_dma_tag);
14774         return (1);
14775     }
14776 
14777     /*************/
14778     /* FASTPATHS */
14779     /*************/
14780 
14781     /* allocate DMA memory for each fastpath structure */
14782     for (i = 0; i < sc->num_queues; i++) {
14783         fp = &sc->fp[i];
14784         fp->sc    = sc;
14785         fp->index = i;
14786 
14787         /*******************/
14788         /* FP STATUS BLOCK */
14789         /*******************/
14790 
14791         snprintf(buf, sizeof(buf), "fp %d status block", i);
14792         if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14793                           &fp->sb_dma, buf) != 0) {
14794             /* XXX unwind and free previous fastpath allocations */
14795             BLOGE(sc, "Failed to alloc %s\n", buf);
14796             return (1);
14797         } else {
14798             if (CHIP_IS_E2E3(sc)) {
14799                 fp->status_block.e2_sb =
14800                     (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14801             } else {
14802                 fp->status_block.e1x_sb =
14803                     (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14804             }
14805         }
14806 
14807         /******************/
14808         /* FP TX BD CHAIN */
14809         /******************/
14810 
14811         snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14812         if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14813                           &fp->tx_dma, buf) != 0) {
14814             /* XXX unwind and free previous fastpath allocations */
14815             BLOGE(sc, "Failed to alloc %s\n", buf);
14816             return (1);
14817         } else {
14818             fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14819         }
14820 
14821         /* link together the tx bd chain pages */
14822         for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14823             /* index into the tx bd chain array to last entry per page */
14824             struct eth_tx_next_bd *tx_next_bd =
14825                 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14826             /* point to the next page and wrap from last page */
14827             busaddr = (fp->tx_dma.paddr +
14828                        (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14829             tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14830             tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14831         }
14832 
14833         /******************/
14834         /* FP RX BD CHAIN */
14835         /******************/
14836 
14837         snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14838         if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14839                           &fp->rx_dma, buf) != 0) {
14840             /* XXX unwind and free previous fastpath allocations */
14841             BLOGE(sc, "Failed to alloc %s\n", buf);
14842             return (1);
14843         } else {
14844             fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14845         }
14846 
14847         /* link together the rx bd chain pages */
14848         for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14849             /* index into the rx bd chain array to last entry per page */
14850             struct eth_rx_bd *rx_bd =
14851                 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14852             /* point to the next page and wrap from last page */
14853             busaddr = (fp->rx_dma.paddr +
14854                        (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14855             rx_bd->addr_hi = htole32(U64_HI(busaddr));
14856             rx_bd->addr_lo = htole32(U64_LO(busaddr));
14857         }
14858 
14859         /*******************/
14860         /* FP RX RCQ CHAIN */
14861         /*******************/
14862 
14863         snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14864         if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14865                           &fp->rcq_dma, buf) != 0) {
14866             /* XXX unwind and free previous fastpath allocations */
14867             BLOGE(sc, "Failed to alloc %s\n", buf);
14868             return (1);
14869         } else {
14870             fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14871         }
14872 
14873         /* link together the rcq chain pages */
14874         for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14875             /* index into the rcq chain array to last entry per page */
14876             struct eth_rx_cqe_next_page *rx_cqe_next =
14877                 (struct eth_rx_cqe_next_page *)
14878                 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14879             /* point to the next page and wrap from last page */
14880             busaddr = (fp->rcq_dma.paddr +
14881                        (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14882             rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14883             rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14884         }
14885 
14886         /*******************/
14887         /* FP RX SGE CHAIN */
14888         /*******************/
14889 
14890         snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14891         if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14892                           &fp->rx_sge_dma, buf) != 0) {
14893             /* XXX unwind and free previous fastpath allocations */
14894             BLOGE(sc, "Failed to alloc %s\n", buf);
14895             return (1);
14896         } else {
14897             fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14898         }
14899 
14900         /* link together the sge chain pages */
14901         for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14902             /* index into the rx sge chain array to last entry per page */
14903             struct eth_rx_sge *rx_sge =
14904                 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14905             /* point to the next page and wrap from last page */
14906             busaddr = (fp->rx_sge_dma.paddr +
14907                        (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14908             rx_sge->addr_hi = htole32(U64_HI(busaddr));
14909             rx_sge->addr_lo = htole32(U64_LO(busaddr));
14910         }
14911 
14912         /***********************/
14913         /* FP TX MBUF DMA MAPS */
14914         /***********************/
14915 
14916         /* set required sizes before mapping to conserve resources */
14917         if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14918             max_size     = BXE_TSO_MAX_SIZE;
14919             max_segments = BXE_TSO_MAX_SEGMENTS;
14920             max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14921         } else {
14922             max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14923             max_segments = BXE_MAX_SEGMENTS;
14924             max_seg_size = MCLBYTES;
14925         }
14926 
14927         /* create a dma tag for the tx mbufs */
14928         rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14929                                 1,                  /* alignment */
14930                                 0,                  /* boundary limit */
14931                                 BUS_SPACE_MAXADDR,  /* restricted low */
14932                                 BUS_SPACE_MAXADDR,  /* restricted hi */
14933                                 NULL,               /* addr filter() */
14934                                 NULL,               /* addr filter() arg */
14935                                 max_size,           /* max map size */
14936                                 max_segments,       /* num discontinuous */
14937                                 max_seg_size,       /* max seg size */
14938                                 0,                  /* flags */
14939                                 NULL,               /* lock() */
14940                                 NULL,               /* lock() arg */
14941                                 &fp->tx_mbuf_tag);  /* returned dma tag */
14942         if (rc != 0) {
14943             /* XXX unwind and free previous fastpath allocations */
14944             BLOGE(sc, "Failed to create dma tag for "
14945                       "'fp %d tx mbufs' (%d)\n", i, rc);
14946             return (1);
14947         }
14948 
14949         /* create dma maps for each of the tx mbuf clusters */
14950         for (j = 0; j < TX_BD_TOTAL; j++) {
14951             if (bus_dmamap_create(fp->tx_mbuf_tag,
14952                                   BUS_DMA_NOWAIT,
14953                                   &fp->tx_mbuf_chain[j].m_map)) {
14954                 /* XXX unwind and free previous fastpath allocations */
14955                 BLOGE(sc, "Failed to create dma map for "
14956                           "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14957                 return (1);
14958             }
14959         }
14960 
14961         /***********************/
14962         /* FP RX MBUF DMA MAPS */
14963         /***********************/
14964 
14965         /* create a dma tag for the rx mbufs */
14966         rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14967                                 1,                  /* alignment */
14968                                 0,                  /* boundary limit */
14969                                 BUS_SPACE_MAXADDR,  /* restricted low */
14970                                 BUS_SPACE_MAXADDR,  /* restricted hi */
14971                                 NULL,               /* addr filter() */
14972                                 NULL,               /* addr filter() arg */
14973                                 MJUM9BYTES,         /* max map size */
14974                                 1,                  /* num discontinuous */
14975                                 MJUM9BYTES,         /* max seg size */
14976                                 0,                  /* flags */
14977                                 NULL,               /* lock() */
14978                                 NULL,               /* lock() arg */
14979                                 &fp->rx_mbuf_tag);  /* returned dma tag */
14980         if (rc != 0) {
14981             /* XXX unwind and free previous fastpath allocations */
14982             BLOGE(sc, "Failed to create dma tag for "
14983                       "'fp %d rx mbufs' (%d)\n", i, rc);
14984             return (1);
14985         }
14986 
14987         /* create dma maps for each of the rx mbuf clusters */
14988         for (j = 0; j < RX_BD_TOTAL; j++) {
14989             if (bus_dmamap_create(fp->rx_mbuf_tag,
14990                                   BUS_DMA_NOWAIT,
14991                                   &fp->rx_mbuf_chain[j].m_map)) {
14992                 /* XXX unwind and free previous fastpath allocations */
14993                 BLOGE(sc, "Failed to create dma map for "
14994                           "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14995                 return (1);
14996             }
14997         }
14998 
14999         /* create dma map for the spare rx mbuf cluster */
15000         if (bus_dmamap_create(fp->rx_mbuf_tag,
15001                               BUS_DMA_NOWAIT,
15002                               &fp->rx_mbuf_spare_map)) {
15003             /* XXX unwind and free previous fastpath allocations */
15004             BLOGE(sc, "Failed to create dma map for "
15005                       "'fp %d spare rx mbuf' (%d)\n", i, rc);
15006             return (1);
15007         }
15008 
15009         /***************************/
15010         /* FP RX SGE MBUF DMA MAPS */
15011         /***************************/
15012 
15013         /* create a dma tag for the rx sge mbufs */
15014         rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15015                                 1,                  /* alignment */
15016                                 0,                  /* boundary limit */
15017                                 BUS_SPACE_MAXADDR,  /* restricted low */
15018                                 BUS_SPACE_MAXADDR,  /* restricted hi */
15019                                 NULL,               /* addr filter() */
15020                                 NULL,               /* addr filter() arg */
15021                                 BCM_PAGE_SIZE,      /* max map size */
15022                                 1,                  /* num discontinuous */
15023                                 BCM_PAGE_SIZE,      /* max seg size */
15024                                 0,                  /* flags */
15025                                 NULL,               /* lock() */
15026                                 NULL,               /* lock() arg */
15027                                 &fp->rx_sge_mbuf_tag); /* returned dma tag */
15028         if (rc != 0) {
15029             /* XXX unwind and free previous fastpath allocations */
15030             BLOGE(sc, "Failed to create dma tag for "
15031                       "'fp %d rx sge mbufs' (%d)\n", i, rc);
15032             return (1);
15033         }
15034 
15035         /* create dma maps for the rx sge mbuf clusters */
15036         for (j = 0; j < RX_SGE_TOTAL; j++) {
15037             if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15038                                   BUS_DMA_NOWAIT,
15039                                   &fp->rx_sge_mbuf_chain[j].m_map)) {
15040                 /* XXX unwind and free previous fastpath allocations */
15041                 BLOGE(sc, "Failed to create dma map for "
15042                           "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
15043                 return (1);
15044             }
15045         }
15046 
15047         /* create dma map for the spare rx sge mbuf cluster */
15048         if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15049                               BUS_DMA_NOWAIT,
15050                               &fp->rx_sge_mbuf_spare_map)) {
15051             /* XXX unwind and free previous fastpath allocations */
15052             BLOGE(sc, "Failed to create dma map for "
15053                       "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
15054             return (1);
15055         }
15056 
15057         /***************************/
15058         /* FP RX TPA MBUF DMA MAPS */
15059         /***************************/
15060 
15061         /* create dma maps for the rx tpa mbuf clusters */
15062         max_agg_queues = MAX_AGG_QS(sc);
15063 
15064         for (j = 0; j < max_agg_queues; j++) {
15065             if (bus_dmamap_create(fp->rx_mbuf_tag,
15066                                   BUS_DMA_NOWAIT,
15067                                   &fp->rx_tpa_info[j].bd.m_map)) {
15068                 /* XXX unwind and free previous fastpath allocations */
15069                 BLOGE(sc, "Failed to create dma map for "
15070                           "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
15071                 return (1);
15072             }
15073         }
15074 
15075         /* create dma map for the spare rx tpa mbuf cluster */
15076         if (bus_dmamap_create(fp->rx_mbuf_tag,
15077                               BUS_DMA_NOWAIT,
15078                               &fp->rx_tpa_info_mbuf_spare_map)) {
15079             /* XXX unwind and free previous fastpath allocations */
15080             BLOGE(sc, "Failed to create dma map for "
15081                       "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
15082             return (1);
15083         }
15084 
15085         bxe_init_sge_ring_bit_mask(fp);
15086     }
15087 
15088     return (0);
15089 }
15090 
15091 static void
15092 bxe_free_hsi_mem(struct bxe_softc *sc)
15093 {
15094     struct bxe_fastpath *fp;
15095     int max_agg_queues;
15096     int i, j;
15097 
15098     if (sc->parent_dma_tag == NULL) {
15099         return; /* assume nothing was allocated */
15100     }
15101 
15102     for (i = 0; i < sc->num_queues; i++) {
15103         fp = &sc->fp[i];
15104 
15105         /*******************/
15106         /* FP STATUS BLOCK */
15107         /*******************/
15108 
15109         bxe_dma_free(sc, &fp->sb_dma);
15110         memset(&fp->status_block, 0, sizeof(fp->status_block));
15111 
15112         /******************/
15113         /* FP TX BD CHAIN */
15114         /******************/
15115 
15116         bxe_dma_free(sc, &fp->tx_dma);
15117         fp->tx_chain = NULL;
15118 
15119         /******************/
15120         /* FP RX BD CHAIN */
15121         /******************/
15122 
15123         bxe_dma_free(sc, &fp->rx_dma);
15124         fp->rx_chain = NULL;
15125 
15126         /*******************/
15127         /* FP RX RCQ CHAIN */
15128         /*******************/
15129 
15130         bxe_dma_free(sc, &fp->rcq_dma);
15131         fp->rcq_chain = NULL;
15132 
15133         /*******************/
15134         /* FP RX SGE CHAIN */
15135         /*******************/
15136 
15137         bxe_dma_free(sc, &fp->rx_sge_dma);
15138         fp->rx_sge_chain = NULL;
15139 
15140         /***********************/
15141         /* FP TX MBUF DMA MAPS */
15142         /***********************/
15143 
15144         if (fp->tx_mbuf_tag != NULL) {
15145             for (j = 0; j < TX_BD_TOTAL; j++) {
15146                 if (fp->tx_mbuf_chain[j].m_map != NULL) {
15147                     bus_dmamap_unload(fp->tx_mbuf_tag,
15148                                       fp->tx_mbuf_chain[j].m_map);
15149                     bus_dmamap_destroy(fp->tx_mbuf_tag,
15150                                        fp->tx_mbuf_chain[j].m_map);
15151                 }
15152             }
15153 
15154             bus_dma_tag_destroy(fp->tx_mbuf_tag);
15155             fp->tx_mbuf_tag = NULL;
15156         }
15157 
15158         /***********************/
15159         /* FP RX MBUF DMA MAPS */
15160         /***********************/
15161 
15162         if (fp->rx_mbuf_tag != NULL) {
15163             for (j = 0; j < RX_BD_TOTAL; j++) {
15164                 if (fp->rx_mbuf_chain[j].m_map != NULL) {
15165                     bus_dmamap_unload(fp->rx_mbuf_tag,
15166                                       fp->rx_mbuf_chain[j].m_map);
15167                     bus_dmamap_destroy(fp->rx_mbuf_tag,
15168                                        fp->rx_mbuf_chain[j].m_map);
15169                 }
15170             }
15171 
15172             if (fp->rx_mbuf_spare_map != NULL) {
15173                 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15174                 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15175             }
15176 
15177             /***************************/
15178             /* FP RX TPA MBUF DMA MAPS */
15179             /***************************/
15180 
15181             max_agg_queues = MAX_AGG_QS(sc);
15182 
15183             for (j = 0; j < max_agg_queues; j++) {
15184                 if (fp->rx_tpa_info[j].bd.m_map != NULL) {
15185                     bus_dmamap_unload(fp->rx_mbuf_tag,
15186                                       fp->rx_tpa_info[j].bd.m_map);
15187                     bus_dmamap_destroy(fp->rx_mbuf_tag,
15188                                        fp->rx_tpa_info[j].bd.m_map);
15189                 }
15190             }
15191 
15192             if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
15193                 bus_dmamap_unload(fp->rx_mbuf_tag,
15194                                   fp->rx_tpa_info_mbuf_spare_map);
15195                 bus_dmamap_destroy(fp->rx_mbuf_tag,
15196                                    fp->rx_tpa_info_mbuf_spare_map);
15197             }
15198 
15199             bus_dma_tag_destroy(fp->rx_mbuf_tag);
15200             fp->rx_mbuf_tag = NULL;
15201         }
15202 
15203         /***************************/
15204         /* FP RX SGE MBUF DMA MAPS */
15205         /***************************/
15206 
15207         if (fp->rx_sge_mbuf_tag != NULL) {
15208             for (j = 0; j < RX_SGE_TOTAL; j++) {
15209                 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
15210                     bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15211                                       fp->rx_sge_mbuf_chain[j].m_map);
15212                     bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15213                                        fp->rx_sge_mbuf_chain[j].m_map);
15214                 }
15215             }
15216 
15217             if (fp->rx_sge_mbuf_spare_map != NULL) {
15218                 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15219                                   fp->rx_sge_mbuf_spare_map);
15220                 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15221                                    fp->rx_sge_mbuf_spare_map);
15222             }
15223 
15224             bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
15225             fp->rx_sge_mbuf_tag = NULL;
15226         }
15227     }
15228 
15229     /***************************/
15230     /* FW DECOMPRESSION BUFFER */
15231     /***************************/
15232 
15233     bxe_dma_free(sc, &sc->gz_buf_dma);
15234     sc->gz_buf = NULL;
15235     free(sc->gz_strm, M_DEVBUF);
15236     sc->gz_strm = NULL;
15237 
15238     /*******************/
15239     /* SLOW PATH QUEUE */
15240     /*******************/
15241 
15242     bxe_dma_free(sc, &sc->spq_dma);
15243     sc->spq = NULL;
15244 
15245     /*************/
15246     /* SLOW PATH */
15247     /*************/
15248 
15249     bxe_dma_free(sc, &sc->sp_dma);
15250     sc->sp = NULL;
15251 
15252     /***************/
15253     /* EVENT QUEUE */
15254     /***************/
15255 
15256     bxe_dma_free(sc, &sc->eq_dma);
15257     sc->eq = NULL;
15258 
15259     /************************/
15260     /* DEFAULT STATUS BLOCK */
15261     /************************/
15262 
15263     bxe_dma_free(sc, &sc->def_sb_dma);
15264     sc->def_sb = NULL;
15265 
15266     bus_dma_tag_destroy(sc->parent_dma_tag);
15267     sc->parent_dma_tag = NULL;
15268 }
15269 
15270 /*
15271  * A DMAE transaction from a previous driver may have been in progress when
15272  * the pre-boot stage ended and boot began. This would invalidate the
15273  * transaction's addresses, setting the was-error bit in the PCI block and
15274  * causing all hw-to-host PCIe transactions to time out. If this happened,
15275  * clear the interrupt which detected it in the pglueb and the was-error bit.
15276  */
15277 static void
15278 bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15279 {
15280     uint32_t val;
15281 
15282     if (!CHIP_IS_E1x(sc)) {
15283         val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15284         if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15285             BLOGD(sc, DBG_LOAD,
15286                   "Clearing 'was-error' bit that was set in pglueb");
15287             REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15288         }
15289     }
15290 }
15291 
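      /*
       * Tell the MCP that the previous unload has completed; the
       * SKIP_LINK_RESET parameter asks the MCP not to reset the link.
       */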
15292 static int
15293 bxe_prev_mcp_done(struct bxe_softc *sc)
15294 {
15295     uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15296                                  DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15297     if (!rc) {
15298         BLOGE(sc, "MCP response failure, aborting\n");
15299         return (-1);
15300     }
15301 
15302     return (0);
15303 }
15304 
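      /*
       * Find the entry for this device's bus/slot/path in the global
       * bxe_prev_list. Callers are expected to hold bxe_prev_mtx.
       */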
15305 static struct bxe_prev_list_node *
15306 bxe_prev_path_get_entry(struct bxe_softc *sc)
15307 {
15308     struct bxe_prev_list_node *tmp;
15309 
15310     LIST_FOREACH(tmp, &bxe_prev_list, node) {
15311         if ((sc->pcie_bus == tmp->bus) &&
15312             (sc->pcie_device == tmp->slot) &&
15313             (SC_PATH(sc) == tmp->path)) {
15314             return (tmp);
15315         }
15316     }
15317 
15318     return (NULL);
15319 }
15320 
15321 static uint8_t
15322 bxe_prev_is_path_marked(struct bxe_softc *sc)
15323 {
15324     struct bxe_prev_list_node *tmp;
15325     int rc = FALSE;
15326 
15327     mtx_lock(&bxe_prev_mtx);
15328 
15329     tmp = bxe_prev_path_get_entry(sc);
15330     if (tmp) {
15331         if (tmp->aer) {
15332             BLOGD(sc, DBG_LOAD,
15333                   "Path %d/%d/%d was marked by AER\n",
15334                   sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15335         } else {
15336             rc = TRUE;
15337             BLOGD(sc, DBG_LOAD,
15338                   "Path %d/%d/%d was already cleaned from previous drivers\n",
15339                   sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15340         }
15341     }
15342 
15343     mtx_unlock(&bxe_prev_mtx);
15344 
15345     return (rc);
15346 }
15347 
15348 static int
15349 bxe_prev_mark_path(struct bxe_softc *sc,
15350                    uint8_t          after_undi)
15351 {
15352     struct bxe_prev_list_node *tmp;
15353 
15354     mtx_lock(&bxe_prev_mtx);
15355 
15356     /* Check whether the entry for this path already exists */
15357     tmp = bxe_prev_path_get_entry(sc);
15358     if (tmp) {
15359         if (!tmp->aer) {
15360             BLOGD(sc, DBG_LOAD,
15361                   "Re-marking AER in path %d/%d/%d\n",
15362                   sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15363         } else {
15364             BLOGD(sc, DBG_LOAD,
15365                   "Removing AER indication from path %d/%d/%d\n",
15366                   sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15367             tmp->aer = 0;
15368         }
15369 
15370         mtx_unlock(&bxe_prev_mtx);
15371         return (0);
15372     }
15373 
15374     mtx_unlock(&bxe_prev_mtx);
15375 
15376     /* Create an entry for this path and add it */
15377     tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15378                  (M_NOWAIT | M_ZERO));
15379     if (!tmp) {
15380         BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15381         return (-1);
15382     }
15383 
15384     tmp->bus  = sc->pcie_bus;
15385     tmp->slot = sc->pcie_device;
15386     tmp->path = SC_PATH(sc);
15387     tmp->aer  = 0;
15388     tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15389 
15390     mtx_lock(&bxe_prev_mtx);
15391 
15392     BLOGD(sc, DBG_LOAD,
15393           "Marked path %d/%d/%d - finished previous unload\n",
15394           sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15395     LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15396 
15397     mtx_unlock(&bxe_prev_mtx);
15398 
15399     return (0);
15400 }
15401 
15402 static int
15403 bxe_do_flr(struct bxe_softc *sc)
15404 {
15405     int i;
15406 
15407     /* only E2 and onwards support FLR */
15408     if (CHIP_IS_E1x(sc)) {
15409         BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15410         return (-1);
15411     }
15412 
15413     /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15414     if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15415         BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15416               sc->devinfo.bc_ver);
15417         return (-1);
15418     }
15419 
15420     /* Wait for Transaction Pending bit clean */
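    /* poll up to 4 times, backing off 100/200/400 ms between attempts */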
15421     for (i = 0; i < 4; i++) {
15422         if (i) {
15423             DELAY(((1 << (i - 1)) * 100) * 1000);
15424         }
15425 
15426         if (!bxe_is_pcie_pending(sc)) {
15427             goto clear;
15428         }
15429     }
15430 
15431     BLOGE(sc, "PCIE transaction is not cleared, "
15432               "proceeding with reset anyway\n");
15433 
15434 clear:
15435 
15436     BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15437     bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15438 
15439     return (0);
15440 }
15441 
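/*
 * MAC register addresses/values saved while closing the MACs during the
 * previous-unload flow; an address of zero means that MAC block was not
 * touched and nothing needs to be restored after the common reset.
 */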
15442 struct bxe_mac_vals {
15443     uint32_t xmac_addr;
15444     uint32_t xmac_val;
15445     uint32_t emac_addr;
15446     uint32_t emac_val;
15447     uint32_t umac_addr;
15448     uint32_t umac_val;
15449     uint32_t bmac_addr;
15450     uint32_t bmac_val[2];
15451 };
15452 
15453 static void
15454 bxe_prev_unload_close_mac(struct bxe_softc *sc,
15455                           struct bxe_mac_vals *vals)
15456 {
15457     uint32_t val, base_addr, offset, mask, reset_reg;
15458     uint8_t mac_stopped = FALSE;
15459     uint8_t port = SC_PORT(sc);
15460     uint32_t wb_data[2];
15461 
15462     /* reset addresses as they also mark which values were changed */
15463     vals->bmac_addr = 0;
15464     vals->umac_addr = 0;
15465     vals->xmac_addr = 0;
15466     vals->emac_addr = 0;
15467 
15468     reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15469 
15470     if (!CHIP_IS_E3(sc)) {
15471         val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15472         mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15473         if ((mask & reset_reg) && val) {
15474             BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15475             base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15476                                     : NIG_REG_INGRESS_BMAC0_MEM;
15477             offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15478                                     : BIGMAC_REGISTER_BMAC_CONTROL;
15479 
15480             /*
15481              * use rd/wr since we cannot use dmae. This is safe
15482              * since MCP won't access the bus due to the request
15483              * to unload, and no function on the path can be
15484              * loaded at this time.
15485              */
15486             wb_data[0] = REG_RD(sc, base_addr + offset);
15487             wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15488             vals->bmac_addr = base_addr + offset;
15489             vals->bmac_val[0] = wb_data[0];
15490             vals->bmac_val[1] = wb_data[1];
15491             wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15492             REG_WR(sc, vals->bmac_addr, wb_data[0]);
15493             REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15494         }
15495 
15496         BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15497         vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15498         vals->emac_val = REG_RD(sc, vals->emac_addr);
15499         REG_WR(sc, vals->emac_addr, 0);
15500         mac_stopped = TRUE;
15501     } else {
15502         if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15503             BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15504             base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15505             val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15506             REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15507             REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15508             vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15509             vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15510             REG_WR(sc, vals->xmac_addr, 0);
15511             mac_stopped = TRUE;
15512         }
15513 
15514         mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15515         if (mask & reset_reg) {
15516             BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15517             base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15518             vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15519             vals->umac_val = REG_RD(sc, vals->umac_addr);
15520             REG_WR(sc, vals->umac_addr, 0);
15521             mac_stopped = TRUE;
15522         }
15523     }
15524 
15525     if (mac_stopped) {
15526         DELAY(20000);
15527     }
15528 }
15529 
15530 #define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15531 #define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15532 #define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15533 #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
15534 
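/*
 * The UNDI Rx producer register packs the BD producer in the upper 16 bits
 * and the RCQ producer in the lower 16 bits; the macros above extract and
 * repack those fields so both producers can be advanced in lock step.
 */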
15535 static void
15536 bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15537                          uint8_t          port,
15538                          uint8_t          inc)
15539 {
15540     uint16_t rcq, bd;
15541     uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15542 
15543     rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15544     bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15545 
15546     tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15547     REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15548 
15549     BLOGD(sc, DBG_LOAD,
15550           "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15551           port, bd, rcq);
15552 }
15553 
15554 static int
15555 bxe_prev_unload_common(struct bxe_softc *sc)
15556 {
15557     uint32_t reset_reg, tmp_reg = 0, rc;
15558     uint8_t prev_undi = FALSE;
15559     struct bxe_mac_vals mac_vals;
15560     uint32_t timer_count = 1000;
15561     uint32_t prev_brb;
15562 
15563     /*
15564      * It is possible a previous function received 'common' answer,
15565      * but hasn't loaded yet, therefore creating a scenario of
15566      * multiple functions receiving 'common' on the same path.
15567      */
15568     BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15569 
15570     memset(&mac_vals, 0, sizeof(mac_vals));
15571 
15572     if (bxe_prev_is_path_marked(sc)) {
15573         return (bxe_prev_mcp_done(sc));
15574     }
15575 
15576     reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15577 
15578     /* Reset should be performed after BRB is emptied */
15579     if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15580         /* Close the MAC Rx to prevent BRB from filling up */
15581         bxe_prev_unload_close_mac(sc, &mac_vals);
15582 
15583         /* close LLH filters towards the BRB */
15584         elink_set_rx_filter(&sc->link_params, 0);
15585 
15586         /*
15587          * Check if the UNDI driver was previously loaded.
15588          * UNDI driver initializes CID offset for normal bell to 0x7
15589          */
15590         if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15591             tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15592             if (tmp_reg == 0x7) {
15593                 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15594                 prev_undi = TRUE;
15595                 /* clear the UNDI indication */
15596                 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15597                 /* clear possible idle check errors */
15598                 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15599             }
15600         }
15601 
15602         /* wait until BRB is empty */
15603         tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15604         while (timer_count) {
15605             prev_brb = tmp_reg;
15606 
15607             tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15608             if (!tmp_reg) {
15609                 break;
15610             }
15611 
15612             BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15613 
15614             /* reset timer as long as BRB actually gets emptied */
15615             if (prev_brb > tmp_reg) {
15616                 timer_count = 1000;
15617             } else {
15618                 timer_count--;
15619             }
15620 
15621             /* If UNDI resides in memory, manually increment it */
15622             if (prev_undi) {
15623                 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15624             }
15625 
15626             DELAY(10);
15627         }
15628 
15629         if (!timer_count) {
15630             BLOGE(sc, "Failed to empty BRB\n");
15631         }
15632     }
15633 
15634     /* No packets are in the pipeline, path is ready for reset */
15635     bxe_reset_common(sc);
15636 
15637     if (mac_vals.xmac_addr) {
15638         REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15639     }
15640     if (mac_vals.umac_addr) {
15641         REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15642     }
15643     if (mac_vals.emac_addr) {
15644         REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15645     }
15646     if (mac_vals.bmac_addr) {
15647         REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15648         REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15649     }
15650 
15651     rc = bxe_prev_mark_path(sc, prev_undi);
15652     if (rc) {
15653         bxe_prev_mcp_done(sc);
15654         return (rc);
15655     }
15656 
15657     return (bxe_prev_mcp_done(sc));
15658 }
15659 
15660 static int
15661 bxe_prev_unload_uncommon(struct bxe_softc *sc)
15662 {
15663     int rc;
15664 
15665     BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15666 
15667     /* Test if previous unload process was already finished for this path */
15668     if (bxe_prev_is_path_marked(sc)) {
15669         return (bxe_prev_mcp_done(sc));
15670     }
15671 
15672     BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15673 
15674     /*
15675      * If function has FLR capabilities, and existing FW version matches
15676      * the one required, then FLR will be sufficient to clean any residue
15677      * left by previous driver
15678      */
15679     rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15680     if (!rc) {
15681         /* fw version is good */
15682         BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15683         rc = bxe_do_flr(sc);
15684     }
15685 
15686     if (!rc) {
15687         /* FLR was performed */
15688         BLOGD(sc, DBG_LOAD, "FLR successful\n");
15689         return (0);
15690     }
15691 
15692     BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15693 
15694     /* Close the MCP request, return failure */
15695     rc = bxe_prev_mcp_done(sc);
15696     if (!rc) {
15697         rc = BXE_PREV_WAIT_NEEDED;
15698     }
15699 
15700     return (rc);
15701 }
15702 
15703 static int
15704 bxe_prev_unload(struct bxe_softc *sc)
15705 {
15706     int time_counter = 10;
15707     uint32_t fw, hw_lock_reg, hw_lock_val;
15708     uint32_t rc = 0;
15709 
15710     /*
15711      * Clear HW from errors which may have resulted from an interrupted
15712      * DMAE transaction.
15713      */
15714     bxe_prev_interrupted_dmae(sc);
15715 
15716     /* Release previously held locks */
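    /*
     * Each PF has its own driver-control register, spaced 8 bytes apart:
     * PFs 0-5 are based at DRIVER_CONTROL_1 and PFs 6-7 at DRIVER_CONTROL_7.
     */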
15717     hw_lock_reg =
15718         (SC_FUNC(sc) <= 5) ?
15719             (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15720             (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15721 
15722     hw_lock_val = (REG_RD(sc, hw_lock_reg));
15723     if (hw_lock_val) {
15724         if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15725             BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15726             REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15727                    (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15728         }
15729         BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15730         REG_WR(sc, hw_lock_reg, 0xffffffff);
15731     } else {
15732         BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15733     }
15734 
15735     if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15736         BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15737         REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15738     }
15739 
15740     do {
15741         /* Lock MCP using an unload request */
15742         fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15743         if (!fw) {
15744             BLOGE(sc, "MCP response failure, aborting\n");
15745             rc = -1;
15746             break;
15747         }
15748 
15749         if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15750             rc = bxe_prev_unload_common(sc);
15751             break;
15752         }
15753 
15754         /* non-common reply from MCP might require looping */
15755         rc = bxe_prev_unload_uncommon(sc);
15756         if (rc != BXE_PREV_WAIT_NEEDED) {
15757             break;
15758         }
15759 
15760         DELAY(20000);
15761     } while (--time_counter);
15762 
15763     if (!time_counter || rc) {
15764         BLOGE(sc, "Failed to unload previous driver!"
15765             " time_counter %d rc %d\n", time_counter, rc);
15766         rc = -1;
15767     }
15768 
15769     return (rc);
15770 }
15771 
15772 void
15773 bxe_dcbx_set_state(struct bxe_softc *sc,
15774                    uint8_t          dcb_on,
15775                    uint32_t         dcbx_enabled)
15776 {
15777     if (!CHIP_IS_E1x(sc)) {
15778         sc->dcb_state = dcb_on;
15779         sc->dcbx_enabled = dcbx_enabled;
15780     } else {
15781         sc->dcb_state = FALSE;
15782         sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15783     }
15784     BLOGD(sc, DBG_LOAD,
15785           "DCB state [%s:%s]\n",
15786           dcb_on ? "ON" : "OFF",
15787           (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15788           (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15789           (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15790           "on-chip with negotiation" : "invalid");
15791 }
15792 
15793 /* must be called after sriov-enable */
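/*
 * Returns the number of connection IDs the QM must support: the L2 CIDs plus
 * the VF and CNIC CIDs when applicable, rounded up to a multiple of
 * QM_CID_ROUND.
 */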
15794 static int
15795 bxe_set_qm_cid_count(struct bxe_softc *sc)
15796 {
15797     int cid_count = BXE_L2_MAX_CID(sc);
15798 
15799     if (IS_SRIOV(sc)) {
15800         cid_count += BXE_VF_CIDS;
15801     }
15802 
15803     if (CNIC_SUPPORT(sc)) {
15804         cid_count += CNIC_CID_MAX;
15805     }
15806 
15807     return (roundup(cid_count, QM_CID_ROUND));
15808 }
15809 
15810 static void
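/*
 * Map each traffic priority to a class of service. pri_map packs one 4-bit
 * COS value per priority (priority N in bits [4N+3:4N]); any COS at or above
 * max_cos falls back to COS 0.
 */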
15811 bxe_init_multi_cos(struct bxe_softc *sc)
15812 {
15813     int pri, cos;
15814 
15815     uint32_t pri_map = 0; /* XXX change to user config */
15816 
15817     for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15818         cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15819         if (cos < sc->max_cos) {
15820             sc->prio_to_cos[pri] = cos;
15821         } else {
15822             BLOGW(sc, "Invalid COS %d for priority %d "
15823                       "(max COS is %d), setting to 0\n",
15824                   cos, pri, (sc->max_cos - 1));
15825             sc->prio_to_cos[pri] = 0;
15826         }
15827     }
15828 }
15829 
15830 static int
15831 bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15832 {
15833     struct bxe_softc *sc;
15834     int error, result;
15835 
15836     result = 0;
15837     error = sysctl_handle_int(oidp, &result, 0, req);
15838 
15839     if (error || !req->newptr) {
15840         return (error);
15841     }
15842 
15843     if (result == 1) {
15844         uint32_t  temp;
15845         sc = (struct bxe_softc *)arg1;
15846 
15847         BLOGI(sc, "... dumping driver state ...\n");
15848         temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15849         BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15850     }
15851 
15852     return (error);
15853 }
15854 
15855 static int
15856 bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15857 {
15858     struct bxe_softc *sc = (struct bxe_softc *)arg1;
15859     uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15860     uint32_t *offset;
15861     uint64_t value = 0;
15862     int index = (int)arg2;
15863 
15864     if (index >= BXE_NUM_ETH_STATS) {
15865         BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15866         return (-1);
15867     }
15868 
15869     offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15870 
15871     switch (bxe_eth_stats_arr[index].size) {
15872     case 4:
15873         value = (uint64_t)*offset;
15874         break;
15875     case 8:
15876         value = HILO_U64(*offset, *(offset + 1));
15877         break;
15878     default:
15879         BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15880               index, bxe_eth_stats_arr[index].size);
15881         return (-1);
15882     }
15883 
15884     return (sysctl_handle_64(oidp, &value, 0, req));
15885 }
15886 
15887 static int
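/*
 * arg2 encodes the queue-stat selector built in bxe_add_sysctls(): the
 * fastpath (queue) index lives in the upper 16 bits and the index into
 * bxe_eth_q_stats_arr[] in the lower 16 bits.
 */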
15888 bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15889 {
15890     struct bxe_softc *sc = (struct bxe_softc *)arg1;
15891     uint32_t *eth_stats;
15892     uint32_t *offset;
15893     uint64_t value = 0;
15894     uint32_t q_stat = (uint32_t)arg2;
15895     uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15896     uint32_t index = (q_stat & 0xffff);
15897 
15898     eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15899 
15900     if (index >= BXE_NUM_ETH_Q_STATS) {
15901         BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15902         return (-1);
15903     }
15904 
15905     offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15906 
15907     switch (bxe_eth_q_stats_arr[index].size) {
15908     case 4:
15909         value = (uint64_t)*offset;
15910         break;
15911     case 8:
15912         value = HILO_U64(*offset, *(offset + 1));
15913         break;
15914     default:
15915         BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15916               index, bxe_eth_q_stats_arr[index].size);
15917         return (-1);
15918     }
15919 
15920     return (sysctl_handle_64(oidp, &value, 0, req));
15921 }
15922 
15923 static void bxe_force_link_reset(struct bxe_softc *sc)
15924 {
15925 
15926         bxe_acquire_phy_lock(sc);
15927         elink_link_reset(&sc->link_params, &sc->link_vars, 1);
15928         bxe_release_phy_lock(sc);
15929 }
15930 
15931 static int
15932 bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
15933 {
15934         struct bxe_softc *sc = (struct bxe_softc *)arg1;
15935         uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
15936         int rc = 0;
15937         int error;
15938         int result;
15939 
15940 
15941         error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
15942 
15943         if (error || !req->newptr) {
15944                 return (error);
15945         }
15946         if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) {
15947                 BLOGW(sc, "invalid pause param (%d) - use integers between 0 and 8\n", sc->bxe_pause_param);
15948                 sc->bxe_pause_param = 8;
15949         }
15950 
15951         result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
15952 
15953 
15954         if ((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
15955                 BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param);
15956                 return -EINVAL;
15957         }
15958 
15959         if (IS_MF(sc))
15960                 return 0;
15961         sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
15962         if (result & ELINK_FLOW_CTRL_RX)
15963                 sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
15964 
15965         if (result & ELINK_FLOW_CTRL_TX)
15966                 sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
15967         if (sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
15968                 sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
15969 
15970         if (result & 0x400) {
15971                 if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
15972                         sc->link_params.req_flow_ctrl[cfg_idx] =
15973                                 ELINK_FLOW_CTRL_AUTO;
15974                 }
15975                 sc->link_params.req_fc_auto_adv = 0;
15976                 if (result & ELINK_FLOW_CTRL_RX)
15977                         sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
15978 
15979                 if (result & ELINK_FLOW_CTRL_TX)
15980                         sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
15981                 if (!sc->link_params.req_fc_auto_adv)
15982                         sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
15983         }
15984         if (IS_PF(sc)) {
15985                 if (sc->link_vars.link_up) {
15986                         bxe_stats_handle(sc, STATS_EVENT_STOP);
15987                 }
15988                 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
15989                         bxe_force_link_reset(sc);
15990                         bxe_acquire_phy_lock(sc);
15991 
15992                         rc = elink_phy_init(&sc->link_params, &sc->link_vars);
15993 
15994                         bxe_release_phy_lock(sc);
15995 
15996                         bxe_calc_fc_adv(sc);
15997                 }
15998         }
15999         return rc;
16000 }
16001 
16002 
16003 static void
16004 bxe_add_sysctls(struct bxe_softc *sc)
16005 {
16006     struct sysctl_ctx_list *ctx;
16007     struct sysctl_oid_list *children;
16008     struct sysctl_oid *queue_top, *queue;
16009     struct sysctl_oid_list *queue_top_children, *queue_children;
16010     char queue_num_buf[32];
16011     uint32_t q_stat;
16012     int i, j;
16013 
16014     ctx = device_get_sysctl_ctx(sc->dev);
16015     children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
16016 
16017     SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
16018                       CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
16019                       "version");
16020 
16021     snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
16022              BCM_5710_FW_MAJOR_VERSION,
16023              BCM_5710_FW_MINOR_VERSION,
16024              BCM_5710_FW_REVISION_VERSION,
16025              BCM_5710_FW_ENGINEERING_VERSION);
16026 
16027     snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
16028         ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
16029          (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
16030          (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
16031          (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
16032                                                                 "Unknown"));
16033     SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
16034                     CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
16035                     "multifunction vnics per port");
16036 
16037     snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
16038         ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
16039          (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
16040          (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
16041                                               "???GT/s"),
16042         sc->devinfo.pcie_link_width);
16043 
16044     sc->debug = bxe_debug;
16045 
16046     SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
16047                       CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
16048                       "bootcode version");
16049     SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
16050                       CTLFLAG_RD, sc->fw_ver_str, 0,
16051                       "firmware version");
16052     SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
16053                       CTLFLAG_RD, sc->mf_mode_str, 0,
16054                       "multifunction mode");
16055     SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
16056                       CTLFLAG_RD, sc->mac_addr_str, 0,
16057                       "mac address");
16058     SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
16059                       CTLFLAG_RD, sc->pci_link_str, 0,
16060                       "pci link status");
16061     SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
16062                     CTLFLAG_RW, &sc->debug,
16063                     "debug logging mode");
16064 
16065     sc->trigger_grcdump = 0;
16066     SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
16067                    CTLFLAG_RW, &sc->trigger_grcdump, 0,
16068                    "trigger_grcdump should be set"
16069                    " before collecting the grcdump");
16070 
16071     sc->grcdump_started = 0;
16072     sc->grcdump_done = 0;
16073     SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
16074                    CTLFLAG_RD, &sc->grcdump_done, 0,
16075                    "set by driver when grcdump is done");
16076 
16077     sc->rx_budget = bxe_rx_budget;
16078     SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
16079                     CTLFLAG_RW, &sc->rx_budget, 0,
16080                     "rx processing budget");
16081 
16082     SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
16083         CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16084         bxe_sysctl_pauseparam, "IU",
16085         "need pause frames - DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
16086 
16087 
16088     SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
16089         CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16090         bxe_sysctl_state, "IU", "dump driver state");
16091 
16092     for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
16093         SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
16094             bxe_eth_stats_arr[i].string,
16095             CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
16096             bxe_sysctl_eth_stat, "LU", bxe_eth_stats_arr[i].string);
16097     }
16098 
16099     /* add a new parent node for all queues "dev.bxe.#.queue" */
16100     queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
16101         CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "queue");
16102     queue_top_children = SYSCTL_CHILDREN(queue_top);
16103 
16104     for (i = 0; i < sc->num_queues; i++) {
16105         /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
16106         snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
16107         queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
16108             queue_num_buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "single queue");
16109         queue_children = SYSCTL_CHILDREN(queue);
16110 
16111         for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
16112             q_stat = ((i << 16) | j);
16113             SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
16114                  bxe_eth_q_stats_arr[j].string,
16115                  CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, q_stat,
16116                  bxe_sysctl_eth_q_stat, "LU", bxe_eth_q_stats_arr[j].string);
16117         }
16118     }
16119 }
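
/*
 * Example usage (assuming unit 0): the nodes created above can be inspected
 * with sysctl(8), e.g. "sysctl dev.bxe.0.queue.0" dumps every per-queue
 * statistic for queue 0, and "sysctl dev.bxe.0.trigger_grcdump=1" requests a
 * grcdump.
 */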
16120 
16121 static int
16122 bxe_alloc_buf_rings(struct bxe_softc *sc)
16123 {
16124     int i;
16125     struct bxe_fastpath *fp;
16126 
16127     for (i = 0; i < sc->num_queues; i++) {
16128 
16129         fp = &sc->fp[i];
16130 
16131         fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
16132                                    M_NOWAIT, &fp->tx_mtx);
16133         if (fp->tx_br == NULL)
16134             return (-1);
16135     }
16136 
16137     return (0);
16138 }
16139 
16140 static void
16141 bxe_free_buf_rings(struct bxe_softc *sc)
16142 {
16143     int i;
16144     struct bxe_fastpath *fp;
16145 
16146     for (i = 0; i < sc->num_queues; i++) {
16147 
16148         fp = &sc->fp[i];
16149 
16150         if (fp->tx_br) {
16151             buf_ring_free(fp->tx_br, M_DEVBUF);
16152             fp->tx_br = NULL;
16153         }
16154     }
16155 }
16156 
16157 static void
16158 bxe_init_fp_mutexs(struct bxe_softc *sc)
16159 {
16160     int i;
16161     struct bxe_fastpath *fp;
16162 
16163     for (i = 0; i < sc->num_queues; i++) {
16164 
16165         fp = &sc->fp[i];
16166 
16167         snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
16168             "bxe%d_fp%d_tx_lock", sc->unit, i);
16169         mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
16170 
16171         snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
16172             "bxe%d_fp%d_rx_lock", sc->unit, i);
16173         mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
16174     }
16175 }
16176 
16177 static void
16178 bxe_destroy_fp_mutexs(struct bxe_softc *sc)
16179 {
16180     int i;
16181     struct bxe_fastpath *fp;
16182 
16183     for (i = 0; i < sc->num_queues; i++) {
16184 
16185         fp = &sc->fp[i];
16186 
16187         if (mtx_initialized(&fp->tx_mtx)) {
16188             mtx_destroy(&fp->tx_mtx);
16189         }
16190 
16191         if (mtx_initialized(&fp->rx_mtx)) {
16192             mtx_destroy(&fp->rx_mtx);
16193         }
16194     }
16195 }
16196 
16197 
16198 /*
16199  * Device attach function.
16200  *
16201  * Allocates device resources, performs secondary chip identification, and
16202  * initializes driver instance variables. This function is called from driver
16203  * load after a successful probe.
16204  *
16205  * Returns:
16206  *   0 = Success, >0 = Failure
16207  */
16208 static int
16209 bxe_attach(device_t dev)
16210 {
16211     struct bxe_softc *sc;
16212 
16213     sc = device_get_softc(dev);
16214 
16215     BLOGD(sc, DBG_LOAD, "Starting attach...\n");
16216 
16217     sc->state = BXE_STATE_CLOSED;
16218 
16219     sc->dev  = dev;
16220     sc->unit = device_get_unit(dev);
16221 
16222     BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
16223 
16224     sc->pcie_bus    = pci_get_bus(dev);
16225     sc->pcie_device = pci_get_slot(dev);
16226     sc->pcie_func   = pci_get_function(dev);
16227 
16228     /* enable bus master capability */
16229     pci_enable_busmaster(dev);
16230 
16231     /* get the BARs */
16232     if (bxe_allocate_bars(sc) != 0) {
16233         return (ENXIO);
16234     }
16235 
16236     /* initialize the mutexes */
16237     bxe_init_mutexes(sc);
16238 
16239     /* prepare the periodic callout */
16240     callout_init(&sc->periodic_callout, 1);
16241 
16242     /* prepare the chip taskqueue */
16243     sc->chip_tq_flags = CHIP_TQ_NONE;
16244     snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
16245              "bxe%d_chip_tq", sc->unit);
16246     TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
16247     sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
16248                                    taskqueue_thread_enqueue,
16249                                    &sc->chip_tq);
16250     taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
16251                             "%s", sc->chip_tq_name);
16252 
16253     TIMEOUT_TASK_INIT(taskqueue_thread,
16254         &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task,  sc);
16255 
16256 
16257     /* get device info and set params */
16258     if (bxe_get_device_info(sc) != 0) {
16259         BLOGE(sc, "failed to get device info\n");
16260         bxe_deallocate_bars(sc);
16261         pci_disable_busmaster(dev);
16262         return (ENXIO);
16263     }
16264 
16265     /* get final misc params */
16266     bxe_get_params(sc);
16267 
16268     /* set the default MTU (changed via ifconfig) */
16269     sc->mtu = ETHERMTU;
16270 
16271     bxe_set_modes_bitmap(sc);
16272 
16273     /* XXX
16274      * If in AFEX mode and the function is configured for FCoE
16275      * then bail... no L2 allowed.
16276      */
16277 
16278     /* get phy settings from shmem and 'and' against admin settings */
16279     bxe_get_phy_info(sc);
16280 
16281     /* initialize the FreeBSD ifnet interface */
16282     if (bxe_init_ifnet(sc) != 0) {
16283         bxe_release_mutexes(sc);
16284         bxe_deallocate_bars(sc);
16285         pci_disable_busmaster(dev);
16286         return (ENXIO);
16287     }
16288 
16289     if (bxe_add_cdev(sc) != 0) {
16290         if (sc->ifp != NULL) {
16291             ether_ifdetach(sc->ifp);
16292         }
16293         ifmedia_removeall(&sc->ifmedia);
16294         bxe_release_mutexes(sc);
16295         bxe_deallocate_bars(sc);
16296         pci_disable_busmaster(dev);
16297         return (ENXIO);
16298     }
16299 
16300     /* allocate device interrupts */
16301     if (bxe_interrupt_alloc(sc) != 0) {
16302         bxe_del_cdev(sc);
16303         if (sc->ifp != NULL) {
16304             ether_ifdetach(sc->ifp);
16305         }
16306         ifmedia_removeall(&sc->ifmedia);
16307         bxe_release_mutexes(sc);
16308         bxe_deallocate_bars(sc);
16309         pci_disable_busmaster(dev);
16310         return (ENXIO);
16311     }
16312 
16313     bxe_init_fp_mutexs(sc);
16314 
16315     if (bxe_alloc_buf_rings(sc) != 0) {
16316         bxe_free_buf_rings(sc);
16317         bxe_interrupt_free(sc);
16318         bxe_del_cdev(sc);
16319         if (sc->ifp != NULL) {
16320             ether_ifdetach(sc->ifp);
16321         }
16322         ifmedia_removeall(&sc->ifmedia);
16323         bxe_release_mutexes(sc);
16324         bxe_deallocate_bars(sc);
16325         pci_disable_busmaster(dev);
16326         return (ENXIO);
16327     }
16328 
16329     /* allocate ilt */
16330     if (bxe_alloc_ilt_mem(sc) != 0) {
16331         bxe_free_buf_rings(sc);
16332         bxe_interrupt_free(sc);
16333         bxe_del_cdev(sc);
16334         if (sc->ifp != NULL) {
16335             ether_ifdetach(sc->ifp);
16336         }
16337         ifmedia_removeall(&sc->ifmedia);
16338         bxe_release_mutexes(sc);
16339         bxe_deallocate_bars(sc);
16340         pci_disable_busmaster(dev);
16341         return (ENXIO);
16342     }
16343 
16344     /* allocate the host hardware/software hsi structures */
16345     if (bxe_alloc_hsi_mem(sc) != 0) {
16346         bxe_free_ilt_mem(sc);
16347         bxe_free_buf_rings(sc);
16348         bxe_interrupt_free(sc);
16349         bxe_del_cdev(sc);
16350         if (sc->ifp != NULL) {
16351             ether_ifdetach(sc->ifp);
16352         }
16353         ifmedia_removeall(&sc->ifmedia);
16354         bxe_release_mutexes(sc);
16355         bxe_deallocate_bars(sc);
16356         pci_disable_busmaster(dev);
16357         return (ENXIO);
16358     }
16359 
16360     /* need to reset chip if UNDI was active */
16361     if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16362         /* init fw_seq */
16363         sc->fw_seq =
16364             (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16365              DRV_MSG_SEQ_NUMBER_MASK);
16366         BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16367         bxe_prev_unload(sc);
16368     }
16369 
16370 #if 1
16371     /* XXX */
16372     bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16373 #else
16374     if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16375         SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16376         SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16377         SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16378         bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16379         bxe_dcbx_init_params(sc);
16380     } else {
16381         bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16382     }
16383 #endif
16384 
16385     /* calculate qm_cid_count */
16386     sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16387     BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16388 
16389     sc->max_cos = 1;
16390     bxe_init_multi_cos(sc);
16391 
16392     bxe_add_sysctls(sc);
16393 
16394     return (0);
16395 }
16396 
16397 /*
16398  * Device detach function.
16399  *
16400  * Stops the controller, resets the controller, and releases resources.
16401  *
16402  * Returns:
16403  *   0 = Success, >0 = Failure
16404  */
16405 static int
16406 bxe_detach(device_t dev)
16407 {
16408     struct bxe_softc *sc;
16409     if_t ifp;
16410 
16411     sc = device_get_softc(dev);
16412 
16413     BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16414 
16415     ifp = sc->ifp;
16416     if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16417         BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16418         return(EBUSY);
16419     }
16420 
16421     bxe_del_cdev(sc);
16422 
16423     /* stop the periodic callout */
16424     bxe_periodic_stop(sc);
16425 
16426     /* stop the chip taskqueue */
16427     atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16428     if (sc->chip_tq) {
16429         taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16430         taskqueue_free(sc->chip_tq);
16431         sc->chip_tq = NULL;
16432         taskqueue_drain_timeout(taskqueue_thread,
16433             &sc->sp_err_timeout_task);
16434     }
16435 
16436     /* stop and reset the controller if it was open */
16437     if (sc->state != BXE_STATE_CLOSED) {
16438         BXE_CORE_LOCK(sc);
16439         bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16440         sc->state = BXE_STATE_DISABLED;
16441         BXE_CORE_UNLOCK(sc);
16442     }
16443 
16444     /* release the network interface */
16445     if (ifp != NULL) {
16446         ether_ifdetach(ifp);
16447     }
16448     ifmedia_removeall(&sc->ifmedia);
16449 
16450     /* XXX do the following based on driver state... */
16451 
16452     /* free the host hardware/software hsi structures */
16453     bxe_free_hsi_mem(sc);
16454 
16455     /* free ilt */
16456     bxe_free_ilt_mem(sc);
16457 
16458     bxe_free_buf_rings(sc);
16459 
16460     /* release the interrupts */
16461     bxe_interrupt_free(sc);
16462 
16463     /* Release the mutexes*/
16464     bxe_destroy_fp_mutexs(sc);
16465     bxe_release_mutexes(sc);
16466 
16467 
16468     /* Release the PCIe BAR mapped memory */
16469     bxe_deallocate_bars(sc);
16470 
16471     /* Release the FreeBSD interface. */
16472     if (sc->ifp != NULL) {
16473         if_free(sc->ifp);
16474     }
16475 
16476     pci_disable_busmaster(dev);
16477 
16478     return (0);
16479 }
16480 
16481 /*
16482  * Device shutdown function.
16483  *
16484  * Stops and resets the controller.
16485  *
16486  * Returns:
16487  *   0 = Success
16488  */
16489 static int
16490 bxe_shutdown(device_t dev)
16491 {
16492     struct bxe_softc *sc;
16493 
16494     sc = device_get_softc(dev);
16495 
16496     BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16497 
16498     /* stop the periodic callout */
16499     bxe_periodic_stop(sc);
16500 
16501     if (sc->state != BXE_STATE_CLOSED) {
16502         BXE_CORE_LOCK(sc);
16503         bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16504         BXE_CORE_UNLOCK(sc);
16505     }
16506 
16507     return (0);
16508 }
16509 
16510 void
16511 bxe_igu_ack_sb(struct bxe_softc *sc,
16512                uint8_t          igu_sb_id,
16513                uint8_t          segment,
16514                uint16_t         index,
16515                uint8_t          op,
16516                uint8_t          update)
16517 {
16518     uint32_t igu_addr = sc->igu_base_addr;
16519     igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16520     bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16521 }
16522 
16523 static void
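/*
 * Issue an IGU status-block cleanup command: write the data word and then the
 * control word through GRC, then poll the per-SB cleanup ack bitmap until the
 * bit for idu_sb_id is set or the retry count expires.
 */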
16524 bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16525                      uint8_t          func,
16526                      uint8_t          idu_sb_id,
16527                      uint8_t          is_pf)
16528 {
16529     uint32_t data, ctl, cnt = 100;
16530     uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16531     uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16532     uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16533     uint32_t sb_bit =  1 << (idu_sb_id%32);
16534     uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16535     uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16536 
16537     /* Not supported in BC mode */
16538     if (CHIP_INT_MODE_IS_BC(sc)) {
16539         return;
16540     }
16541 
16542     data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16543              IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16544             IGU_REGULAR_CLEANUP_SET |
16545             IGU_REGULAR_BCLEANUP);
16546 
16547     ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16548            (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16549            (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16550 
16551     BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16552             data, igu_addr_data);
16553     REG_WR(sc, igu_addr_data, data);
16554 
16555     bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16556                       BUS_SPACE_BARRIER_WRITE);
16557     mb();
16558 
16559     BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16560             ctl, igu_addr_ctl);
16561     REG_WR(sc, igu_addr_ctl, ctl);
16562 
16563     bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16564                       BUS_SPACE_BARRIER_WRITE);
16565     mb();
16566 
16567     /* wait for clean up to finish */
16568     while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16569         DELAY(20000);
16570     }
16571 
16572     if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16573         BLOGD(sc, DBG_LOAD,
16574               "Unable to finish IGU cleanup: "
16575               "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16576               idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16577     }
16578 }
16579 
16580 static void
16581 bxe_igu_clear_sb(struct bxe_softc *sc,
16582                  uint8_t          idu_sb_id)
16583 {
16584     bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16585 }
16586 
16587 
16588 
16589 
16590 
16591 
16592 
16593 /*******************/
16594 /* ECORE CALLBACKS */
16595 /*******************/
16596 
16597 static void
16598 bxe_reset_common(struct bxe_softc *sc)
16599 {
16600     uint32_t val = 0x1400;
16601 
16602     /* reset_common */
16603     REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16604 
16605     if (CHIP_IS_E3(sc)) {
16606         val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16607         val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16608     }
16609 
16610     REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16611 }
16612 
16613 static void
16614 bxe_common_init_phy(struct bxe_softc *sc)
16615 {
16616     uint32_t shmem_base[2];
16617     uint32_t shmem2_base[2];
16618 
16619     /* Avoid common init in case MFW supports LFA */
16620     if (SHMEM2_RD(sc, size) >
16621         (uint32_t)offsetof(struct shmem2_region,
16622                            lfa_host_addr[SC_PORT(sc)])) {
16623         return;
16624     }
16625 
16626     shmem_base[0]  = sc->devinfo.shmem_base;
16627     shmem2_base[0] = sc->devinfo.shmem2_base;
16628 
16629     if (!CHIP_IS_E1x(sc)) {
16630         shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16631         shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16632     }
16633 
16634     bxe_acquire_phy_lock(sc);
16635     elink_common_init_phy(sc, shmem_base, shmem2_base,
16636                           sc->devinfo.chip_id, 0);
16637     bxe_release_phy_lock(sc);
16638 }
16639 
16640 static void
16641 bxe_pf_disable(struct bxe_softc *sc)
16642 {
16643     uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16644 
16645     val &= ~IGU_PF_CONF_FUNC_EN;
16646 
16647     REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16648     REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16649     REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16650 }
16651 
16652 static void
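/*
 * Program the PXP arbiter read/write ordering from the PCIe Device Control
 * register: the Max_Payload_Size encoding (bits 7:5) gives the write order
 * and the Max_Read_Request_Size encoding (bits 14:12) gives the read order,
 * unless the read order is forced via the mrrs tunable.
 */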
16653 bxe_init_pxp(struct bxe_softc *sc)
16654 {
16655     uint16_t devctl;
16656     int r_order, w_order;
16657 
16658     devctl = bxe_pcie_capability_read(sc, PCIER_DEVICE_CTL, 2);
16659 
16660     BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16661 
16662     w_order = ((devctl & PCIEM_CTL_MAX_PAYLOAD) >> 5);
16663 
16664     if (sc->mrrs == -1) {
16665         r_order = ((devctl & PCIEM_CTL_MAX_READ_REQUEST) >> 12);
16666     } else {
16667         BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16668         r_order = sc->mrrs;
16669     }
16670 
16671     ecore_init_pxp_arb(sc, r_order, w_order);
16672 }
16673 
16674 static uint32_t
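/*
 * The per-function pretend registers are laid out contiguously, so the
 * register for this function is the F0 base plus the absolute function
 * number times the F0->F1 stride.
 */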
16675 bxe_get_pretend_reg(struct bxe_softc *sc)
16676 {
16677     uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16678     uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16679     return (base + (SC_ABS_FUNC(sc)) * stride);
16680 }
16681 
16682 /*
16683  * Called only on E1H or E2.
16684  * When pretending to be PF, the pretend value is the function number 0..7.
16685  * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
16686  * combination.
16687  */
16688 static int
16689 bxe_pretend_func(struct bxe_softc *sc,
16690                  uint16_t         pretend_func_val)
16691 {
16692     uint32_t pretend_reg;
16693 
16694     if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16695         return (-1);
16696     }
16697 
16698     /* get my own pretend register */
16699     pretend_reg = bxe_get_pretend_reg(sc);
16700     REG_WR(sc, pretend_reg, pretend_func_val);
16701     REG_RD(sc, pretend_reg);
16702     return (0);
16703 }
16704 
16705 static void
16706 bxe_iov_init_dmae(struct bxe_softc *sc)
16707 {
16708     return;
16709 }
16710 
16711 static void
16712 bxe_iov_init_dq(struct bxe_softc *sc)
16713 {
16714     return;
16715 }
16716 
16717 /* send a NIG loopback debug packet */
16718 static void
16719 bxe_lb_pckt(struct bxe_softc *sc)
16720 {
16721     uint32_t wb_write[3];
16722 
16723     /* Ethernet source and destination addresses */
16724     wb_write[0] = 0x55555555;
16725     wb_write[1] = 0x55555555;
16726     wb_write[2] = 0x20;     /* SOP */
16727     REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16728 
16729     /* NON-IP protocol */
16730     wb_write[0] = 0x09000000;
16731     wb_write[1] = 0x55555555;
16732     wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16733     REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16734 }
16735 
16736 /*
16737  * Some of the internal memories are not directly readable from the driver.
16738  * To test them we send debug packets.
16739  */
16740 static int
16741 bxe_int_mem_test(struct bxe_softc *sc)
16742 {
16743     int factor;
16744     int count, i;
16745     uint32_t val = 0;
16746 
16747     if (CHIP_REV_IS_FPGA(sc)) {
16748         factor = 120;
16749     } else if (CHIP_REV_IS_EMUL(sc)) {
16750         factor = 200;
16751     } else {
16752         factor = 1;
16753     }
16754 
16755     /* disable inputs of parser neighbor blocks */
16756     REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16757     REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16758     REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16759     REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16760 
16761     /*  write 0 to parser credits for CFC search request */
16762     REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16763 
16764     /* send Ethernet packet */
16765     bxe_lb_pckt(sc);
16766 
16767     /* TODO: should the NIG statistics be reset here? */
16768     /* Wait until NIG register shows 1 packet of size 0x10 */
16769     count = 1000 * factor;
16770     while (count) {
16771         bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16772         val = *BXE_SP(sc, wb_data[0]);
16773         if (val == 0x10) {
16774             break;
16775         }
16776 
16777         DELAY(10000);
16778         count--;
16779     }
16780 
16781     if (val != 0x10) {
16782         BLOGE(sc, "NIG timeout val=0x%x\n", val);
16783         return (-1);
16784     }
16785 
16786     /* wait until PRS register shows 1 packet */
16787     count = (1000 * factor);
16788     while (count) {
16789         val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16790         if (val == 1) {
16791             break;
16792         }
16793 
16794         DELAY(10000);
16795         count--;
16796     }
16797 
16798     if (val != 0x1) {
16799         BLOGE(sc, "PRS timeout val=0x%x\n", val);
16800         return (-2);
16801     }
16802 
16803     /* Reset and init BRB, PRS */
16804     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16805     DELAY(50000);
16806     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16807     DELAY(50000);
16808     ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16809     ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16810 
16811     /* Disable inputs of parser neighbor blocks */
16812     REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16813     REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16814     REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16815     REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16816 
16817     /* Write 0 to parser credits for CFC search request */
16818     REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16819 
16820     /* send 10 Ethernet packets */
16821     for (i = 0; i < 10; i++) {
16822         bxe_lb_pckt(sc);
16823     }
16824 
16825     /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16826     count = (1000 * factor);
16827     while (count) {
16828         bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16829         val = *BXE_SP(sc, wb_data[0]);
16830         if (val == 0xb0) {
16831             break;
16832         }
16833 
16834         DELAY(10000);
16835         count--;
16836     }
16837 
16838     if (val != 0xb0) {
16839         BLOGE(sc, "NIG timeout val=0x%x\n", val);
16840         return (-3);
16841     }
16842 
16843     /* Wait until PRS register shows 2 packets */
16844     val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16845     if (val != 2) {
16846         BLOGE(sc, "PRS timeout val=0x%x\n", val);
16847     }
16848 
16849     /* Write 1 to parser credits for CFC search request */
16850     REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16851 
16852     /* Wait until PRS register shows 3 packets */
16853     DELAY(10000 * factor);
16854 
16855     /* verify that the PRS register now shows 3 packets */
16856     val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16857     if (val != 3) {
16858         BLOGE(sc, "PRS timeout val=0x%x\n", val);
16859     }
16860 
16861     /* clear NIG EOP FIFO */
16862     for (i = 0; i < 11; i++) {
16863         REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16864     }
16865 
16866     val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16867     if (val != 1) {
16868         BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16869         return (-4);
16870     }
16871 
16872     /* Reset and init BRB, PRS, NIG */
16873     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16874     DELAY(50000);
16875     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16876     DELAY(50000);
16877     ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16878     ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16879     if (!CNIC_SUPPORT(sc)) {
16880         /* set NIC mode */
16881         REG_WR(sc, PRS_REG_NIC_MODE, 1);
16882     }
16883 
16884     /* Enable inputs of parser neighbor blocks */
16885     REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16886     REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16887     REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16888     REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16889 
16890     return (0);
16891 }
16892 
16893 static void
16894 bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16895 {
16896     int is_required;
16897     uint32_t val;
16898     int port;
16899 
16900     is_required = 0;
16901     val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16902            SHARED_HW_CFG_FAN_FAILURE_MASK);
16903 
16904     if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16905         is_required = 1;
16906     }
16907     /*
16908      * The fan failure mechanism is usually related to the PHY type since
16909      * the power consumption of the board is affected by the PHY. Currently,
16910      * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16911      */
16912     else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16913         for (port = PORT_0; port < PORT_MAX; port++) {
16914             is_required |= elink_fan_failure_det_req(sc,
16915                                                      sc->devinfo.shmem_base,
16916                                                      sc->devinfo.shmem2_base,
16917                                                      port);
16918         }
16919     }
16920 
16921     BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16922 
16923     if (is_required == 0) {
16924         return;
16925     }
16926 
16927     /* Fan failure is indicated by SPIO 5 */
16928     bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16929 
16930     /* set to active low mode */
16931     val = REG_RD(sc, MISC_REG_SPIO_INT);
16932     val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16933     REG_WR(sc, MISC_REG_SPIO_INT, val);
16934 
16935     /* enable interrupt to signal the IGU */
16936     val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16937     val |= MISC_SPIO_SPIO5;
16938     REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16939 }
16940 
16941 static void
16942 bxe_enable_blocks_attention(struct bxe_softc *sc)
16943 {
16944     uint32_t val;
16945 
16946     REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16947     if (!CHIP_IS_E1x(sc)) {
16948         REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16949     } else {
16950         REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16951     }
16952     REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16953     REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16954     /*
16955      * mask read length error interrupts in brb for parser
16956      * (parsing unit and 'checksum and crc' unit)
16957      * these errors are legal (PU reads fixed length and CAC can cause
16958      * read length error on truncated packets)
16959      */
16960     REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16961     REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16962     REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16963     REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16964     REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16965     REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16966 /*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16967 /*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16968     REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16969     REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16970     REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16971 /*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16972 /*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16973     REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16974     REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16975     REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16976     REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16977 /*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16978 /*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16979 
16980     val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16981            PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16982            PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16983     if (!CHIP_IS_E1x(sc)) {
16984         val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16985                 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16986     }
16987     REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16988 
16989     REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16990     REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16991     REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16992 /*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16993 
16994     if (!CHIP_IS_E1x(sc)) {
16995         /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16996         REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16997     }
16998 
16999     REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
17000     REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
17001 /*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
17002     REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
17003 }
17004 
17005 /**
17006  * bxe_init_hw_common - initialize the HW at the COMMON phase.
17007  *
17008  * @sc:     driver handle
17009  */
17010 static int
17011 bxe_init_hw_common(struct bxe_softc *sc)
17012 {
17013     uint8_t abs_func_id;
17014     uint32_t val;
17015 
17016     BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
17017           SC_ABS_FUNC(sc));
17018 
17019     /*
17020      * take the RESET lock to protect undi_unload flow from accessing
17021      * registers while we are resetting the chip
17022      */
17023     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17024 
17025     bxe_reset_common(sc);
17026 
17027     REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
17028 
17029     val = 0xfffc;
17030     if (CHIP_IS_E3(sc)) {
17031         val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
17032         val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
17033     }
17034 
17035     REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
17036 
17037     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17038 
17039     ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
17040     BLOGD(sc, DBG_LOAD, "after misc block init\n");
17041 
17042     if (!CHIP_IS_E1x(sc)) {
17043         /*
17044          * In 4-port or 2-port mode we need to turn off master-enable for
17045          * everyone. After that we turn it back on for self. So, we disregard
17046          * multi-function, and always disable all functions on the given path,
17047          * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
17048          */
17049         for (abs_func_id = SC_PATH(sc);
17050              abs_func_id < (E2_FUNC_MAX * 2);
17051              abs_func_id += 2) {
17052             if (abs_func_id == SC_ABS_FUNC(sc)) {
17053                 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17054                 continue;
17055             }
17056 
17057             bxe_pretend_func(sc, abs_func_id);
17058 
17059             /* clear pf enable */
17060             bxe_pf_disable(sc);
17061 
17062             bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17063         }
17064     }
17065 
17066     BLOGD(sc, DBG_LOAD, "after pf disable\n");
17067 
17068     ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
17069 
17070     if (CHIP_IS_E1(sc)) {
17071         /*
17072          * enable HW interrupt from PXP on USDM overflow
17073          * bit 16 on INT_MASK_0
17074          */
17075         REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
17076     }
17077 
17078     ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
17079     bxe_init_pxp(sc);
17080 
17081 #ifdef __BIG_ENDIAN
17082     REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
17083     REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
17084     REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
17085     REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
17086     REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
17087     /* make sure this value is 0 */
17088     REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
17089 
17090     //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
17091     REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
17092     REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
17093     REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
17094     REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
17095 #endif
17096 
17097     ecore_ilt_init_page_size(sc, INITOP_SET);
17098 
17099     if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
17100         REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
17101     }
17102 
17103     /* let the HW do its magic... */
17104     DELAY(100000);
17105 
17106     /* finish PXP init */
17107     val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
17108     if (val != 1) {
17109         BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
17110             val);
17111         return (-1);
17112     }
17113     val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
17114     if (val != 1) {
17115         BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
17116         return (-1);
17117     }
17118 
17119     BLOGD(sc, DBG_LOAD, "after pxp init\n");
17120 
17121     /*
17122      * Timer bug workaround for E2 only. We need to set the entire ILT to have
17123      * entries with value "0" and valid bit on. This needs to be done by the
17124      * first PF that is loaded in a path (i.e. common phase)
17125      */
17126     if (!CHIP_IS_E1x(sc)) {
17127 /*
17128  * In E2 there is a bug in the timers block that can cause function 6 / 7
17129  * (i.e. vnic3) to start even if it is marked as "scan-off".
17130  * This occurs when a different function (func2,3) is being marked
17131  * as "scan-off". Real-life scenario for example: if a driver is being
17132  * load-unloaded while func6,7 are down. This will cause the timer to access
17133  * the ilt, translate to a logical address and send a request to read/write.
17134  * Since the ilt for the function that is down is not valid, this will cause
17135  * a translation error which is unrecoverable.
17136  * The Workaround is intended to make sure that when this happens nothing
17137  * fatal will occur. The workaround:
17138  *  1.  First PF driver which loads on a path will:
17139  *      a.  After taking the chip out of reset, by using pretend,
17140  *          it will write "0" to the following registers of
17141  *          the other vnics.
17142  *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
17143  *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
17144  *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
17145  *          And for itself it will write '1' to
17146  *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
17147  *          dmae-operations (writing to pram for example.)
17148  *          note: this could be done only for functions 6,7 but it is
17149  *            cleaner this way.
17150  *      b.  Write zero+valid to the entire ILT.
17151  *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
17152  *          VNIC3 (of that port). The range allocated will be the
17153  *          entire ILT. This is needed to prevent  ILT range error.
17154  *  2.  Any PF driver load flow:
17155  *      a.  ILT update with the physical addresses of the allocated
17156  *          logical pages.
17157  *      b.  Wait 20msec. - note that this timeout is needed to make
17158  *          sure there are no requests in one of the PXP internal
17159  *          queues with "old" ILT addresses.
17160  *      c.  PF enable in the PGLC.
17161  *      d.  Clear the was_error of the PF in the PGLC. (could have
17162  *          occurred while driver was down)
17163  *      e.  PF enable in the CFC (WEAK + STRONG)
17164  *      f.  Timers scan enable
17165  *  3.  PF driver unload flow:
17166  *      a.  Clear the Timers scan_en.
17167  *      b.  Polling for scan_on=0 for that PF.
17168  *      c.  Clear the PF enable bit in the PXP.
17169  *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
17170  *      e.  Write zero+valid to all ILT entries (The valid bit must
17171  *          stay set)
17172  *      f.  If this is VNIC 3 of a port then also init
17173  *          first_timers_ilt_entry to zero and last_timers_ilt_entry
17174  *          to the last entry in the ILT.
17175  *
17176  *      Notes:
17177  *      Currently the PF error in the PGLC is non-recoverable.
17178  *      In the future there will be a recovery routine for this error.
17179  *      Currently attention is masked.
17180  *      Having an MCP lock on the load/unload process does not guarantee that
17181  *      there is no Timer disable during Func6/7 enable. This is because the
17182  *      Timers scan is currently being cleared by the MCP on FLR.
17183  *      Step 2.d can be done only for PF6/7 and the driver can also check if
17184  *      there is error before clearing it. But the flow above is simpler and
17185  *      more general.
17186  *      All ILT entries are written with zero+valid and not just the PF6/7
17187  *      ILT entries, since in the future the ILT entry allocation for
17188  *      PFs might be dynamic.
17189  */
17190         struct ilt_client_info ilt_cli;
17191         struct ecore_ilt ilt;
17192 
17193         memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
17194         memset(&ilt, 0, sizeof(struct ecore_ilt));
17195 
17196         /* initialize dummy TM client */
17197         ilt_cli.start      = 0;
17198         ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
17199         ilt_cli.client_num = ILT_CLIENT_TM;
17200 
17201         /*
17202          * Step 1: write zeroes to all ilt page entries with the valid bit on
17203          * Step 2: set the timers first/last ilt entry to point
17204          * to the entire range to prevent an ILT range error for the 3rd/4th
17205          * vnic (this code assumes the vnic exists)
17206          *
17207          * both steps performed by call to ecore_ilt_client_init_op()
17208          * with dummy TM client
17209          *
17210          * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
17211          * and his brother are split registers
17212          * and its sibling are split registers
17213 
17214         bxe_pretend_func(sc, (SC_PATH(sc) + 6));
17215         ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
17216         bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17217 
17218         REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
17219         REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
17220         REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
17221     }
17222 
17223     REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
17224     REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
17225 
17226     if (!CHIP_IS_E1x(sc)) {
17227         int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
17228                      (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
17229 
17230         ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
17231         ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
17232 
17233         /* let the HW do its magic... */
17234         do {
17235             DELAY(200000);
17236             val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
17237         } while (factor-- && (val != 1));
17238 
17239         if (val != 1) {
17240             BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
17241             return (-1);
17242         }
17243     }
17244 
17245     BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
17246 
17247     ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
17248 
17249     bxe_iov_init_dmae(sc);
17250 
17251     /* clean the DMAE memory */
17252     sc->dmae_ready = 1;
17253     ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
17254 
17255     ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
17256 
17257     ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
17258 
17259     ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
17260 
17261     ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
17262 
17263     bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
17264     bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
17265     bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
17266     bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
17267 
17268     ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
17269 
17270     /* QM queues pointers table */
17271     ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17272 
17273     /* soft reset pulse */
17274     REG_WR(sc, QM_REG_SOFT_RESET, 1);
17275     REG_WR(sc, QM_REG_SOFT_RESET, 0);
17276 
17277     if (CNIC_SUPPORT(sc))
17278         ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17279 
17280     ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17281     REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17282     if (!CHIP_REV_IS_SLOW(sc)) {
17283         /* enable hw interrupt from doorbell Q */
17284         REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17285     }
17286 
17287     ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17288 
17289     ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17290     REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17291 
17292     if (!CHIP_IS_E1(sc)) {
17293         REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17294     }
17295 
17296     if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17297         if (IS_MF_AFEX(sc)) {
17298             /*
17299              * configure that AFEX and VLAN headers must be
17300              * received in AFEX mode
17301              */
17302             REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17303             REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17304             REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17305             REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17306             REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17307         } else {
17308             /*
17309              * Bit-map indicating which L2 hdrs may appear
17310              * after the basic Ethernet header
17311              */
17312             REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17313                    sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17314         }
17315     }
17316 
17317     ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17318     ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17319     ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17320     ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17321 
17322     if (!CHIP_IS_E1x(sc)) {
17323         /* reset VFC memories */
17324         REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17325                VFC_MEMORIES_RST_REG_CAM_RST |
17326                VFC_MEMORIES_RST_REG_RAM_RST);
17327         REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17328                VFC_MEMORIES_RST_REG_CAM_RST |
17329                VFC_MEMORIES_RST_REG_RAM_RST);
17330 
17331         DELAY(20000);
17332     }
17333 
17334     ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17335     ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17336     ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17337     ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17338 
17339     /* sync semi rtc */
17340     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17341            0x80000000);
17342     REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17343            0x80000000);
17344 
17345     ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17346     ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17347     ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17348 
17349     if (!CHIP_IS_E1x(sc)) {
17350         if (IS_MF_AFEX(sc)) {
17351             /*
17352              * configure that AFEX and VLAN headers must be
17353              * sent in AFEX mode
17354              */
17355             REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17356             REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17357             REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17358             REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17359             REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17360         } else {
17361             REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17362                    sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17363         }
17364     }
17365 
17366     REG_WR(sc, SRC_REG_SOFT_RST, 1);
17367 
17368     ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17369 
17370     if (CNIC_SUPPORT(sc)) {
17371         REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17372         REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17373         REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17374         REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17375         REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17376         REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17377         REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17378         REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17379         REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17380         REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17381     }
17382     REG_WR(sc, SRC_REG_SOFT_RST, 0);
17383 
17384     if (sizeof(union cdu_context) != 1024) {
17385         /* we currently assume that a context is 1024 bytes */
17386         BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17387               (long)sizeof(union cdu_context));
17388     }
17389 
17390     ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
17391     val = (4 << 24) + (0 << 12) + 1024;
17392     REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17393 
17394     ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17395 
17396     REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17397     /* enable context validation interrupt from CFC */
17398     REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17399 
17400     /* set the thresholds to prevent CFC/CDU race */
17401     REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17402     ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17403 
17404     if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17405         REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17406     }
17407 
17408     ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17409     ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17410 
17411     /* Reset PCIE errors for debug */
17412     REG_WR(sc, 0x2814, 0xffffffff);
17413     REG_WR(sc, 0x3820, 0xffffffff);
17414 
17415     if (!CHIP_IS_E1x(sc)) {
17416         REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17417                (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17418                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17419         REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17420                (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17421                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17422                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17423         REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17424                (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17425                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17426                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17427     }
17428 
17429     ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17430 
17431     if (!CHIP_IS_E1(sc)) {
17432         /* in E3 this is done in the per-port section */
17433         if (!CHIP_IS_E3(sc))
17434             REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17435     }
17436 
17437     if (CHIP_IS_E1H(sc)) {
17438         /* not applicable for E2 (and above ...) */
17439         REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17440     }
17441 
17442     if (CHIP_REV_IS_SLOW(sc)) {
17443         DELAY(200000);
17444     }
17445 
17446     /* finish CFC init */
17447     val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17448     if (val != 1) {
17449         BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17450         return (-1);
17451     }
17452     val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17453     if (val != 1) {
17454         BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17455         return (-1);
17456     }
17457     val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17458     if (val != 1) {
17459         BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17460         return (-1);
17461     }
17462     REG_WR(sc, CFC_REG_DEBUG0, 0);
17463 
17464     if (CHIP_IS_E1(sc)) {
17465         /* read NIG statistic to see if this is our first up since powerup */
17466         bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17467         val = *BXE_SP(sc, wb_data[0]);
17468 
17469         /* do internal memory self test */
17470         if ((val == 0) && bxe_int_mem_test(sc)) {
17471             BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17472             return (-1);
17473         }
17474     }
17475 
17476     bxe_setup_fan_failure_detection(sc);
17477 
17478     /* clear PXP2 attentions */
17479     REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17480 
17481     bxe_enable_blocks_attention(sc);
17482 
17483     if (!CHIP_REV_IS_SLOW(sc)) {
17484         ecore_enable_blocks_parity(sc);
17485     }
17486 
17487     if (!BXE_NOMCP(sc)) {
17488         if (CHIP_IS_E1x(sc)) {
17489             bxe_common_init_phy(sc);
17490         }
17491     }
17492 
17493     return (0);
17494 }
17495 
17496 /**
17497  * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17498  *
17499  * @sc:     driver handle
17500  */
17501 static int
17502 bxe_init_hw_common_chip(struct bxe_softc *sc)
17503 {
17504     int rc = bxe_init_hw_common(sc);
17505 
17506     if (rc) {
17507         BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17508         return (rc);
17509     }
17510 
17511     /* In E2 2-PORT mode, same ext phy is used for the two paths */
17512     if (!BXE_NOMCP(sc)) {
17513         bxe_common_init_phy(sc);
17514     }
17515 
17516     return (0);
17517 }
17518 
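/**
 * bxe_init_hw_port - initialize the HW at the PORT phase.
 *
 * @sc:     driver handle
 */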
17519 static int
17520 bxe_init_hw_port(struct bxe_softc *sc)
17521 {
17522     int port = SC_PORT(sc);
17523     int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17524     uint32_t low, high;
17525     uint32_t val;
17526 
17527     BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17528 
17529     REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17530 
17531     ecore_init_block(sc, BLOCK_MISC, init_phase);
17532     ecore_init_block(sc, BLOCK_PXP, init_phase);
17533     ecore_init_block(sc, BLOCK_PXP2, init_phase);
17534 
17535     /*
17536      * Timers bug workaround: the common phase disables the pf_master bit
17537      * in pglue, so we need to re-enable it here before any dmae access is
17538      * attempted. Therefore the enable-master is manually added to the
17539      * port phase (it also happens in the function phase)
17540      */
17541     if (!CHIP_IS_E1x(sc)) {
17542         REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17543     }
17544 
17545     ecore_init_block(sc, BLOCK_ATC, init_phase);
17546     ecore_init_block(sc, BLOCK_DMAE, init_phase);
17547     ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17548     ecore_init_block(sc, BLOCK_QM, init_phase);
17549 
17550     ecore_init_block(sc, BLOCK_TCM, init_phase);
17551     ecore_init_block(sc, BLOCK_UCM, init_phase);
17552     ecore_init_block(sc, BLOCK_CCM, init_phase);
17553     ecore_init_block(sc, BLOCK_XCM, init_phase);
17554 
17555     /* QM cid (connection) count */
17556     ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17557 
17558     if (CNIC_SUPPORT(sc)) {
17559         ecore_init_block(sc, BLOCK_TM, init_phase);
17560         REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17561         REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17562     }
17563 
17564     ecore_init_block(sc, BLOCK_DORQ, init_phase);
17565 
17566     ecore_init_block(sc, BLOCK_BRB1, init_phase);
17567 
17568     if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17569         if (IS_MF(sc)) {
17570             low = (BXE_ONE_PORT(sc) ? 160 : 246);
17571         } else if (sc->mtu > 4096) {
17572             if (BXE_ONE_PORT(sc)) {
17573                 low = 160;
17574             } else {
17575                 val = sc->mtu;
17576                 /* (24*1024 + val*4)/256 */
17577                 low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17578             }
17579         } else {
17580             low = (BXE_ONE_PORT(sc) ? 80 : 160);
17581         }
17582         high = (low + 56); /* 14*1024/256 */
17583         REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17584         REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17585     }
17586 
17587     if (CHIP_IS_MODE_4_PORT(sc)) {
17588         REG_WR(sc, SC_PORT(sc) ?
17589                BRB1_REG_MAC_GUARANTIED_1 :
17590                BRB1_REG_MAC_GUARANTIED_0, 40);
17591     }
17592 
17593     ecore_init_block(sc, BLOCK_PRS, init_phase);
17594     if (CHIP_IS_E3B0(sc)) {
17595         if (IS_MF_AFEX(sc)) {
17596             /* configure headers for AFEX mode */
17597             REG_WR(sc, SC_PORT(sc) ?
17598                    PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17599                    PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17600             REG_WR(sc, SC_PORT(sc) ?
17601                    PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17602                    PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17603             REG_WR(sc, SC_PORT(sc) ?
17604                    PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17605                    PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17606         } else {
17607             /* Ovlan exists only if we are in multi-function +
17608              * switch-dependent mode; in switch-independent mode there
17609              * are no ovlan headers
17610              */
17611             REG_WR(sc, SC_PORT(sc) ?
17612                    PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17613                    PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17614                    (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17615         }
17616     }
17617 
17618     ecore_init_block(sc, BLOCK_TSDM, init_phase);
17619     ecore_init_block(sc, BLOCK_CSDM, init_phase);
17620     ecore_init_block(sc, BLOCK_USDM, init_phase);
17621     ecore_init_block(sc, BLOCK_XSDM, init_phase);
17622 
17623     ecore_init_block(sc, BLOCK_TSEM, init_phase);
17624     ecore_init_block(sc, BLOCK_USEM, init_phase);
17625     ecore_init_block(sc, BLOCK_CSEM, init_phase);
17626     ecore_init_block(sc, BLOCK_XSEM, init_phase);
17627 
17628     ecore_init_block(sc, BLOCK_UPB, init_phase);
17629     ecore_init_block(sc, BLOCK_XPB, init_phase);
17630 
17631     ecore_init_block(sc, BLOCK_PBF, init_phase);
17632 
17633     if (CHIP_IS_E1x(sc)) {
17634         /* configure PBF to work without PAUSE mtu 9000 */
17635         REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17636 
17637         /* update threshold */
17638         REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17639         /* update init credit */
17640         REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17641 
17642         /* probe changes */
17643         REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17644         DELAY(50);
17645         REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17646     }
17647 
17648     if (CNIC_SUPPORT(sc)) {
17649         ecore_init_block(sc, BLOCK_SRC, init_phase);
17650     }
17651 
17652     ecore_init_block(sc, BLOCK_CDU, init_phase);
17653     ecore_init_block(sc, BLOCK_CFC, init_phase);
17654 
17655     if (CHIP_IS_E1(sc)) {
17656         REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17657         REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17658     }
17659     ecore_init_block(sc, BLOCK_HC, init_phase);
17660 
17661     ecore_init_block(sc, BLOCK_IGU, init_phase);
17662 
17663     ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17664     /* init aeu_mask_attn_func_0/1:
17665      *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17666      *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17667      *             bits 4-7 are used for "per vn group attention" */
17668     val = IS_MF(sc) ? 0xF7 : 0x7;
17669     /* Enable DCBX attention for all but E1 */
17670     val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17671     REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17672 
17673     ecore_init_block(sc, BLOCK_NIG, init_phase);
17674 
17675     if (!CHIP_IS_E1x(sc)) {
17676         /* Bit-map indicating which L2 hdrs may appear after the
17677          * basic Ethernet header
17678          */
17679         if (IS_MF_AFEX(sc)) {
17680             REG_WR(sc, SC_PORT(sc) ?
17681                    NIG_REG_P1_HDRS_AFTER_BASIC :
17682                    NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17683         } else {
17684             REG_WR(sc, SC_PORT(sc) ?
17685                    NIG_REG_P1_HDRS_AFTER_BASIC :
17686                    NIG_REG_P0_HDRS_AFTER_BASIC,
17687                    IS_MF_SD(sc) ? 7 : 6);
17688         }
17689 
17690         if (CHIP_IS_E3(sc)) {
17691             REG_WR(sc, SC_PORT(sc) ?
17692                    NIG_REG_LLH1_MF_MODE :
17693                    NIG_REG_LLH_MF_MODE, IS_MF(sc));
17694         }
17695     }
17696     if (!CHIP_IS_E3(sc)) {
17697         REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17698     }
17699 
17700     if (!CHIP_IS_E1(sc)) {
17701         /* 0x2 disable mf_ov, 0x1 enable */
17702         REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17703                (IS_MF_SD(sc) ? 0x1 : 0x2));
17704 
17705         if (!CHIP_IS_E1x(sc)) {
17706             val = 0;
17707             switch (sc->devinfo.mf_info.mf_mode) {
17708             case MULTI_FUNCTION_SD:
17709                 val = 1;
17710                 break;
17711             case MULTI_FUNCTION_SI:
17712             case MULTI_FUNCTION_AFEX:
17713                 val = 2;
17714                 break;
17715             }
17716 
17717             REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17718                         NIG_REG_LLH0_CLS_TYPE), val);
17719         }
17720         REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17721         REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17722         REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17723     }
17724 
17725     /* If SPIO5 is set to generate interrupts, enable it for this port */
17726     val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17727     if (val & MISC_SPIO_SPIO5) {
17728         uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17729                                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17730         val = REG_RD(sc, reg_addr);
17731         val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17732         REG_WR(sc, reg_addr, val);
17733     }
17734 
17735     return (0);
17736 }
17737 
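/*
 * Poll a register until it reads 'expected' or 'poll_count' intervals of
 * FLR_WAIT_INTERVAL usecs have elapsed; returns the last value read.
 */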
17738 static uint32_t
17739 bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17740                        uint32_t         reg,
17741                        uint32_t         expected,
17742                        uint32_t         poll_count)
17743 {
17744     uint32_t cur_cnt = poll_count;
17745     uint32_t val;
17746 
17747     while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17748         DELAY(FLR_WAIT_INTERVAL);
17749     }
17750 
17751     return (val);
17752 }
17753 
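/*
 * Wait for a HW usage counter to drain to zero. Returns 1 (and logs 'msg')
 * if the counter is still non-zero after the poll count expires.
 */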
17754 static int
17755 bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17756                               uint32_t         reg,
17757                               char             *msg,
17758                               uint32_t         poll_cnt)
17759 {
17760     uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17761 
17762     if (val != 0) {
17763         BLOGE(sc, "%s usage count=%d\n", msg, val);
17764         return (1);
17765     }
17766 
17767     return (0);
17768 }
17769 
17770 /* Common routines with VF FLR cleanup */
17771 static uint32_t
17772 bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17773 {
17774     /* adjust polling timeout */
17775     if (CHIP_REV_IS_EMUL(sc)) {
17776         return (FLR_POLL_CNT * 2000);
17777     }
17778 
17779     if (CHIP_REV_IS_FPGA(sc)) {
17780         return (FLR_POLL_CNT * 120);
17781     }
17782 
17783     return (FLR_POLL_CNT);
17784 }
17785 
17786 static int
17787 bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17788                            uint32_t         poll_cnt)
17789 {
17790     /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17791     if (bxe_flr_clnup_poll_hw_counter(sc,
17792                                       CFC_REG_NUM_LCIDS_INSIDE_PF,
17793                                       "CFC PF usage counter timed out",
17794                                       poll_cnt)) {
17795         return (1);
17796     }
17797 
17798     /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17799     if (bxe_flr_clnup_poll_hw_counter(sc,
17800                                       DORQ_REG_PF_USAGE_CNT,
17801                                       "DQ PF usage counter timed out",
17802                                       poll_cnt)) {
17803         return (1);
17804     }
17805 
17806     /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17807     if (bxe_flr_clnup_poll_hw_counter(sc,
17808                                       QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17809                                       "QM PF usage counter timed out",
17810                                       poll_cnt)) {
17811         return (1);
17812     }
17813 
17814     /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17815     if (bxe_flr_clnup_poll_hw_counter(sc,
17816                                       TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17817                                       "Timers VNIC usage counter timed out",
17818                                       poll_cnt)) {
17819         return (1);
17820     }
17821 
17822     if (bxe_flr_clnup_poll_hw_counter(sc,
17823                                       TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17824                                       "Timers NUM_SCANS usage counter timed out",
17825                                       poll_cnt)) {
17826         return (1);
17827     }
17828 
17829     /* Wait for the DMAE PF usage counter to zero */
17830     if (bxe_flr_clnup_poll_hw_counter(sc,
17831                                       dmae_reg_go_c[INIT_DMAE_C(sc)],
17832                                       "DMAE command register timed out",
17833                                       poll_cnt)) {
17834         return (1);
17835     }
17836 
17837     return (0);
17838 }
17839 
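/*
 * Helpers for building the SDM operation-generator command used to trigger
 * the FW final cleanup (see bxe_send_final_clnup() below).
 */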
17840 #define OP_GEN_PARAM(param)                                            \
17841     (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17842 #define OP_GEN_TYPE(type)                                           \
17843     (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17844 #define OP_GEN_AGG_VECT(index)                                             \
17845     (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17846 
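/*
 * Issue the FW "final cleanup" command through the XSDM operation generator
 * and wait for the CSTORM completion flag of 'clnup_func' to be set.
 */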
17847 static int
17848 bxe_send_final_clnup(struct bxe_softc *sc,
17849                      uint8_t          clnup_func,
17850                      uint32_t         poll_cnt)
17851 {
17852     uint32_t op_gen_command = 0;
17853     uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17854                           CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17855     int ret = 0;
17856 
17857     if (REG_RD(sc, comp_addr)) {
17858         BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17859         return (1);
17860     }
17861 
17862     op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17863     op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17864     op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17865     op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17866 
17867     BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17868     REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17869 
17870     if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17871         BLOGE(sc, "FW final cleanup did not succeed\n");
17872         BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17873               (REG_RD(sc, comp_addr)));
17874         bxe_panic(sc, ("FLR cleanup failed\n"));
17875         return (1);
17876     }
17877 
17878     /* Zero completion for next FLR */
17879     REG_WR(sc, comp_addr, 0);
17880 
17881     return (ret);
17882 }
17883 
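/*
 * Wait until the PBF transmit buffer of port/queue pN is flushed, i.e. until
 * its credit count returns to the initial credit or the freed-credit counter
 * accounts for the difference observed when polling started.
 */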
17884 static void
17885 bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17886                        struct pbf_pN_buf_regs *regs,
17887                        uint32_t               poll_count)
17888 {
17889     uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17890     uint32_t cur_cnt = poll_count;
17891 
17892     crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17893     crd = crd_start = REG_RD(sc, regs->crd);
17894     init_crd = REG_RD(sc, regs->init_crd);
17895 
17896     BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17897     BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17898     BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17899 
17900     while ((crd != init_crd) &&
17901            ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17902             (init_crd - crd_start))) {
17903         if (cur_cnt--) {
17904             DELAY(FLR_WAIT_INTERVAL);
17905             crd = REG_RD(sc, regs->crd);
17906             crd_freed = REG_RD(sc, regs->crd_freed);
17907         } else {
17908             BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17909             BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17910             BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17911             break;
17912         }
17913     }
17914 
17915     BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17916           poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17917 }
17918 
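/*
 * Wait until the PBF command queue of port/queue pN drains, i.e. until its
 * occupancy drops to zero or the freed-lines counter accounts for the lines
 * that were occupied when polling started.
 */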
17919 static void
17920 bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17921                        struct pbf_pN_cmd_regs *regs,
17922                        uint32_t               poll_count)
17923 {
17924     uint32_t occup, to_free, freed, freed_start;
17925     uint32_t cur_cnt = poll_count;
17926 
17927     occup = to_free = REG_RD(sc, regs->lines_occup);
17928     freed = freed_start = REG_RD(sc, regs->lines_freed);
17929 
17930     BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17931     BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17932 
17933     while (occup &&
17934            ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17935         if (cur_cnt--) {
17936             DELAY(FLR_WAIT_INTERVAL);
17937             occup = REG_RD(sc, regs->lines_occup);
17938             freed = REG_RD(sc, regs->lines_freed);
17939         } else {
17940             BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17941             BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17942             BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17943             break;
17944         }
17945     }
17946 
17947     BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17948           poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17949 }
17950 
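/*
 * Verify that the PBF command queues and transmit buffers of P0, P1 and the
 * LB queue (P4) have been flushed, selecting the E3B0 or legacy register set
 * as appropriate.
 */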
17951 static void
17952 bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17953 {
17954     struct pbf_pN_cmd_regs cmd_regs[] = {
17955         {0, (CHIP_IS_E3B0(sc)) ?
17956             PBF_REG_TQ_OCCUPANCY_Q0 :
17957             PBF_REG_P0_TQ_OCCUPANCY,
17958             (CHIP_IS_E3B0(sc)) ?
17959             PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17960             PBF_REG_P0_TQ_LINES_FREED_CNT},
17961         {1, (CHIP_IS_E3B0(sc)) ?
17962             PBF_REG_TQ_OCCUPANCY_Q1 :
17963             PBF_REG_P1_TQ_OCCUPANCY,
17964             (CHIP_IS_E3B0(sc)) ?
17965             PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17966             PBF_REG_P1_TQ_LINES_FREED_CNT},
17967         {4, (CHIP_IS_E3B0(sc)) ?
17968             PBF_REG_TQ_OCCUPANCY_LB_Q :
17969             PBF_REG_P4_TQ_OCCUPANCY,
17970             (CHIP_IS_E3B0(sc)) ?
17971             PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17972             PBF_REG_P4_TQ_LINES_FREED_CNT}
17973     };
17974 
17975     struct pbf_pN_buf_regs buf_regs[] = {
17976         {0, (CHIP_IS_E3B0(sc)) ?
17977             PBF_REG_INIT_CRD_Q0 :
17978             PBF_REG_P0_INIT_CRD ,
17979             (CHIP_IS_E3B0(sc)) ?
17980             PBF_REG_CREDIT_Q0 :
17981             PBF_REG_P0_CREDIT,
17982             (CHIP_IS_E3B0(sc)) ?
17983             PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17984             PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17985         {1, (CHIP_IS_E3B0(sc)) ?
17986             PBF_REG_INIT_CRD_Q1 :
17987             PBF_REG_P1_INIT_CRD,
17988             (CHIP_IS_E3B0(sc)) ?
17989             PBF_REG_CREDIT_Q1 :
17990             PBF_REG_P1_CREDIT,
17991             (CHIP_IS_E3B0(sc)) ?
17992             PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17993             PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17994         {4, (CHIP_IS_E3B0(sc)) ?
17995             PBF_REG_INIT_CRD_LB_Q :
17996             PBF_REG_P4_INIT_CRD,
17997             (CHIP_IS_E3B0(sc)) ?
17998             PBF_REG_CREDIT_LB_Q :
17999             PBF_REG_P4_CREDIT,
18000             (CHIP_IS_E3B0(sc)) ?
18001             PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
18002             PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
18003     };
18004 
18005     int i;
18006 
18007     /* Verify the command queues are flushed P0, P1, P4 */
18008     for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
18009         bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
18010     }
18011 
18012     /* Verify the transmission buffers are flushed P0, P1, P4 */
18013     for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
18014         bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
18015     }
18016 }
18017 
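/*
 * Debug helper: log the PF enable/disable state as seen by the CFC, PBF,
 * IGU and PGLUE_B blocks.
 */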
18018 static void
18019 bxe_hw_enable_status(struct bxe_softc *sc)
18020 {
18021     uint32_t val;
18022 
18023     val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
18024     BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
18025 
18026     val = REG_RD(sc, PBF_REG_DISABLE_PF);
18027     BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
18028 
18029     val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
18030     BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
18031 
18032     val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
18033     BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
18034 
18035     val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
18036     BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
18037 
18038     val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
18039     BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
18040 
18041     val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
18042     BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
18043 
18044     val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
18045     BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
18046 }
18047 
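/*
 * Post-FLR cleanup for this PF: re-enable target read access, drain the HW
 * usage counters, send the FW final cleanup command, verify the TX path is
 * flushed and no PCIe transactions are pending, then re-enable master access.
 */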
18048 static int
18049 bxe_pf_flr_clnup(struct bxe_softc *sc)
18050 {
18051     uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
18052 
18053     BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
18054 
18055     /* Re-enable PF target read access */
18056     REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
18057 
18058     /* Poll HW usage counters */
18059     BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
18060     if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
18061         return (-1);
18062     }
18063 
18064     /* Zero the igu 'trailing edge' and 'leading edge' */
18065 
18066     /* Send the FW cleanup command */
18067     if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
18068         return (-1);
18069     }
18070 
18071     /* ATC cleanup */
18072 
18073     /* Verify TX hw is flushed */
18074     bxe_tx_hw_flushed(sc, poll_cnt);
18075 
18076     /* Wait 100ms (not adjusted according to platform) */
18077     DELAY(100000);
18078 
18079     /* Verify no pending pci transactions */
18080     if (bxe_is_pcie_pending(sc)) {
18081         BLOGE(sc, "PCIE Transactions still pending\n");
18082     }
18083 
18084     /* Debug */
18085     bxe_hw_enable_status(sc);
18086 
18087     /*
18088      * Master enable - needed because WB DMAE writes are performed before
18089      * this register is re-initialized as part of the regular function init
18090      */
18091     REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18092 
18093     return (0);
18094 }
18095 
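/**
 * bxe_init_hw_func - initialize the HW at the FUNCTION (PF) phase.
 *
 * @sc:     driver handle
 */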
18096 static int
18097 bxe_init_hw_func(struct bxe_softc *sc)
18098 {
18099     int port = SC_PORT(sc);
18100     int func = SC_FUNC(sc);
18101     int init_phase = PHASE_PF0 + func;
18102     struct ecore_ilt *ilt = sc->ilt;
18103     uint16_t cdu_ilt_start;
18104     uint32_t addr, val;
18105     uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
18106     int i, main_mem_width, rc;
18107 
18108     BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
18109 
18110     /* FLR cleanup */
18111     if (!CHIP_IS_E1x(sc)) {
18112         rc = bxe_pf_flr_clnup(sc);
18113         if (rc) {
18114             BLOGE(sc, "FLR cleanup failed!\n");
18115             // XXX bxe_fw_dump(sc);
18116             // XXX bxe_idle_chk(sc);
18117             return (rc);
18118         }
18119     }
18120 
18121     /* set MSI reconfigure capability */
18122     if (sc->devinfo.int_block == INT_BLOCK_HC) {
18123         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
18124         val = REG_RD(sc, addr);
18125         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
18126         REG_WR(sc, addr, val);
18127     }
18128 
18129     ecore_init_block(sc, BLOCK_PXP, init_phase);
18130     ecore_init_block(sc, BLOCK_PXP2, init_phase);
18131 
18132     ilt = sc->ilt;
18133     cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
18134 
18135     for (i = 0; i < L2_ILT_LINES(sc); i++) {
18136         ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
18137         ilt->lines[cdu_ilt_start + i].page_mapping =
18138             sc->context[i].vcxt_dma.paddr;
18139         ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
18140     }
18141     ecore_ilt_init_op(sc, INITOP_SET);
18142 
18143     /* Set NIC mode */
18144     REG_WR(sc, PRS_REG_NIC_MODE, 1);
18145     BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
18146 
18147     if (!CHIP_IS_E1x(sc)) {
18148         uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
18149 
18150         /* Turn on a single ISR mode in IGU if driver is going to use
18151          * INT#x or MSI
18152          */
18153         if (sc->interrupt_mode != INTR_MODE_MSIX) {
18154             pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
18155         }
18156 
18157         /*
18158          * Timers bug workaround: function init part.
18159          * We need to wait 20msec after initializing the ILT to make
18160          * sure there are no requests in one of the PXP internal
18161          * queues with "old" ILT addresses
18162          */
18163         DELAY(20000);
18164 
18165         /*
18166          * Master enable - needed because WB DMAE writes are performed
18167          * before this register is re-initialized as part of the regular
18168          * function init
18169          */
18170         REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18171         /* Enable the function in IGU */
18172         REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
18173     }
18174 
18175     sc->dmae_ready = 1;
18176 
18177     ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
18178 
18179     if (!CHIP_IS_E1x(sc))
18180         REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
18181 
18182     ecore_init_block(sc, BLOCK_ATC, init_phase);
18183     ecore_init_block(sc, BLOCK_DMAE, init_phase);
18184     ecore_init_block(sc, BLOCK_NIG, init_phase);
18185     ecore_init_block(sc, BLOCK_SRC, init_phase);
18186     ecore_init_block(sc, BLOCK_MISC, init_phase);
18187     ecore_init_block(sc, BLOCK_TCM, init_phase);
18188     ecore_init_block(sc, BLOCK_UCM, init_phase);
18189     ecore_init_block(sc, BLOCK_CCM, init_phase);
18190     ecore_init_block(sc, BLOCK_XCM, init_phase);
18191     ecore_init_block(sc, BLOCK_TSEM, init_phase);
18192     ecore_init_block(sc, BLOCK_USEM, init_phase);
18193     ecore_init_block(sc, BLOCK_CSEM, init_phase);
18194     ecore_init_block(sc, BLOCK_XSEM, init_phase);
18195 
18196     if (!CHIP_IS_E1x(sc))
18197         REG_WR(sc, QM_REG_PF_EN, 1);
18198 
18199     if (!CHIP_IS_E1x(sc)) {
18200         REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18201         REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18202         REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18203         REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18204     }
18205     ecore_init_block(sc, BLOCK_QM, init_phase);
18206 
18207     ecore_init_block(sc, BLOCK_TM, init_phase);
18208     ecore_init_block(sc, BLOCK_DORQ, init_phase);
18209 
18210     bxe_iov_init_dq(sc);
18211 
18212     ecore_init_block(sc, BLOCK_BRB1, init_phase);
18213     ecore_init_block(sc, BLOCK_PRS, init_phase);
18214     ecore_init_block(sc, BLOCK_TSDM, init_phase);
18215     ecore_init_block(sc, BLOCK_CSDM, init_phase);
18216     ecore_init_block(sc, BLOCK_USDM, init_phase);
18217     ecore_init_block(sc, BLOCK_XSDM, init_phase);
18218     ecore_init_block(sc, BLOCK_UPB, init_phase);
18219     ecore_init_block(sc, BLOCK_XPB, init_phase);
18220     ecore_init_block(sc, BLOCK_PBF, init_phase);
18221     if (!CHIP_IS_E1x(sc))
18222         REG_WR(sc, PBF_REG_DISABLE_PF, 0);
18223 
18224     ecore_init_block(sc, BLOCK_CDU, init_phase);
18225 
18226     ecore_init_block(sc, BLOCK_CFC, init_phase);
18227 
18228     if (!CHIP_IS_E1x(sc))
18229         REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
18230 
18231     if (IS_MF(sc)) {
18232         REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
18233         REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
18234     }
18235 
18236     ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
18237 
18238     /* HC init per function */
18239     if (sc->devinfo.int_block == INT_BLOCK_HC) {
18240         if (CHIP_IS_E1H(sc)) {
18241             REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18242 
18243             REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18244             REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18245         }
18246         ecore_init_block(sc, BLOCK_HC, init_phase);
18247 
18248     } else {
18249         int num_segs, sb_idx, prod_offset;
18250 
18251         REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18252 
18253         if (!CHIP_IS_E1x(sc)) {
18254             REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18255             REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18256         }
18257 
18258         ecore_init_block(sc, BLOCK_IGU, init_phase);
18259 
18260         if (!CHIP_IS_E1x(sc)) {
18261             int dsb_idx = 0;
18262             /**
18263              * Producer memory:
18264              * E2 mode: address 0-135 match to the mapping memory;
18265              * 136 - PF0 default prod; 137 - PF1 default prod;
18266              * 138 - PF2 default prod; 139 - PF3 default prod;
18267              * 140 - PF0 attn prod;    141 - PF1 attn prod;
18268              * 142 - PF2 attn prod;    143 - PF3 attn prod;
18269              * 144-147 reserved.
18270              *
18271              * E1.5 mode - In backward compatible mode;
18272              * E1.5 mode - in backward compatible mode,
18273              * for a non-default SB, each even line in the memory
18274              * holds the U producer and each odd line holds
18275              * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
18276              * producers are for the DSB for each PF.
18277              * Each PF has five segments: (the order inside each
18278              * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
18279              * 132-135 C prods; 136-139 X prods; 140-143 T prods;
18280              * 144-147 attn prods;
18281              */
18282             /* non-default-status-blocks */
18283             num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18284                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
18285             for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18286                 prod_offset = (sc->igu_base_sb + sb_idx) *
18287                     num_segs;
18288 
18289                 for (i = 0; i < num_segs; i++) {
18290                     addr = IGU_REG_PROD_CONS_MEMORY +
18291                             (prod_offset + i) * 4;
18292                     REG_WR(sc, addr, 0);
18293                 }
18294                 /* send consumer update with value 0 */
18295                 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18296                            USTORM_ID, 0, IGU_INT_NOP, 1);
18297                 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18298             }
18299 
18300             /* default-status-blocks */
18301             num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18302                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18303 
18304             if (CHIP_IS_MODE_4_PORT(sc))
18305                 dsb_idx = SC_FUNC(sc);
18306             else
18307                 dsb_idx = SC_VN(sc);
18308 
18309             prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18310                        IGU_BC_BASE_DSB_PROD + dsb_idx :
18311                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
18312 
18313             /*
18314              * igu prods come in chunks of E1HVN_MAX (4) -
18315              * it does not matter what the current chip mode is
18316              */
18317             for (i = 0; i < (num_segs * E1HVN_MAX);
18318                  i += E1HVN_MAX) {
18319                 addr = IGU_REG_PROD_CONS_MEMORY +
18320                             (prod_offset + i)*4;
18321                 REG_WR(sc, addr, 0);
18322             }
18323             /* send consumer update with 0 */
18324             if (CHIP_INT_MODE_IS_BC(sc)) {
18325                 bxe_ack_sb(sc, sc->igu_dsb_id,
18326                            USTORM_ID, 0, IGU_INT_NOP, 1);
18327                 bxe_ack_sb(sc, sc->igu_dsb_id,
18328                            CSTORM_ID, 0, IGU_INT_NOP, 1);
18329                 bxe_ack_sb(sc, sc->igu_dsb_id,
18330                            XSTORM_ID, 0, IGU_INT_NOP, 1);
18331                 bxe_ack_sb(sc, sc->igu_dsb_id,
18332                            TSTORM_ID, 0, IGU_INT_NOP, 1);
18333                 bxe_ack_sb(sc, sc->igu_dsb_id,
18334                            ATTENTION_ID, 0, IGU_INT_NOP, 1);
18335             } else {
18336                 bxe_ack_sb(sc, sc->igu_dsb_id,
18337                            USTORM_ID, 0, IGU_INT_NOP, 1);
18338                 bxe_ack_sb(sc, sc->igu_dsb_id,
18339                            ATTENTION_ID, 0, IGU_INT_NOP, 1);
18340             }
18341             bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18342 
18343             /* !!! these should become driver const once
18344                rf-tool supports split-68 const */
18345             REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18346             REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18347             REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18348             REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18349             REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18350             REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18351         }
18352     }
18353 
18354     /* Reset PCIE errors for debug */
18355     REG_WR(sc, 0x2114, 0xffffffff);
18356     REG_WR(sc, 0x2120, 0xffffffff);
18357 
18358     if (CHIP_IS_E1x(sc)) {
18359         main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18360         main_mem_base = HC_REG_MAIN_MEMORY +
18361                 SC_PORT(sc) * (main_mem_size * 4);
18362         main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18363         main_mem_width = 8;
18364 
18365         val = REG_RD(sc, main_mem_prty_clr);
18366         if (val) {
18367             BLOGD(sc, DBG_LOAD,
18368                   "Parity errors in HC block during function init (0x%x)!\n",
18369                   val);
18370         }
18371 
18372         /* Clear "false" parity errors in MSI-X table */
18373         for (i = main_mem_base;
18374              i < main_mem_base + main_mem_size * 4;
18375              i += main_mem_width) {
18376             bxe_read_dmae(sc, i, main_mem_width / 4);
18377             bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18378                            i, main_mem_width / 4);
18379         }
18380         /* Clear HC parity attention */
18381         REG_RD(sc, main_mem_prty_clr);
18382     }
18383 
18384 #if 1
18385     /* Enable STORMs SP logging */
18386     REG_WR8(sc, BAR_USTRORM_INTMEM +
18387            USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18388     REG_WR8(sc, BAR_TSTRORM_INTMEM +
18389            TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18390     REG_WR8(sc, BAR_CSTRORM_INTMEM +
18391            CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18392     REG_WR8(sc, BAR_XSTRORM_INTMEM +
18393            XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18394 #endif
18395 
18396     elink_phy_probe(&sc->link_params);
18397 
18398     return (0);
18399 }
18400 
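/*
 * Bring the physical link down via the ELINK/LFA reset path. The PHY lock
 * is held around the reset; without management firmware (NOMCP) the link
 * cannot be reset, so only a warning is logged on real hardware.
 */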
18401 static void
18402 bxe_link_reset(struct bxe_softc *sc)
18403 {
18404     if (!BXE_NOMCP(sc)) {
18405         bxe_acquire_phy_lock(sc);
18406         elink_lfa_reset(&sc->link_params, &sc->link_vars);
18407         bxe_release_phy_lock(sc);
18408     } else {
18409         if (!CHIP_REV_IS_SLOW(sc)) {
18410             BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18411         }
18412     }
18413 }
18414 
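/*
 * Quiesce this port on the way down: reset the link, mask the port's NIG
 * and AEU attentions, stop passing received packets to the BRB, and report
 * any BRB blocks still occupied afterwards.
 */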
18415 static void
18416 bxe_reset_port(struct bxe_softc *sc)
18417 {
18418     int port = SC_PORT(sc);
18419     uint32_t val;
18420 
18421     ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");
18422     /* reset physical Link */
18423     bxe_link_reset(sc);
18424 
18425     REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18426 
18427     /* Do not rcv packets to BRB */
18428     REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18429     /* Do not direct rcv packets that are not for MCP to the BRB */
18430     REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18431                NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18432 
18433     /* Configure AEU */
18434     REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18435 
18436     DELAY(100000);
18437 
18438     /* Check for BRB port occupancy */
18439     val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18440     if (val) {
18441         BLOGD(sc, DBG_LOAD,
18442               "BRB1 is not empty, %d blocks are occupied\n", val);
18443     }
18444 
18445     /* TODO: Close Doorbell port? */
18446 }
18447 
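/*
 * Program one ILT (Internal Lookup Table) entry: the 64-bit on-chip address
 * of the backing page is written as two 32-bit words through DMAE.
 */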
18448 static void
18449 bxe_ilt_wr(struct bxe_softc *sc,
18450            uint32_t         index,
18451            bus_addr_t       addr)
18452 {
18453     int reg;
18454     uint32_t wb_write[2];
18455 
18456     if (CHIP_IS_E1(sc)) {
18457         reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18458     } else {
18459         reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18460     }
18461 
18462     wb_write[0] = ONCHIP_ADDR1(addr);
18463     wb_write[1] = ONCHIP_ADDR2(addr);
18464     REG_WR_DMAE(sc, reg, wb_write, 2);
18465 }
18466 
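/* Zero every ILT entry that belongs to the given function. */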
18467 static void
18468 bxe_clear_func_ilt(struct bxe_softc *sc,
18469                    uint32_t         func)
18470 {
18471     uint32_t i, base = FUNC_ILT_BASE(func);
18472     for (i = base; i < base + ILT_PER_FUNC; i++) {
18473         bxe_ilt_wr(sc, i, 0);
18474     }
18475 }
18476 
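/*
 * Undo the per-function initialization: disable the function and its status
 * blocks in the STORM firmware, clear the HC/IGU attention edge registers,
 * wait for any CNIC timer scan to finish, and clear this function's ILT
 * entries.
 */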
18477 static void
18478 bxe_reset_func(struct bxe_softc *sc)
18479 {
18480     struct bxe_fastpath *fp;
18481     int port = SC_PORT(sc);
18482     int func = SC_FUNC(sc);
18483     int i;
18484 
18485     /* Disable the function in the FW */
18486     REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18487     REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18488     REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18489     REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18490 
18491     /* FP SBs */
18492     FOR_EACH_ETH_QUEUE(sc, i) {
18493         fp = &sc->fp[i];
18494         REG_WR8(sc, BAR_CSTRORM_INTMEM +
18495                 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18496                 SB_DISABLED);
18497     }
18498 
18499     /* SP SB */
18500     REG_WR8(sc, BAR_CSTRORM_INTMEM +
18501             CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18502             SB_DISABLED);
18503 
18504     for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18505         REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18506     }
18507 
18508     /* Configure IGU */
18509     if (sc->devinfo.int_block == INT_BLOCK_HC) {
18510         REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18511         REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18512     } else {
18513         REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18514         REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18515     }
18516 
18517     if (CNIC_LOADED(sc)) {
18518         /* Disable Timer scan */
18519         REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18520         /*
18521          * Wait for at least 10ms and up to 2 second for the timers
18522          * scan to complete
18523          */
18524         for (i = 0; i < 200; i++) {
18525             DELAY(10000);
18526             if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18527                 break;
18528         }
18529     }
18530 
18531     /* Clear ILT */
18532     bxe_clear_func_ilt(sc, func);
18533 
18534     /*
18535      * Timers workaround for an E2 bug: if this is vnic-3,
18536      * we need to set the entire ILT range for the timers client.
18537      */
18538     if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18539         struct ilt_client_info ilt_cli;
18540         /* use dummy TM client */
18541         memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18542         ilt_cli.start = 0;
18543         ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18544         ilt_cli.client_num = ILT_CLIENT_TM;
18545 
18546         ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18547     }
18548 
18549     /* this assumes that reset_port() is called before reset_func() */
18550     if (!CHIP_IS_E1x(sc)) {
18551         bxe_pf_disable(sc);
18552     }
18553 
18554     sc->dmae_ready = 0;
18555 }
18556 
18557 static int
18558 bxe_gunzip_init(struct bxe_softc *sc)
18559 {
18560     return (0);
18561 }
18562 
18563 static void
18564 bxe_gunzip_end(struct bxe_softc *sc)
18565 {
18566     return;
18567 }
18568 
18569 static int
18570 bxe_init_firmware(struct bxe_softc *sc)
18571 {
18572     if (CHIP_IS_E1(sc)) {
18573         ecore_init_e1_firmware(sc);
18574         sc->iro_array = e1_iro_arr;
18575     } else if (CHIP_IS_E1H(sc)) {
18576         ecore_init_e1h_firmware(sc);
18577         sc->iro_array = e1h_iro_arr;
18578     } else if (!CHIP_IS_E1x(sc)) {
18579         ecore_init_e2_firmware(sc);
18580         sc->iro_array = e2_iro_arr;
18581     } else {
18582         BLOGE(sc, "Unsupported chip revision\n");
18583         return (-1);
18584     }
18585 
18586     return (0);
18587 }
18588 
18589 static void
18590 bxe_release_firmware(struct bxe_softc *sc)
18591 {
18592     /* Do nothing */
18593     return;
18594 }
18595 
18596 static int
18597 ecore_gunzip(struct bxe_softc *sc,
18598              const uint8_t    *zbuf,
18599              int              len)
18600 {
18601     /* XXX : Implement... */
18602     BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18603     return (FALSE);
18604 }
18605 
18606 static void
18607 ecore_reg_wr_ind(struct bxe_softc *sc,
18608                  uint32_t         addr,
18609                  uint32_t         val)
18610 {
18611     bxe_reg_wr_ind(sc, addr, val);
18612 }
18613 
18614 static void
18615 ecore_write_dmae_phys_len(struct bxe_softc *sc,
18616                           bus_addr_t       phys_addr,
18617                           uint32_t         addr,
18618                           uint32_t         len)
18619 {
18620     bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18621 }
18622 
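/* Copy a dword array into STORM internal memory, one register write per dword. */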
18623 void
18624 ecore_storm_memset_struct(struct bxe_softc *sc,
18625                           uint32_t         addr,
18626                           size_t           size,
18627                           uint32_t         *data)
18628 {
18629     uint8_t i;
18630     for (i = 0; i < size/4; i++) {
18631         REG_WR(sc, addr + (i * 4), data[i]);
18632     }
18633 }
18634 
18635 
18636 /*
18637  * character device - ioctl interface definitions
18638  */
18639 
18640 
18641 #include "bxe_dump.h"
18642 #include "bxe_ioctl.h"
18643 #include <sys/conf.h>
18644 
18645 static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18646                 struct thread *td);
18647 
18648 static struct cdevsw bxe_cdevsw = {
18649     .d_version = D_VERSION,
18650     .d_ioctl = bxe_eioctl,
18651     .d_name = "bxecnic",
18652 };
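
/*
 * Usage sketch (illustrative only, not compiled into the driver): a userland
 * management tool opens the character device created in bxe_add_cdev() and
 * issues the ioctls handled by bxe_eioctl(). The device path and error
 * handling below are simplifying assumptions.
 *
 *     int fd = open("/dev/bxe0", O_RDWR);
 *     bxe_grcdump_t dump = { 0 };
 *     ioctl(fd, BXE_GRC_DUMP_SIZE, &dump);   // query the required buffer size
 *     dump.grcdump = malloc(dump.grcdump_size);
 *     // the driver's trigger_grcdump flag must be set for BXE_GRC_DUMP to
 *     // collect a new dump; otherwise EINVAL is returned
 *     ioctl(fd, BXE_GRC_DUMP, &dump);
 */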
18653 
18654 #define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18655 
18656 
18657 #define DUMP_ALL_PRESETS        0x1FFF
18658 #define DUMP_MAX_PRESETS        13
18659 #define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18660 #define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18661 #define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18662 #define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18663 #define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18664 
18665 #define IS_REG_IN_PRESET(presets, idx)  \
18666                 ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18667 
18668 
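/*
 * The GRC dump is organized into DUMP_MAX_PRESETS register groups
 * ("presets"). Every entry in the register tables carries a chip mask and a
 * preset bitmask; a register is dumped only when it exists on the current
 * chip (bxe_is_reg_in_chip) and its preset bit is selected
 * (IS_REG_IN_PRESET).
 */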
18669 static int
18670 bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18671 {
18672     if (CHIP_IS_E1(sc))
18673         return dump_num_registers[0][preset-1];
18674     else if (CHIP_IS_E1H(sc))
18675         return dump_num_registers[1][preset-1];
18676     else if (CHIP_IS_E2(sc))
18677         return dump_num_registers[2][preset-1];
18678     else if (CHIP_IS_E3A0(sc))
18679         return dump_num_registers[3][preset-1];
18680     else if (CHIP_IS_E3B0(sc))
18681         return dump_num_registers[4][preset-1];
18682     else
18683         return 0;
18684 }
18685 
18686 static int
18687 bxe_get_total_regs_len32(struct bxe_softc *sc)
18688 {
18689     uint32_t preset_idx;
18690     int regdump_len32 = 0;
18691 
18692 
18693     /* Calculate the total preset regs length */
18694     for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18695         regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18696     }
18697 
18698     return regdump_len32;
18699 }
18700 
18701 static const uint32_t *
18702 __bxe_get_page_addr_ar(struct bxe_softc *sc)
18703 {
18704     if (CHIP_IS_E2(sc))
18705         return page_vals_e2;
18706     else if (CHIP_IS_E3(sc))
18707         return page_vals_e3;
18708     else
18709         return NULL;
18710 }
18711 
18712 static uint32_t
18713 __bxe_get_page_reg_num(struct bxe_softc *sc)
18714 {
18715     if (CHIP_IS_E2(sc))
18716         return PAGE_MODE_VALUES_E2;
18717     else if (CHIP_IS_E3(sc))
18718         return PAGE_MODE_VALUES_E3;
18719     else
18720         return 0;
18721 }
18722 
18723 static const uint32_t *
18724 __bxe_get_page_write_ar(struct bxe_softc *sc)
18725 {
18726     if (CHIP_IS_E2(sc))
18727         return page_write_regs_e2;
18728     else if (CHIP_IS_E3(sc))
18729         return page_write_regs_e3;
18730     else
18731         return NULL;
18732 }
18733 
18734 static uint32_t
18735 __bxe_get_page_write_num(struct bxe_softc *sc)
18736 {
18737     if (CHIP_IS_E2(sc))
18738         return PAGE_WRITE_REGS_E2;
18739     else if (CHIP_IS_E3(sc))
18740         return PAGE_WRITE_REGS_E3;
18741     else
18742         return 0;
18743 }
18744 
18745 static const struct reg_addr *
18746 __bxe_get_page_read_ar(struct bxe_softc *sc)
18747 {
18748     if (CHIP_IS_E2(sc))
18749         return page_read_regs_e2;
18750     else if (CHIP_IS_E3(sc))
18751         return page_read_regs_e3;
18752     else
18753         return NULL;
18754 }
18755 
18756 static uint32_t
18757 __bxe_get_page_read_num(struct bxe_softc *sc)
18758 {
18759     if (CHIP_IS_E2(sc))
18760         return PAGE_READ_REGS_E2;
18761     else if (CHIP_IS_E3(sc))
18762         return PAGE_READ_REGS_E3;
18763     else
18764         return 0;
18765 }
18766 
18767 static bool
18768 bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18769 {
18770     if (CHIP_IS_E1(sc))
18771         return IS_E1_REG(reg_info->chips);
18772     else if (CHIP_IS_E1H(sc))
18773         return IS_E1H_REG(reg_info->chips);
18774     else if (CHIP_IS_E2(sc))
18775         return IS_E2_REG(reg_info->chips);
18776     else if (CHIP_IS_E3A0(sc))
18777         return IS_E3A0_REG(reg_info->chips);
18778     else if (CHIP_IS_E3B0(sc))
18779         return IS_E3B0_REG(reg_info->chips);
18780     else
18781         return 0;
18782 }
18783 
18784 static bool
18785 bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18786 {
18787     if (CHIP_IS_E1(sc))
18788         return IS_E1_REG(wreg_info->chips);
18789     else if (CHIP_IS_E1H(sc))
18790         return IS_E1H_REG(wreg_info->chips);
18791     else if (CHIP_IS_E2(sc))
18792         return IS_E2_REG(wreg_info->chips);
18793     else if (CHIP_IS_E3A0(sc))
18794         return IS_E3A0_REG(wreg_info->chips);
18795     else if (CHIP_IS_E3B0(sc))
18796         return IS_E3B0_REG(wreg_info->chips);
18797     else
18798         return 0;
18799 }
18800 
18801 /**
18802  * bxe_read_pages_regs - read "paged" registers
18803  *
18804  * @sc          device handle
18805  * @p           output buffer
18806  * @preset      preset index selecting which registers to read
18807  * Reads "paged" memories: memories that may only be read by first writing to a
18808  * specific address ("write address") and then reading from a specific address
18809  * ("read address"). There may be more than one write address per "page" and
18810  * more than one read address per write address.
18811  */
18812 static void
18813 bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18814 {
18815     uint32_t i, j, k, n;
18816 
18817     /* addresses of the paged registers */
18818     const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18819     /* number of paged registers */
18820     int num_pages = __bxe_get_page_reg_num(sc);
18821     /* write addresses */
18822     const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18823     /* number of write addresses */
18824     int write_num = __bxe_get_page_write_num(sc);
18825     /* read addresses info */
18826     const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18827     /* number of read addresses */
18828     int read_num = __bxe_get_page_read_num(sc);
18829     uint32_t addr, size;
18830 
18831     for (i = 0; i < num_pages; i++) {
18832         for (j = 0; j < write_num; j++) {
18833             REG_WR(sc, write_addr[j], page_addr[i]);
18834 
18835             for (k = 0; k < read_num; k++) {
18836                 if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18837                     size = read_addr[k].size;
18838                     for (n = 0; n < size; n++) {
18839                         addr = read_addr[k].addr + n*4;
18840                         *p++ = REG_RD(sc, addr);
18841                     }
18842                 }
18843             }
18844         }
18845     }
18846     return;
18847 }
18848 
18849 
18850 static int
18851 bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18852 {
18853     uint32_t i, j, addr;
18854     const struct wreg_addr *wreg_addr_p = NULL;
18855 
18856     if (CHIP_IS_E1(sc))
18857         wreg_addr_p = &wreg_addr_e1;
18858     else if (CHIP_IS_E1H(sc))
18859         wreg_addr_p = &wreg_addr_e1h;
18860     else if (CHIP_IS_E2(sc))
18861         wreg_addr_p = &wreg_addr_e2;
18862     else if (CHIP_IS_E3A0(sc))
18863         wreg_addr_p = &wreg_addr_e3;
18864     else if (CHIP_IS_E3B0(sc))
18865         wreg_addr_p = &wreg_addr_e3b0;
18866     else
18867         return (-1);
18868 
18869     /* Read the idle_chk registers */
18870     for (i = 0; i < IDLE_REGS_COUNT; i++) {
18871         if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18872             IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18873             for (j = 0; j < idle_reg_addrs[i].size; j++)
18874                 *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18875         }
18876     }
18877 
18878     /* Read the regular registers */
18879     for (i = 0; i < REGS_COUNT; i++) {
18880         if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18881             IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18882             for (j = 0; j < reg_addrs[i].size; j++)
18883                 *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18884         }
18885     }
18886 
18887     /* Read the CAM registers */
18888     if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18889         IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18890         for (i = 0; i < wreg_addr_p->size; i++) {
18891             *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18892 
18893             /* In case of wreg_addr register, read additional
18894                registers from read_regs array
18895              */
18896             for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18897                 addr = *(wreg_addr_p->read_regs);
18898                 *p++ = REG_RD(sc, addr + j*4);
18899             }
18900         }
18901     }
18902 
18903     /* Paged registers are supported in E2 & E3 only */
18904     if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18905         /* Read "paged" registers */
18906         bxe_read_pages_regs(sc, p, preset);
18907     }
18908 
18909     return 0;
18910 }
18911 
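/*
 * Collect a full GRC register dump into sc->grc_dump. Parity attentions are
 * disabled on both paths for the duration of the dump, and key DMA region
 * addresses are logged to help interpret the result.
 */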
18912 int
18913 bxe_grc_dump(struct bxe_softc *sc)
18914 {
18915     int rval = 0;
18916     uint32_t preset_idx;
18917     uint8_t *buf;
18918     uint32_t size;
18919     struct  dump_header *d_hdr;
18920     uint32_t i;
18921     uint32_t reg_val;
18922     uint32_t reg_addr;
18923     uint32_t cmd_offset;
18924     struct ecore_ilt *ilt = SC_ILT(sc);
18925     struct bxe_fastpath *fp;
18926     struct ilt_client_info *ilt_cli;
18927     int grc_dump_size;
18928 
18929 
18930     if (sc->grcdump_done || sc->grcdump_started)
18931         return (rval);
18932 
18933     sc->grcdump_started = 1;
18934     BLOGI(sc, "Started collecting grcdump\n");
18935 
18936     grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18937                 sizeof(struct  dump_header);
18938 
18939     sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18940 
18941     if (sc->grc_dump == NULL) {
18942         BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
18943         return(ENOMEM);
18944     }
18945 
18946 
18947 
18948     /* Disable parity attentions while dumping, since the dump may
18949      * cause false alarms by reading registers that were never written.
18950      * Parity attentions are re-enabled right after the dump.
18951      */
18952 
18953     /* Disable parity on path 0 */
18954     bxe_pretend_func(sc, 0);
18955 
18956     ecore_disable_blocks_parity(sc);
18957 
18958     /* Disable parity on path 1 */
18959     bxe_pretend_func(sc, 1);
18960     ecore_disable_blocks_parity(sc);
18961 
18962     /* Return to current function */
18963     bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18964 
18965     buf = sc->grc_dump;
18966     d_hdr = sc->grc_dump;
18967 
18968     d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18969     d_hdr->version = BNX2X_DUMP_VERSION;
18970     d_hdr->preset = DUMP_ALL_PRESETS;
18971 
18972     if (CHIP_IS_E1(sc)) {
18973         d_hdr->dump_meta_data = DUMP_CHIP_E1;
18974     } else if (CHIP_IS_E1H(sc)) {
18975         d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18976     } else if (CHIP_IS_E2(sc)) {
18977         d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18978                 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18979     } else if (CHIP_IS_E3A0(sc)) {
18980         d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18981                 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18982     } else if (CHIP_IS_E3B0(sc)) {
18983         d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18984                 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18985     }
18986 
18987     buf += sizeof(struct  dump_header);
18988 
18989     for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18990 
18991         /* Skip presets with IOR */
18992         if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18993             (preset_idx == 11))
18994             continue;
18995 
18996         rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18997 
18998         if (rval)
18999             break;
19000 
19001         size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
19002 
19003         buf += size;
19004     }
19005 
19006     bxe_pretend_func(sc, 0);
19007     ecore_clear_blocks_parity(sc);
19008     ecore_enable_blocks_parity(sc);
19009 
19010     bxe_pretend_func(sc, 1);
19011     ecore_clear_blocks_parity(sc);
19012     ecore_enable_blocks_parity(sc);
19013 
19014     /* Return to current function */
19015     bxe_pretend_func(sc, SC_ABS_FUNC(sc));
19016 
19017 
19018 
19019     if (sc->state == BXE_STATE_OPEN) {
19020         if (sc->fw_stats_req != NULL) {
19021             BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
19022                   (uintmax_t)sc->fw_stats_req_mapping,
19023                   (uintmax_t)sc->fw_stats_data_mapping,
19024                   sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
19025         }
19026         if (sc->def_sb != NULL) {
19027             BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
19028                   (void *)sc->def_sb_dma.paddr, sc->def_sb,
19029                   sizeof(struct host_sp_status_block));
19030         }
19031         if (sc->eq_dma.vaddr != NULL) {
19032             BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
19033                   (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
19034         }
19035         if (sc->sp_dma.vaddr != NULL) {
19036             BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
19037                   (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
19038                   sizeof(struct bxe_slowpath));
19039         }
19040         if (sc->spq_dma.vaddr != NULL) {
19041             BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
19042                   (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
19043         }
19044         if (sc->gz_buf_dma.vaddr != NULL) {
19045             BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
19046                   (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
19047                   FW_BUF_SIZE);
19048         }
19049         for (i = 0; i < sc->num_queues; i++) {
19050             fp = &sc->fp[i];
19051             if (fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
19052                 fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
19053                 fp->rx_sge_dma.vaddr != NULL) {
19054 
19055                 BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
19056                       (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
19057                       sizeof(union bxe_host_hc_status_block));
19058                 BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19059                       (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
19060                       (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
19061                 BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19062                       (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
19063                       (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
19064                 BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
19065                       (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
19066                       (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
19067                 BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19068                       (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
19069                       (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
19070             }
19071         }
19072         if (ilt != NULL) {
19073             ilt_cli = &ilt->clients[1];
19074             if (ilt->lines != NULL) {
19075                 for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
19076                     BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
19077                           (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
19078                           ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
19079                 }
19080             }
19081         }
19082 
19083 
19084         cmd_offset = DMAE_REG_CMD_MEM;
19085         for (i = 0; i < 224; i++) {
19086             reg_addr = (cmd_offset + (i * 4));
19087             reg_val = REG_RD(sc, reg_addr);
19088             BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n", i,
19089                   reg_addr, reg_val);
19090         }
19091     }
19092 
19093     BLOGI(sc, "Collection of grcdump done\n");
19094     sc->grcdump_done = 1;
19095     return(rval);
19096 }
19097 
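/*
 * Create the per-instance ioctl character device (named after the interface)
 * along with the bounce buffer used for EEPROM read/write requests.
 */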
19098 static int
19099 bxe_add_cdev(struct bxe_softc *sc)
19100 {
19101     sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
19102 
19103     if (sc->eeprom == NULL) {
19104         BLOGW(sc, "Unable to alloc for eeprom size buffer\n");
19105         return (-1);
19106     }
19107 
19108     sc->ioctl_dev = make_dev(&bxe_cdevsw,
19109                             sc->ifp->if_dunit,
19110                             UID_ROOT,
19111                             GID_WHEEL,
19112                             0600,
19113                             "%s",
19114                             if_name(sc->ifp));
19115 
19116     if (sc->ioctl_dev == NULL) {
19117         free(sc->eeprom, M_DEVBUF);
19118         sc->eeprom = NULL;
19119         return (-1);
19120     }
19121 
19122     sc->ioctl_dev->si_drv1 = sc;
19123 
19124     return (0);
19125 }
19126 
19127 static void
19128 bxe_del_cdev(struct bxe_softc *sc)
19129 {
19130     if (sc->ioctl_dev != NULL)
19131         destroy_dev(sc->ioctl_dev);
19132 
19133     if (sc->eeprom != NULL) {
19134         free(sc->eeprom, M_DEVBUF);
19135         sc->eeprom = NULL;
19136     }
19137     sc->ioctl_dev = NULL;
19138 
19139     return;
19140 }
19141 
19142 static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
19143 {
19144 
19145     if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
19146         return FALSE;
19147 
19148     return TRUE;
19149 }
19150 
19151 
19152 static int
19153 bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19154 {
19155     int rval = 0;
19156 
19157     if(!bxe_is_nvram_accessible(sc)) {
19158         BLOGW(sc, "Cannot access eeprom when interface is down\n");
19159         return (-EAGAIN);
19160     }
19161     rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);
19162 
19163 
19164    return (rval);
19165 }
19166 
19167 static int
19168 bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19169 {
19170     int rval = 0;
19171 
19172     if(!bxe_is_nvram_accessible(sc)) {
19173         BLOGW(sc, "Cannot access eeprom when interface is down\n");
19174         return (-EAGAIN);
19175     }
19176     rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);
19177 
19178     return (rval);
19179 }
19180 
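/*
 * Handle a BXE_EEPROM ioctl: user data is staged through the sc->eeprom
 * bounce buffer and passed to the NVRAM read/write helpers.
 */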
19181 static int
19182 bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
19183 {
19184     int rval = 0;
19185 
19186     switch (eeprom->eeprom_cmd) {
19187 
19188     case BXE_EEPROM_CMD_SET_EEPROM:
19189 
19190         rval = copyin(eeprom->eeprom_data, sc->eeprom,
19191                        eeprom->eeprom_data_len);
19192 
19193         if (rval)
19194             break;
19195 
19196         rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19197                        eeprom->eeprom_data_len);
19198         break;
19199 
19200     case BXE_EEPROM_CMD_GET_EEPROM:
19201 
19202         rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19203                        eeprom->eeprom_data_len);
19204 
19205         if (rval) {
19206             break;
19207         }
19208 
19209         rval = copyout(sc->eeprom, eeprom->eeprom_data,
19210                        eeprom->eeprom_data_len);
19211         break;
19212 
19213     default:
19214             rval = EINVAL;
19215             break;
19216     }
19217 
19218     if (rval) {
19219         BLOGW(sc, "ioctl cmd %d  failed rval %d\n", eeprom->eeprom_cmd, rval);
19220     }
19221 
19222     return (rval);
19223 }
19224 
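/*
 * Fill a bxe_dev_setting_t with the current link configuration: supported
 * and advertised masks, speed, duplex, detected media, PHY address and
 * autonegotiation state.
 */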
19225 static int
19226 bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
19227 {
19228     uint32_t ext_phy_config;
19229     int port = SC_PORT(sc);
19230     int cfg_idx = bxe_get_link_cfg_idx(sc);
19231 
19232     dev_p->supported = sc->port.supported[cfg_idx] |
19233             (sc->port.supported[cfg_idx ^ 1] &
19234             (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
19235     dev_p->advertising = sc->port.advertising[cfg_idx];
19236     if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
19237         ELINK_ETH_PHY_SFP_1G_FIBER) {
19238         dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
19239         dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
19240     }
19241     if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
19242         !(sc->flags & BXE_MF_FUNC_DIS)) {
19243         dev_p->duplex = sc->link_vars.duplex;
19244         if (IS_MF(sc) && !BXE_NOMCP(sc))
19245             dev_p->speed = bxe_get_mf_speed(sc);
19246         else
19247             dev_p->speed = sc->link_vars.line_speed;
19248     } else {
19249         dev_p->duplex = DUPLEX_UNKNOWN;
19250         dev_p->speed = SPEED_UNKNOWN;
19251     }
19252 
19253     dev_p->port = bxe_media_detect(sc);
19254 
19255     ext_phy_config = SHMEM_RD(sc,
19256                          dev_info.port_hw_config[port].external_phy_config);
19257     if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
19258         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
19259         dev_p->phy_address =  sc->port.phy_addr;
19260     else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19261             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
19262         ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19263             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
19264         dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
19265     else
19266         dev_p->phy_address = 0;
19267 
19268     if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
19269         dev_p->autoneg = AUTONEG_ENABLE;
19270     else
19271        dev_p->autoneg = AUTONEG_DISABLE;
19272 
19273 
19274     return 0;
19275 }
19276 
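/*
 * ioctl entry point for the management character device: dispatches GRC
 * dump, driver info, link settings, register and PCI config read/write,
 * permanent MAC address, and EEPROM requests.
 */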
19277 static int
19278 bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19279         struct thread *td)
19280 {
19281     struct bxe_softc    *sc;
19282     int                 rval = 0;
19283     bxe_grcdump_t       *dump = NULL;
19284     int grc_dump_size;
19285     bxe_drvinfo_t   *drv_infop = NULL;
19286     bxe_dev_setting_t  *dev_p;
19287     bxe_dev_setting_t  dev_set;
19288     bxe_get_regs_t  *reg_p;
19289     bxe_reg_rdw_t *reg_rdw_p;
19290     bxe_pcicfg_rdw_t *cfg_rdw_p;
19291     bxe_perm_mac_addr_t *mac_addr_p;
19292 
19293 
19294     if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19295         return ENXIO;
19296 
19297     dump = (bxe_grcdump_t *)data;
19298 
19299     switch(cmd) {
19300 
19301         case BXE_GRC_DUMP_SIZE:
19302             dump->pci_func = sc->pcie_func;
19303             dump->grcdump_size =
19304                 (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19305                      sizeof(struct  dump_header);
19306             break;
19307 
19308         case BXE_GRC_DUMP:
19309 
19310             grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19311                                 sizeof(struct  dump_header);
19312             if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
19313                 (dump->grcdump_size < grc_dump_size)) {
19314                 rval = EINVAL;
19315                 break;
19316             }
19317 
19318             if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
19319                 (!sc->grcdump_started)) {
19320                 rval =  bxe_grc_dump(sc);
19321             }
19322 
19323             if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
19324                 (sc->grc_dump != NULL))  {
19325                 dump->grcdump_dwords = grc_dump_size >> 2;
19326                 rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19327                 free(sc->grc_dump, M_DEVBUF);
19328                 sc->grc_dump = NULL;
19329                 sc->grcdump_started = 0;
19330                 sc->grcdump_done = 0;
19331             }
19332 
19333             break;
19334 
19335         case BXE_DRV_INFO:
19336             drv_infop = (bxe_drvinfo_t *)data;
19337             snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
19338             snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
19339                 BXE_DRIVER_VERSION);
19340             snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
19341                 sc->devinfo.bc_ver_str);
19342             snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
19343                 "%s", sc->fw_ver_str);
19344             drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
19345             drv_infop->reg_dump_len =
19346                 (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
19347                     + sizeof(struct  dump_header);
19348             snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
19349                 sc->pcie_bus, sc->pcie_device, sc->pcie_func);
19350             break;
19351 
19352         case BXE_DEV_SETTING:
19353             dev_p = (bxe_dev_setting_t *)data;
19354             bxe_get_settings(sc, &dev_set);
19355             dev_p->supported = dev_set.supported;
19356             dev_p->advertising = dev_set.advertising;
19357             dev_p->speed = dev_set.speed;
19358             dev_p->duplex = dev_set.duplex;
19359             dev_p->port = dev_set.port;
19360             dev_p->phy_address = dev_set.phy_address;
19361             dev_p->autoneg = dev_set.autoneg;
19362 
19363             break;
19364 
19365         case BXE_GET_REGS:
19366 
19367             reg_p = (bxe_get_regs_t *)data;
19368             grc_dump_size = reg_p->reg_buf_len;
19369 
19370             if((!sc->grcdump_done) && (!sc->grcdump_started)) {
19371                 bxe_grc_dump(sc);
19372             }
19373             if((sc->grcdump_done) && (sc->grcdump_started) &&
19374                 (sc->grc_dump != NULL))  {
19375                 rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
19376                 free(sc->grc_dump, M_DEVBUF);
19377                 sc->grc_dump = NULL;
19378                 sc->grcdump_started = 0;
19379                 sc->grcdump_done = 0;
19380             }
19381 
19382             break;
19383 
19384         case BXE_RDW_REG:
19385             reg_rdw_p = (bxe_reg_rdw_t *)data;
19386             if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
19387                 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19388                 reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
19389 
19390             if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
19391                 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19392                 REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
19393 
19394             break;
19395 
19396         case BXE_RDW_PCICFG:
19397             cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
19398             if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
19399 
19400                 cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
19401                                          cfg_rdw_p->cfg_width);
19402 
19403             } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
19404                 pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
19405                             cfg_rdw_p->cfg_width);
19406             } else {
19407                 BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
19408             }
19409             break;
19410 
19411         case BXE_MAC_ADDR:
19412             mac_addr_p = (bxe_perm_mac_addr_t *)data;
19413             snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
19414                 sc->mac_addr_str);
19415             break;
19416 
19417         case BXE_EEPROM:
19418             rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
19419             break;
19420 
19421 
19422         default:
19423             break;
19424     }
19425 
19426     return (rval);
19427 }
19428 
19429 #ifdef DEBUGNET
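/*
 * debugnet(4) glue: these callbacks let the kernel transmit on the first
 * queue and poll all receive queues directly while dumping over the network,
 * bypassing the normal ifnet transmit path.
 */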
19430 static void
19431 bxe_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
19432 {
19433 	struct bxe_softc *sc;
19434 
19435 	sc = if_getsoftc(ifp);
19436 	BXE_CORE_LOCK(sc);
19437 	*nrxr = sc->num_queues;
19438 	*ncl = DEBUGNET_MAX_IN_FLIGHT;
19439 	*clsize = sc->fp[0].mbuf_alloc_size;
19440 	BXE_CORE_UNLOCK(sc);
19441 }
19442 
19443 static void
19444 bxe_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused)
19445 {
19446 }
19447 
19448 static int
19449 bxe_debugnet_transmit(struct ifnet *ifp, struct mbuf *m)
19450 {
19451 	struct bxe_softc *sc;
19452 	int error;
19453 
19454 	sc = if_getsoftc(ifp);
19455 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
19456 	    IFF_DRV_RUNNING || !sc->link_vars.link_up)
19457 		return (ENOENT);
19458 
19459 	error = bxe_tx_encap(&sc->fp[0], &m);
19460 	if (error != 0 && m != NULL)
19461 		m_freem(m);
19462 	return (error);
19463 }
19464 
19465 static int
19466 bxe_debugnet_poll(struct ifnet *ifp, int count)
19467 {
19468 	struct bxe_softc *sc;
19469 	int i;
19470 
19471 	sc = if_getsoftc(ifp);
19472 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
19473 	    !sc->link_vars.link_up)
19474 		return (ENOENT);
19475 
19476 	for (i = 0; i < sc->num_queues; i++)
19477 		(void)bxe_rxeof(sc, &sc->fp[i]);
19478 	(void)bxe_txeof(sc, &sc->fp[0]);
19479 	return (0);
19480 }
19481 #endif /* DEBUGNET */
19482