xref: /freebsd/sys/dev/bxe/bxe.c (revision 408c909dc64f77d2696d6fec77a2e0b00255cf96)
1  /*-
2   * SPDX-License-Identifier: BSD-2-Clause
3   *
4   * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
5   *
6   * Redistribution and use in source and binary forms, with or without
7   * modification, are permitted provided that the following conditions
8   * are met:
9   *
10   * 1. Redistributions of source code must retain the above copyright
11   *    notice, this list of conditions and the following disclaimer.
12   * 2. Redistributions in binary form must reproduce the above copyright
13   *    notice, this list of conditions and the following disclaimer in the
14   *    documentation and/or other materials provided with the distribution.
15   *
16   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
17   * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18   * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19   * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20   * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21   * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22   * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23   * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24   * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26   * THE POSSIBILITY OF SUCH DAMAGE.
27   */
28  
29  #include <sys/cdefs.h>
30  #define BXE_DRIVER_VERSION "1.78.91"
31  
32  #include "bxe.h"
33  #include "ecore_sp.h"
34  #include "ecore_init.h"
35  #include "ecore_init_ops.h"
36  
37  #include "57710_int_offsets.h"
38  #include "57711_int_offsets.h"
39  #include "57712_int_offsets.h"
40  
41  /*
42   * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
43   * explicitly here for older kernels that don't include this changeset.
44   */
45  #ifndef CTLTYPE_U64
46  #define CTLTYPE_U64      CTLTYPE_QUAD
47  #define sysctl_handle_64 sysctl_handle_quad
48  #endif
49  
50  /*
51   * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
52   * here as zero(0) for older kernels that don't include this changeset
53   * thereby masking the functionality.
54   */
55  #ifndef CSUM_TCP_IPV6
56  #define CSUM_TCP_IPV6 0
57  #define CSUM_UDP_IPV6 0
58  #endif
59  
60  #define BXE_DEF_SB_ATT_IDX 0x0001
61  #define BXE_DEF_SB_IDX     0x0002
62  
63  /*
64   * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
65   * function HW initialization.
66   */
67  #define FLR_WAIT_USEC     10000 /* 10 msecs */
68  #define FLR_WAIT_INTERVAL 50    /* usecs */
69  #define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
70  
71  struct pbf_pN_buf_regs {
72      int pN;
73      uint32_t init_crd;
74      uint32_t crd;
75      uint32_t crd_freed;
76  };
77  
78  struct pbf_pN_cmd_regs {
79      int pN;
80      uint32_t lines_occup;
81      uint32_t lines_freed;
82  };
83  
84  /*
85   * PCI Device ID Table used by bxe_probe().
86   */
87  #define BXE_DEVDESC_MAX 64
88  static struct bxe_device_type bxe_devs[] = {
89      {
90          BRCM_VENDORID,
91          CHIP_NUM_57710,
92          PCI_ANY_ID, PCI_ANY_ID,
93          "QLogic NetXtreme II BCM57710 10GbE"
94      },
95      {
96          BRCM_VENDORID,
97          CHIP_NUM_57711,
98          PCI_ANY_ID, PCI_ANY_ID,
99          "QLogic NetXtreme II BCM57711 10GbE"
100      },
101      {
102          BRCM_VENDORID,
103          CHIP_NUM_57711E,
104          PCI_ANY_ID, PCI_ANY_ID,
105          "QLogic NetXtreme II BCM57711E 10GbE"
106      },
107      {
108          BRCM_VENDORID,
109          CHIP_NUM_57712,
110          PCI_ANY_ID, PCI_ANY_ID,
111          "QLogic NetXtreme II BCM57712 10GbE"
112      },
113      {
114          BRCM_VENDORID,
115          CHIP_NUM_57712_MF,
116          PCI_ANY_ID, PCI_ANY_ID,
117          "QLogic NetXtreme II BCM57712 MF 10GbE"
118      },
119      {
120          BRCM_VENDORID,
121          CHIP_NUM_57800,
122          PCI_ANY_ID, PCI_ANY_ID,
123          "QLogic NetXtreme II BCM57800 10GbE"
124      },
125      {
126          BRCM_VENDORID,
127          CHIP_NUM_57800_MF,
128          PCI_ANY_ID, PCI_ANY_ID,
129          "QLogic NetXtreme II BCM57800 MF 10GbE"
130      },
131      {
132          BRCM_VENDORID,
133          CHIP_NUM_57810,
134          PCI_ANY_ID, PCI_ANY_ID,
135          "QLogic NetXtreme II BCM57810 10GbE"
136      },
137      {
138          BRCM_VENDORID,
139          CHIP_NUM_57810_MF,
140          PCI_ANY_ID, PCI_ANY_ID,
141          "QLogic NetXtreme II BCM57810 MF 10GbE"
142      },
143      {
144          BRCM_VENDORID,
145          CHIP_NUM_57811,
146          PCI_ANY_ID, PCI_ANY_ID,
147          "QLogic NetXtreme II BCM57811 10GbE"
148      },
149      {
150          BRCM_VENDORID,
151          CHIP_NUM_57811_MF,
152          PCI_ANY_ID, PCI_ANY_ID,
153          "QLogic NetXtreme II BCM57811 MF 10GbE"
154      },
155      {
156          BRCM_VENDORID,
157          CHIP_NUM_57840_4_10,
158          PCI_ANY_ID, PCI_ANY_ID,
159          "QLogic NetXtreme II BCM57840 4x10GbE"
160      },
161      {
162          QLOGIC_VENDORID,
163          CHIP_NUM_57840_4_10,
164          PCI_ANY_ID, PCI_ANY_ID,
165          "QLogic NetXtreme II BCM57840 4x10GbE"
166      },
167      {
168          BRCM_VENDORID,
169          CHIP_NUM_57840_2_20,
170          PCI_ANY_ID, PCI_ANY_ID,
171          "QLogic NetXtreme II BCM57840 2x20GbE"
172      },
173      {
174          BRCM_VENDORID,
175          CHIP_NUM_57840_MF,
176          PCI_ANY_ID, PCI_ANY_ID,
177          "QLogic NetXtreme II BCM57840 MF 10GbE"
178      },
179      {
180          0, 0, 0, 0, NULL
181      }
182  };
183  
184  MALLOC_DECLARE(M_BXE_ILT);
185  MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
186  
187  /*
188   * FreeBSD device entry points.
189   */
190  static int bxe_probe(device_t);
191  static int bxe_attach(device_t);
192  static int bxe_detach(device_t);
193  static int bxe_shutdown(device_t);
194  
195  
196  /*
197   * FreeBSD KLD module/device interface event handler method.
198   */
199  static device_method_t bxe_methods[] = {
200      /* Device interface (device_if.h) */
201      DEVMETHOD(device_probe,     bxe_probe),
202      DEVMETHOD(device_attach,    bxe_attach),
203      DEVMETHOD(device_detach,    bxe_detach),
204      DEVMETHOD(device_shutdown,  bxe_shutdown),
205      /* Bus interface (bus_if.h) */
206      DEVMETHOD(bus_print_child,  bus_generic_print_child),
207      DEVMETHOD(bus_driver_added, bus_generic_driver_added),
208      KOBJMETHOD_END
209  };
210  
211  /*
212   * FreeBSD KLD Module data declaration
213   */
214  static driver_t bxe_driver = {
215      "bxe",                   /* module name */
216      bxe_methods,             /* event handler */
217      sizeof(struct bxe_softc) /* extra data */
218  };
219  
220  MODULE_DEPEND(bxe, pci, 1, 1, 1);
221  MODULE_DEPEND(bxe, ether, 1, 1, 1);
222  DRIVER_MODULE(bxe, pci, bxe_driver, 0, 0);
223  
224  DEBUGNET_DEFINE(bxe);
225  
226  /* resources needed for unloading a previously loaded device */
227  
228  #define BXE_PREV_WAIT_NEEDED 1
229  struct mtx bxe_prev_mtx;
230  MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
231  struct bxe_prev_list_node {
232      LIST_ENTRY(bxe_prev_list_node) node;
233      uint8_t bus;
234      uint8_t slot;
235      uint8_t path;
236      uint8_t aer; /* XXX automatic error recovery */
237      uint8_t undi;
238  };
239  static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
240  
241  static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
242  
243  /* Tunable device values... */
244  
245  SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
246      "bxe driver parameters");
247  
248  /* Debug */
249  unsigned long bxe_debug = 0;
250  SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
251               &bxe_debug, 0, "Debug logging mode");
252  
253  /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
254  static int bxe_interrupt_mode = INTR_MODE_MSIX;
255  SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
256             &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
257  
258  /* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
259  static int bxe_queue_count = 4;
260  SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
261             &bxe_queue_count, 0, "Multi-Queue queue count");
262  
263  /* max number of buffers per queue (default RX_BD_USABLE) */
264  static int bxe_max_rx_bufs = 0;
265  SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
266             &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
267  
268  /* Host interrupt coalescing RX tick timer (usecs) */
269  static int bxe_hc_rx_ticks = 25;
270  SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
271             &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
272  
273  /* Host interrupt coalescing TX tick timer (usecs) */
274  static int bxe_hc_tx_ticks = 50;
275  SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
276             &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
277  
278  /* Maximum number of Rx packets to process at a time */
279  static int bxe_rx_budget = 0xffffffff;
280  SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_RDTUN,
281             &bxe_rx_budget, 0, "Rx processing budget");
282  
283  /* Maximum LRO aggregation size */
284  static int bxe_max_aggregation_size = 0;
285  SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_RDTUN,
286             &bxe_max_aggregation_size, 0, "max aggregation size");
287  
288  /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
289  static int bxe_mrrs = -1;
290  SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
291             &bxe_mrrs, 0, "PCIe maximum read request size");
292  
293  /* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
294  static int bxe_autogreeen = 0;
295  SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
296             &bxe_autogreeen, 0, "AutoGrEEEn support");
297  
298  /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
299  static int bxe_udp_rss = 0;
300  SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
301             &bxe_udp_rss, 0, "UDP RSS support");
302  
303  
304  #define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
305  
306  #define STATS_OFFSET32(stat_name)                   \
307      (offsetof(struct bxe_eth_stats, stat_name) / 4)
308  
309  #define Q_STATS_OFFSET32(stat_name)                   \
310      (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
311  
312  static const struct {
313      uint32_t offset;
314      uint32_t size;
315      uint32_t flags;
316  #define STATS_FLAGS_PORT  1
317  #define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
318  #define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
319      char string[STAT_NAME_LEN];
320  } bxe_eth_stats_arr[] = {
321      { STATS_OFFSET32(total_bytes_received_hi),
322                  8, STATS_FLAGS_BOTH, "rx_bytes" },
323      { STATS_OFFSET32(error_bytes_received_hi),
324                  8, STATS_FLAGS_BOTH, "rx_error_bytes" },
325      { STATS_OFFSET32(total_unicast_packets_received_hi),
326                  8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
327      { STATS_OFFSET32(total_multicast_packets_received_hi),
328                  8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
329      { STATS_OFFSET32(total_broadcast_packets_received_hi),
330                  8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
331      { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
332                  8, STATS_FLAGS_PORT, "rx_crc_errors" },
333      { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
334                  8, STATS_FLAGS_PORT, "rx_align_errors" },
335      { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
336                  8, STATS_FLAGS_PORT, "rx_undersize_packets" },
337      { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
338                  8, STATS_FLAGS_PORT, "rx_oversize_packets" },
339      { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
340                  8, STATS_FLAGS_PORT, "rx_fragments" },
341      { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
342                  8, STATS_FLAGS_PORT, "rx_jabbers" },
343      { STATS_OFFSET32(no_buff_discard_hi),
344                  8, STATS_FLAGS_BOTH, "rx_discards" },
345      { STATS_OFFSET32(mac_filter_discard),
346                  4, STATS_FLAGS_PORT, "rx_filtered_packets" },
347      { STATS_OFFSET32(mf_tag_discard),
348                  4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
349      { STATS_OFFSET32(pfc_frames_received_hi),
350                  8, STATS_FLAGS_PORT, "pfc_frames_received" },
351      { STATS_OFFSET32(pfc_frames_sent_hi),
352                  8, STATS_FLAGS_PORT, "pfc_frames_sent" },
353      { STATS_OFFSET32(brb_drop_hi),
354                  8, STATS_FLAGS_PORT, "rx_brb_discard" },
355      { STATS_OFFSET32(brb_truncate_hi),
356                  8, STATS_FLAGS_PORT, "rx_brb_truncate" },
357      { STATS_OFFSET32(pause_frames_received_hi),
358                  8, STATS_FLAGS_PORT, "rx_pause_frames" },
359      { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
360                  8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
361      { STATS_OFFSET32(nig_timer_max),
362                  4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
363      { STATS_OFFSET32(total_bytes_transmitted_hi),
364                  8, STATS_FLAGS_BOTH, "tx_bytes" },
365      { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
366                  8, STATS_FLAGS_PORT, "tx_error_bytes" },
367      { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
368                  8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
369      { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
370                  8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
371      { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
372                  8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
373      { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
374                  8, STATS_FLAGS_PORT, "tx_mac_errors" },
375      { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
376                  8, STATS_FLAGS_PORT, "tx_carrier_errors" },
377      { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
378                  8, STATS_FLAGS_PORT, "tx_single_collisions" },
379      { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
380                  8, STATS_FLAGS_PORT, "tx_multi_collisions" },
381      { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
382                  8, STATS_FLAGS_PORT, "tx_deferred" },
383      { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
384                  8, STATS_FLAGS_PORT, "tx_excess_collisions" },
385      { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
386                  8, STATS_FLAGS_PORT, "tx_late_collisions" },
387      { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
388                  8, STATS_FLAGS_PORT, "tx_total_collisions" },
389      { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
390                  8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
391      { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
392                  8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
393      { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
394                  8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
395      { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
396                  8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
397      { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
398                  8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
399      { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
400                  8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
401      { STATS_OFFSET32(etherstatspktsover1522octets_hi),
402                  8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
403      { STATS_OFFSET32(pause_frames_sent_hi),
404                  8, STATS_FLAGS_PORT, "tx_pause_frames" },
405      { STATS_OFFSET32(total_tpa_aggregations_hi),
406                  8, STATS_FLAGS_FUNC, "tpa_aggregations" },
407      { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
408                  8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
409      { STATS_OFFSET32(total_tpa_bytes_hi),
410                  8, STATS_FLAGS_FUNC, "tpa_bytes"},
411      { STATS_OFFSET32(eee_tx_lpi),
412                  4, STATS_FLAGS_PORT, "eee_tx_lpi"},
413      { STATS_OFFSET32(rx_calls),
414                  4, STATS_FLAGS_FUNC, "rx_calls"},
415      { STATS_OFFSET32(rx_pkts),
416                  4, STATS_FLAGS_FUNC, "rx_pkts"},
417      { STATS_OFFSET32(rx_tpa_pkts),
418                  4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
419      { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
420                  4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
421      { STATS_OFFSET32(rx_bxe_service_rxsgl),
422                  4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
423      { STATS_OFFSET32(rx_jumbo_sge_pkts),
424                  4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
425      { STATS_OFFSET32(rx_soft_errors),
426                  4, STATS_FLAGS_FUNC, "rx_soft_errors"},
427      { STATS_OFFSET32(rx_hw_csum_errors),
428                  4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
429      { STATS_OFFSET32(rx_ofld_frames_csum_ip),
430                  4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
431      { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
432                  4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
433      { STATS_OFFSET32(rx_budget_reached),
434                  4, STATS_FLAGS_FUNC, "rx_budget_reached"},
435      { STATS_OFFSET32(tx_pkts),
436                  4, STATS_FLAGS_FUNC, "tx_pkts"},
437      { STATS_OFFSET32(tx_soft_errors),
438                  4, STATS_FLAGS_FUNC, "tx_soft_errors"},
439      { STATS_OFFSET32(tx_ofld_frames_csum_ip),
440                  4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
441      { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
442                  4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
443      { STATS_OFFSET32(tx_ofld_frames_csum_udp),
444                  4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
445      { STATS_OFFSET32(tx_ofld_frames_lso),
446                  4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
447      { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
448                  4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
449      { STATS_OFFSET32(tx_encap_failures),
450                  4, STATS_FLAGS_FUNC, "tx_encap_failures"},
451      { STATS_OFFSET32(tx_hw_queue_full),
452                  4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
453      { STATS_OFFSET32(tx_hw_max_queue_depth),
454                  4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
455      { STATS_OFFSET32(tx_dma_mapping_failure),
456                  4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
457      { STATS_OFFSET32(tx_max_drbr_queue_depth),
458                  4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
459      { STATS_OFFSET32(tx_window_violation_std),
460                  4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
461      { STATS_OFFSET32(tx_window_violation_tso),
462                  4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
463      { STATS_OFFSET32(tx_chain_lost_mbuf),
464                  4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
465      { STATS_OFFSET32(tx_frames_deferred),
466                  4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
467      { STATS_OFFSET32(tx_queue_xoff),
468                  4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
469      { STATS_OFFSET32(mbuf_defrag_attempts),
470                  4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
471      { STATS_OFFSET32(mbuf_defrag_failures),
472                  4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
473      { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
474                  4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
475      { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
476                  4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
477      { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
478                  4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
479      { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
480                  4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
481      { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
482                  4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
483      { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
484                  4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
485      { STATS_OFFSET32(mbuf_alloc_tx),
486                  4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
487      { STATS_OFFSET32(mbuf_alloc_rx),
488                  4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
489      { STATS_OFFSET32(mbuf_alloc_sge),
490                  4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
491      { STATS_OFFSET32(mbuf_alloc_tpa),
492                  4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
493      { STATS_OFFSET32(tx_queue_full_return),
494                  4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
495      { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
496                  4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
497      { STATS_OFFSET32(tx_request_link_down_failures),
498                  4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
499      { STATS_OFFSET32(bd_avail_too_less_failures),
500                  4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
501      { STATS_OFFSET32(tx_mq_not_empty),
502                  4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
503      { STATS_OFFSET32(nsegs_path1_errors),
504                  4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
505      { STATS_OFFSET32(nsegs_path2_errors),
506                  4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
507  
508  
509  };
510  
511  static const struct {
512      uint32_t offset;
513      uint32_t size;
514      char string[STAT_NAME_LEN];
515  } bxe_eth_q_stats_arr[] = {
516      { Q_STATS_OFFSET32(total_bytes_received_hi),
517                  8, "rx_bytes" },
518      { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
519                  8, "rx_ucast_packets" },
520      { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
521                  8, "rx_mcast_packets" },
522      { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
523                  8, "rx_bcast_packets" },
524      { Q_STATS_OFFSET32(no_buff_discard_hi),
525                  8, "rx_discards" },
526      { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
527                  8, "tx_bytes" },
528      { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
529                  8, "tx_ucast_packets" },
530      { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
531                  8, "tx_mcast_packets" },
532      { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
533                  8, "tx_bcast_packets" },
534      { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
535                  8, "tpa_aggregations" },
536      { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
537                  8, "tpa_aggregated_frames"},
538      { Q_STATS_OFFSET32(total_tpa_bytes_hi),
539                  8, "tpa_bytes"},
540      { Q_STATS_OFFSET32(rx_calls),
541                  4, "rx_calls"},
542      { Q_STATS_OFFSET32(rx_pkts),
543                  4, "rx_pkts"},
544      { Q_STATS_OFFSET32(rx_tpa_pkts),
545                  4, "rx_tpa_pkts"},
546      { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
547                  4, "rx_erroneous_jumbo_sge_pkts"},
548      { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
549                  4, "rx_bxe_service_rxsgl"},
550      { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
551                  4, "rx_jumbo_sge_pkts"},
552      { Q_STATS_OFFSET32(rx_soft_errors),
553                  4, "rx_soft_errors"},
554      { Q_STATS_OFFSET32(rx_hw_csum_errors),
555                  4, "rx_hw_csum_errors"},
556      { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
557                  4, "rx_ofld_frames_csum_ip"},
558      { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
559                  4, "rx_ofld_frames_csum_tcp_udp"},
560      { Q_STATS_OFFSET32(rx_budget_reached),
561                  4, "rx_budget_reached"},
562      { Q_STATS_OFFSET32(tx_pkts),
563                  4, "tx_pkts"},
564      { Q_STATS_OFFSET32(tx_soft_errors),
565                  4, "tx_soft_errors"},
566      { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
567                  4, "tx_ofld_frames_csum_ip"},
568      { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
569                  4, "tx_ofld_frames_csum_tcp"},
570      { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
571                  4, "tx_ofld_frames_csum_udp"},
572      { Q_STATS_OFFSET32(tx_ofld_frames_lso),
573                  4, "tx_ofld_frames_lso"},
574      { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
575                  4, "tx_ofld_frames_lso_hdr_splits"},
576      { Q_STATS_OFFSET32(tx_encap_failures),
577                  4, "tx_encap_failures"},
578      { Q_STATS_OFFSET32(tx_hw_queue_full),
579                  4, "tx_hw_queue_full"},
580      { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
581                  4, "tx_hw_max_queue_depth"},
582      { Q_STATS_OFFSET32(tx_dma_mapping_failure),
583                  4, "tx_dma_mapping_failure"},
584      { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
585                  4, "tx_max_drbr_queue_depth"},
586      { Q_STATS_OFFSET32(tx_window_violation_std),
587                  4, "tx_window_violation_std"},
588      { Q_STATS_OFFSET32(tx_window_violation_tso),
589                  4, "tx_window_violation_tso"},
590      { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
591                  4, "tx_chain_lost_mbuf"},
592      { Q_STATS_OFFSET32(tx_frames_deferred),
593                  4, "tx_frames_deferred"},
594      { Q_STATS_OFFSET32(tx_queue_xoff),
595                  4, "tx_queue_xoff"},
596      { Q_STATS_OFFSET32(mbuf_defrag_attempts),
597                  4, "mbuf_defrag_attempts"},
598      { Q_STATS_OFFSET32(mbuf_defrag_failures),
599                  4, "mbuf_defrag_failures"},
600      { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
601                  4, "mbuf_rx_bd_alloc_failed"},
602      { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
603                  4, "mbuf_rx_bd_mapping_failed"},
604      { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
605                  4, "mbuf_rx_tpa_alloc_failed"},
606      { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
607                  4, "mbuf_rx_tpa_mapping_failed"},
608      { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
609                  4, "mbuf_rx_sge_alloc_failed"},
610      { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
611                  4, "mbuf_rx_sge_mapping_failed"},
612      { Q_STATS_OFFSET32(mbuf_alloc_tx),
613                  4, "mbuf_alloc_tx"},
614      { Q_STATS_OFFSET32(mbuf_alloc_rx),
615                  4, "mbuf_alloc_rx"},
616      { Q_STATS_OFFSET32(mbuf_alloc_sge),
617                  4, "mbuf_alloc_sge"},
618      { Q_STATS_OFFSET32(mbuf_alloc_tpa),
619                  4, "mbuf_alloc_tpa"},
620      { Q_STATS_OFFSET32(tx_queue_full_return),
621                  4, "tx_queue_full_return"},
622      { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
623                  4, "bxe_tx_mq_sc_state_failures"},
624      { Q_STATS_OFFSET32(tx_request_link_down_failures),
625                  4, "tx_request_link_down_failures"},
626      { Q_STATS_OFFSET32(bd_avail_too_less_failures),
627                  4, "bd_avail_too_less_failures"},
628      { Q_STATS_OFFSET32(tx_mq_not_empty),
629                  4, "tx_mq_not_empty"},
630      { Q_STATS_OFFSET32(nsegs_path1_errors),
631                  4, "nsegs_path1_errors"},
632      { Q_STATS_OFFSET32(nsegs_path2_errors),
633                  4, "nsegs_path2_errors"}
634  
635  
636  };
637  
638  #define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
639  #define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
640  
641  
642  static void    bxe_cmng_fns_init(struct bxe_softc *sc,
643                                   uint8_t          read_cfg,
644                                   uint8_t          cmng_type);
645  static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
646  static void    storm_memset_cmng(struct bxe_softc *sc,
647                                   struct cmng_init *cmng,
648                                   uint8_t          port);
649  static void    bxe_set_reset_global(struct bxe_softc *sc);
650  static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
651  static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
652                                   int              engine);
653  static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
654  static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
655                                     uint8_t          *global,
656                                     uint8_t          print);
657  static void    bxe_int_disable(struct bxe_softc *sc);
658  static int     bxe_release_leader_lock(struct bxe_softc *sc);
659  static void    bxe_pf_disable(struct bxe_softc *sc);
660  static void    bxe_free_fp_buffers(struct bxe_softc *sc);
661  static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
662                                        struct bxe_fastpath *fp,
663                                        uint16_t            rx_bd_prod,
664                                        uint16_t            rx_cq_prod,
665                                        uint16_t            rx_sge_prod);
666  static void    bxe_link_report_locked(struct bxe_softc *sc);
667  static void    bxe_link_report(struct bxe_softc *sc);
668  static void    bxe_link_status_update(struct bxe_softc *sc);
669  static void    bxe_periodic_callout_func(void *xsc);
670  static void    bxe_periodic_start(struct bxe_softc *sc);
671  static void    bxe_periodic_stop(struct bxe_softc *sc);
672  static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
673                                      uint16_t prev_index,
674                                      uint16_t index);
675  static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
676                                       int                 queue);
677  static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
678                                       uint16_t            index);
679  static uint8_t bxe_txeof(struct bxe_softc *sc,
680                           struct bxe_fastpath *fp);
681  static void    bxe_task_fp(struct bxe_fastpath *fp);
682  static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
683                                       struct mbuf      *m,
684                                       uint8_t          contents);
685  static int     bxe_alloc_mem(struct bxe_softc *sc);
686  static void    bxe_free_mem(struct bxe_softc *sc);
687  static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
688  static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
689  static int     bxe_interrupt_attach(struct bxe_softc *sc);
690  static void    bxe_interrupt_detach(struct bxe_softc *sc);
691  static void    bxe_set_rx_mode(struct bxe_softc *sc);
692  static int     bxe_init_locked(struct bxe_softc *sc);
693  static int     bxe_stop_locked(struct bxe_softc *sc);
694  static void    bxe_sp_err_timeout_task(void *arg, int pending);
695  void           bxe_parity_recover(struct bxe_softc *sc);
696  void           bxe_handle_error(struct bxe_softc *sc);
697  static __noinline int bxe_nic_load(struct bxe_softc *sc,
698                                     int              load_mode);
699  static __noinline int bxe_nic_unload(struct bxe_softc *sc,
700                                       uint32_t         unload_mode,
701                                       uint8_t          keep_link);
702  
703  static void bxe_handle_sp_tq(void *context, int pending);
704  static void bxe_handle_fp_tq(void *context, int pending);
705  
706  static int bxe_add_cdev(struct bxe_softc *sc);
707  static void bxe_del_cdev(struct bxe_softc *sc);
708  int bxe_grc_dump(struct bxe_softc *sc);
709  static int bxe_alloc_buf_rings(struct bxe_softc *sc);
710  static void bxe_free_buf_rings(struct bxe_softc *sc);
711  
712  /* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
713  uint32_t
calc_crc32(uint8_t * crc32_packet,uint32_t crc32_length,uint32_t crc32_seed,uint8_t complement)714  calc_crc32(uint8_t  *crc32_packet,
715             uint32_t crc32_length,
716             uint32_t crc32_seed,
717             uint8_t  complement)
718  {
719     uint32_t byte         = 0;
720     uint32_t bit          = 0;
721     uint8_t  msb          = 0;
722     uint32_t temp         = 0;
723     uint32_t shft         = 0;
724     uint8_t  current_byte = 0;
725     uint32_t crc32_result = crc32_seed;
726     const uint32_t CRC32_POLY = 0x1edc6f41;
727  
728     if ((crc32_packet == NULL) ||
729         (crc32_length == 0) ||
730         ((crc32_length % 8) != 0))
731      {
732          return (crc32_result);
733      }
734  
735      for (byte = 0; byte < crc32_length; byte = byte + 1)
736      {
737          current_byte = crc32_packet[byte];
738          for (bit = 0; bit < 8; bit = bit + 1)
739          {
740              /* msb = crc32_result[31]; */
741              msb = (uint8_t)(crc32_result >> 31);
742  
743              crc32_result = crc32_result << 1;
744  
745              /* it (msb != current_byte[bit]) */
746              if (msb != (0x1 & (current_byte >> bit)))
747              {
748                  crc32_result = crc32_result ^ CRC32_POLY;
749                  /* crc32_result[0] = 1 */
750                  crc32_result |= 1;
751              }
752          }
753      }
754  
755      /* Last step is to:
756       * 1. "mirror" every bit
757       * 2. swap the 4 bytes
758       * 3. complement each bit
759       */
760  
761      /* Mirror */
762      temp = crc32_result;
763      shft = sizeof(crc32_result) * 8 - 1;
764  
765      for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
766      {
767          temp <<= 1;
768          temp |= crc32_result & 1;
769          shft-- ;
770      }
771  
772      /* temp[31-bit] = crc32_result[bit] */
773      temp <<= shft;
774  
775      /* Swap */
776      /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
777      {
778          uint32_t t0, t1, t2, t3;
779          t0 = (0x000000ff & (temp >> 24));
780          t1 = (0x0000ff00 & (temp >> 8));
781          t2 = (0x00ff0000 & (temp << 8));
782          t3 = (0xff000000 & (temp << 24));
783          crc32_result = t0 | t1 | t2 | t3;
784      }
785  
786      /* Complement */
787      if (complement)
788      {
789          crc32_result = ~crc32_result;
790      }
791  
792      return (crc32_result);
793  }
794  
795  int
bxe_test_bit(int nr,volatile unsigned long * addr)796  bxe_test_bit(int                    nr,
797               volatile unsigned long *addr)
798  {
799      return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
800  }
801  
802  void
bxe_set_bit(unsigned int nr,volatile unsigned long * addr)803  bxe_set_bit(unsigned int           nr,
804              volatile unsigned long *addr)
805  {
806      atomic_set_acq_long(addr, (1 << nr));
807  }
808  
809  void
bxe_clear_bit(int nr,volatile unsigned long * addr)810  bxe_clear_bit(int                    nr,
811                volatile unsigned long *addr)
812  {
813      atomic_clear_acq_long(addr, (1 << nr));
814  }
815  
816  int
bxe_test_and_set_bit(int nr,volatile unsigned long * addr)817  bxe_test_and_set_bit(int                    nr,
818                         volatile unsigned long *addr)
819  {
820      unsigned long x;
821      nr = (1 << nr);
822      do {
823          x = *addr;
824      } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
825      // if (x & nr) bit_was_set; else bit_was_not_set;
826      return (x & nr);
827  }
828  
829  int
bxe_test_and_clear_bit(int nr,volatile unsigned long * addr)830  bxe_test_and_clear_bit(int                    nr,
831                         volatile unsigned long *addr)
832  {
833      unsigned long x;
834      nr = (1 << nr);
835      do {
836          x = *addr;
837      } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
838      // if (x & nr) bit_was_set; else bit_was_not_set;
839      return (x & nr);
840  }
841  
842  int
bxe_cmpxchg(volatile int * addr,int old,int new)843  bxe_cmpxchg(volatile int *addr,
844              int          old,
845              int          new)
846  {
847      int x;
848      do {
849          x = *addr;
850      } while (atomic_cmpset_acq_int(addr, old, new) == 0);
851      return (x);
852  }
853  
854  /*
855   * Get DMA memory from the OS.
856   *
857   * Validates that the OS has provided DMA buffers in response to a
858   * bus_dmamap_load call and saves the physical address of those buffers.
859   * When the callback is used the OS will return 0 for the mapping function
860   * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
861   * failures back to the caller.
862   *
863   * Returns:
864   *   Nothing.
865   */
866  static void
bxe_dma_map_addr(void * arg,bus_dma_segment_t * segs,int nseg,int error)867  bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
868  {
869      struct bxe_dma *dma = arg;
870  
871      if (error) {
872          dma->paddr = 0;
873          dma->nseg  = 0;
874          BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
875      } else {
876          dma->paddr = segs->ds_addr;
877          dma->nseg  = nseg;
878      }
879  }
880  
881  /*
882   * Allocate a block of memory and map it for DMA. No partial completions
883   * allowed and release any resources acquired if we can't acquire all
884   * resources.
885   *
886   * Returns:
887   *   0 = Success, !0 = Failure
888   */
889  int
bxe_dma_alloc(struct bxe_softc * sc,bus_size_t size,struct bxe_dma * dma,const char * msg)890  bxe_dma_alloc(struct bxe_softc *sc,
891                bus_size_t       size,
892                struct bxe_dma   *dma,
893                const char       *msg)
894  {
895      int rc;
896  
897      if (dma->size > 0) {
898          BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
899                (unsigned long)dma->size);
900          return (1);
901      }
902  
903      memset(dma, 0, sizeof(*dma)); /* sanity */
904      dma->sc   = sc;
905      dma->size = size;
906      snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
907  
908      rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
909                              BCM_PAGE_SIZE,      /* alignment */
910                              0,                  /* boundary limit */
911                              BUS_SPACE_MAXADDR,  /* restricted low */
912                              BUS_SPACE_MAXADDR,  /* restricted hi */
913                              NULL,               /* addr filter() */
914                              NULL,               /* addr filter() arg */
915                              size,               /* max map size */
916                              1,                  /* num discontinuous */
917                              size,               /* max seg size */
918                              BUS_DMA_ALLOCNOW,   /* flags */
919                              NULL,               /* lock() */
920                              NULL,               /* lock() arg */
921                              &dma->tag);         /* returned dma tag */
922      if (rc != 0) {
923          BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
924          memset(dma, 0, sizeof(*dma));
925          return (1);
926      }
927  
928      rc = bus_dmamem_alloc(dma->tag,
929                            (void **)&dma->vaddr,
930                            (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
931                            &dma->map);
932      if (rc != 0) {
933          BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
934          bus_dma_tag_destroy(dma->tag);
935          memset(dma, 0, sizeof(*dma));
936          return (1);
937      }
938  
939      rc = bus_dmamap_load(dma->tag,
940                           dma->map,
941                           dma->vaddr,
942                           size,
943                           bxe_dma_map_addr, /* BLOGD in here */
944                           dma,
945                           BUS_DMA_NOWAIT);
946      if (rc != 0) {
947          BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
948          bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
949          bus_dma_tag_destroy(dma->tag);
950          memset(dma, 0, sizeof(*dma));
951          return (1);
952      }
953  
954      return (0);
955  }
956  
957  void
bxe_dma_free(struct bxe_softc * sc,struct bxe_dma * dma)958  bxe_dma_free(struct bxe_softc *sc,
959               struct bxe_dma   *dma)
960  {
961      if (dma->size > 0) {
962          DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
963  
964          bus_dmamap_sync(dma->tag, dma->map,
965                          (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
966          bus_dmamap_unload(dma->tag, dma->map);
967          bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
968          bus_dma_tag_destroy(dma->tag);
969      }
970  
971      memset(dma, 0, sizeof(*dma));
972  }
973  
974  /*
975   * These indirect read and write routines are only during init.
976   * The locking is handled by the MCP.
977   */
978  
979  void
bxe_reg_wr_ind(struct bxe_softc * sc,uint32_t addr,uint32_t val)980  bxe_reg_wr_ind(struct bxe_softc *sc,
981                 uint32_t         addr,
982                 uint32_t         val)
983  {
984      pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
985      pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
986      pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
987  }
988  
989  uint32_t
bxe_reg_rd_ind(struct bxe_softc * sc,uint32_t addr)990  bxe_reg_rd_ind(struct bxe_softc *sc,
991                 uint32_t         addr)
992  {
993      uint32_t val;
994  
995      pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
996      val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
997      pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
998  
999      return (val);
1000  }
1001  
1002  static int
bxe_acquire_hw_lock(struct bxe_softc * sc,uint32_t resource)1003  bxe_acquire_hw_lock(struct bxe_softc *sc,
1004                      uint32_t         resource)
1005  {
1006      uint32_t lock_status;
1007      uint32_t resource_bit = (1 << resource);
1008      int func = SC_FUNC(sc);
1009      uint32_t hw_lock_control_reg;
1010      int cnt;
1011  
1012      /* validate the resource is within range */
1013      if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1014          BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1015              " resource_bit 0x%x\n", resource, resource_bit);
1016          return (-1);
1017      }
1018  
1019      if (func <= 5) {
1020          hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1021      } else {
1022          hw_lock_control_reg =
1023                  (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1024      }
1025  
1026      /* validate the resource is not already taken */
1027      lock_status = REG_RD(sc, hw_lock_control_reg);
1028      if (lock_status & resource_bit) {
1029          BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1030                resource, lock_status, resource_bit);
1031          return (-1);
1032      }
1033  
1034      /* try every 5ms for 5 seconds */
1035      for (cnt = 0; cnt < 1000; cnt++) {
1036          REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1037          lock_status = REG_RD(sc, hw_lock_control_reg);
1038          if (lock_status & resource_bit) {
1039              return (0);
1040          }
1041          DELAY(5000);
1042      }
1043  
1044      BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1045          resource, resource_bit);
1046      return (-1);
1047  }
1048  
1049  static int
bxe_release_hw_lock(struct bxe_softc * sc,uint32_t resource)1050  bxe_release_hw_lock(struct bxe_softc *sc,
1051                      uint32_t         resource)
1052  {
1053      uint32_t lock_status;
1054      uint32_t resource_bit = (1 << resource);
1055      int func = SC_FUNC(sc);
1056      uint32_t hw_lock_control_reg;
1057  
1058      /* validate the resource is within range */
1059      if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1060          BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1061              " resource_bit 0x%x\n", resource, resource_bit);
1062          return (-1);
1063      }
1064  
1065      if (func <= 5) {
1066          hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1067      } else {
1068          hw_lock_control_reg =
1069                  (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1070      }
1071  
1072      /* validate the resource is currently taken */
1073      lock_status = REG_RD(sc, hw_lock_control_reg);
1074      if (!(lock_status & resource_bit)) {
1075          BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1076                resource, lock_status, resource_bit);
1077          return (-1);
1078      }
1079  
1080      REG_WR(sc, hw_lock_control_reg, resource_bit);
1081      return (0);
1082  }
bxe_acquire_phy_lock(struct bxe_softc * sc)1083  static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1084  {
1085  	BXE_PHY_LOCK(sc);
1086  	bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1087  }
1088  
bxe_release_phy_lock(struct bxe_softc * sc)1089  static void bxe_release_phy_lock(struct bxe_softc *sc)
1090  {
1091  	bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1092  	BXE_PHY_UNLOCK(sc);
1093  }
1094  /*
1095   * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1096   * had we done things the other way around, if two pfs from the same port
1097   * would attempt to access nvram at the same time, we could run into a
1098   * scenario such as:
1099   * pf A takes the port lock.
1100   * pf B succeeds in taking the same lock since they are from the same port.
1101   * pf A takes the per pf misc lock. Performs eeprom access.
1102   * pf A finishes. Unlocks the per pf misc lock.
1103   * Pf B takes the lock and proceeds to perform it's own access.
1104   * pf A unlocks the per port lock, while pf B is still working (!).
1105   * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
1106   * access corrupted by pf B).*
1107   */
1108  static int
bxe_acquire_nvram_lock(struct bxe_softc * sc)1109  bxe_acquire_nvram_lock(struct bxe_softc *sc)
1110  {
1111      int port = SC_PORT(sc);
1112      int count, i;
1113      uint32_t val = 0;
1114  
1115      /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1116      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1117  
1118      /* adjust timeout for emulation/FPGA */
1119      count = NVRAM_TIMEOUT_COUNT;
1120      if (CHIP_REV_IS_SLOW(sc)) {
1121          count *= 100;
1122      }
1123  
1124      /* request access to nvram interface */
1125      REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1126             (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1127  
1128      for (i = 0; i < count*10; i++) {
1129          val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1130          if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1131              break;
1132          }
1133  
1134          DELAY(5);
1135      }
1136  
1137      if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1138          BLOGE(sc, "Cannot get access to nvram interface "
1139              "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1140              port, val);
1141          return (-1);
1142      }
1143  
1144      return (0);
1145  }
1146  
1147  static int
bxe_release_nvram_lock(struct bxe_softc * sc)1148  bxe_release_nvram_lock(struct bxe_softc *sc)
1149  {
1150      int port = SC_PORT(sc);
1151      int count, i;
1152      uint32_t val = 0;
1153  
1154      /* adjust timeout for emulation/FPGA */
1155      count = NVRAM_TIMEOUT_COUNT;
1156      if (CHIP_REV_IS_SLOW(sc)) {
1157          count *= 100;
1158      }
1159  
1160      /* relinquish nvram interface */
1161      REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1162             (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1163  
1164      for (i = 0; i < count*10; i++) {
1165          val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1166          if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1167              break;
1168          }
1169  
1170          DELAY(5);
1171      }
1172  
1173      if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1174          BLOGE(sc, "Cannot free access to nvram interface "
1175              "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1176              port, val);
1177          return (-1);
1178      }
1179  
1180      /* release HW lock: protect against other PFs in PF Direct Assignment */
1181      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1182  
1183      return (0);
1184  }
1185  
1186  static void
bxe_enable_nvram_access(struct bxe_softc * sc)1187  bxe_enable_nvram_access(struct bxe_softc *sc)
1188  {
1189      uint32_t val;
1190  
1191      val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1192  
1193      /* enable both bits, even on read */
1194      REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1195             (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1196  }
1197  
1198  static void
bxe_disable_nvram_access(struct bxe_softc * sc)1199  bxe_disable_nvram_access(struct bxe_softc *sc)
1200  {
1201      uint32_t val;
1202  
1203      val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1204  
1205      /* disable both bits, even after read */
1206      REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1207             (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1208                      MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1209  }
1210  
1211  static int
bxe_nvram_read_dword(struct bxe_softc * sc,uint32_t offset,uint32_t * ret_val,uint32_t cmd_flags)1212  bxe_nvram_read_dword(struct bxe_softc *sc,
1213                       uint32_t         offset,
1214                       uint32_t         *ret_val,
1215                       uint32_t         cmd_flags)
1216  {
1217      int count, i, rc;
1218      uint32_t val;
1219  
1220      /* build the command word */
1221      cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1222  
1223      /* need to clear DONE bit separately */
1224      REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1225  
1226      /* address of the NVRAM to read from */
1227      REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1228             (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1229  
1230      /* issue a read command */
1231      REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1232  
1233      /* adjust timeout for emulation/FPGA */
1234      count = NVRAM_TIMEOUT_COUNT;
1235      if (CHIP_REV_IS_SLOW(sc)) {
1236          count *= 100;
1237      }
1238  
1239      /* wait for completion */
1240      *ret_val = 0;
1241      rc = -1;
1242      for (i = 0; i < count; i++) {
1243          DELAY(5);
1244          val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1245  
1246          if (val & MCPR_NVM_COMMAND_DONE) {
1247              val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1248              /* we read nvram data in cpu order
1249               * but ethtool sees it as an array of bytes
1250               * converting to big-endian will do the work
1251               */
1252              *ret_val = htobe32(val);
1253              rc = 0;
1254              break;
1255          }
1256      }
1257  
1258      if (rc == -1) {
1259          BLOGE(sc, "nvram read timeout expired "
1260              "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1261              offset, cmd_flags, val);
1262      }
1263  
1264      return (rc);
1265  }
1266  
1267  static int
bxe_nvram_read(struct bxe_softc * sc,uint32_t offset,uint8_t * ret_buf,int buf_size)1268  bxe_nvram_read(struct bxe_softc *sc,
1269                 uint32_t         offset,
1270                 uint8_t          *ret_buf,
1271                 int              buf_size)
1272  {
1273      uint32_t cmd_flags;
1274      uint32_t val;
1275      int rc;
1276  
1277      if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1278          BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1279                offset, buf_size);
1280          return (-1);
1281      }
1282  
1283      if ((offset + buf_size) > sc->devinfo.flash_size) {
1284          BLOGE(sc, "Invalid parameter, "
1285                    "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1286                offset, buf_size, sc->devinfo.flash_size);
1287          return (-1);
1288      }
1289  
1290      /* request access to nvram interface */
1291      rc = bxe_acquire_nvram_lock(sc);
1292      if (rc) {
1293          return (rc);
1294      }
1295  
1296      /* enable access to nvram interface */
1297      bxe_enable_nvram_access(sc);
1298  
1299      /* read the first word(s) */
1300      cmd_flags = MCPR_NVM_COMMAND_FIRST;
1301      while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1302          rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1303          memcpy(ret_buf, &val, 4);
1304  
1305          /* advance to the next dword */
1306          offset += sizeof(uint32_t);
1307          ret_buf += sizeof(uint32_t);
1308          buf_size -= sizeof(uint32_t);
1309          cmd_flags = 0;
1310      }
1311  
1312      if (rc == 0) {
1313          cmd_flags |= MCPR_NVM_COMMAND_LAST;
1314          rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1315          memcpy(ret_buf, &val, 4);
1316      }
1317  
1318      /* disable access to nvram interface */
1319      bxe_disable_nvram_access(sc);
1320      bxe_release_nvram_lock(sc);
1321  
1322      return (rc);
1323  }
1324  
1325  static int
bxe_nvram_write_dword(struct bxe_softc * sc,uint32_t offset,uint32_t val,uint32_t cmd_flags)1326  bxe_nvram_write_dword(struct bxe_softc *sc,
1327                        uint32_t         offset,
1328                        uint32_t         val,
1329                        uint32_t         cmd_flags)
1330  {
1331      int count, i, rc;
1332  
1333      /* build the command word */
1334      cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1335  
1336      /* need to clear DONE bit separately */
1337      REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1338  
1339      /* write the data */
1340      REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1341  
1342      /* address of the NVRAM to write to */
1343      REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1344             (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1345  
1346      /* issue the write command */
1347      REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1348  
1349      /* adjust timeout for emulation/FPGA */
1350      count = NVRAM_TIMEOUT_COUNT;
1351      if (CHIP_REV_IS_SLOW(sc)) {
1352          count *= 100;
1353      }
1354  
1355      /* wait for completion */
1356      rc = -1;
1357      for (i = 0; i < count; i++) {
1358          DELAY(5);
1359          val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1360          if (val & MCPR_NVM_COMMAND_DONE) {
1361              rc = 0;
1362              break;
1363          }
1364      }
1365  
1366      if (rc == -1) {
1367          BLOGE(sc, "nvram write timeout expired "
1368              "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1369              offset, cmd_flags, val);
1370      }
1371  
1372      return (rc);
1373  }
1374  
1375  #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
1376  
1377  static int
bxe_nvram_write1(struct bxe_softc * sc,uint32_t offset,uint8_t * data_buf,int buf_size)1378  bxe_nvram_write1(struct bxe_softc *sc,
1379                   uint32_t         offset,
1380                   uint8_t          *data_buf,
1381                   int              buf_size)
1382  {
1383      uint32_t cmd_flags;
1384      uint32_t align_offset;
1385      uint32_t val;
1386      int rc;
1387  
1388      if ((offset + buf_size) > sc->devinfo.flash_size) {
1389          BLOGE(sc, "Invalid parameter, "
1390                    "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1391                offset, buf_size, sc->devinfo.flash_size);
1392          return (-1);
1393      }
1394  
1395      /* request access to nvram interface */
1396      rc = bxe_acquire_nvram_lock(sc);
1397      if (rc) {
1398          return (rc);
1399      }
1400  
1401      /* enable access to nvram interface */
1402      bxe_enable_nvram_access(sc);
1403  
1404      cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1405      align_offset = (offset & ~0x03);
1406      rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1407  
1408      if (rc == 0) {
1409          val &= ~(0xff << BYTE_OFFSET(offset));
1410          val |= (*data_buf << BYTE_OFFSET(offset));
1411  
1412          /* nvram data is returned as an array of bytes
1413           * convert it back to cpu order
1414           */
1415          val = be32toh(val);
1416  
1417          rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1418      }
1419  
1420      /* disable access to nvram interface */
1421      bxe_disable_nvram_access(sc);
1422      bxe_release_nvram_lock(sc);
1423  
1424      return (rc);
1425  }
1426  
1427  static int
bxe_nvram_write(struct bxe_softc * sc,uint32_t offset,uint8_t * data_buf,int buf_size)1428  bxe_nvram_write(struct bxe_softc *sc,
1429                  uint32_t         offset,
1430                  uint8_t          *data_buf,
1431                  int              buf_size)
1432  {
1433      uint32_t cmd_flags;
1434      uint32_t val;
1435      uint32_t written_so_far;
1436      int rc;
1437  
1438      if (buf_size == 1) {
1439          return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1440      }
1441  
1442      if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1443          BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1444                offset, buf_size);
1445          return (-1);
1446      }
1447  
1448      if (buf_size == 0) {
1449          return (0); /* nothing to do */
1450      }
1451  
1452      if ((offset + buf_size) > sc->devinfo.flash_size) {
1453          BLOGE(sc, "Invalid parameter, "
1454                    "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1455                offset, buf_size, sc->devinfo.flash_size);
1456          return (-1);
1457      }
1458  
1459      /* request access to nvram interface */
1460      rc = bxe_acquire_nvram_lock(sc);
1461      if (rc) {
1462          return (rc);
1463      }
1464  
1465      /* enable access to nvram interface */
1466      bxe_enable_nvram_access(sc);
1467  
1468      written_so_far = 0;
1469      cmd_flags = MCPR_NVM_COMMAND_FIRST;
1470      while ((written_so_far < buf_size) && (rc == 0)) {
1471          if (written_so_far == (buf_size - sizeof(uint32_t))) {
1472              cmd_flags |= MCPR_NVM_COMMAND_LAST;
1473          } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1474              cmd_flags |= MCPR_NVM_COMMAND_LAST;
1475          } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1476              cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1477          }
1478  
1479          memcpy(&val, data_buf, 4);
1480  
1481          rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1482  
1483          /* advance to the next dword */
1484          offset += sizeof(uint32_t);
1485          data_buf += sizeof(uint32_t);
1486          written_so_far += sizeof(uint32_t);
1487          cmd_flags = 0;
1488      }
1489  
1490      /* disable access to nvram interface */
1491      bxe_disable_nvram_access(sc);
1492      bxe_release_nvram_lock(sc);
1493  
1494      return (rc);
1495  }
1496  
1497  /* copy command into DMAE command memory and set DMAE command Go */
1498  void
bxe_post_dmae(struct bxe_softc * sc,struct dmae_cmd * dmae,int idx)1499  bxe_post_dmae(struct bxe_softc    *sc,
1500                struct dmae_cmd *dmae,
1501                int                 idx)
1502  {
1503      uint32_t cmd_offset;
1504      int i;
1505  
1506      cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1507      for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1508          REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1509      }
1510  
1511      REG_WR(sc, dmae_reg_go_c[idx], 1);
1512  }
1513  
1514  uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,uint8_t comp_type)1515  bxe_dmae_opcode_add_comp(uint32_t opcode,
1516                           uint8_t  comp_type)
1517  {
1518      return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1519                        DMAE_CMD_C_TYPE_ENABLE));
1520  }
1521  
1522  uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)1523  bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1524  {
1525      return (opcode & ~DMAE_CMD_SRC_RESET);
1526  }
1527  
1528  uint32_t
bxe_dmae_opcode(struct bxe_softc * sc,uint8_t src_type,uint8_t dst_type,uint8_t with_comp,uint8_t comp_type)1529  bxe_dmae_opcode(struct bxe_softc *sc,
1530                  uint8_t          src_type,
1531                  uint8_t          dst_type,
1532                  uint8_t          with_comp,
1533                  uint8_t          comp_type)
1534  {
1535      uint32_t opcode = 0;
1536  
1537      opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1538                 (dst_type << DMAE_CMD_DST_SHIFT));
1539  
1540      opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1541  
1542      opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1543  
1544      opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1545                 (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1546  
1547      opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1548  
1549  #ifdef __BIG_ENDIAN
1550      opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1551  #else
1552      opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1553  #endif
1554  
1555      if (with_comp) {
1556          opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1557      }
1558  
1559      return (opcode);
1560  }
1561  
1562  static void
bxe_prep_dmae_with_comp(struct bxe_softc * sc,struct dmae_cmd * dmae,uint8_t src_type,uint8_t dst_type)1563  bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1564                          struct dmae_cmd *dmae,
1565                          uint8_t             src_type,
1566                          uint8_t             dst_type)
1567  {
1568      memset(dmae, 0, sizeof(struct dmae_cmd));
1569  
1570      /* set the opcode */
1571      dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1572                                     TRUE, DMAE_COMP_PCI);
1573  
1574      /* fill in the completion parameters */
1575      dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1576      dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1577      dmae->comp_val     = DMAE_COMP_VAL;
1578  }
1579  
1580  /* issue a DMAE command over the init channel and wait for completion */
1581  static int
bxe_issue_dmae_with_comp(struct bxe_softc * sc,struct dmae_cmd * dmae)1582  bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1583                           struct dmae_cmd *dmae)
1584  {
1585      uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1586      int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1587  
1588      BXE_DMAE_LOCK(sc);
1589  
1590      /* reset completion */
1591      *wb_comp = 0;
1592  
1593      /* post the command on the channel used for initializations */
1594      bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1595  
1596      /* wait for completion */
1597      DELAY(5);
1598  
1599      while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1600          if (!timeout ||
1601              (sc->recovery_state != BXE_RECOVERY_DONE &&
1602               sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1603              BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1604                  *wb_comp, sc->recovery_state);
1605              BXE_DMAE_UNLOCK(sc);
1606              return (DMAE_TIMEOUT);
1607          }
1608  
1609          timeout--;
1610          DELAY(50);
1611      }
1612  
1613      if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1614          BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1615                  *wb_comp, sc->recovery_state);
1616          BXE_DMAE_UNLOCK(sc);
1617          return (DMAE_PCI_ERROR);
1618      }
1619  
1620      BXE_DMAE_UNLOCK(sc);
1621      return (0);
1622  }
1623  
1624  void
bxe_read_dmae(struct bxe_softc * sc,uint32_t src_addr,uint32_t len32)1625  bxe_read_dmae(struct bxe_softc *sc,
1626                uint32_t         src_addr,
1627                uint32_t         len32)
1628  {
1629      struct dmae_cmd dmae;
1630      uint32_t *data;
1631      int i, rc;
1632  
1633      DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1634  
1635      if (!sc->dmae_ready) {
1636          data = BXE_SP(sc, wb_data[0]);
1637  
1638          for (i = 0; i < len32; i++) {
1639              data[i] = (CHIP_IS_E1(sc)) ?
1640                            bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1641                            REG_RD(sc, (src_addr + (i * 4)));
1642          }
1643  
1644          return;
1645      }
1646  
1647      /* set opcode and fixed command fields */
1648      bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1649  
1650      /* fill in addresses and len */
1651      dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1652      dmae.src_addr_hi = 0;
1653      dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1654      dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1655      dmae.len         = len32;
1656  
1657      /* issue the command and wait for completion */
1658      if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1659          bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1660      }
1661  }
1662  
1663  void
bxe_write_dmae(struct bxe_softc * sc,bus_addr_t dma_addr,uint32_t dst_addr,uint32_t len32)1664  bxe_write_dmae(struct bxe_softc *sc,
1665                 bus_addr_t       dma_addr,
1666                 uint32_t         dst_addr,
1667                 uint32_t         len32)
1668  {
1669      struct dmae_cmd dmae;
1670      int rc;
1671  
1672      if (!sc->dmae_ready) {
1673          DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1674  
1675          if (CHIP_IS_E1(sc)) {
1676              ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1677          } else {
1678              ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1679          }
1680  
1681          return;
1682      }
1683  
1684      /* set opcode and fixed command fields */
1685      bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1686  
1687      /* fill in addresses and len */
1688      dmae.src_addr_lo = U64_LO(dma_addr);
1689      dmae.src_addr_hi = U64_HI(dma_addr);
1690      dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1691      dmae.dst_addr_hi = 0;
1692      dmae.len         = len32;
1693  
1694      /* issue the command and wait for completion */
1695      if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1696          bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1697      }
1698  }
1699  
1700  void
bxe_write_dmae_phys_len(struct bxe_softc * sc,bus_addr_t phys_addr,uint32_t addr,uint32_t len)1701  bxe_write_dmae_phys_len(struct bxe_softc *sc,
1702                          bus_addr_t       phys_addr,
1703                          uint32_t         addr,
1704                          uint32_t         len)
1705  {
1706      int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1707      int offset = 0;
1708  
1709      while (len > dmae_wr_max) {
1710          bxe_write_dmae(sc,
1711                         (phys_addr + offset), /* src DMA address */
1712                         (addr + offset),      /* dst GRC address */
1713                         dmae_wr_max);
1714          offset += (dmae_wr_max * 4);
1715          len -= dmae_wr_max;
1716      }
1717  
1718      bxe_write_dmae(sc,
1719                     (phys_addr + offset), /* src DMA address */
1720                     (addr + offset),      /* dst GRC address */
1721                     len);
1722  }
1723  
1724  void
bxe_set_ctx_validation(struct bxe_softc * sc,struct eth_context * cxt,uint32_t cid)1725  bxe_set_ctx_validation(struct bxe_softc   *sc,
1726                         struct eth_context *cxt,
1727                         uint32_t           cid)
1728  {
1729      /* ustorm cxt validation */
1730      cxt->ustorm_ag_context.cdu_usage =
1731          CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1732              CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1733      /* xcontext validation */
1734      cxt->xstorm_ag_context.cdu_reserved =
1735          CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1736              CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1737  }
1738  
1739  static void
bxe_storm_memset_hc_timeout(struct bxe_softc * sc,uint8_t port,uint8_t fw_sb_id,uint8_t sb_index,uint8_t ticks)1740  bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1741                              uint8_t          port,
1742                              uint8_t          fw_sb_id,
1743                              uint8_t          sb_index,
1744                              uint8_t          ticks)
1745  {
1746      uint32_t addr =
1747          (BAR_CSTRORM_INTMEM +
1748           CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1749  
1750      REG_WR8(sc, addr, ticks);
1751  
1752      BLOGD(sc, DBG_LOAD,
1753            "port %d fw_sb_id %d sb_index %d ticks %d\n",
1754            port, fw_sb_id, sb_index, ticks);
1755  }
1756  
1757  static void
bxe_storm_memset_hc_disable(struct bxe_softc * sc,uint8_t port,uint16_t fw_sb_id,uint8_t sb_index,uint8_t disable)1758  bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1759                              uint8_t          port,
1760                              uint16_t         fw_sb_id,
1761                              uint8_t          sb_index,
1762                              uint8_t          disable)
1763  {
1764      uint32_t enable_flag =
1765          (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1766      uint32_t addr =
1767          (BAR_CSTRORM_INTMEM +
1768           CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1769      uint8_t flags;
1770  
1771      /* clear and set */
1772      flags = REG_RD8(sc, addr);
1773      flags &= ~HC_INDEX_DATA_HC_ENABLED;
1774      flags |= enable_flag;
1775      REG_WR8(sc, addr, flags);
1776  
1777      BLOGD(sc, DBG_LOAD,
1778            "port %d fw_sb_id %d sb_index %d disable %d\n",
1779            port, fw_sb_id, sb_index, disable);
1780  }
1781  
1782  void
bxe_update_coalesce_sb_index(struct bxe_softc * sc,uint8_t fw_sb_id,uint8_t sb_index,uint8_t disable,uint16_t usec)1783  bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1784                               uint8_t          fw_sb_id,
1785                               uint8_t          sb_index,
1786                               uint8_t          disable,
1787                               uint16_t         usec)
1788  {
1789      int port = SC_PORT(sc);
1790      uint8_t ticks = (usec / 4); /* XXX ??? */
1791  
1792      bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1793  
1794      disable = (disable) ? 1 : ((usec) ? 0 : 1);
1795      bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1796  }
1797  
1798  void
elink_cb_udelay(struct bxe_softc * sc,uint32_t usecs)1799  elink_cb_udelay(struct bxe_softc *sc,
1800                  uint32_t         usecs)
1801  {
1802      DELAY(usecs);
1803  }
1804  
1805  uint32_t
elink_cb_reg_read(struct bxe_softc * sc,uint32_t reg_addr)1806  elink_cb_reg_read(struct bxe_softc *sc,
1807                    uint32_t         reg_addr)
1808  {
1809      return (REG_RD(sc, reg_addr));
1810  }
1811  
1812  void
elink_cb_reg_write(struct bxe_softc * sc,uint32_t reg_addr,uint32_t val)1813  elink_cb_reg_write(struct bxe_softc *sc,
1814                     uint32_t         reg_addr,
1815                     uint32_t         val)
1816  {
1817      REG_WR(sc, reg_addr, val);
1818  }
1819  
1820  void
elink_cb_reg_wb_write(struct bxe_softc * sc,uint32_t offset,uint32_t * wb_write,uint16_t len)1821  elink_cb_reg_wb_write(struct bxe_softc *sc,
1822                        uint32_t         offset,
1823                        uint32_t         *wb_write,
1824                        uint16_t         len)
1825  {
1826      REG_WR_DMAE(sc, offset, wb_write, len);
1827  }
1828  
1829  void
elink_cb_reg_wb_read(struct bxe_softc * sc,uint32_t offset,uint32_t * wb_write,uint16_t len)1830  elink_cb_reg_wb_read(struct bxe_softc *sc,
1831                       uint32_t         offset,
1832                       uint32_t         *wb_write,
1833                       uint16_t         len)
1834  {
1835      REG_RD_DMAE(sc, offset, wb_write, len);
1836  }
1837  
1838  uint8_t
elink_cb_path_id(struct bxe_softc * sc)1839  elink_cb_path_id(struct bxe_softc *sc)
1840  {
1841      return (SC_PATH(sc));
1842  }
1843  
1844  void
elink_cb_event_log(struct bxe_softc * sc,const elink_log_id_t elink_log_id,...)1845  elink_cb_event_log(struct bxe_softc     *sc,
1846                     const elink_log_id_t elink_log_id,
1847                     ...)
1848  {
1849      /* XXX */
1850      BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1851  }
1852  
1853  static int
bxe_set_spio(struct bxe_softc * sc,int spio,uint32_t mode)1854  bxe_set_spio(struct bxe_softc *sc,
1855               int              spio,
1856               uint32_t         mode)
1857  {
1858      uint32_t spio_reg;
1859  
1860      /* Only 2 SPIOs are configurable */
1861      if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1862          BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1863          return (-1);
1864      }
1865  
1866      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1867  
1868      /* read SPIO and mask except the float bits */
1869      spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1870  
1871      switch (mode) {
1872      case MISC_SPIO_OUTPUT_LOW:
1873          BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1874          /* clear FLOAT and set CLR */
1875          spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1876          spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1877          break;
1878  
1879      case MISC_SPIO_OUTPUT_HIGH:
1880          BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1881          /* clear FLOAT and set SET */
1882          spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1883          spio_reg |=  (spio << MISC_SPIO_SET_POS);
1884          break;
1885  
1886      case MISC_SPIO_INPUT_HI_Z:
1887          BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1888          /* set FLOAT */
1889          spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1890          break;
1891  
1892      default:
1893          break;
1894      }
1895  
1896      REG_WR(sc, MISC_REG_SPIO, spio_reg);
1897      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1898  
1899      return (0);
1900  }
1901  
1902  static int
bxe_gpio_read(struct bxe_softc * sc,int gpio_num,uint8_t port)1903  bxe_gpio_read(struct bxe_softc *sc,
1904                int              gpio_num,
1905                uint8_t          port)
1906  {
1907      /* The GPIO should be swapped if swap register is set and active */
1908      int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1909                        REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1910      int gpio_shift = (gpio_num +
1911                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1912      uint32_t gpio_mask = (1 << gpio_shift);
1913      uint32_t gpio_reg;
1914  
1915      if (gpio_num > MISC_REGISTERS_GPIO_3) {
1916          BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1917              " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1918              gpio_mask);
1919          return (-1);
1920      }
1921  
1922      /* read GPIO value */
1923      gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1924  
1925      /* get the requested pin value */
1926      return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1927  }
1928  
1929  static int
bxe_gpio_write(struct bxe_softc * sc,int gpio_num,uint32_t mode,uint8_t port)1930  bxe_gpio_write(struct bxe_softc *sc,
1931                 int              gpio_num,
1932                 uint32_t         mode,
1933                 uint8_t          port)
1934  {
1935      /* The GPIO should be swapped if swap register is set and active */
1936      int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1937                        REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1938      int gpio_shift = (gpio_num +
1939                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1940      uint32_t gpio_mask = (1 << gpio_shift);
1941      uint32_t gpio_reg;
1942  
1943      if (gpio_num > MISC_REGISTERS_GPIO_3) {
1944          BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1945              " gpio_shift %d gpio_mask 0x%x\n",
1946              gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1947          return (-1);
1948      }
1949  
1950      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1951  
1952      /* read GPIO and mask except the float bits */
1953      gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1954  
1955      switch (mode) {
1956      case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1957          BLOGD(sc, DBG_PHY,
1958                "Set GPIO %d (shift %d) -> output low\n",
1959                gpio_num, gpio_shift);
1960          /* clear FLOAT and set CLR */
1961          gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1962          gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1963          break;
1964  
1965      case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1966          BLOGD(sc, DBG_PHY,
1967                "Set GPIO %d (shift %d) -> output high\n",
1968                gpio_num, gpio_shift);
1969          /* clear FLOAT and set SET */
1970          gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1971          gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1972          break;
1973  
1974      case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1975          BLOGD(sc, DBG_PHY,
1976                "Set GPIO %d (shift %d) -> input\n",
1977                gpio_num, gpio_shift);
1978          /* set FLOAT */
1979          gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1980          break;
1981  
1982      default:
1983          break;
1984      }
1985  
1986      REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1987      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1988  
1989      return (0);
1990  }
1991  
1992  static int
bxe_gpio_mult_write(struct bxe_softc * sc,uint8_t pins,uint32_t mode)1993  bxe_gpio_mult_write(struct bxe_softc *sc,
1994                      uint8_t          pins,
1995                      uint32_t         mode)
1996  {
1997      uint32_t gpio_reg;
1998  
1999      /* any port swapping should be handled by caller */
2000  
2001      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2002  
2003      /* read GPIO and mask except the float bits */
2004      gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2005      gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2006      gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2007      gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2008  
2009      switch (mode) {
2010      case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2011          BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2012          /* set CLR */
2013          gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2014          break;
2015  
2016      case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2017          BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2018          /* set SET */
2019          gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2020          break;
2021  
2022      case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2023          BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2024          /* set FLOAT */
2025          gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2026          break;
2027  
2028      default:
2029          BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2030              " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2031          bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2032          return (-1);
2033      }
2034  
2035      REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2036      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2037  
2038      return (0);
2039  }
2040  
2041  static int
bxe_gpio_int_write(struct bxe_softc * sc,int gpio_num,uint32_t mode,uint8_t port)2042  bxe_gpio_int_write(struct bxe_softc *sc,
2043                     int              gpio_num,
2044                     uint32_t         mode,
2045                     uint8_t          port)
2046  {
2047      /* The GPIO should be swapped if swap register is set and active */
2048      int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2049                        REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2050      int gpio_shift = (gpio_num +
2051                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2052      uint32_t gpio_mask = (1 << gpio_shift);
2053      uint32_t gpio_reg;
2054  
2055      if (gpio_num > MISC_REGISTERS_GPIO_3) {
2056          BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2057              " gpio_shift %d gpio_mask 0x%x\n",
2058              gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2059          return (-1);
2060      }
2061  
2062      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2063  
2064      /* read GPIO int */
2065      gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2066  
2067      switch (mode) {
2068      case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2069          BLOGD(sc, DBG_PHY,
2070                "Clear GPIO INT %d (shift %d) -> output low\n",
2071                gpio_num, gpio_shift);
2072          /* clear SET and set CLR */
2073          gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2074          gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2075          break;
2076  
2077      case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2078          BLOGD(sc, DBG_PHY,
2079                "Set GPIO INT %d (shift %d) -> output high\n",
2080                gpio_num, gpio_shift);
2081          /* clear CLR and set SET */
2082          gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2083          gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2084          break;
2085  
2086      default:
2087          break;
2088      }
2089  
2090      REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2091      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2092  
2093      return (0);
2094  }
2095  
2096  uint32_t
elink_cb_gpio_read(struct bxe_softc * sc,uint16_t gpio_num,uint8_t port)2097  elink_cb_gpio_read(struct bxe_softc *sc,
2098                     uint16_t         gpio_num,
2099                     uint8_t          port)
2100  {
2101      return (bxe_gpio_read(sc, gpio_num, port));
2102  }
2103  
2104  uint8_t
elink_cb_gpio_write(struct bxe_softc * sc,uint16_t gpio_num,uint8_t mode,uint8_t port)2105  elink_cb_gpio_write(struct bxe_softc *sc,
2106                      uint16_t         gpio_num,
2107                      uint8_t          mode, /* 0=low 1=high */
2108                      uint8_t          port)
2109  {
2110      return (bxe_gpio_write(sc, gpio_num, mode, port));
2111  }
2112  
2113  uint8_t
elink_cb_gpio_mult_write(struct bxe_softc * sc,uint8_t pins,uint8_t mode)2114  elink_cb_gpio_mult_write(struct bxe_softc *sc,
2115                           uint8_t          pins,
2116                           uint8_t          mode) /* 0=low 1=high */
2117  {
2118      return (bxe_gpio_mult_write(sc, pins, mode));
2119  }
2120  
2121  uint8_t
elink_cb_gpio_int_write(struct bxe_softc * sc,uint16_t gpio_num,uint8_t mode,uint8_t port)2122  elink_cb_gpio_int_write(struct bxe_softc *sc,
2123                          uint16_t         gpio_num,
2124                          uint8_t          mode, /* 0=low 1=high */
2125                          uint8_t          port)
2126  {
2127      return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2128  }
2129  
2130  void
elink_cb_notify_link_changed(struct bxe_softc * sc)2131  elink_cb_notify_link_changed(struct bxe_softc *sc)
2132  {
2133      REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2134                  (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2135  }
2136  
2137  /* send the MCP a request, block until there is a reply */
2138  uint32_t
elink_cb_fw_command(struct bxe_softc * sc,uint32_t command,uint32_t param)2139  elink_cb_fw_command(struct bxe_softc *sc,
2140                      uint32_t         command,
2141                      uint32_t         param)
2142  {
2143      int mb_idx = SC_FW_MB_IDX(sc);
2144      uint32_t seq;
2145      uint32_t rc = 0;
2146      uint32_t cnt = 1;
2147      uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2148  
2149      BXE_FWMB_LOCK(sc);
2150  
2151      seq = ++sc->fw_seq;
2152      SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2153      SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2154  
2155      BLOGD(sc, DBG_PHY,
2156            "wrote command 0x%08x to FW MB param 0x%08x\n",
2157            (command | seq), param);
2158  
2159      /* Let the FW do it's magic. GIve it up to 5 seconds... */
2160      do {
2161          DELAY(delay * 1000);
2162          rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2163      } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2164  
2165      BLOGD(sc, DBG_PHY,
2166            "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2167            cnt*delay, rc, seq);
2168  
2169      /* is this a reply to our command? */
2170      if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2171          rc &= FW_MSG_CODE_MASK;
2172      } else {
2173          /* Ruh-roh! */
2174          BLOGE(sc, "FW failed to respond!\n");
2175          // XXX bxe_fw_dump(sc);
2176          rc = 0;
2177      }
2178  
2179      BXE_FWMB_UNLOCK(sc);
2180      return (rc);
2181  }
2182  
2183  static uint32_t
bxe_fw_command(struct bxe_softc * sc,uint32_t command,uint32_t param)2184  bxe_fw_command(struct bxe_softc *sc,
2185                 uint32_t         command,
2186                 uint32_t         param)
2187  {
2188      return (elink_cb_fw_command(sc, command, param));
2189  }
2190  
2191  static void
__storm_memset_dma_mapping(struct bxe_softc * sc,uint32_t addr,bus_addr_t mapping)2192  __storm_memset_dma_mapping(struct bxe_softc *sc,
2193                             uint32_t         addr,
2194                             bus_addr_t       mapping)
2195  {
2196      REG_WR(sc, addr, U64_LO(mapping));
2197      REG_WR(sc, (addr + 4), U64_HI(mapping));
2198  }
2199  
2200  static void
storm_memset_spq_addr(struct bxe_softc * sc,bus_addr_t mapping,uint16_t abs_fid)2201  storm_memset_spq_addr(struct bxe_softc *sc,
2202                        bus_addr_t       mapping,
2203                        uint16_t         abs_fid)
2204  {
2205      uint32_t addr = (XSEM_REG_FAST_MEMORY +
2206                       XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2207      __storm_memset_dma_mapping(sc, addr, mapping);
2208  }
2209  
2210  static void
storm_memset_vf_to_pf(struct bxe_softc * sc,uint16_t abs_fid,uint16_t pf_id)2211  storm_memset_vf_to_pf(struct bxe_softc *sc,
2212                        uint16_t         abs_fid,
2213                        uint16_t         pf_id)
2214  {
2215      REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2216      REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2217      REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2218      REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2219  }
2220  
2221  static void
storm_memset_func_en(struct bxe_softc * sc,uint16_t abs_fid,uint8_t enable)2222  storm_memset_func_en(struct bxe_softc *sc,
2223                       uint16_t         abs_fid,
2224                       uint8_t          enable)
2225  {
2226      REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2227      REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2228      REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2229      REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2230  }
2231  
2232  static void
storm_memset_eq_data(struct bxe_softc * sc,struct event_ring_data * eq_data,uint16_t pfid)2233  storm_memset_eq_data(struct bxe_softc       *sc,
2234                       struct event_ring_data *eq_data,
2235                       uint16_t               pfid)
2236  {
2237      uint32_t addr;
2238      size_t size;
2239  
2240      addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2241      size = sizeof(struct event_ring_data);
2242      ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2243  }
2244  
2245  static void
storm_memset_eq_prod(struct bxe_softc * sc,uint16_t eq_prod,uint16_t pfid)2246  storm_memset_eq_prod(struct bxe_softc *sc,
2247                       uint16_t         eq_prod,
2248                       uint16_t         pfid)
2249  {
2250      uint32_t addr = (BAR_CSTRORM_INTMEM +
2251                       CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2252      REG_WR16(sc, addr, eq_prod);
2253  }
2254  
2255  /*
2256   * Post a slowpath command.
2257   *
2258   * A slowpath command is used to propagate a configuration change through
2259   * the controller in a controlled manner, allowing each STORM processor and
2260   * other H/W blocks to phase in the change.  The commands sent on the
2261   * slowpath are referred to as ramrods.  Depending on the ramrod used the
2262   * completion of the ramrod will occur in different ways.  Here's a
2263   * breakdown of ramrods and how they complete:
2264   *
2265   * RAMROD_CMD_ID_ETH_PORT_SETUP
2266   *   Used to setup the leading connection on a port.  Completes on the
2267   *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2268   *
2269   * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2270   *   Used to setup an additional connection on a port.  Completes on the
2271   *   RCQ of the multi-queue/RSS connection being initialized.
2272   *
2273   * RAMROD_CMD_ID_ETH_STAT_QUERY
2274   *   Used to force the storm processors to update the statistics database
2275   *   in host memory.  This ramrod is send on the leading connection CID and
2276   *   completes as an index increment of the CSTORM on the default status
2277   *   block.
2278   *
2279   * RAMROD_CMD_ID_ETH_UPDATE
2280   *   Used to update the state of the leading connection, usually to udpate
2281   *   the RSS indirection table.  Completes on the RCQ of the leading
2282   *   connection. (Not currently used under FreeBSD until OS support becomes
2283   *   available.)
2284   *
2285   * RAMROD_CMD_ID_ETH_HALT
2286   *   Used when tearing down a connection prior to driver unload.  Completes
2287   *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2288   *   use this on the leading connection.
2289   *
2290   * RAMROD_CMD_ID_ETH_SET_MAC
2291   *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2292   *   the RCQ of the leading connection.
2293   *
2294   * RAMROD_CMD_ID_ETH_CFC_DEL
2295   *   Used when tearing down a conneciton prior to driver unload.  Completes
2296   *   on the RCQ of the leading connection (since the current connection
2297   *   has been completely removed from controller memory).
2298   *
2299   * RAMROD_CMD_ID_ETH_PORT_DEL
2300   *   Used to tear down the leading connection prior to driver unload,
2301   *   typically fp[0].  Completes as an index increment of the CSTORM on the
2302   *   default status block.
2303   *
2304   * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2305   *   Used for connection offload.  Completes on the RCQ of the multi-queue
2306   *   RSS connection that is being offloaded.  (Not currently used under
2307   *   FreeBSD.)
2308   *
2309   * There can only be one command pending per function.
2310   *
2311   * Returns:
2312   *   0 = Success, !0 = Failure.
2313   */
2314  
2315  /* must be called under the spq lock */
2316  static inline
bxe_sp_get_next(struct bxe_softc * sc)2317  struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2318  {
2319      struct eth_spe *next_spe = sc->spq_prod_bd;
2320  
2321      if (sc->spq_prod_bd == sc->spq_last_bd) {
2322          /* wrap back to the first eth_spq */
2323          sc->spq_prod_bd = sc->spq;
2324          sc->spq_prod_idx = 0;
2325      } else {
2326          sc->spq_prod_bd++;
2327          sc->spq_prod_idx++;
2328      }
2329  
2330      return (next_spe);
2331  }
2332  
2333  /* must be called under the spq lock */
2334  static inline
bxe_sp_prod_update(struct bxe_softc * sc)2335  void bxe_sp_prod_update(struct bxe_softc *sc)
2336  {
2337      int func = SC_FUNC(sc);
2338  
2339      /*
2340       * Make sure that BD data is updated before writing the producer.
2341       * BD data is written to the memory, the producer is read from the
2342       * memory, thus we need a full memory barrier to ensure the ordering.
2343       */
2344      mb();
2345  
2346      REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2347               sc->spq_prod_idx);
2348  
2349      bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2350                        BUS_SPACE_BARRIER_WRITE);
2351  }
2352  
2353  /**
2354   * bxe_is_contextless_ramrod - check if the current command ends on EQ
2355   *
2356   * @cmd:      command to check
2357   * @cmd_type: command type
2358   */
2359  static inline
bxe_is_contextless_ramrod(int cmd,int cmd_type)2360  int bxe_is_contextless_ramrod(int cmd,
2361                                int cmd_type)
2362  {
2363      if ((cmd_type == NONE_CONNECTION_TYPE) ||
2364          (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2365          (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2366          (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2367          (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2368          (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2369          (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2370          return (TRUE);
2371      } else {
2372          return (FALSE);
2373      }
2374  }
2375  
2376  /**
2377   * bxe_sp_post - place a single command on an SP ring
2378   *
2379   * @sc:         driver handle
2380   * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2381   * @cid:        SW CID the command is related to
2382   * @data_hi:    command private data address (high 32 bits)
2383   * @data_lo:    command private data address (low 32 bits)
2384   * @cmd_type:   command type (e.g. NONE, ETH)
2385   *
2386   * SP data is handled as if it's always an address pair, thus data fields are
2387   * not swapped to little endian in upper functions. Instead this function swaps
2388   * data as if it's two uint32 fields.
2389   */
2390  int
bxe_sp_post(struct bxe_softc * sc,int command,int cid,uint32_t data_hi,uint32_t data_lo,int cmd_type)2391  bxe_sp_post(struct bxe_softc *sc,
2392              int              command,
2393              int              cid,
2394              uint32_t         data_hi,
2395              uint32_t         data_lo,
2396              int              cmd_type)
2397  {
2398      struct eth_spe *spe;
2399      uint16_t type;
2400      int common;
2401  
2402      common = bxe_is_contextless_ramrod(command, cmd_type);
2403  
2404      BXE_SP_LOCK(sc);
2405  
2406      if (common) {
2407          if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2408              BLOGE(sc, "EQ ring is full!\n");
2409              BXE_SP_UNLOCK(sc);
2410              return (-1);
2411          }
2412      } else {
2413          if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2414              BLOGE(sc, "SPQ ring is full!\n");
2415              BXE_SP_UNLOCK(sc);
2416              return (-1);
2417          }
2418      }
2419  
2420      spe = bxe_sp_get_next(sc);
2421  
2422      /* CID needs port number to be encoded int it */
2423      spe->hdr.conn_and_cmd_data =
2424          htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2425  
2426      type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2427  
2428      /* TBD: Check if it works for VFs */
2429      type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2430               SPE_HDR_T_FUNCTION_ID);
2431  
2432      spe->hdr.type = htole16(type);
2433  
2434      spe->data.update_data_addr.hi = htole32(data_hi);
2435      spe->data.update_data_addr.lo = htole32(data_lo);
2436  
2437      /*
2438       * It's ok if the actual decrement is issued towards the memory
2439       * somewhere between the lock and unlock. Thus no more explict
2440       * memory barrier is needed.
2441       */
2442      if (common) {
2443          atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2444      } else {
2445          atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2446      }
2447  
2448      BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2449      BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2450            BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2451      BLOGD(sc, DBG_SP,
2452            "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2453            sc->spq_prod_idx,
2454            (uint32_t)U64_HI(sc->spq_dma.paddr),
2455            (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2456            command,
2457            common,
2458            HW_CID(sc, cid),
2459            data_hi,
2460            data_lo,
2461            type,
2462            atomic_load_acq_long(&sc->cq_spq_left),
2463            atomic_load_acq_long(&sc->eq_spq_left));
2464  
2465      bxe_sp_prod_update(sc);
2466  
2467      BXE_SP_UNLOCK(sc);
2468      return (0);
2469  }
2470  
2471  /**
2472   * bxe_debug_print_ind_table - prints the indirection table configuration.
2473   *
2474   * @sc: driver hanlde
2475   * @p:  pointer to rss configuration
2476   */
2477  
2478  /*
2479   * FreeBSD Device probe function.
2480   *
2481   * Compares the device found to the driver's list of supported devices and
2482   * reports back to the bsd loader whether this is the right driver for the device.
2483   * This is the driver entry function called from the "kldload" command.
2484   *
2485   * Returns:
2486   *   BUS_PROBE_DEFAULT on success, positive value on failure.
2487   */
2488  static int
bxe_probe(device_t dev)2489  bxe_probe(device_t dev)
2490  {
2491      struct bxe_device_type *t;
2492      uint16_t did, sdid, svid, vid;
2493  
2494      /* Find our device structure */
2495      t = bxe_devs;
2496  
2497      /* Get the data for the device to be probed. */
2498      vid  = pci_get_vendor(dev);
2499      did  = pci_get_device(dev);
2500      svid = pci_get_subvendor(dev);
2501      sdid = pci_get_subdevice(dev);
2502  
2503      /* Look through the list of known devices for a match. */
2504      while (t->bxe_name != NULL) {
2505          if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2506              ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2507              ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2508              device_set_descf(dev,
2509                       "%s (%c%d) BXE v:%s", t->bxe_name,
2510                       (((pci_read_config(dev, PCIR_REVID, 4) &
2511                          0xf0) >> 4) + 'A'),
2512                       (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2513                       BXE_DRIVER_VERSION);
2514              return (BUS_PROBE_DEFAULT);
2515          }
2516          t++;
2517      }
2518  
2519      return (ENXIO);
2520  }
2521  
2522  static void
bxe_init_mutexes(struct bxe_softc * sc)2523  bxe_init_mutexes(struct bxe_softc *sc)
2524  {
2525  #ifdef BXE_CORE_LOCK_SX
2526      snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2527               "bxe%d_core_lock", sc->unit);
2528      sx_init(&sc->core_sx, sc->core_sx_name);
2529  #else
2530      snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2531               "bxe%d_core_lock", sc->unit);
2532      mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2533  #endif
2534  
2535      snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2536               "bxe%d_sp_lock", sc->unit);
2537      mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2538  
2539      snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2540               "bxe%d_dmae_lock", sc->unit);
2541      mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2542  
2543      snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2544               "bxe%d_phy_lock", sc->unit);
2545      mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2546  
2547      snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2548               "bxe%d_fwmb_lock", sc->unit);
2549      mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2550  
2551      snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2552               "bxe%d_print_lock", sc->unit);
2553      mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2554  
2555      snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2556               "bxe%d_stats_lock", sc->unit);
2557      mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2558  
2559      snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2560               "bxe%d_mcast_lock", sc->unit);
2561      mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2562  }
2563  
2564  static void
bxe_release_mutexes(struct bxe_softc * sc)2565  bxe_release_mutexes(struct bxe_softc *sc)
2566  {
2567  #ifdef BXE_CORE_LOCK_SX
2568      sx_destroy(&sc->core_sx);
2569  #else
2570      if (mtx_initialized(&sc->core_mtx)) {
2571          mtx_destroy(&sc->core_mtx);
2572      }
2573  #endif
2574  
2575      if (mtx_initialized(&sc->sp_mtx)) {
2576          mtx_destroy(&sc->sp_mtx);
2577      }
2578  
2579      if (mtx_initialized(&sc->dmae_mtx)) {
2580          mtx_destroy(&sc->dmae_mtx);
2581      }
2582  
2583      if (mtx_initialized(&sc->port.phy_mtx)) {
2584          mtx_destroy(&sc->port.phy_mtx);
2585      }
2586  
2587      if (mtx_initialized(&sc->fwmb_mtx)) {
2588          mtx_destroy(&sc->fwmb_mtx);
2589      }
2590  
2591      if (mtx_initialized(&sc->print_mtx)) {
2592          mtx_destroy(&sc->print_mtx);
2593      }
2594  
2595      if (mtx_initialized(&sc->stats_mtx)) {
2596          mtx_destroy(&sc->stats_mtx);
2597      }
2598  
2599      if (mtx_initialized(&sc->mcast_mtx)) {
2600          mtx_destroy(&sc->mcast_mtx);
2601      }
2602  }
2603  
2604  static void
bxe_tx_disable(struct bxe_softc * sc)2605  bxe_tx_disable(struct bxe_softc* sc)
2606  {
2607      if_t ifp = sc->ifp;
2608  
2609      /* tell the stack the driver is stopped and TX queue is full */
2610      if (ifp !=  NULL) {
2611          if_setdrvflags(ifp, 0);
2612      }
2613  }
2614  
2615  static void
bxe_drv_pulse(struct bxe_softc * sc)2616  bxe_drv_pulse(struct bxe_softc *sc)
2617  {
2618      SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2619               sc->fw_drv_pulse_wr_seq);
2620  }
2621  
2622  static inline uint16_t
bxe_tx_avail(struct bxe_softc * sc,struct bxe_fastpath * fp)2623  bxe_tx_avail(struct bxe_softc *sc,
2624               struct bxe_fastpath *fp)
2625  {
2626      int16_t  used;
2627      uint16_t prod;
2628      uint16_t cons;
2629  
2630      prod = fp->tx_bd_prod;
2631      cons = fp->tx_bd_cons;
2632  
2633      used = SUB_S16(prod, cons);
2634  
2635      return (int16_t)(sc->tx_ring_size) - used;
2636  }
2637  
2638  static inline int
bxe_tx_queue_has_work(struct bxe_fastpath * fp)2639  bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2640  {
2641      uint16_t hw_cons;
2642  
2643      mb(); /* status block fields can change */
2644      hw_cons = le16toh(*fp->tx_cons_sb);
2645      return (hw_cons != fp->tx_pkt_cons);
2646  }
2647  
2648  static inline uint8_t
bxe_has_tx_work(struct bxe_fastpath * fp)2649  bxe_has_tx_work(struct bxe_fastpath *fp)
2650  {
2651      /* expand this for multi-cos if ever supported */
2652      return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2653  }
2654  
2655  static inline int
bxe_has_rx_work(struct bxe_fastpath * fp)2656  bxe_has_rx_work(struct bxe_fastpath *fp)
2657  {
2658      uint16_t rx_cq_cons_sb;
2659  
2660      mb(); /* status block fields can change */
2661      rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2662      if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2663          rx_cq_cons_sb++;
2664      return (fp->rx_cq_cons != rx_cq_cons_sb);
2665  }
2666  
2667  static void
bxe_sp_event(struct bxe_softc * sc,struct bxe_fastpath * fp,union eth_rx_cqe * rr_cqe)2668  bxe_sp_event(struct bxe_softc    *sc,
2669               struct bxe_fastpath *fp,
2670               union eth_rx_cqe    *rr_cqe)
2671  {
2672      int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2673      int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2674      enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2675      struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2676  
2677      BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2678            fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2679  
2680      switch (command) {
2681      case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2682          BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2683          drv_cmd = ECORE_Q_CMD_UPDATE;
2684          break;
2685  
2686      case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2687          BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2688          drv_cmd = ECORE_Q_CMD_SETUP;
2689          break;
2690  
2691      case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2692          BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2693          drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2694          break;
2695  
2696      case (RAMROD_CMD_ID_ETH_HALT):
2697          BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2698          drv_cmd = ECORE_Q_CMD_HALT;
2699          break;
2700  
2701      case (RAMROD_CMD_ID_ETH_TERMINATE):
2702          BLOGD(sc, DBG_SP, "got MULTI[%d] teminate ramrod\n", cid);
2703          drv_cmd = ECORE_Q_CMD_TERMINATE;
2704          break;
2705  
2706      case (RAMROD_CMD_ID_ETH_EMPTY):
2707          BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2708          drv_cmd = ECORE_Q_CMD_EMPTY;
2709          break;
2710  
2711      default:
2712          BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2713                command, fp->index);
2714          return;
2715      }
2716  
2717      if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2718          q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2719          /*
2720           * q_obj->complete_cmd() failure means that this was
2721           * an unexpected completion.
2722           *
2723           * In this case we don't want to increase the sc->spq_left
2724           * because apparently we haven't sent this command the first
2725           * place.
2726           */
2727          // bxe_panic(sc, ("Unexpected SP completion\n"));
2728          return;
2729      }
2730  
2731      atomic_add_acq_long(&sc->cq_spq_left, 1);
2732  
2733      BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2734            atomic_load_acq_long(&sc->cq_spq_left));
2735  }
2736  
2737  /*
2738   * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2739   * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2740   * the current aggregation queue as in-progress.
2741   */
2742  static void
bxe_tpa_start(struct bxe_softc * sc,struct bxe_fastpath * fp,uint16_t queue,uint16_t cons,uint16_t prod,struct eth_fast_path_rx_cqe * cqe)2743  bxe_tpa_start(struct bxe_softc            *sc,
2744                struct bxe_fastpath         *fp,
2745                uint16_t                    queue,
2746                uint16_t                    cons,
2747                uint16_t                    prod,
2748                struct eth_fast_path_rx_cqe *cqe)
2749  {
2750      struct bxe_sw_rx_bd tmp_bd;
2751      struct bxe_sw_rx_bd *rx_buf;
2752      struct eth_rx_bd *rx_bd;
2753      int max_agg_queues __diagused;
2754      struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2755      uint16_t index;
2756  
2757      BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2758                         "cons=%d prod=%d\n",
2759            fp->index, queue, cons, prod);
2760  
2761      max_agg_queues = MAX_AGG_QS(sc);
2762  
2763      KASSERT((queue < max_agg_queues),
2764              ("fp[%02d] invalid aggr queue (%d >= %d)!",
2765               fp->index, queue, max_agg_queues));
2766  
2767      KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2768              ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2769               fp->index, queue));
2770  
2771      /* copy the existing mbuf and mapping from the TPA pool */
2772      tmp_bd = tpa_info->bd;
2773  
2774      if (tmp_bd.m == NULL) {
2775          uint32_t *tmp;
2776  
2777          tmp = (uint32_t *)cqe;
2778  
2779          BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d]mbuf not allocated!\n",
2780                fp->index, queue, cons, prod);
2781          BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2782              *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2783  
2784          /* XXX Error handling? */
2785          return;
2786      }
2787  
2788      /* change the TPA queue to the start state */
2789      tpa_info->state            = BXE_TPA_STATE_START;
2790      tpa_info->placement_offset = cqe->placement_offset;
2791      tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2792      tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2793      tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2794  
2795      fp->rx_tpa_queue_used |= (1 << queue);
2796  
2797      /*
2798       * If all the buffer descriptors are filled with mbufs then fill in
2799       * the current consumer index with a new BD. Else if a maximum Rx
2800       * buffer limit is imposed then fill in the next producer index.
2801       */
2802      index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2803                  prod : cons;
2804  
2805      /* move the received mbuf and mapping to TPA pool */
2806      tpa_info->bd = fp->rx_mbuf_chain[cons];
2807  
2808      /* release any existing RX BD mbuf mappings */
2809      if (cons != index) {
2810          rx_buf = &fp->rx_mbuf_chain[cons];
2811  
2812          if (rx_buf->m_map != NULL) {
2813              bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2814                              BUS_DMASYNC_POSTREAD);
2815              bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2816          }
2817  
2818          /*
2819           * We get here when the maximum number of rx buffers is less than
2820           * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2821           * it out here without concern of a memory leak.
2822           */
2823          fp->rx_mbuf_chain[cons].m = NULL;
2824      }
2825  
2826      /* update the Rx SW BD with the mbuf info from the TPA pool */
2827      fp->rx_mbuf_chain[index] = tmp_bd;
2828  
2829      /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2830      rx_bd = &fp->rx_chain[index];
2831      rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2832      rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2833  }
2834  
2835  /*
2836   * When a TPA aggregation is completed, loop through the individual mbufs
2837   * of the aggregation, combining them into a single mbuf which will be sent
2838   * up the stack. Refill all freed SGEs with mbufs as we go along.
2839   */
2840  static int
bxe_fill_frag_mbuf(struct bxe_softc * sc,struct bxe_fastpath * fp,struct bxe_sw_tpa_info * tpa_info,uint16_t queue,uint16_t pages,struct mbuf * m,struct eth_end_agg_rx_cqe * cqe,uint16_t cqe_idx)2841  bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2842                     struct bxe_fastpath       *fp,
2843                     struct bxe_sw_tpa_info    *tpa_info,
2844                     uint16_t                  queue,
2845                     uint16_t                  pages,
2846                     struct mbuf               *m,
2847  			       struct eth_end_agg_rx_cqe *cqe,
2848                     uint16_t                  cqe_idx)
2849  {
2850      struct mbuf *m_frag;
2851      uint32_t frag_len, frag_size, i;
2852      uint16_t sge_idx;
2853      int rc = 0;
2854      int j;
2855  
2856      frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2857  
2858      BLOGD(sc, DBG_LRO,
2859            "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2860            fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2861  
2862      /* make sure the aggregated frame is not too big to handle */
2863      if (pages > 8 * PAGES_PER_SGE) {
2864  
2865          uint32_t *tmp = (uint32_t *)cqe;
2866  
2867          BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2868                    "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2869                fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2870                tpa_info->len_on_bd, frag_size);
2871  
2872          BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2873              *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2874  
2875          bxe_panic(sc, ("sge page count error\n"));
2876          return (EINVAL);
2877      }
2878  
2879      /*
2880       * Scan through the scatter gather list pulling individual mbufs into a
2881       * single mbuf for the host stack.
2882       */
2883      for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2884          sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2885  
2886          /*
2887           * Firmware gives the indices of the SGE as if the ring is an array
2888           * (meaning that the "next" element will consume 2 indices).
2889           */
2890          frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2891  
2892          BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2893                             "sge_idx=%d frag_size=%d frag_len=%d\n",
2894                fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2895  
2896          m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2897  
2898          /* allocate a new mbuf for the SGE */
2899          rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2900          if (rc) {
2901              /* Leave all remaining SGEs in the ring! */
2902              return (rc);
2903          }
2904  
2905          /* update the fragment length */
2906          m_frag->m_len = frag_len;
2907  
2908          /* concatenate the fragment to the head mbuf */
2909          m_cat(m, m_frag);
2910          fp->eth_q_stats.mbuf_alloc_sge--;
2911  
2912          /* update the TPA mbuf size and remaining fragment size */
2913          m->m_pkthdr.len += frag_len;
2914          frag_size -= frag_len;
2915      }
2916  
2917      BLOGD(sc, DBG_LRO,
2918            "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2919            fp->index, queue, frag_size);
2920  
2921      return (rc);
2922  }
2923  
2924  static inline void
bxe_clear_sge_mask_next_elems(struct bxe_fastpath * fp)2925  bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2926  {
2927      int i, j;
2928  
2929      for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2930          int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2931  
2932          for (j = 0; j < 2; j++) {
2933              BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2934              idx--;
2935          }
2936      }
2937  }
2938  
2939  static inline void
bxe_init_sge_ring_bit_mask(struct bxe_fastpath * fp)2940  bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2941  {
2942      /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2943      memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2944  
2945      /*
2946       * Clear the two last indices in the page to 1. These are the indices that
2947       * correspond to the "next" element, hence will never be indicated and
2948       * should be removed from the calculations.
2949       */
2950      bxe_clear_sge_mask_next_elems(fp);
2951  }
2952  
2953  static inline void
bxe_update_last_max_sge(struct bxe_fastpath * fp,uint16_t idx)2954  bxe_update_last_max_sge(struct bxe_fastpath *fp,
2955                          uint16_t            idx)
2956  {
2957      uint16_t last_max = fp->last_max_sge;
2958  
2959      if (SUB_S16(idx, last_max) > 0) {
2960          fp->last_max_sge = idx;
2961      }
2962  }
2963  
2964  static inline void
bxe_update_sge_prod(struct bxe_softc * sc,struct bxe_fastpath * fp,uint16_t sge_len,union eth_sgl_or_raw_data * cqe)2965  bxe_update_sge_prod(struct bxe_softc          *sc,
2966                      struct bxe_fastpath       *fp,
2967                      uint16_t                  sge_len,
2968                      union eth_sgl_or_raw_data *cqe)
2969  {
2970      uint16_t last_max, last_elem, first_elem;
2971      uint16_t delta = 0;
2972      uint16_t i;
2973  
2974      if (!sge_len) {
2975          return;
2976      }
2977  
2978      /* first mark all used pages */
2979      for (i = 0; i < sge_len; i++) {
2980          BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2981                              RX_SGE(le16toh(cqe->sgl[i])));
2982      }
2983  
2984      BLOGD(sc, DBG_LRO,
2985            "fp[%02d] fp_cqe->sgl[%d] = %d\n",
2986            fp->index, sge_len - 1,
2987            le16toh(cqe->sgl[sge_len - 1]));
2988  
2989      /* assume that the last SGE index is the biggest */
2990      bxe_update_last_max_sge(fp,
2991                              le16toh(cqe->sgl[sge_len - 1]));
2992  
2993      last_max = RX_SGE(fp->last_max_sge);
2994      last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
2995      first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
2996  
2997      /* if ring is not full */
2998      if (last_elem + 1 != first_elem) {
2999          last_elem++;
3000      }
3001  
3002      /* now update the prod */
3003      for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3004          if (__predict_true(fp->sge_mask[i])) {
3005              break;
3006          }
3007  
3008          fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3009          delta += BIT_VEC64_ELEM_SZ;
3010      }
3011  
3012      if (delta > 0) {
3013          fp->rx_sge_prod += delta;
3014          /* clear page-end entries */
3015          bxe_clear_sge_mask_next_elems(fp);
3016      }
3017  
3018      BLOGD(sc, DBG_LRO,
3019            "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3020            fp->index, fp->last_max_sge, fp->rx_sge_prod);
3021  }
3022  
3023  /*
3024   * The aggregation on the current TPA queue has completed. Pull the individual
3025   * mbuf fragments together into a single mbuf, perform all necessary checksum
3026   * calculations, and send the resuting mbuf to the stack.
3027   */
3028  static void
bxe_tpa_stop(struct bxe_softc * sc,struct bxe_fastpath * fp,struct bxe_sw_tpa_info * tpa_info,uint16_t queue,uint16_t pages,struct eth_end_agg_rx_cqe * cqe,uint16_t cqe_idx)3029  bxe_tpa_stop(struct bxe_softc          *sc,
3030               struct bxe_fastpath       *fp,
3031               struct bxe_sw_tpa_info    *tpa_info,
3032               uint16_t                  queue,
3033               uint16_t                  pages,
3034  			 struct eth_end_agg_rx_cqe *cqe,
3035               uint16_t                  cqe_idx)
3036  {
3037      if_t ifp = sc->ifp;
3038      struct mbuf *m;
3039      int rc = 0;
3040  
3041      BLOGD(sc, DBG_LRO,
3042            "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3043            fp->index, queue, tpa_info->placement_offset,
3044            le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3045  
3046      m = tpa_info->bd.m;
3047  
3048      /* allocate a replacement before modifying existing mbuf */
3049      rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3050      if (rc) {
3051          /* drop the frame and log an error */
3052          fp->eth_q_stats.rx_soft_errors++;
3053          goto bxe_tpa_stop_exit;
3054      }
3055  
3056      /* we have a replacement, fixup the current mbuf */
3057      m_adj(m, tpa_info->placement_offset);
3058      m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3059  
3060      /* mark the checksums valid (taken care of by the firmware) */
3061      fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3062      fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3063      m->m_pkthdr.csum_data = 0xffff;
3064      m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3065                                 CSUM_IP_VALID   |
3066                                 CSUM_DATA_VALID |
3067                                 CSUM_PSEUDO_HDR);
3068  
3069      /* aggregate all of the SGEs into a single mbuf */
3070      rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3071      if (rc) {
3072          /* drop the packet and log an error */
3073          fp->eth_q_stats.rx_soft_errors++;
3074          m_freem(m);
3075      } else {
3076          if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3077              m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3078              m->m_flags |= M_VLANTAG;
3079          }
3080  
3081          /* assign packet to this interface interface */
3082          if_setrcvif(m, ifp);
3083  
3084          /* specify what RSS queue was used for this flow */
3085          m->m_pkthdr.flowid = fp->index;
3086          BXE_SET_FLOWID(m);
3087  
3088          if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3089          fp->eth_q_stats.rx_tpa_pkts++;
3090  
3091          /* pass the frame to the stack */
3092          if_input(ifp, m);
3093      }
3094  
3095      /* we passed an mbuf up the stack or dropped the frame */
3096      fp->eth_q_stats.mbuf_alloc_tpa--;
3097  
3098  bxe_tpa_stop_exit:
3099  
3100      fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3101      fp->rx_tpa_queue_used &= ~(1 << queue);
3102  }
3103  
3104  static uint8_t
bxe_service_rxsgl(struct bxe_fastpath * fp,uint16_t len,uint16_t lenonbd,struct mbuf * m,struct eth_fast_path_rx_cqe * cqe_fp)3105  bxe_service_rxsgl(
3106                   struct bxe_fastpath *fp,
3107                   uint16_t len,
3108                   uint16_t lenonbd,
3109                   struct mbuf *m,
3110                   struct eth_fast_path_rx_cqe *cqe_fp)
3111  {
3112      struct mbuf *m_frag;
3113      uint16_t frags, frag_len;
3114      uint16_t sge_idx = 0;
3115      uint16_t j;
3116      uint8_t i, rc = 0;
3117      uint32_t frag_size;
3118  
3119      /* adjust the mbuf */
3120      m->m_len = lenonbd;
3121  
3122      frag_size =  len - lenonbd;
3123      frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3124  
3125      for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3126          sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3127  
3128          m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3129          frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3130          m_frag->m_len = frag_len;
3131  
3132         /* allocate a new mbuf for the SGE */
3133          rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3134          if (rc) {
3135              /* Leave all remaining SGEs in the ring! */
3136              return (rc);
3137          }
3138          fp->eth_q_stats.mbuf_alloc_sge--;
3139  
3140          /* concatenate the fragment to the head mbuf */
3141          m_cat(m, m_frag);
3142  
3143          frag_size -= frag_len;
3144      }
3145  
3146      bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3147  
3148      return rc;
3149  }
3150  
3151  static uint8_t
bxe_rxeof(struct bxe_softc * sc,struct bxe_fastpath * fp)3152  bxe_rxeof(struct bxe_softc    *sc,
3153            struct bxe_fastpath *fp)
3154  {
3155      if_t ifp = sc->ifp;
3156      uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3157      uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3158      int rx_pkts = 0;
3159      int rc = 0;
3160  
3161      BXE_FP_RX_LOCK(fp);
3162  
3163      /* CQ "next element" is of the size of the regular element */
3164      hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3165      if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3166          hw_cq_cons++;
3167      }
3168  
3169      bd_cons = fp->rx_bd_cons;
3170      bd_prod = fp->rx_bd_prod;
3171      bd_prod_fw = bd_prod;
3172      sw_cq_cons = fp->rx_cq_cons;
3173      sw_cq_prod = fp->rx_cq_prod;
3174  
3175      /*
3176       * Memory barrier necessary as speculative reads of the rx
3177       * buffer can be ahead of the index in the status block
3178       */
3179      rmb();
3180  
3181      BLOGD(sc, DBG_RX,
3182            "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3183            fp->index, hw_cq_cons, sw_cq_cons);
3184  
3185      while (sw_cq_cons != hw_cq_cons) {
3186          struct bxe_sw_rx_bd *rx_buf = NULL;
3187          union eth_rx_cqe *cqe;
3188          struct eth_fast_path_rx_cqe *cqe_fp;
3189          uint8_t cqe_fp_flags;
3190          enum eth_rx_cqe_type cqe_fp_type;
3191          uint16_t len, lenonbd,  pad;
3192          struct mbuf *m = NULL;
3193  
3194          comp_ring_cons = RCQ(sw_cq_cons);
3195          bd_prod = RX_BD(bd_prod);
3196          bd_cons = RX_BD(bd_cons);
3197  
3198          cqe          = &fp->rcq_chain[comp_ring_cons];
3199          cqe_fp       = &cqe->fast_path_cqe;
3200          cqe_fp_flags = cqe_fp->type_error_flags;
3201          cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3202  
3203          BLOGD(sc, DBG_RX,
3204                "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3205                "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3206                "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3207                fp->index,
3208                hw_cq_cons,
3209                sw_cq_cons,
3210                bd_prod,
3211                bd_cons,
3212                CQE_TYPE(cqe_fp_flags),
3213                cqe_fp_flags,
3214                cqe_fp->status_flags,
3215                le32toh(cqe_fp->rss_hash_result),
3216                le16toh(cqe_fp->vlan_tag),
3217                le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3218                le16toh(cqe_fp->len_on_bd));
3219  
3220          /* is this a slowpath msg? */
3221          if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3222              bxe_sp_event(sc, fp, cqe);
3223              goto next_cqe;
3224          }
3225  
3226          rx_buf = &fp->rx_mbuf_chain[bd_cons];
3227  
3228          if (!CQE_TYPE_FAST(cqe_fp_type)) {
3229              struct bxe_sw_tpa_info *tpa_info;
3230              uint16_t frag_size, pages;
3231              uint8_t queue;
3232  
3233              if (CQE_TYPE_START(cqe_fp_type)) {
3234                  bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3235                                bd_cons, bd_prod, cqe_fp);
3236                  m = NULL; /* packet not ready yet */
3237                  goto next_rx;
3238              }
3239  
3240              KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3241                      ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3242  
3243              queue = cqe->end_agg_cqe.queue_index;
3244              tpa_info = &fp->rx_tpa_info[queue];
3245  
3246              BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3247                    fp->index, queue);
3248  
3249              frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3250                           tpa_info->len_on_bd);
3251              pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3252  
3253              bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3254                           &cqe->end_agg_cqe, comp_ring_cons);
3255  
3256              bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3257  
3258              goto next_cqe;
3259          }
3260  
3261          /* non TPA */
3262  
3263          /* is this an error packet? */
3264          if (__predict_false(cqe_fp_flags &
3265                              ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3266              BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3267              fp->eth_q_stats.rx_soft_errors++;
3268              goto next_rx;
3269          }
3270  
3271          len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3272          lenonbd = le16toh(cqe_fp->len_on_bd);
3273          pad = cqe_fp->placement_offset;
3274  
3275          m = rx_buf->m;
3276  
3277          if (__predict_false(m == NULL)) {
3278              BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3279                    bd_cons, fp->index);
3280              goto next_rx;
3281          }
3282  
3283          /* XXX double copy if packet length under a threshold */
3284  
3285          /*
3286           * If all the buffer descriptors are filled with mbufs then fill in
3287           * the current consumer index with a new BD. Else if a maximum Rx
3288           * buffer limit is imposed then fill in the next producer index.
3289           */
3290          rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3291                                    (sc->max_rx_bufs != RX_BD_USABLE) ?
3292                                        bd_prod : bd_cons);
3293          if (rc != 0) {
3294  
3295              /* we simply reuse the received mbuf and don't post it to the stack */
3296              m = NULL;
3297  
3298              BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3299                    fp->index, rc);
3300              fp->eth_q_stats.rx_soft_errors++;
3301  
3302              if (sc->max_rx_bufs != RX_BD_USABLE) {
3303                  /* copy this consumer index to the producer index */
3304                  memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3305                         sizeof(struct bxe_sw_rx_bd));
3306                  memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3307              }
3308  
3309              goto next_rx;
3310          }
3311  
3312          /* current mbuf was detached from the bd */
3313          fp->eth_q_stats.mbuf_alloc_rx--;
3314  
3315          /* we allocated a replacement mbuf, fixup the current one */
3316          m_adj(m, pad);
3317          m->m_pkthdr.len = m->m_len = len;
3318  
3319          if ((len > 60) && (len > lenonbd)) {
3320              fp->eth_q_stats.rx_bxe_service_rxsgl++;
3321              rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3322              if (rc)
3323                  break;
3324              fp->eth_q_stats.rx_jumbo_sge_pkts++;
3325          } else if (lenonbd < len) {
3326              fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3327          }
3328  
3329          /* assign packet to this interface interface */
3330  	if_setrcvif(m, ifp);
3331  
3332          /* assume no hardware checksum has complated */
3333          m->m_pkthdr.csum_flags = 0;
3334  
3335          /* validate checksum if offload enabled */
3336          if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3337              /* check for a valid IP frame */
3338              if (!(cqe->fast_path_cqe.status_flags &
3339                    ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3340                  m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3341                  if (__predict_false(cqe_fp_flags &
3342                                      ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3343                      fp->eth_q_stats.rx_hw_csum_errors++;
3344                  } else {
3345                      fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3346                      m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3347                  }
3348              }
3349  
3350              /* check for a valid TCP/UDP frame */
3351              if (!(cqe->fast_path_cqe.status_flags &
3352                    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3353                  if (__predict_false(cqe_fp_flags &
3354                                      ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3355                      fp->eth_q_stats.rx_hw_csum_errors++;
3356                  } else {
3357                      fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3358                      m->m_pkthdr.csum_data = 0xFFFF;
3359                      m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3360                                                 CSUM_PSEUDO_HDR);
3361                  }
3362              }
3363          }
3364  
3365          /* if there is a VLAN tag then flag that info */
3366          if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3367              m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3368              m->m_flags |= M_VLANTAG;
3369          }
3370  
3371          /* specify what RSS queue was used for this flow */
3372          m->m_pkthdr.flowid = fp->index;
3373          BXE_SET_FLOWID(m);
3374  
3375  next_rx:
3376  
3377          bd_cons    = RX_BD_NEXT(bd_cons);
3378          bd_prod    = RX_BD_NEXT(bd_prod);
3379          bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3380  
3381          /* pass the frame to the stack */
3382          if (__predict_true(m != NULL)) {
3383              if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3384              rx_pkts++;
3385              if_input(ifp, m);
3386          }
3387  
3388  next_cqe:
3389  
3390          sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3391          sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3392  
3393          /* limit spinning on the queue */
3394          if (rc != 0)
3395              break;
3396  
3397          if (rx_pkts == sc->rx_budget) {
3398              fp->eth_q_stats.rx_budget_reached++;
3399              break;
3400          }
3401      } /* while work to do */
3402  
3403      fp->rx_bd_cons = bd_cons;
3404      fp->rx_bd_prod = bd_prod_fw;
3405      fp->rx_cq_cons = sw_cq_cons;
3406      fp->rx_cq_prod = sw_cq_prod;
3407  
3408      /* Update producers */
3409      bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3410  
3411      fp->eth_q_stats.rx_pkts += rx_pkts;
3412      fp->eth_q_stats.rx_calls++;
3413  
3414      BXE_FP_RX_UNLOCK(fp);
3415  
3416      return (sw_cq_cons != hw_cq_cons);
3417  }
3418  
3419  static uint16_t
bxe_free_tx_pkt(struct bxe_softc * sc,struct bxe_fastpath * fp,uint16_t idx)3420  bxe_free_tx_pkt(struct bxe_softc    *sc,
3421                  struct bxe_fastpath *fp,
3422                  uint16_t            idx)
3423  {
3424      struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3425      struct eth_tx_start_bd *tx_start_bd;
3426      uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3427      uint16_t new_cons;
3428      int nbd;
3429  
3430      /* unmap the mbuf from non-paged memory */
3431      bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3432  
3433      tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3434      nbd = le16toh(tx_start_bd->nbd) - 1;
3435  
3436      new_cons = (tx_buf->first_bd + nbd);
3437  
3438      /* free the mbuf */
3439      if (__predict_true(tx_buf->m != NULL)) {
3440          m_freem(tx_buf->m);
3441          fp->eth_q_stats.mbuf_alloc_tx--;
3442      } else {
3443          fp->eth_q_stats.tx_chain_lost_mbuf++;
3444      }
3445  
3446      tx_buf->m = NULL;
3447      tx_buf->first_bd = 0;
3448  
3449      return (new_cons);
3450  }
3451  
3452  /* transmit timeout watchdog */
3453  static int
bxe_watchdog(struct bxe_softc * sc,struct bxe_fastpath * fp)3454  bxe_watchdog(struct bxe_softc    *sc,
3455               struct bxe_fastpath *fp)
3456  {
3457      BXE_FP_TX_LOCK(fp);
3458  
3459      if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3460          BXE_FP_TX_UNLOCK(fp);
3461          return (0);
3462      }
3463  
3464      BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3465  
3466      BXE_FP_TX_UNLOCK(fp);
3467      BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK);
3468      taskqueue_enqueue_timeout(taskqueue_thread,
3469          &sc->sp_err_timeout_task, hz/10);
3470  
3471      return (-1);
3472  }
3473  
3474  /* processes transmit completions */
3475  static uint8_t
bxe_txeof(struct bxe_softc * sc,struct bxe_fastpath * fp)3476  bxe_txeof(struct bxe_softc    *sc,
3477            struct bxe_fastpath *fp)
3478  {
3479      if_t ifp = sc->ifp;
3480      uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3481      uint16_t tx_bd_avail;
3482  
3483      BXE_FP_TX_LOCK_ASSERT(fp);
3484  
3485      bd_cons = fp->tx_bd_cons;
3486      hw_cons = le16toh(*fp->tx_cons_sb);
3487      sw_cons = fp->tx_pkt_cons;
3488  
3489      while (sw_cons != hw_cons) {
3490          pkt_cons = TX_BD(sw_cons);
3491  
3492          BLOGD(sc, DBG_TX,
3493                "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3494                fp->index, hw_cons, sw_cons, pkt_cons);
3495  
3496          bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3497  
3498          sw_cons++;
3499      }
3500  
3501      fp->tx_pkt_cons = sw_cons;
3502      fp->tx_bd_cons  = bd_cons;
3503  
3504      BLOGD(sc, DBG_TX,
3505            "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3506            fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3507  
3508      mb();
3509  
3510      tx_bd_avail = bxe_tx_avail(sc, fp);
3511  
3512      if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3513          if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3514      } else {
3515          if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3516      }
3517  
3518      if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3519          /* reset the watchdog timer if there are pending transmits */
3520          fp->watchdog_timer = BXE_TX_TIMEOUT;
3521          return (TRUE);
3522      } else {
3523          /* clear watchdog when there are no pending transmits */
3524          fp->watchdog_timer = 0;
3525          return (FALSE);
3526      }
3527  }
3528  
3529  static void
bxe_drain_tx_queues(struct bxe_softc * sc)3530  bxe_drain_tx_queues(struct bxe_softc *sc)
3531  {
3532      struct bxe_fastpath *fp;
3533      int i, count;
3534  
3535      /* wait until all TX fastpath tasks have completed */
3536      for (i = 0; i < sc->num_queues; i++) {
3537          fp = &sc->fp[i];
3538  
3539          count = 1000;
3540  
3541          while (bxe_has_tx_work(fp)) {
3542  
3543              BXE_FP_TX_LOCK(fp);
3544              bxe_txeof(sc, fp);
3545              BXE_FP_TX_UNLOCK(fp);
3546  
3547              if (count == 0) {
3548                  BLOGE(sc, "Timeout waiting for fp[%d] "
3549                            "transmits to complete!\n", i);
3550                  bxe_panic(sc, ("tx drain failure\n"));
3551                  return;
3552              }
3553  
3554              count--;
3555              DELAY(1000);
3556              rmb();
3557          }
3558      }
3559  
3560      return;
3561  }
3562  
3563  static int
bxe_del_all_macs(struct bxe_softc * sc,struct ecore_vlan_mac_obj * mac_obj,int mac_type,uint8_t wait_for_comp)3564  bxe_del_all_macs(struct bxe_softc          *sc,
3565                   struct ecore_vlan_mac_obj *mac_obj,
3566                   int                       mac_type,
3567                   uint8_t                   wait_for_comp)
3568  {
3569      unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3570      int rc;
3571  
3572      /* wait for completion of requested */
3573      if (wait_for_comp) {
3574          bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3575      }
3576  
3577      /* Set the mac type of addresses we want to clear */
3578      bxe_set_bit(mac_type, &vlan_mac_flags);
3579  
3580      rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3581      if (rc < 0) {
3582          BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3583              rc, mac_type, wait_for_comp);
3584      }
3585  
3586      return (rc);
3587  }
3588  
3589  static int
bxe_fill_accept_flags(struct bxe_softc * sc,uint32_t rx_mode,unsigned long * rx_accept_flags,unsigned long * tx_accept_flags)3590  bxe_fill_accept_flags(struct bxe_softc *sc,
3591                        uint32_t         rx_mode,
3592                        unsigned long    *rx_accept_flags,
3593                        unsigned long    *tx_accept_flags)
3594  {
3595      /* Clear the flags first */
3596      *rx_accept_flags = 0;
3597      *tx_accept_flags = 0;
3598  
3599      switch (rx_mode) {
3600      case BXE_RX_MODE_NONE:
3601          /*
3602           * 'drop all' supersedes any accept flags that may have been
3603           * passed to the function.
3604           */
3605          break;
3606  
3607      case BXE_RX_MODE_NORMAL:
3608          bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3609          bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3610          bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3611  
3612          /* internal switching mode */
3613          bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3614          bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3615          bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3616  
3617          break;
3618  
3619      case BXE_RX_MODE_ALLMULTI:
3620          bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3621          bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3622          bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3623  
3624          /* internal switching mode */
3625          bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3626          bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3627          bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3628  
3629          break;
3630  
3631      case BXE_RX_MODE_PROMISC:
3632          /*
3633           * According to deffinition of SI mode, iface in promisc mode
3634           * should receive matched and unmatched (in resolution of port)
3635           * unicast packets.
3636           */
3637          bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3638          bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3639          bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3640          bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3641  
3642          /* internal switching mode */
3643          bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3644          bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3645  
3646          if (IS_MF_SI(sc)) {
3647              bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3648          } else {
3649              bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3650          }
3651  
3652          break;
3653  
3654      default:
3655          BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3656          return (-1);
3657      }
3658  
3659      /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3660      if (rx_mode != BXE_RX_MODE_NONE) {
3661          bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3662          bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3663      }
3664  
3665      return (0);
3666  }
3667  
3668  static int
bxe_set_q_rx_mode(struct bxe_softc * sc,uint8_t cl_id,unsigned long rx_mode_flags,unsigned long rx_accept_flags,unsigned long tx_accept_flags,unsigned long ramrod_flags)3669  bxe_set_q_rx_mode(struct bxe_softc *sc,
3670                    uint8_t          cl_id,
3671                    unsigned long    rx_mode_flags,
3672                    unsigned long    rx_accept_flags,
3673                    unsigned long    tx_accept_flags,
3674                    unsigned long    ramrod_flags)
3675  {
3676      struct ecore_rx_mode_ramrod_params ramrod_param;
3677      int rc;
3678  
3679      memset(&ramrod_param, 0, sizeof(ramrod_param));
3680  
3681      /* Prepare ramrod parameters */
3682      ramrod_param.cid = 0;
3683      ramrod_param.cl_id = cl_id;
3684      ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3685      ramrod_param.func_id = SC_FUNC(sc);
3686  
3687      ramrod_param.pstate = &sc->sp_state;
3688      ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3689  
3690      ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3691      ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3692  
3693      bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3694  
3695      ramrod_param.ramrod_flags = ramrod_flags;
3696      ramrod_param.rx_mode_flags = rx_mode_flags;
3697  
3698      ramrod_param.rx_accept_flags = rx_accept_flags;
3699      ramrod_param.tx_accept_flags = tx_accept_flags;
3700  
3701      rc = ecore_config_rx_mode(sc, &ramrod_param);
3702      if (rc < 0) {
3703          BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3704              "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3705              "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3706              (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3707              (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3708          return (rc);
3709      }
3710  
3711      return (0);
3712  }
3713  
3714  static int
bxe_set_storm_rx_mode(struct bxe_softc * sc)3715  bxe_set_storm_rx_mode(struct bxe_softc *sc)
3716  {
3717      unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3718      unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3719      int rc;
3720  
3721      rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3722                                 &tx_accept_flags);
3723      if (rc) {
3724          return (rc);
3725      }
3726  
3727      bxe_set_bit(RAMROD_RX, &ramrod_flags);
3728      bxe_set_bit(RAMROD_TX, &ramrod_flags);
3729  
3730      /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3731      return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3732                                rx_accept_flags, tx_accept_flags,
3733                                ramrod_flags));
3734  }
3735  
3736  /* returns the "mcp load_code" according to global load_count array */
3737  static int
bxe_nic_load_no_mcp(struct bxe_softc * sc)3738  bxe_nic_load_no_mcp(struct bxe_softc *sc)
3739  {
3740      int path = SC_PATH(sc);
3741      int port = SC_PORT(sc);
3742  
3743      BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3744            path, load_count[path][0], load_count[path][1],
3745            load_count[path][2]);
3746      load_count[path][0]++;
3747      load_count[path][1 + port]++;
3748      BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3749            path, load_count[path][0], load_count[path][1],
3750            load_count[path][2]);
3751      if (load_count[path][0] == 1) {
3752          return (FW_MSG_CODE_DRV_LOAD_COMMON);
3753      } else if (load_count[path][1 + port] == 1) {
3754          return (FW_MSG_CODE_DRV_LOAD_PORT);
3755      } else {
3756          return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3757      }
3758  }
3759  
3760  /* returns the "mcp load_code" according to global load_count array */
3761  static int
bxe_nic_unload_no_mcp(struct bxe_softc * sc)3762  bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3763  {
3764      int port = SC_PORT(sc);
3765      int path = SC_PATH(sc);
3766  
3767      BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3768            path, load_count[path][0], load_count[path][1],
3769            load_count[path][2]);
3770      load_count[path][0]--;
3771      load_count[path][1 + port]--;
3772      BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3773            path, load_count[path][0], load_count[path][1],
3774            load_count[path][2]);
3775      if (load_count[path][0] == 0) {
3776          return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3777      } else if (load_count[path][1 + port] == 0) {
3778          return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3779      } else {
3780          return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3781      }
3782  }
3783  
3784  /* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3785  static uint32_t
bxe_send_unload_req(struct bxe_softc * sc,int unload_mode)3786  bxe_send_unload_req(struct bxe_softc *sc,
3787                      int              unload_mode)
3788  {
3789      uint32_t reset_code = 0;
3790  
3791      /* Select the UNLOAD request mode */
3792      if (unload_mode == UNLOAD_NORMAL) {
3793          reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3794      } else {
3795          reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3796      }
3797  
3798      /* Send the request to the MCP */
3799      if (!BXE_NOMCP(sc)) {
3800          reset_code = bxe_fw_command(sc, reset_code, 0);
3801      } else {
3802          reset_code = bxe_nic_unload_no_mcp(sc);
3803      }
3804  
3805      return (reset_code);
3806  }
3807  
3808  /* send UNLOAD_DONE command to the MCP */
3809  static void
bxe_send_unload_done(struct bxe_softc * sc,uint8_t keep_link)3810  bxe_send_unload_done(struct bxe_softc *sc,
3811                       uint8_t          keep_link)
3812  {
3813      uint32_t reset_param =
3814          keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3815  
3816      /* Report UNLOAD_DONE to MCP */
3817      if (!BXE_NOMCP(sc)) {
3818          bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3819      }
3820  }
3821  
3822  static int
bxe_func_wait_started(struct bxe_softc * sc)3823  bxe_func_wait_started(struct bxe_softc *sc)
3824  {
3825      int tout = 50;
3826  
3827      if (!sc->port.pmf) {
3828          return (0);
3829      }
3830  
3831      /*
3832       * (assumption: No Attention from MCP at this stage)
3833       * PMF probably in the middle of TX disable/enable transaction
3834       * 1. Sync IRS for default SB
3835       * 2. Sync SP queue - this guarantees us that attention handling started
3836       * 3. Wait, that TX disable/enable transaction completes
3837       *
3838       * 1+2 guarantee that if DCBX attention was scheduled it already changed
3839       * pending bit of transaction from STARTED-->TX_STOPPED, if we already
3840       * received completion for the transaction the state is TX_STOPPED.
3841       * State will return to STARTED after completion of TX_STOPPED-->STARTED
3842       * transaction.
3843       */
3844  
3845      /* XXX make sure default SB ISR is done */
3846      /* need a way to synchronize an irq (intr_mtx?) */
3847  
3848      /* XXX flush any work queues */
3849  
3850      while (ecore_func_get_state(sc, &sc->func_obj) !=
3851             ECORE_F_STATE_STARTED && tout--) {
3852          DELAY(20000);
3853      }
3854  
3855      if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3856          /*
3857           * Failed to complete the transaction in a "good way"
3858           * Force both transactions with CLR bit.
3859           */
3860          struct ecore_func_state_params func_params = { NULL };
3861  
3862          BLOGE(sc, "Unexpected function state! "
3863                    "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3864  
3865          func_params.f_obj = &sc->func_obj;
3866          bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3867  
3868          /* STARTED-->TX_STOPPED */
3869          func_params.cmd = ECORE_F_CMD_TX_STOP;
3870          ecore_func_state_change(sc, &func_params);
3871  
3872          /* TX_STOPPED-->STARTED */
3873          func_params.cmd = ECORE_F_CMD_TX_START;
3874          return (ecore_func_state_change(sc, &func_params));
3875      }
3876  
3877      return (0);
3878  }
3879  
3880  static int
bxe_stop_queue(struct bxe_softc * sc,int index)3881  bxe_stop_queue(struct bxe_softc *sc,
3882                 int              index)
3883  {
3884      struct bxe_fastpath *fp = &sc->fp[index];
3885      struct ecore_queue_state_params q_params = { NULL };
3886      int rc;
3887  
3888      BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3889  
3890      q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3891      /* We want to wait for completion in this context */
3892      bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3893  
3894      /* Stop the primary connection: */
3895  
3896      /* ...halt the connection */
3897      q_params.cmd = ECORE_Q_CMD_HALT;
3898      rc = ecore_queue_state_change(sc, &q_params);
3899      if (rc) {
3900          return (rc);
3901      }
3902  
3903      /* ...terminate the connection */
3904      q_params.cmd = ECORE_Q_CMD_TERMINATE;
3905      memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3906      q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3907      rc = ecore_queue_state_change(sc, &q_params);
3908      if (rc) {
3909          return (rc);
3910      }
3911  
3912      /* ...delete cfc entry */
3913      q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3914      memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3915      q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3916      return (ecore_queue_state_change(sc, &q_params));
3917  }
3918  
3919  /* wait for the outstanding SP commands */
3920  static inline uint8_t
bxe_wait_sp_comp(struct bxe_softc * sc,unsigned long mask)3921  bxe_wait_sp_comp(struct bxe_softc *sc,
3922                   unsigned long    mask)
3923  {
3924      unsigned long tmp;
3925      int tout = 5000; /* wait for 5 secs tops */
3926  
3927      while (tout--) {
3928          mb();
3929          if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3930              return (TRUE);
3931          }
3932  
3933          DELAY(1000);
3934      }
3935  
3936      mb();
3937  
3938      tmp = atomic_load_acq_long(&sc->sp_state);
3939      if (tmp & mask) {
3940          BLOGE(sc, "Filtering completion timed out: "
3941                    "sp_state 0x%lx, mask 0x%lx\n",
3942                tmp, mask);
3943          return (FALSE);
3944      }
3945  
3946      return (FALSE);
3947  }
3948  
3949  static int
bxe_func_stop(struct bxe_softc * sc)3950  bxe_func_stop(struct bxe_softc *sc)
3951  {
3952      struct ecore_func_state_params func_params = { NULL };
3953      int rc;
3954  
3955      /* prepare parameters for function state transitions */
3956      bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3957      func_params.f_obj = &sc->func_obj;
3958      func_params.cmd = ECORE_F_CMD_STOP;
3959  
3960      /*
3961       * Try to stop the function the 'good way'. If it fails (in case
3962       * of a parity error during bxe_chip_cleanup()) and we are
3963       * not in a debug mode, perform a state transaction in order to
3964       * enable further HW_RESET transaction.
3965       */
3966      rc = ecore_func_state_change(sc, &func_params);
3967      if (rc) {
3968          BLOGE(sc, "FUNC_STOP ramrod failed. "
3969                    "Running a dry transaction (%d)\n", rc);
3970          bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3971          return (ecore_func_state_change(sc, &func_params));
3972      }
3973  
3974      return (0);
3975  }
3976  
3977  static int
bxe_reset_hw(struct bxe_softc * sc,uint32_t load_code)3978  bxe_reset_hw(struct bxe_softc *sc,
3979               uint32_t         load_code)
3980  {
3981      struct ecore_func_state_params func_params = { NULL };
3982  
3983      /* Prepare parameters for function state transitions */
3984      bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3985  
3986      func_params.f_obj = &sc->func_obj;
3987      func_params.cmd = ECORE_F_CMD_HW_RESET;
3988  
3989      func_params.params.hw_init.load_phase = load_code;
3990  
3991      return (ecore_func_state_change(sc, &func_params));
3992  }
3993  
3994  static void
bxe_int_disable_sync(struct bxe_softc * sc,int disable_hw)3995  bxe_int_disable_sync(struct bxe_softc *sc,
3996                       int              disable_hw)
3997  {
3998      if (disable_hw) {
3999          /* prevent the HW from sending interrupts */
4000          bxe_int_disable(sc);
4001      }
4002  
4003      /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4004      /* make sure all ISRs are done */
4005  
4006      /* XXX make sure sp_task is not running */
4007      /* cancel and flush work queues */
4008  }
4009  
4010  static void
bxe_chip_cleanup(struct bxe_softc * sc,uint32_t unload_mode,uint8_t keep_link)4011  bxe_chip_cleanup(struct bxe_softc *sc,
4012                   uint32_t         unload_mode,
4013                   uint8_t          keep_link)
4014  {
4015      int port = SC_PORT(sc);
4016      struct ecore_mcast_ramrod_params rparam = { NULL };
4017      uint32_t reset_code;
4018      int i, rc = 0;
4019  
4020      bxe_drain_tx_queues(sc);
4021  
4022      /* give HW time to discard old tx messages */
4023      DELAY(1000);
4024  
4025      /* Clean all ETH MACs */
4026      rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4027      if (rc < 0) {
4028          BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4029      }
4030  
4031      /* Clean up UC list  */
4032      rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4033      if (rc < 0) {
4034          BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4035      }
4036  
4037      /* Disable LLH */
4038      if (!CHIP_IS_E1(sc)) {
4039          REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4040      }
4041  
4042      /* Set "drop all" to stop Rx */
4043  
4044      /*
4045       * We need to take the BXE_MCAST_LOCK() here in order to prevent
4046       * a race between the completion code and this code.
4047       */
4048      BXE_MCAST_LOCK(sc);
4049  
4050      if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4051          bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4052      } else {
4053          bxe_set_storm_rx_mode(sc);
4054      }
4055  
4056      /* Clean up multicast configuration */
4057      rparam.mcast_obj = &sc->mcast_obj;
4058      rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4059      if (rc < 0) {
4060          BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4061      }
4062  
4063      BXE_MCAST_UNLOCK(sc);
4064  
4065      // XXX bxe_iov_chip_cleanup(sc);
4066  
4067      /*
4068       * Send the UNLOAD_REQUEST to the MCP. This will return if
4069       * this function should perform FUNCTION, PORT, or COMMON HW
4070       * reset.
4071       */
4072      reset_code = bxe_send_unload_req(sc, unload_mode);
4073  
4074      /*
4075       * (assumption: No Attention from MCP at this stage)
4076       * PMF probably in the middle of TX disable/enable transaction
4077       */
4078      rc = bxe_func_wait_started(sc);
4079      if (rc) {
4080          BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4081      }
4082  
4083      /*
4084       * Close multi and leading connections
4085       * Completions for ramrods are collected in a synchronous way
4086       */
4087      for (i = 0; i < sc->num_queues; i++) {
4088          if (bxe_stop_queue(sc, i)) {
4089              goto unload_error;
4090          }
4091      }
4092  
4093      /*
4094       * If SP settings didn't get completed so far - something
4095       * very wrong has happen.
4096       */
4097      if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4098          BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4099      }
4100  
4101  unload_error:
4102  
4103      rc = bxe_func_stop(sc);
4104      if (rc) {
4105          BLOGE(sc, "Function stop failed!(%d)\n", rc);
4106      }
4107  
4108      /* disable HW interrupts */
4109      bxe_int_disable_sync(sc, TRUE);
4110  
4111      /* detach interrupts */
4112      bxe_interrupt_detach(sc);
4113  
4114      /* Reset the chip */
4115      rc = bxe_reset_hw(sc, reset_code);
4116      if (rc) {
4117          BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4118      }
4119  
4120      /* Report UNLOAD_DONE to MCP */
4121      bxe_send_unload_done(sc, keep_link);
4122  }
4123  
4124  static void
bxe_disable_close_the_gate(struct bxe_softc * sc)4125  bxe_disable_close_the_gate(struct bxe_softc *sc)
4126  {
4127      uint32_t val;
4128      int port = SC_PORT(sc);
4129  
4130      BLOGD(sc, DBG_LOAD,
4131            "Disabling 'close the gates'\n");
4132  
4133      if (CHIP_IS_E1(sc)) {
4134          uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4135                                 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4136          val = REG_RD(sc, addr);
4137          val &= ~(0x300);
4138          REG_WR(sc, addr, val);
4139      } else {
4140          val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4141          val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4142                   MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4143          REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4144      }
4145  }
4146  
4147  /*
4148   * Cleans the object that have internal lists without sending
4149   * ramrods. Should be run when interrupts are disabled.
4150   */
4151  static void
bxe_squeeze_objects(struct bxe_softc * sc)4152  bxe_squeeze_objects(struct bxe_softc *sc)
4153  {
4154      unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4155      struct ecore_mcast_ramrod_params rparam = { NULL };
4156      struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4157      int rc;
4158  
4159      /* Cleanup MACs' object first... */
4160  
4161      /* Wait for completion of requested */
4162      bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4163      /* Perform a dry cleanup */
4164      bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4165  
4166      /* Clean ETH primary MAC */
4167      bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4168      rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4169                               &ramrod_flags);
4170      if (rc != 0) {
4171          BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4172      }
4173  
4174      /* Cleanup UC list */
4175      vlan_mac_flags = 0;
4176      bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4177      rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4178                               &ramrod_flags);
4179      if (rc != 0) {
4180          BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4181      }
4182  
4183      /* Now clean mcast object... */
4184  
4185      rparam.mcast_obj = &sc->mcast_obj;
4186      bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4187  
4188      /* Add a DEL command... */
4189      rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4190      if (rc < 0) {
4191          BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4192      }
4193  
4194      /* now wait until all pending commands are cleared */
4195  
4196      rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4197      while (rc != 0) {
4198          if (rc < 0) {
4199              BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4200              return;
4201          }
4202  
4203          rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4204      }
4205  }
4206  
4207  /* stop the controller */
4208  static __noinline int
bxe_nic_unload(struct bxe_softc * sc,uint32_t unload_mode,uint8_t keep_link)4209  bxe_nic_unload(struct bxe_softc *sc,
4210                 uint32_t         unload_mode,
4211                 uint8_t          keep_link)
4212  {
4213      uint8_t global = FALSE;
4214      uint32_t val;
4215      int i;
4216  
4217      BXE_CORE_LOCK_ASSERT(sc);
4218  
4219      if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4220  
4221      for (i = 0; i < sc->num_queues; i++) {
4222          struct bxe_fastpath *fp;
4223  
4224          fp = &sc->fp[i];
4225  	fp->watchdog_timer = 0;
4226          BXE_FP_TX_LOCK(fp);
4227          BXE_FP_TX_UNLOCK(fp);
4228      }
4229  
4230      BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4231  
4232      /* mark driver as unloaded in shmem2 */
4233      if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4234          val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4235          SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4236                    val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4237      }
4238  
4239      if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4240          (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4241  
4242  	if(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
4243              /*
4244               * We can get here if the driver has been unloaded
4245               * during parity error recovery and is either waiting for a
4246               * leader to complete or for other functions to unload and
4247               * then ifconfig down has been issued. In this case we want to
4248               * unload and let other functions to complete a recovery
4249               * process.
4250               */
4251              sc->recovery_state = BXE_RECOVERY_DONE;
4252              sc->is_leader = 0;
4253              bxe_release_leader_lock(sc);
4254              mb();
4255              BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4256  	}
4257          BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4258              " state = 0x%x\n", sc->recovery_state, sc->state);
4259          return (-1);
4260      }
4261  
4262      /*
4263       * Nothing to do during unload if previous bxe_nic_load()
4264       * did not completed successfully - all resourses are released.
4265       */
4266      if ((sc->state == BXE_STATE_CLOSED) ||
4267          (sc->state == BXE_STATE_ERROR)) {
4268          return (0);
4269      }
4270  
4271      sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4272      mb();
4273  
4274      /* stop tx */
4275      bxe_tx_disable(sc);
4276  
4277      sc->rx_mode = BXE_RX_MODE_NONE;
4278      /* XXX set rx mode ??? */
4279  
4280      if (IS_PF(sc) && !sc->grcdump_done) {
4281          /* set ALWAYS_ALIVE bit in shmem */
4282          sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4283  
4284          bxe_drv_pulse(sc);
4285  
4286          bxe_stats_handle(sc, STATS_EVENT_STOP);
4287          bxe_save_statistics(sc);
4288      }
4289  
4290      /* wait till consumers catch up with producers in all queues */
4291      bxe_drain_tx_queues(sc);
4292  
4293      /* if VF indicate to PF this function is going down (PF will delete sp
4294       * elements and clear initializations
4295       */
4296      if (IS_VF(sc)) {
4297          ; /* bxe_vfpf_close_vf(sc); */
4298      } else if (unload_mode != UNLOAD_RECOVERY) {
4299          /* if this is a normal/close unload need to clean up chip */
4300          if (!sc->grcdump_done)
4301              bxe_chip_cleanup(sc, unload_mode, keep_link);
4302      } else {
4303          /* Send the UNLOAD_REQUEST to the MCP */
4304          bxe_send_unload_req(sc, unload_mode);
4305  
4306          /*
4307           * Prevent transactions to host from the functions on the
4308           * engine that doesn't reset global blocks in case of global
4309           * attention once gloabl blocks are reset and gates are opened
4310           * (the engine which leader will perform the recovery
4311           * last).
4312           */
4313          if (!CHIP_IS_E1x(sc)) {
4314              bxe_pf_disable(sc);
4315          }
4316  
4317          /* disable HW interrupts */
4318          bxe_int_disable_sync(sc, TRUE);
4319  
4320          /* detach interrupts */
4321          bxe_interrupt_detach(sc);
4322  
4323          /* Report UNLOAD_DONE to MCP */
4324          bxe_send_unload_done(sc, FALSE);
4325      }
4326  
4327      /*
4328       * At this stage no more interrupts will arrive so we may safely clean
4329       * the queue'able objects here in case they failed to get cleaned so far.
4330       */
4331      if (IS_PF(sc)) {
4332          bxe_squeeze_objects(sc);
4333      }
4334  
4335      /* There should be no more pending SP commands at this stage */
4336      sc->sp_state = 0;
4337  
4338      sc->port.pmf = 0;
4339  
4340      bxe_free_fp_buffers(sc);
4341  
4342      if (IS_PF(sc)) {
4343          bxe_free_mem(sc);
4344      }
4345  
4346      bxe_free_fw_stats_mem(sc);
4347  
4348      sc->state = BXE_STATE_CLOSED;
4349  
4350      /*
4351       * Check if there are pending parity attentions. If there are - set
4352       * RECOVERY_IN_PROGRESS.
4353       */
4354      if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4355          bxe_set_reset_in_progress(sc);
4356  
4357          /* Set RESET_IS_GLOBAL if needed */
4358          if (global) {
4359              bxe_set_reset_global(sc);
4360          }
4361      }
4362  
4363      /*
4364       * The last driver must disable a "close the gate" if there is no
4365       * parity attention or "process kill" pending.
4366       */
4367      if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4368          bxe_reset_is_done(sc, SC_PATH(sc))) {
4369          bxe_disable_close_the_gate(sc);
4370      }
4371  
4372      BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4373  
4374      bxe_link_report(sc);
4375  
4376      return (0);
4377  }
4378  
4379  /*
4380   * Called by the OS to set various media options (i.e. link, speed, etc.) when
4381   * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4382   */
4383  static int
bxe_ifmedia_update(if_t ifp)4384  bxe_ifmedia_update(if_t ifp)
4385  {
4386      struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4387      struct ifmedia *ifm;
4388  
4389      ifm = &sc->ifmedia;
4390  
4391      /* We only support Ethernet media type. */
4392      if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4393          return (EINVAL);
4394      }
4395  
4396      switch (IFM_SUBTYPE(ifm->ifm_media)) {
4397      case IFM_AUTO:
4398           break;
4399      case IFM_10G_CX4:
4400      case IFM_10G_SR:
4401      case IFM_10G_T:
4402      case IFM_10G_TWINAX:
4403      default:
4404          /* We don't support changing the media type. */
4405          BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4406                IFM_SUBTYPE(ifm->ifm_media));
4407          return (EINVAL);
4408      }
4409  
4410      return (0);
4411  }
4412  
4413  /*
4414   * Called by the OS to get the current media status (i.e. link, speed, etc.).
4415   */
4416  static void
bxe_ifmedia_status(if_t ifp,struct ifmediareq * ifmr)4417  bxe_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
4418  {
4419      struct bxe_softc *sc = if_getsoftc(ifp);
4420  
4421      /* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..."
4422         line if the IFM_AVALID flag is *NOT* set. So we need to set this
4423         flag unconditionally (irrespective of the admininistrative
4424         'up/down' state of the interface) to ensure that the line is always
4425         displayed.
4426      */
4427      ifmr->ifm_status = IFM_AVALID;
4428  
4429      /* Setup the default interface info. */
4430      ifmr->ifm_active = IFM_ETHER;
4431  
4432      /* Report link down if the driver isn't running. */
4433      if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4434          ifmr->ifm_active |= IFM_NONE;
4435          BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__);
4436          BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n",
4437                  __func__, sc->link_vars.link_up);
4438          return;
4439      }
4440  
4441  
4442      if (sc->link_vars.link_up) {
4443          ifmr->ifm_status |= IFM_ACTIVE;
4444          ifmr->ifm_active |= IFM_FDX;
4445      } else {
4446          ifmr->ifm_active |= IFM_NONE;
4447          BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n",
4448                  __func__);
4449          return;
4450      }
4451  
4452      ifmr->ifm_active |= sc->media;
4453      return;
4454  }
4455  
4456  static void
bxe_handle_chip_tq(void * context,int pending)4457  bxe_handle_chip_tq(void *context,
4458                     int  pending)
4459  {
4460      struct bxe_softc *sc = (struct bxe_softc *)context;
4461      long work = atomic_load_acq_long(&sc->chip_tq_flags);
4462  
4463      switch (work)
4464      {
4465  
4466      case CHIP_TQ_REINIT:
4467          if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4468              /* restart the interface */
4469              BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4470              bxe_periodic_stop(sc);
4471              BXE_CORE_LOCK(sc);
4472              bxe_stop_locked(sc);
4473              bxe_init_locked(sc);
4474              BXE_CORE_UNLOCK(sc);
4475          }
4476          break;
4477  
4478      default:
4479          break;
4480      }
4481  }
4482  
4483  /*
4484   * Handles any IOCTL calls from the operating system.
4485   *
4486   * Returns:
4487   *   0 = Success, >0 Failure
4488   */
4489  static int
bxe_ioctl(if_t ifp,u_long command,caddr_t data)4490  bxe_ioctl(if_t ifp,
4491            u_long       command,
4492            caddr_t      data)
4493  {
4494      struct bxe_softc *sc = if_getsoftc(ifp);
4495      struct ifreq *ifr = (struct ifreq *)data;
4496      int mask = 0;
4497      int reinit = 0;
4498      int error = 0;
4499  
4500      int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4501      int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4502  
4503      switch (command)
4504      {
4505      case SIOCSIFMTU:
4506          BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4507                ifr->ifr_mtu);
4508  
4509          if (sc->mtu == ifr->ifr_mtu) {
4510              /* nothing to change */
4511              break;
4512          }
4513  
4514          if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4515              BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4516                    ifr->ifr_mtu, mtu_min, mtu_max);
4517              error = EINVAL;
4518              break;
4519          }
4520  
4521          atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4522                               (unsigned long)ifr->ifr_mtu);
4523  	/*
4524          atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4525                                (unsigned long)ifr->ifr_mtu);
4526  	XXX - Not sure why it needs to be atomic
4527  	*/
4528  	if_setmtu(ifp, ifr->ifr_mtu);
4529          reinit = 1;
4530          break;
4531  
4532      case SIOCSIFFLAGS:
4533          /* toggle the interface state up or down */
4534          BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4535  
4536  	BXE_CORE_LOCK(sc);
4537          /* check if the interface is up */
4538          if (if_getflags(ifp) & IFF_UP) {
4539              if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4540                  /* set the receive mode flags */
4541                  bxe_set_rx_mode(sc);
4542              } else if(sc->state != BXE_STATE_DISABLED) {
4543  		bxe_init_locked(sc);
4544              }
4545          } else {
4546              if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4547  		bxe_periodic_stop(sc);
4548  		bxe_stop_locked(sc);
4549              }
4550          }
4551  	BXE_CORE_UNLOCK(sc);
4552  
4553          break;
4554  
4555      case SIOCADDMULTI:
4556      case SIOCDELMULTI:
4557          /* add/delete multicast addresses */
4558          BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4559  
4560          /* check if the interface is up */
4561          if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4562              /* set the receive mode flags */
4563  	    BXE_CORE_LOCK(sc);
4564              bxe_set_rx_mode(sc);
4565  	    BXE_CORE_UNLOCK(sc);
4566          }
4567  
4568          break;
4569  
4570      case SIOCSIFCAP:
4571          /* find out which capabilities have changed */
4572          mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4573  
4574          BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4575                mask);
4576  
4577          /* toggle the LRO capabilites enable flag */
4578          if (mask & IFCAP_LRO) {
4579  	    if_togglecapenable(ifp, IFCAP_LRO);
4580              BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4581                    (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4582              reinit = 1;
4583          }
4584  
4585          /* toggle the TXCSUM checksum capabilites enable flag */
4586          if (mask & IFCAP_TXCSUM) {
4587  	    if_togglecapenable(ifp, IFCAP_TXCSUM);
4588              BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4589                    (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4590              if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4591                  if_sethwassistbits(ifp, (CSUM_IP      |
4592                                      CSUM_TCP      |
4593                                      CSUM_UDP      |
4594                                      CSUM_TSO      |
4595                                      CSUM_TCP_IPV6 |
4596                                      CSUM_UDP_IPV6), 0);
4597              } else {
4598  		if_clearhwassist(ifp); /* XXX */
4599              }
4600          }
4601  
4602          /* toggle the RXCSUM checksum capabilities enable flag */
4603          if (mask & IFCAP_RXCSUM) {
4604  	    if_togglecapenable(ifp, IFCAP_RXCSUM);
4605              BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4606                    (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4607              if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4608                  if_sethwassistbits(ifp, (CSUM_IP      |
4609                                      CSUM_TCP      |
4610                                      CSUM_UDP      |
4611                                      CSUM_TSO      |
4612                                      CSUM_TCP_IPV6 |
4613                                      CSUM_UDP_IPV6), 0);
4614              } else {
4615  		if_clearhwassist(ifp); /* XXX */
4616              }
4617          }
4618  
4619          /* toggle TSO4 capabilities enabled flag */
4620          if (mask & IFCAP_TSO4) {
4621              if_togglecapenable(ifp, IFCAP_TSO4);
4622              BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4623                    (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4624          }
4625  
4626          /* toggle TSO6 capabilities enabled flag */
4627          if (mask & IFCAP_TSO6) {
4628  	    if_togglecapenable(ifp, IFCAP_TSO6);
4629              BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4630                    (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4631          }
4632  
4633          /* toggle VLAN_HWTSO capabilities enabled flag */
4634          if (mask & IFCAP_VLAN_HWTSO) {
4635  
4636  	    if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4637              BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4638                    (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4639          }
4640  
4641          /* toggle VLAN_HWCSUM capabilities enabled flag */
4642          if (mask & IFCAP_VLAN_HWCSUM) {
4643              /* XXX investigate this... */
4644              BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4645              error = EINVAL;
4646          }
4647  
4648          /* toggle VLAN_MTU capabilities enable flag */
4649          if (mask & IFCAP_VLAN_MTU) {
4650              /* XXX investigate this... */
4651              BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4652              error = EINVAL;
4653          }
4654  
4655          /* toggle VLAN_HWTAGGING capabilities enabled flag */
4656          if (mask & IFCAP_VLAN_HWTAGGING) {
4657              /* XXX investigate this... */
4658              BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4659              error = EINVAL;
4660          }
4661  
4662          /* toggle VLAN_HWFILTER capabilities enabled flag */
4663          if (mask & IFCAP_VLAN_HWFILTER) {
4664              /* XXX investigate this... */
4665              BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4666              error = EINVAL;
4667          }
4668  
4669          /* XXX not yet...
4670           * IFCAP_WOL_MAGIC
4671           */
4672  
4673          break;
4674  
4675      case SIOCSIFMEDIA:
4676      case SIOCGIFMEDIA:
4677          /* set/get interface media */
4678          BLOGD(sc, DBG_IOCTL,
4679                "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4680                (command & 0xff));
4681          error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4682          break;
4683  
4684      default:
4685          BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4686                (command & 0xff));
4687          error = ether_ioctl(ifp, command, data);
4688          break;
4689      }
4690  
4691      if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4692          BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4693                "Re-initializing hardware from IOCTL change\n");
4694  	bxe_periodic_stop(sc);
4695  	BXE_CORE_LOCK(sc);
4696  	bxe_stop_locked(sc);
4697  	bxe_init_locked(sc);
4698  	BXE_CORE_UNLOCK(sc);
4699      }
4700  
4701      return (error);
4702  }
4703  
4704  static __noinline void
bxe_dump_mbuf(struct bxe_softc * sc,struct mbuf * m,uint8_t contents)4705  bxe_dump_mbuf(struct bxe_softc *sc,
4706                struct mbuf      *m,
4707                uint8_t          contents)
4708  {
4709      char * type;
4710      int i = 0;
4711  
4712      if (!(sc->debug & DBG_MBUF)) {
4713          return;
4714      }
4715  
4716      if (m == NULL) {
4717          BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4718          return;
4719      }
4720  
4721      while (m) {
4722  
4723          BLOGD(sc, DBG_MBUF,
4724                "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4725                i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4726  
4727          if (m->m_flags & M_PKTHDR) {
4728               BLOGD(sc, DBG_MBUF,
4729                     "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4730                     i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4731                     (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4732          }
4733  
4734          if (m->m_flags & M_EXT) {
4735              switch (m->m_ext.ext_type) {
4736              case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4737              case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4738              case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4739              case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4740              case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4741              case EXT_PACKET:     type = "EXT_PACKET";     break;
4742              case EXT_MBUF:       type = "EXT_MBUF";       break;
4743              case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4744              case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4745              case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4746              case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4747              default:             type = "UNKNOWN";        break;
4748              }
4749  
4750              BLOGD(sc, DBG_MBUF,
4751                    "%02d: - m_ext: %p ext_size=%d type=%s\n",
4752                    i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4753          }
4754  
4755          if (contents) {
4756              bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4757          }
4758  
4759          m = m->m_next;
4760          i++;
4761      }
4762  }
4763  
4764  /*
4765   * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4766   * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4767   * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD
4768   * The headers comes in a separate bd in FreeBSD so 13-3=10.
4769   * Returns: 0 if OK to send, 1 if packet needs further defragmentation
4770   */
4771  static int
bxe_chktso_window(struct bxe_softc * sc,int nsegs,bus_dma_segment_t * segs,struct mbuf * m)4772  bxe_chktso_window(struct bxe_softc  *sc,
4773                    int               nsegs,
4774                    bus_dma_segment_t *segs,
4775                    struct mbuf       *m)
4776  {
4777      uint32_t num_wnds, wnd_size, wnd_sum;
4778      int32_t frag_idx, wnd_idx;
4779      unsigned short lso_mss;
4780  
4781      wnd_sum = 0;
4782      wnd_size = 10;
4783      num_wnds = nsegs - wnd_size;
4784      lso_mss = htole16(m->m_pkthdr.tso_segsz);
4785  
4786      /*
4787       * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the
4788       * first window sum of data while skipping the first assuming it is the
4789       * header in FreeBSD.
4790       */
4791      for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4792          wnd_sum += htole16(segs[frag_idx].ds_len);
4793      }
4794  
4795      /* check the first 10 bd window size */
4796      if (wnd_sum < lso_mss) {
4797          return (1);
4798      }
4799  
4800      /* run through the windows */
4801      for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4802          /* subtract the first mbuf->m_len of the last wndw(-header) */
4803          wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4804          /* add the next mbuf len to the len of our new window */
4805          wnd_sum += htole16(segs[frag_idx].ds_len);
4806          if (wnd_sum < lso_mss) {
4807              return (1);
4808          }
4809      }
4810  
4811      return (0);
4812  }
4813  
4814  static uint8_t
bxe_set_pbd_csum_e2(struct bxe_fastpath * fp,struct mbuf * m,uint32_t * parsing_data)4815  bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4816                      struct mbuf         *m,
4817                      uint32_t            *parsing_data)
4818  {
4819      struct ether_vlan_header *eh = NULL;
4820      struct ip *ip4 = NULL;
4821      struct ip6_hdr *ip6 = NULL;
4822      caddr_t ip = NULL;
4823      struct tcphdr *th = NULL;
4824      int e_hlen, ip_hlen, l4_off;
4825      uint16_t proto;
4826  
4827      if (m->m_pkthdr.csum_flags == CSUM_IP) {
4828          /* no L4 checksum offload needed */
4829          return (0);
4830      }
4831  
4832      /* get the Ethernet header */
4833      eh = mtod(m, struct ether_vlan_header *);
4834  
4835      /* handle VLAN encapsulation if present */
4836      if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4837          e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4838          proto  = ntohs(eh->evl_proto);
4839      } else {
4840          e_hlen = ETHER_HDR_LEN;
4841          proto  = ntohs(eh->evl_encap_proto);
4842      }
4843  
4844      switch (proto) {
4845      case ETHERTYPE_IP:
4846          /* get the IP header, if mbuf len < 20 then header in next mbuf */
4847          ip4 = (m->m_len < sizeof(struct ip)) ?
4848                    (struct ip *)m->m_next->m_data :
4849                    (struct ip *)(m->m_data + e_hlen);
4850          /* ip_hl is number of 32-bit words */
4851          ip_hlen = (ip4->ip_hl << 2);
4852          ip = (caddr_t)ip4;
4853          break;
4854      case ETHERTYPE_IPV6:
4855          /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4856          ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4857                    (struct ip6_hdr *)m->m_next->m_data :
4858                    (struct ip6_hdr *)(m->m_data + e_hlen);
4859          /* XXX cannot support offload with IPv6 extensions */
4860          ip_hlen = sizeof(struct ip6_hdr);
4861          ip = (caddr_t)ip6;
4862          break;
4863      default:
4864          /* We can't offload in this case... */
4865          /* XXX error stat ??? */
4866          return (0);
4867      }
4868  
4869      /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4870      l4_off = (e_hlen + ip_hlen);
4871  
4872      *parsing_data |=
4873          (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4874           ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4875  
4876      if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4877                                    CSUM_TSO |
4878                                    CSUM_TCP_IPV6)) {
4879          fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4880          th = (struct tcphdr *)(ip + ip_hlen);
4881          /* th_off is number of 32-bit words */
4882          *parsing_data |= ((th->th_off <<
4883                             ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4884                            ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4885          return (l4_off + (th->th_off << 2)); /* entire header length */
4886      } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4887                                           CSUM_UDP_IPV6)) {
4888          fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4889          return (l4_off + sizeof(struct udphdr)); /* entire header length */
4890      } else {
4891          /* XXX error stat ??? */
4892          return (0);
4893      }
4894  }
4895  
4896  static uint8_t
bxe_set_pbd_csum(struct bxe_fastpath * fp,struct mbuf * m,struct eth_tx_parse_bd_e1x * pbd)4897  bxe_set_pbd_csum(struct bxe_fastpath        *fp,
4898                   struct mbuf                *m,
4899                   struct eth_tx_parse_bd_e1x *pbd)
4900  {
4901      struct ether_vlan_header *eh = NULL;
4902      struct ip *ip4 = NULL;
4903      struct ip6_hdr *ip6 = NULL;
4904      caddr_t ip = NULL;
4905      struct tcphdr *th = NULL;
4906      struct udphdr *uh = NULL;
4907      int e_hlen, ip_hlen;
4908      uint16_t proto;
4909      uint8_t hlen;
4910      uint16_t tmp_csum;
4911      uint32_t *tmp_uh;
4912  
4913      /* get the Ethernet header */
4914      eh = mtod(m, struct ether_vlan_header *);
4915  
4916      /* handle VLAN encapsulation if present */
4917      if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4918          e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4919          proto  = ntohs(eh->evl_proto);
4920      } else {
4921          e_hlen = ETHER_HDR_LEN;
4922          proto  = ntohs(eh->evl_encap_proto);
4923      }
4924  
4925      switch (proto) {
4926      case ETHERTYPE_IP:
4927          /* get the IP header, if mbuf len < 20 then header in next mbuf */
4928          ip4 = (m->m_len < sizeof(struct ip)) ?
4929                    (struct ip *)m->m_next->m_data :
4930                    (struct ip *)(m->m_data + e_hlen);
4931          /* ip_hl is number of 32-bit words */
4932          ip_hlen = (ip4->ip_hl << 1);
4933          ip = (caddr_t)ip4;
4934          break;
4935      case ETHERTYPE_IPV6:
4936          /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4937          ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4938                    (struct ip6_hdr *)m->m_next->m_data :
4939                    (struct ip6_hdr *)(m->m_data + e_hlen);
4940          /* XXX cannot support offload with IPv6 extensions */
4941          ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4942          ip = (caddr_t)ip6;
4943          break;
4944      default:
4945          /* We can't offload in this case... */
4946          /* XXX error stat ??? */
4947          return (0);
4948      }
4949  
4950      hlen = (e_hlen >> 1);
4951  
4952      /* note that rest of global_data is indirectly zeroed here */
4953      if (m->m_flags & M_VLANTAG) {
4954          pbd->global_data =
4955              htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4956      } else {
4957          pbd->global_data = htole16(hlen);
4958      }
4959  
4960      pbd->ip_hlen_w = ip_hlen;
4961  
4962      hlen += pbd->ip_hlen_w;
4963  
4964      /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4965  
4966      if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4967                                    CSUM_TSO |
4968                                    CSUM_TCP_IPV6)) {
4969          th = (struct tcphdr *)(ip + (ip_hlen << 1));
4970          /* th_off is number of 32-bit words */
4971          hlen += (uint16_t)(th->th_off << 1);
4972      } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4973                                           CSUM_UDP_IPV6)) {
4974          uh = (struct udphdr *)(ip + (ip_hlen << 1));
4975          hlen += (sizeof(struct udphdr) / 2);
4976      } else {
4977          /* valid case as only CSUM_IP was set */
4978          return (0);
4979      }
4980  
4981      pbd->total_hlen_w = htole16(hlen);
4982  
4983      if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4984                                    CSUM_TSO |
4985                                    CSUM_TCP_IPV6)) {
4986          fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4987          pbd->tcp_pseudo_csum = ntohs(th->th_sum);
4988      } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4989                                           CSUM_UDP_IPV6)) {
4990          fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4991  
4992          /*
4993           * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
4994           * checksums and does not know anything about the UDP header and where
4995           * the checksum field is located. It only knows about TCP. Therefore
4996           * we "lie" to the hardware for outgoing UDP packets w/ checksum
4997           * offload. Since the checksum field offset for TCP is 16 bytes and
4998           * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
4999           * bytes less than the start of the UDP header. This allows the
5000           * hardware to write the checksum in the correct spot. But the
5001           * hardware will compute a checksum which includes the last 10 bytes
5002           * of the IP header. To correct this we tweak the stack computed
5003           * pseudo checksum by folding in the calculation of the inverse
5004           * checksum for those final 10 bytes of the IP header. This allows
5005           * the correct checksum to be computed by the hardware.
5006           */
5007  
5008          /* set pointer 10 bytes before UDP header */
5009          tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5010  
5011          /* calculate a pseudo header checksum over the first 10 bytes */
5012          tmp_csum = in_pseudo(*tmp_uh,
5013                               *(tmp_uh + 1),
5014                               *(uint16_t *)(tmp_uh + 2));
5015  
5016          pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5017      }
5018  
5019      return (hlen * 2); /* entire header length, number of bytes */
5020  }
5021  
5022  static void
bxe_set_pbd_lso_e2(struct mbuf * m,uint32_t * parsing_data)5023  bxe_set_pbd_lso_e2(struct mbuf *m,
5024                     uint32_t    *parsing_data)
5025  {
5026      *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5027                         ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5028                        ETH_TX_PARSE_BD_E2_LSO_MSS);
5029  
5030      /* XXX test for IPv6 with extension header... */
5031  }
5032  
5033  static void
bxe_set_pbd_lso(struct mbuf * m,struct eth_tx_parse_bd_e1x * pbd)5034  bxe_set_pbd_lso(struct mbuf                *m,
5035                  struct eth_tx_parse_bd_e1x *pbd)
5036  {
5037      struct ether_vlan_header *eh = NULL;
5038      struct ip *ip = NULL;
5039      struct tcphdr *th = NULL;
5040      int e_hlen;
5041  
5042      /* get the Ethernet header */
5043      eh = mtod(m, struct ether_vlan_header *);
5044  
5045      /* handle VLAN encapsulation if present */
5046      e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5047                   (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5048  
5049      /* get the IP and TCP header, with LSO entire header in first mbuf */
5050      /* XXX assuming IPv4 */
5051      ip = (struct ip *)(m->m_data + e_hlen);
5052      th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5053  
5054      pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5055      pbd->tcp_send_seq = ntohl(th->th_seq);
5056      pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5057  
5058  #if 1
5059          /* XXX IPv4 */
5060          pbd->ip_id = ntohs(ip->ip_id);
5061          pbd->tcp_pseudo_csum =
5062              ntohs(in_pseudo(ip->ip_src.s_addr,
5063                              ip->ip_dst.s_addr,
5064                              htons(IPPROTO_TCP)));
5065  #else
5066          /* XXX IPv6 */
5067          pbd->tcp_pseudo_csum =
5068              ntohs(in_pseudo(&ip6->ip6_src,
5069                              &ip6->ip6_dst,
5070                              htons(IPPROTO_TCP)));
5071  #endif
5072  
5073      pbd->global_data |=
5074          htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5075  }
5076  
5077  /*
5078   * Encapsulte an mbuf cluster into the tx bd chain and makes the memory
5079   * visible to the controller.
5080   *
5081   * If an mbuf is submitted to this routine and cannot be given to the
5082   * controller (e.g. it has too many fragments) then the function may free
5083   * the mbuf and return to the caller.
5084   *
5085   * Returns:
5086   *   0 = Success, !0 = Failure
5087   *   Note the side effect that an mbuf may be freed if it causes a problem.
5088   */
5089  static int
bxe_tx_encap(struct bxe_fastpath * fp,struct mbuf ** m_head)5090  bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5091  {
5092      bus_dma_segment_t segs[32];
5093      struct mbuf *m0;
5094      struct bxe_sw_tx_bd *tx_buf;
5095      struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5096      struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5097      /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5098      struct eth_tx_bd *tx_data_bd;
5099      struct eth_tx_bd *tx_total_pkt_size_bd;
5100      struct eth_tx_start_bd *tx_start_bd;
5101      uint16_t bd_prod, pkt_prod, total_pkt_size;
5102      uint8_t mac_type;
5103      int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5104      struct bxe_softc *sc;
5105      uint16_t tx_bd_avail;
5106      struct ether_vlan_header *eh;
5107      uint32_t pbd_e2_parsing_data = 0;
5108      uint8_t hlen = 0;
5109      int tmp_bd;
5110      int i;
5111  
5112      sc = fp->sc;
5113  
5114      M_ASSERTPKTHDR(*m_head);
5115  
5116      m0 = *m_head;
5117      rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5118      tx_start_bd = NULL;
5119      tx_data_bd = NULL;
5120      tx_total_pkt_size_bd = NULL;
5121  
5122      /* get the H/W pointer for packets and BDs */
5123      pkt_prod = fp->tx_pkt_prod;
5124      bd_prod = fp->tx_bd_prod;
5125  
5126      mac_type = UNICAST_ADDRESS;
5127  
5128      /* map the mbuf into the next open DMAable memory */
5129      tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5130      error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5131                                      tx_buf->m_map, m0,
5132                                      segs, &nsegs, BUS_DMA_NOWAIT);
5133  
5134      /* mapping errors */
5135      if(__predict_false(error != 0)) {
5136          fp->eth_q_stats.tx_dma_mapping_failure++;
5137          if (error == ENOMEM) {
5138              /* resource issue, try again later */
5139              rc = ENOMEM;
5140          } else if (error == EFBIG) {
5141              /* possibly recoverable with defragmentation */
5142              fp->eth_q_stats.mbuf_defrag_attempts++;
5143              m0 = m_defrag(*m_head, M_NOWAIT);
5144              if (m0 == NULL) {
5145                  fp->eth_q_stats.mbuf_defrag_failures++;
5146                  rc = ENOBUFS;
5147              } else {
5148                  /* defrag successful, try mapping again */
5149                  *m_head = m0;
5150                  error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5151                                                  tx_buf->m_map, m0,
5152                                                  segs, &nsegs, BUS_DMA_NOWAIT);
5153                  if (error) {
5154                      fp->eth_q_stats.tx_dma_mapping_failure++;
5155                      rc = error;
5156                  }
5157              }
5158          } else {
5159              /* unknown, unrecoverable mapping error */
5160              BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5161              bxe_dump_mbuf(sc, m0, FALSE);
5162              rc = error;
5163          }
5164  
5165          goto bxe_tx_encap_continue;
5166      }
5167  
5168      tx_bd_avail = bxe_tx_avail(sc, fp);
5169  
5170      /* make sure there is enough room in the send queue */
5171      if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5172          /* Recoverable, try again later. */
5173          fp->eth_q_stats.tx_hw_queue_full++;
5174          bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5175          rc = ENOMEM;
5176          goto bxe_tx_encap_continue;
5177      }
5178  
5179      /* capture the current H/W TX chain high watermark */
5180      if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5181                          (TX_BD_USABLE - tx_bd_avail))) {
5182          fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5183      }
5184  
5185      /* make sure it fits in the packet window */
5186      if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5187          /*
5188           * The mbuf may be to big for the controller to handle. If the frame
5189           * is a TSO frame we'll need to do an additional check.
5190           */
5191          if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5192              if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5193                  goto bxe_tx_encap_continue; /* OK to send */
5194              } else {
5195                  fp->eth_q_stats.tx_window_violation_tso++;
5196              }
5197          } else {
5198              fp->eth_q_stats.tx_window_violation_std++;
5199          }
5200  
5201          /* lets try to defragment this mbuf and remap it */
5202          fp->eth_q_stats.mbuf_defrag_attempts++;
5203          bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5204  
5205          m0 = m_defrag(*m_head, M_NOWAIT);
5206          if (m0 == NULL) {
5207              fp->eth_q_stats.mbuf_defrag_failures++;
5208              /* Ugh, just drop the frame... :( */
5209              rc = ENOBUFS;
5210          } else {
5211              /* defrag successful, try mapping again */
5212              *m_head = m0;
5213              error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5214                                              tx_buf->m_map, m0,
5215                                              segs, &nsegs, BUS_DMA_NOWAIT);
5216              if (error) {
5217                  fp->eth_q_stats.tx_dma_mapping_failure++;
5218                  /* No sense in trying to defrag/copy chain, drop it. :( */
5219                  rc = error;
5220              } else {
5221                 /* if the chain is still too long then drop it */
5222                  if(m0->m_pkthdr.csum_flags & CSUM_TSO) {
5223                      /*
5224                       * in case TSO is enabled nsegs should be checked against
5225                       * BXE_TSO_MAX_SEGMENTS
5226                       */
5227                      if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
5228                          bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5229                          fp->eth_q_stats.nsegs_path1_errors++;
5230                          rc = ENODEV;
5231                      }
5232                  } else {
5233                      if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5234                          bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5235                          fp->eth_q_stats.nsegs_path2_errors++;
5236                          rc = ENODEV;
5237                      }
5238                  }
5239              }
5240          }
5241      }
5242  
5243  bxe_tx_encap_continue:
5244  
5245      /* Check for errors */
5246      if (rc) {
5247          if (rc == ENOMEM) {
5248              /* recoverable try again later  */
5249          } else {
5250              fp->eth_q_stats.tx_soft_errors++;
5251              fp->eth_q_stats.mbuf_alloc_tx--;
5252              m_freem(*m_head);
5253              *m_head = NULL;
5254          }
5255  
5256          return (rc);
5257      }
5258  
5259      /* set flag according to packet type (UNICAST_ADDRESS is default) */
5260      if (m0->m_flags & M_BCAST) {
5261          mac_type = BROADCAST_ADDRESS;
5262      } else if (m0->m_flags & M_MCAST) {
5263          mac_type = MULTICAST_ADDRESS;
5264      }
5265  
5266      /* store the mbuf into the mbuf ring */
5267      tx_buf->m        = m0;
5268      tx_buf->first_bd = fp->tx_bd_prod;
5269      tx_buf->flags    = 0;
5270  
5271      /* prepare the first transmit (start) BD for the mbuf */
5272      tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5273  
5274      BLOGD(sc, DBG_TX,
5275            "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5276            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5277  
5278      tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5279      tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5280      tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5281      total_pkt_size += tx_start_bd->nbytes;
5282      tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5283  
5284      tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5285  
5286      /* all frames have at least Start BD + Parsing BD */
5287      nbds = nsegs + 1;
5288      tx_start_bd->nbd = htole16(nbds);
5289  
5290      if (m0->m_flags & M_VLANTAG) {
5291          tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5292          tx_start_bd->bd_flags.as_bitfield |=
5293              (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5294      } else {
5295          /* vf tx, start bd must hold the ethertype for fw to enforce it */
5296          if (IS_VF(sc)) {
5297              /* map ethernet header to find type and header length */
5298              eh = mtod(m0, struct ether_vlan_header *);
5299              tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5300          } else {
5301              /* used by FW for packet accounting */
5302              tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5303          }
5304      }
5305  
5306      /*
5307       * add a parsing BD from the chain. The parsing BD is always added
5308       * though it is only used for TSO and chksum
5309       */
5310      bd_prod = TX_BD_NEXT(bd_prod);
5311  
5312      if (m0->m_pkthdr.csum_flags) {
5313          if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5314              fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5315              tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5316          }
5317  
5318          if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5319              tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5320                                                    ETH_TX_BD_FLAGS_L4_CSUM);
5321          } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5322              tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5323                                                    ETH_TX_BD_FLAGS_IS_UDP |
5324                                                    ETH_TX_BD_FLAGS_L4_CSUM);
5325          } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5326                     (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5327              tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5328          } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5329              tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5330                                                    ETH_TX_BD_FLAGS_IS_UDP);
5331          }
5332      }
5333  
5334      if (!CHIP_IS_E1x(sc)) {
5335          pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5336          memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5337  
5338          if (m0->m_pkthdr.csum_flags) {
5339              hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5340          }
5341  
5342          SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5343                   mac_type);
5344      } else {
5345          uint16_t global_data = 0;
5346  
5347          pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5348          memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5349  
5350          if (m0->m_pkthdr.csum_flags) {
5351              hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5352          }
5353  
5354          SET_FLAG(global_data,
5355                   ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5356          pbd_e1x->global_data |= htole16(global_data);
5357      }
5358  
5359      /* setup the parsing BD with TSO specific info */
5360      if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5361          fp->eth_q_stats.tx_ofld_frames_lso++;
5362          tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5363  
5364          if (__predict_false(tx_start_bd->nbytes > hlen)) {
5365              fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5366  
5367              /* split the first BD into header/data making the fw job easy */
5368              nbds++;
5369              tx_start_bd->nbd = htole16(nbds);
5370              tx_start_bd->nbytes = htole16(hlen);
5371  
5372              bd_prod = TX_BD_NEXT(bd_prod);
5373  
5374              /* new transmit BD after the tx_parse_bd */
5375              tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5376              tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5377              tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5378              tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5379              if (tx_total_pkt_size_bd == NULL) {
5380                  tx_total_pkt_size_bd = tx_data_bd;
5381              }
5382  
5383              BLOGD(sc, DBG_TX,
5384                    "TSO split header size is %d (%x:%x) nbds %d\n",
5385                    le16toh(tx_start_bd->nbytes),
5386                    le32toh(tx_start_bd->addr_hi),
5387                    le32toh(tx_start_bd->addr_lo),
5388                    nbds);
5389          }
5390  
5391          if (!CHIP_IS_E1x(sc)) {
5392              bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5393          } else {
5394              bxe_set_pbd_lso(m0, pbd_e1x);
5395          }
5396      }
5397  
5398      if (pbd_e2_parsing_data) {
5399          pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5400      }
5401  
5402      /* prepare remaining BDs, start tx bd contains first seg/frag */
5403      for (i = 1; i < nsegs ; i++) {
5404          bd_prod = TX_BD_NEXT(bd_prod);
5405          tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5406          tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5407          tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5408          tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5409          if (tx_total_pkt_size_bd == NULL) {
5410              tx_total_pkt_size_bd = tx_data_bd;
5411          }
5412          total_pkt_size += tx_data_bd->nbytes;
5413      }
5414  
5415      BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5416  
5417      if (tx_total_pkt_size_bd != NULL) {
5418          tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5419      }
5420  
5421      if (__predict_false(sc->debug & DBG_TX)) {
5422          tmp_bd = tx_buf->first_bd;
5423          for (i = 0; i < nbds; i++)
5424          {
5425              if (i == 0) {
5426                  BLOGD(sc, DBG_TX,
5427                        "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5428                        "bd_flags=0x%x hdr_nbds=%d\n",
5429                        tx_start_bd,
5430                        tmp_bd,
5431                        le16toh(tx_start_bd->nbd),
5432                        le16toh(tx_start_bd->vlan_or_ethertype),
5433                        tx_start_bd->bd_flags.as_bitfield,
5434                        (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5435              } else if (i == 1) {
5436                  if (pbd_e1x) {
5437                      BLOGD(sc, DBG_TX,
5438                            "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5439                            "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5440                            "tcp_seq=%u total_hlen_w=%u\n",
5441                            pbd_e1x,
5442                            tmp_bd,
5443                            pbd_e1x->global_data,
5444                            pbd_e1x->ip_hlen_w,
5445                            pbd_e1x->ip_id,
5446                            pbd_e1x->lso_mss,
5447                            pbd_e1x->tcp_flags,
5448                            pbd_e1x->tcp_pseudo_csum,
5449                            pbd_e1x->tcp_send_seq,
5450                            le16toh(pbd_e1x->total_hlen_w));
5451                  } else { /* if (pbd_e2) */
5452                      BLOGD(sc, DBG_TX,
5453                            "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5454                            "src=%02x:%02x:%02x parsing_data=0x%x\n",
5455                            pbd_e2,
5456                            tmp_bd,
5457                            pbd_e2->data.mac_addr.dst_hi,
5458                            pbd_e2->data.mac_addr.dst_mid,
5459                            pbd_e2->data.mac_addr.dst_lo,
5460                            pbd_e2->data.mac_addr.src_hi,
5461                            pbd_e2->data.mac_addr.src_mid,
5462                            pbd_e2->data.mac_addr.src_lo,
5463                            pbd_e2->parsing_data);
5464                  }
5465              }
5466  
5467              if (i != 1) { /* skip parse db as it doesn't hold data */
5468                  tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5469                  BLOGD(sc, DBG_TX,
5470                        "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5471                        tx_data_bd,
5472                        tmp_bd,
5473                        le16toh(tx_data_bd->nbytes),
5474                        le32toh(tx_data_bd->addr_hi),
5475                        le32toh(tx_data_bd->addr_lo));
5476              }
5477  
5478              tmp_bd = TX_BD_NEXT(tmp_bd);
5479          }
5480      }
5481  
5482      BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5483  
5484      /* update TX BD producer index value for next TX */
5485      bd_prod = TX_BD_NEXT(bd_prod);
5486  
5487      /*
5488       * If the chain of tx_bd's describing this frame is adjacent to or spans
5489       * an eth_tx_next_bd element then we need to increment the nbds value.
5490       */
5491      if (TX_BD_IDX(bd_prod) < nbds) {
5492          nbds++;
5493      }
5494  
5495      /* don't allow reordering of writes for nbd and packets */
5496      mb();
5497  
5498      fp->tx_db.data.prod += nbds;
5499  
5500      /* producer points to the next free tx_bd at this point */
5501      fp->tx_pkt_prod++;
5502      fp->tx_bd_prod = bd_prod;
5503  
5504      DOORBELL(sc, fp->index, fp->tx_db.raw);
5505  
5506      fp->eth_q_stats.tx_pkts++;
5507  
5508      /* Prevent speculative reads from getting ahead of the status block. */
5509      bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5510                        0, 0, BUS_SPACE_BARRIER_READ);
5511  
5512      /* Prevent speculative reads from getting ahead of the doorbell. */
5513      bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5514                        0, 0, BUS_SPACE_BARRIER_READ);
5515  
5516      return (0);
5517  }
5518  
5519  static void
bxe_tx_start_locked(struct bxe_softc * sc,if_t ifp,struct bxe_fastpath * fp)5520  bxe_tx_start_locked(struct bxe_softc *sc,
5521                      if_t ifp,
5522                      struct bxe_fastpath *fp)
5523  {
5524      struct mbuf *m = NULL;
5525      int tx_count = 0;
5526      uint16_t tx_bd_avail;
5527  
5528      BXE_FP_TX_LOCK_ASSERT(fp);
5529  
5530      /* keep adding entries while there are frames to send */
5531      while (!if_sendq_empty(ifp)) {
5532  
5533          /*
5534           * check for any frames to send
5535           * dequeue can still be NULL even if queue is not empty
5536           */
5537          m = if_dequeue(ifp);
5538          if (__predict_false(m == NULL)) {
5539              break;
5540          }
5541  
5542          /* the mbuf now belongs to us */
5543          fp->eth_q_stats.mbuf_alloc_tx++;
5544  
5545          /*
5546           * Put the frame into the transmit ring. If we don't have room,
5547           * place the mbuf back at the head of the TX queue, set the
5548           * OACTIVE flag, and wait for the NIC to drain the chain.
5549           */
5550          if (__predict_false(bxe_tx_encap(fp, &m))) {
5551              fp->eth_q_stats.tx_encap_failures++;
5552              if (m != NULL) {
5553                  /* mark the TX queue as full and return the frame */
5554                  if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5555  		if_sendq_prepend(ifp, m);
5556                  fp->eth_q_stats.mbuf_alloc_tx--;
5557                  fp->eth_q_stats.tx_queue_xoff++;
5558              }
5559  
5560              /* stop looking for more work */
5561              break;
5562          }
5563  
5564          /* the frame was enqueued successfully */
5565          tx_count++;
5566  
5567          /* send a copy of the frame to any BPF listeners. */
5568          ether_bpf_mtap_if(ifp, m);
5569  
5570          tx_bd_avail = bxe_tx_avail(sc, fp);
5571  
5572          /* handle any completions if we're running low */
5573          if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5574              /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5575              bxe_txeof(sc, fp);
5576              if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5577                  break;
5578              }
5579          }
5580      }
5581  
5582      /* all TX packets were dequeued and/or the tx ring is full */
5583      if (tx_count > 0) {
5584          /* reset the TX watchdog timeout timer */
5585          fp->watchdog_timer = BXE_TX_TIMEOUT;
5586      }
5587  }
5588  
5589  /* Legacy (non-RSS) dispatch routine */
5590  static void
bxe_tx_start(if_t ifp)5591  bxe_tx_start(if_t ifp)
5592  {
5593      struct bxe_softc *sc;
5594      struct bxe_fastpath *fp;
5595  
5596      sc = if_getsoftc(ifp);
5597  
5598      if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5599          BLOGW(sc, "Interface not running, ignoring transmit request\n");
5600          return;
5601      }
5602  
5603      if (!sc->link_vars.link_up) {
5604          BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5605          return;
5606      }
5607  
5608      fp = &sc->fp[0];
5609  
5610      if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5611          fp->eth_q_stats.tx_queue_full_return++;
5612          return;
5613      }
5614  
5615      BXE_FP_TX_LOCK(fp);
5616      bxe_tx_start_locked(sc, ifp, fp);
5617      BXE_FP_TX_UNLOCK(fp);
5618  }
5619  
5620  static int
bxe_tx_mq_start_locked(struct bxe_softc * sc,if_t ifp,struct bxe_fastpath * fp,struct mbuf * m)5621  bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5622                         if_t                ifp,
5623                         struct bxe_fastpath *fp,
5624                         struct mbuf         *m)
5625  {
5626      struct buf_ring *tx_br = fp->tx_br;
5627      struct mbuf *next;
5628      int depth, rc, tx_count;
5629      uint16_t tx_bd_avail;
5630  
5631      rc = tx_count = 0;
5632  
5633      BXE_FP_TX_LOCK_ASSERT(fp);
5634  
5635      if (sc->state != BXE_STATE_OPEN)  {
5636          fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5637          return ENETDOWN;
5638      }
5639  
5640      if (!tx_br) {
5641          BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5642          return (EINVAL);
5643      }
5644  
5645      if (m != NULL) {
5646          rc = drbr_enqueue(ifp, tx_br, m);
5647          if (rc != 0) {
5648              fp->eth_q_stats.tx_soft_errors++;
5649              goto bxe_tx_mq_start_locked_exit;
5650          }
5651      }
5652  
5653      if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5654          fp->eth_q_stats.tx_request_link_down_failures++;
5655          goto bxe_tx_mq_start_locked_exit;
5656      }
5657  
5658      /* fetch the depth of the driver queue */
5659      depth = drbr_inuse(ifp, tx_br);
5660      if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5661          fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5662      }
5663  
5664      /* keep adding entries while there are frames to send */
5665      while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5666          /* handle any completions if we're running low */
5667          tx_bd_avail = bxe_tx_avail(sc, fp);
5668          if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5669              /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5670              bxe_txeof(sc, fp);
5671              tx_bd_avail = bxe_tx_avail(sc, fp);
5672              if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5673                  fp->eth_q_stats.bd_avail_too_less_failures++;
5674                  m_freem(next);
5675                  drbr_advance(ifp, tx_br);
5676                  rc = ENOBUFS;
5677                  break;
5678              }
5679          }
5680  
5681          /* the mbuf now belongs to us */
5682          fp->eth_q_stats.mbuf_alloc_tx++;
5683  
5684          /*
5685           * Put the frame into the transmit ring. If we don't have room,
5686           * place the mbuf back at the head of the TX queue, set the
5687           * OACTIVE flag, and wait for the NIC to drain the chain.
5688           */
5689          rc = bxe_tx_encap(fp, &next);
5690          if (__predict_false(rc != 0)) {
5691              fp->eth_q_stats.tx_encap_failures++;
5692              if (next != NULL) {
5693                  /* mark the TX queue as full and save the frame */
5694                  if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5695                  drbr_putback(ifp, tx_br, next);
5696                  fp->eth_q_stats.mbuf_alloc_tx--;
5697                  fp->eth_q_stats.tx_frames_deferred++;
5698              } else
5699                  drbr_advance(ifp, tx_br);
5700  
5701              /* stop looking for more work */
5702              break;
5703          }
5704  
5705          /* the transmit frame was enqueued successfully */
5706          tx_count++;
5707  
5708          /* send a copy of the frame to any BPF listeners */
5709          ether_bpf_mtap_if(ifp, next);
5710  
5711          drbr_advance(ifp, tx_br);
5712      }
5713  
5714      /* all TX packets were dequeued and/or the tx ring is full */
5715      if (tx_count > 0) {
5716          /* reset the TX watchdog timeout timer */
5717          fp->watchdog_timer = BXE_TX_TIMEOUT;
5718      }
5719  
5720  bxe_tx_mq_start_locked_exit:
5721      /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5722      if (!drbr_empty(ifp, tx_br)) {
5723          fp->eth_q_stats.tx_mq_not_empty++;
5724          taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5725      }
5726  
5727      return (rc);
5728  }
5729  
5730  static void
bxe_tx_mq_start_deferred(void * arg,int pending)5731  bxe_tx_mq_start_deferred(void *arg,
5732                           int pending)
5733  {
5734      struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5735      struct bxe_softc *sc = fp->sc;
5736      if_t ifp = sc->ifp;
5737  
5738      BXE_FP_TX_LOCK(fp);
5739      bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5740      BXE_FP_TX_UNLOCK(fp);
5741  }
5742  
5743  /* Multiqueue (TSS) dispatch routine. */
5744  static int
bxe_tx_mq_start(if_t ifp,struct mbuf * m)5745  bxe_tx_mq_start(if_t ifp,
5746                  struct mbuf  *m)
5747  {
5748      struct bxe_softc *sc = if_getsoftc(ifp);
5749      struct bxe_fastpath *fp;
5750      int fp_index, rc;
5751  
5752      fp_index = 0; /* default is the first queue */
5753  
5754      /* check if flowid is set */
5755  
5756      if (BXE_VALID_FLOWID(m))
5757          fp_index = (m->m_pkthdr.flowid % sc->num_queues);
5758  
5759      fp = &sc->fp[fp_index];
5760  
5761      if (sc->state != BXE_STATE_OPEN)  {
5762          fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5763          return ENETDOWN;
5764      }
5765  
5766      if (BXE_FP_TX_TRYLOCK(fp)) {
5767          rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5768          BXE_FP_TX_UNLOCK(fp);
5769      } else {
5770          rc = drbr_enqueue(ifp, fp->tx_br, m);
5771          taskqueue_enqueue(fp->tq, &fp->tx_task);
5772      }
5773  
5774      return (rc);
5775  }
5776  
5777  static void
bxe_mq_flush(if_t ifp)5778  bxe_mq_flush(if_t ifp)
5779  {
5780      struct bxe_softc *sc = if_getsoftc(ifp);
5781      struct bxe_fastpath *fp;
5782      struct mbuf *m;
5783      int i;
5784  
5785      for (i = 0; i < sc->num_queues; i++) {
5786          fp = &sc->fp[i];
5787  
5788          if (fp->state != BXE_FP_STATE_IRQ) {
5789              BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5790                    fp->index, fp->state);
5791              continue;
5792          }
5793  
5794          if (fp->tx_br != NULL) {
5795              BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5796              BXE_FP_TX_LOCK(fp);
5797              while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5798                  m_freem(m);
5799              }
5800              BXE_FP_TX_UNLOCK(fp);
5801          }
5802      }
5803  
5804      if_qflush(ifp);
5805  }
5806  
5807  static uint16_t
bxe_cid_ilt_lines(struct bxe_softc * sc)5808  bxe_cid_ilt_lines(struct bxe_softc *sc)
5809  {
5810      if (IS_SRIOV(sc)) {
5811          return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5812      }
5813      return (L2_ILT_LINES(sc));
5814  }
5815  
5816  static void
bxe_ilt_set_info(struct bxe_softc * sc)5817  bxe_ilt_set_info(struct bxe_softc *sc)
5818  {
5819      struct ilt_client_info *ilt_client;
5820      struct ecore_ilt *ilt = sc->ilt;
5821      uint16_t line = 0;
5822  
5823      ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5824      BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5825  
5826      /* CDU */
5827      ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5828      ilt_client->client_num = ILT_CLIENT_CDU;
5829      ilt_client->page_size = CDU_ILT_PAGE_SZ;
5830      ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5831      ilt_client->start = line;
5832      line += bxe_cid_ilt_lines(sc);
5833  
5834      if (CNIC_SUPPORT(sc)) {
5835          line += CNIC_ILT_LINES;
5836      }
5837  
5838      ilt_client->end = (line - 1);
5839  
5840      BLOGD(sc, DBG_LOAD,
5841            "ilt client[CDU]: start %d, end %d, "
5842            "psz 0x%x, flags 0x%x, hw psz %d\n",
5843            ilt_client->start, ilt_client->end,
5844            ilt_client->page_size,
5845            ilt_client->flags,
5846            ilog2(ilt_client->page_size >> 12));
5847  
5848      /* QM */
5849      if (QM_INIT(sc->qm_cid_count)) {
5850          ilt_client = &ilt->clients[ILT_CLIENT_QM];
5851          ilt_client->client_num = ILT_CLIENT_QM;
5852          ilt_client->page_size = QM_ILT_PAGE_SZ;
5853          ilt_client->flags = 0;
5854          ilt_client->start = line;
5855  
5856          /* 4 bytes for each cid */
5857          line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5858                               QM_ILT_PAGE_SZ);
5859  
5860          ilt_client->end = (line - 1);
5861  
5862          BLOGD(sc, DBG_LOAD,
5863                "ilt client[QM]: start %d, end %d, "
5864                "psz 0x%x, flags 0x%x, hw psz %d\n",
5865                ilt_client->start, ilt_client->end,
5866                ilt_client->page_size, ilt_client->flags,
5867                ilog2(ilt_client->page_size >> 12));
5868      }
5869  
5870      if (CNIC_SUPPORT(sc)) {
5871          /* SRC */
5872          ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5873          ilt_client->client_num = ILT_CLIENT_SRC;
5874          ilt_client->page_size = SRC_ILT_PAGE_SZ;
5875          ilt_client->flags = 0;
5876          ilt_client->start = line;
5877          line += SRC_ILT_LINES;
5878          ilt_client->end = (line - 1);
5879  
5880          BLOGD(sc, DBG_LOAD,
5881                "ilt client[SRC]: start %d, end %d, "
5882                "psz 0x%x, flags 0x%x, hw psz %d\n",
5883                ilt_client->start, ilt_client->end,
5884                ilt_client->page_size, ilt_client->flags,
5885                ilog2(ilt_client->page_size >> 12));
5886  
5887          /* TM */
5888          ilt_client = &ilt->clients[ILT_CLIENT_TM];
5889          ilt_client->client_num = ILT_CLIENT_TM;
5890          ilt_client->page_size = TM_ILT_PAGE_SZ;
5891          ilt_client->flags = 0;
5892          ilt_client->start = line;
5893          line += TM_ILT_LINES;
5894          ilt_client->end = (line - 1);
5895  
5896          BLOGD(sc, DBG_LOAD,
5897                "ilt client[TM]: start %d, end %d, "
5898                "psz 0x%x, flags 0x%x, hw psz %d\n",
5899                ilt_client->start, ilt_client->end,
5900                ilt_client->page_size, ilt_client->flags,
5901                ilog2(ilt_client->page_size >> 12));
5902      }
5903  
5904      KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5905  }
5906  
5907  static void
bxe_set_fp_rx_buf_size(struct bxe_softc * sc)5908  bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5909  {
5910      int i;
5911      uint32_t rx_buf_size;
5912  
5913      rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5914  
5915      for (i = 0; i < sc->num_queues; i++) {
5916          if(rx_buf_size <= MCLBYTES){
5917              sc->fp[i].rx_buf_size = rx_buf_size;
5918              sc->fp[i].mbuf_alloc_size = MCLBYTES;
5919          }else if (rx_buf_size <= MJUMPAGESIZE){
5920              sc->fp[i].rx_buf_size = rx_buf_size;
5921              sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5922          }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){
5923              sc->fp[i].rx_buf_size = MCLBYTES;
5924              sc->fp[i].mbuf_alloc_size = MCLBYTES;
5925          }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){
5926              sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5927              sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5928          }else {
5929              sc->fp[i].rx_buf_size = MCLBYTES;
5930              sc->fp[i].mbuf_alloc_size = MCLBYTES;
5931          }
5932      }
5933  }
5934  
5935  static int
bxe_alloc_ilt_mem(struct bxe_softc * sc)5936  bxe_alloc_ilt_mem(struct bxe_softc *sc)
5937  {
5938      int rc = 0;
5939  
5940      if ((sc->ilt =
5941           (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5942                                      M_BXE_ILT,
5943                                      (M_NOWAIT | M_ZERO))) == NULL) {
5944          rc = 1;
5945      }
5946  
5947      return (rc);
5948  }
5949  
5950  static int
bxe_alloc_ilt_lines_mem(struct bxe_softc * sc)5951  bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5952  {
5953      int rc = 0;
5954  
5955      if ((sc->ilt->lines =
5956           (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5957                                      M_BXE_ILT,
5958                                      (M_NOWAIT | M_ZERO))) == NULL) {
5959          rc = 1;
5960      }
5961  
5962      return (rc);
5963  }
5964  
5965  static void
bxe_free_ilt_mem(struct bxe_softc * sc)5966  bxe_free_ilt_mem(struct bxe_softc *sc)
5967  {
5968      if (sc->ilt != NULL) {
5969          free(sc->ilt, M_BXE_ILT);
5970          sc->ilt = NULL;
5971      }
5972  }
5973  
5974  static void
bxe_free_ilt_lines_mem(struct bxe_softc * sc)5975  bxe_free_ilt_lines_mem(struct bxe_softc *sc)
5976  {
5977      if (sc->ilt->lines != NULL) {
5978          free(sc->ilt->lines, M_BXE_ILT);
5979          sc->ilt->lines = NULL;
5980      }
5981  }
5982  
5983  static void
bxe_free_mem(struct bxe_softc * sc)5984  bxe_free_mem(struct bxe_softc *sc)
5985  {
5986      int i;
5987  
5988      for (i = 0; i < L2_ILT_LINES(sc); i++) {
5989          bxe_dma_free(sc, &sc->context[i].vcxt_dma);
5990          sc->context[i].vcxt = NULL;
5991          sc->context[i].size = 0;
5992      }
5993  
5994      ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
5995  
5996      bxe_free_ilt_lines_mem(sc);
5997  
5998  }
5999  
6000  static int
bxe_alloc_mem(struct bxe_softc * sc)6001  bxe_alloc_mem(struct bxe_softc *sc)
6002  {
6003  
6004      int context_size;
6005      int allocated;
6006      int i;
6007  
6008      /*
6009       * Allocate memory for CDU context:
6010       * This memory is allocated separately and not in the generic ILT
6011       * functions because CDU differs in few aspects:
6012       * 1. There can be multiple entities allocating memory for context -
6013       * regular L2, CNIC, and SRIOV drivers. Each separately controls
6014       * its own ILT lines.
6015       * 2. Since CDU page-size is not a single 4KB page (which is the case
6016       * for the other ILT clients), to be efficient we want to support
6017       * allocation of sub-page-size in the last entry.
6018       * 3. Context pointers are used by the driver to pass to FW / update
6019       * the context (for the other ILT clients the pointers are used just to
6020       * free the memory during unload).
6021       */
6022      context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6023      for (i = 0, allocated = 0; allocated < context_size; i++) {
6024          sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6025                                    (context_size - allocated));
6026  
6027          if (bxe_dma_alloc(sc, sc->context[i].size,
6028                            &sc->context[i].vcxt_dma,
6029                            "cdu context") != 0) {
6030              bxe_free_mem(sc);
6031              return (-1);
6032          }
6033  
6034          sc->context[i].vcxt =
6035              (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6036  
6037          allocated += sc->context[i].size;
6038      }
6039  
6040      bxe_alloc_ilt_lines_mem(sc);
6041  
6042      BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6043            sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6044      {
6045          for (i = 0; i < 4; i++) {
6046              BLOGD(sc, DBG_LOAD,
6047                    "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6048                    i,
6049                    sc->ilt->clients[i].page_size,
6050                    sc->ilt->clients[i].start,
6051                    sc->ilt->clients[i].end,
6052                    sc->ilt->clients[i].client_num,
6053                    sc->ilt->clients[i].flags);
6054          }
6055      }
6056      if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6057          BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6058          bxe_free_mem(sc);
6059          return (-1);
6060      }
6061  
6062      return (0);
6063  }
6064  
6065  static void
bxe_free_rx_bd_chain(struct bxe_fastpath * fp)6066  bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6067  {
6068      int i;
6069  
6070      if (fp->rx_mbuf_tag == NULL) {
6071          return;
6072      }
6073  
6074      /* free all mbufs and unload all maps */
6075      for (i = 0; i < RX_BD_TOTAL; i++) {
6076          if (fp->rx_mbuf_chain[i].m_map != NULL) {
6077              bus_dmamap_sync(fp->rx_mbuf_tag,
6078                              fp->rx_mbuf_chain[i].m_map,
6079                              BUS_DMASYNC_POSTREAD);
6080              bus_dmamap_unload(fp->rx_mbuf_tag,
6081                                fp->rx_mbuf_chain[i].m_map);
6082          }
6083  
6084          if (fp->rx_mbuf_chain[i].m != NULL) {
6085              m_freem(fp->rx_mbuf_chain[i].m);
6086              fp->rx_mbuf_chain[i].m = NULL;
6087              fp->eth_q_stats.mbuf_alloc_rx--;
6088          }
6089      }
6090  }
6091  
6092  static void
bxe_free_tpa_pool(struct bxe_fastpath * fp)6093  bxe_free_tpa_pool(struct bxe_fastpath *fp)
6094  {
6095      struct bxe_softc *sc;
6096      int i, max_agg_queues;
6097  
6098      sc = fp->sc;
6099  
6100      if (fp->rx_mbuf_tag == NULL) {
6101          return;
6102      }
6103  
6104      max_agg_queues = MAX_AGG_QS(sc);
6105  
6106      /* release all mbufs and unload all DMA maps in the TPA pool */
6107      for (i = 0; i < max_agg_queues; i++) {
6108          if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6109              bus_dmamap_sync(fp->rx_mbuf_tag,
6110                              fp->rx_tpa_info[i].bd.m_map,
6111                              BUS_DMASYNC_POSTREAD);
6112              bus_dmamap_unload(fp->rx_mbuf_tag,
6113                                fp->rx_tpa_info[i].bd.m_map);
6114          }
6115  
6116          if (fp->rx_tpa_info[i].bd.m != NULL) {
6117              m_freem(fp->rx_tpa_info[i].bd.m);
6118              fp->rx_tpa_info[i].bd.m = NULL;
6119              fp->eth_q_stats.mbuf_alloc_tpa--;
6120          }
6121      }
6122  }
6123  
6124  static void
bxe_free_sge_chain(struct bxe_fastpath * fp)6125  bxe_free_sge_chain(struct bxe_fastpath *fp)
6126  {
6127      int i;
6128  
6129      if (fp->rx_sge_mbuf_tag == NULL) {
6130          return;
6131      }
6132  
6133      /* rree all mbufs and unload all maps */
6134      for (i = 0; i < RX_SGE_TOTAL; i++) {
6135          if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6136              bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6137                              fp->rx_sge_mbuf_chain[i].m_map,
6138                              BUS_DMASYNC_POSTREAD);
6139              bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6140                                fp->rx_sge_mbuf_chain[i].m_map);
6141          }
6142  
6143          if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6144              m_freem(fp->rx_sge_mbuf_chain[i].m);
6145              fp->rx_sge_mbuf_chain[i].m = NULL;
6146              fp->eth_q_stats.mbuf_alloc_sge--;
6147          }
6148      }
6149  }
6150  
6151  static void
bxe_free_fp_buffers(struct bxe_softc * sc)6152  bxe_free_fp_buffers(struct bxe_softc *sc)
6153  {
6154      struct bxe_fastpath *fp;
6155      int i;
6156  
6157      for (i = 0; i < sc->num_queues; i++) {
6158          fp = &sc->fp[i];
6159  
6160          if (fp->tx_br != NULL) {
6161              /* just in case bxe_mq_flush() wasn't called */
6162              if (mtx_initialized(&fp->tx_mtx)) {
6163                  struct mbuf *m;
6164  
6165                  BXE_FP_TX_LOCK(fp);
6166                  while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6167                      m_freem(m);
6168                  BXE_FP_TX_UNLOCK(fp);
6169              }
6170          }
6171  
6172          /* free all RX buffers */
6173          bxe_free_rx_bd_chain(fp);
6174          bxe_free_tpa_pool(fp);
6175          bxe_free_sge_chain(fp);
6176  
6177          if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6178              BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6179                    fp->eth_q_stats.mbuf_alloc_rx);
6180          }
6181  
6182          if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6183              BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6184                    fp->eth_q_stats.mbuf_alloc_sge);
6185          }
6186  
6187          if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6188              BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6189                    fp->eth_q_stats.mbuf_alloc_tpa);
6190          }
6191  
6192          if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6193              BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6194                    fp->eth_q_stats.mbuf_alloc_tx);
6195          }
6196  
6197          /* XXX verify all mbufs were reclaimed */
6198      }
6199  }
6200  
6201  static int
bxe_alloc_rx_bd_mbuf(struct bxe_fastpath * fp,uint16_t prev_index,uint16_t index)6202  bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6203                       uint16_t            prev_index,
6204                       uint16_t            index)
6205  {
6206      struct bxe_sw_rx_bd *rx_buf;
6207      struct eth_rx_bd *rx_bd;
6208      bus_dma_segment_t segs[1];
6209      bus_dmamap_t map;
6210      struct mbuf *m;
6211      int nsegs, rc;
6212  
6213      rc = 0;
6214  
6215      /* allocate the new RX BD mbuf */
6216      m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6217      if (__predict_false(m == NULL)) {
6218          fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6219          return (ENOBUFS);
6220      }
6221  
6222      fp->eth_q_stats.mbuf_alloc_rx++;
6223  
6224      /* initialize the mbuf buffer length */
6225      m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6226  
6227      /* map the mbuf into non-paged pool */
6228      rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6229                                   fp->rx_mbuf_spare_map,
6230                                   m, segs, &nsegs, BUS_DMA_NOWAIT);
6231      if (__predict_false(rc != 0)) {
6232          fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6233          m_freem(m);
6234          fp->eth_q_stats.mbuf_alloc_rx--;
6235          return (rc);
6236      }
6237  
6238      /* all mbufs must map to a single segment */
6239      KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6240  
6241      /* release any existing RX BD mbuf mappings */
6242  
6243      if (prev_index != index) {
6244          rx_buf = &fp->rx_mbuf_chain[prev_index];
6245  
6246          if (rx_buf->m_map != NULL) {
6247              bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6248                              BUS_DMASYNC_POSTREAD);
6249              bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6250          }
6251  
6252          /*
6253           * We only get here from bxe_rxeof() when the maximum number
6254           * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6255           * holds the mbuf in the prev_index so it's OK to NULL it out
6256           * here without concern of a memory leak.
6257           */
6258          fp->rx_mbuf_chain[prev_index].m = NULL;
6259      }
6260  
6261      rx_buf = &fp->rx_mbuf_chain[index];
6262  
6263      if (rx_buf->m_map != NULL) {
6264          bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6265                          BUS_DMASYNC_POSTREAD);
6266          bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6267      }
6268  
6269      /* save the mbuf and mapping info for a future packet */
6270      map = (prev_index != index) ?
6271                fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6272      rx_buf->m_map = fp->rx_mbuf_spare_map;
6273      fp->rx_mbuf_spare_map = map;
6274      bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6275                      BUS_DMASYNC_PREREAD);
6276      rx_buf->m = m;
6277  
6278      rx_bd = &fp->rx_chain[index];
6279      rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6280      rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6281  
6282      return (rc);
6283  }
6284  
6285  static int
bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath * fp,int queue)6286  bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6287                        int                 queue)
6288  {
6289      struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6290      bus_dma_segment_t segs[1];
6291      bus_dmamap_t map;
6292      struct mbuf *m;
6293      int nsegs;
6294      int rc = 0;
6295  
6296      /* allocate the new TPA mbuf */
6297      m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6298      if (__predict_false(m == NULL)) {
6299          fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6300          return (ENOBUFS);
6301      }
6302  
6303      fp->eth_q_stats.mbuf_alloc_tpa++;
6304  
6305      /* initialize the mbuf buffer length */
6306      m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6307  
6308      /* map the mbuf into non-paged pool */
6309      rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6310                                   fp->rx_tpa_info_mbuf_spare_map,
6311                                   m, segs, &nsegs, BUS_DMA_NOWAIT);
6312      if (__predict_false(rc != 0)) {
6313          fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6314          m_free(m);
6315          fp->eth_q_stats.mbuf_alloc_tpa--;
6316          return (rc);
6317      }
6318  
6319      /* all mbufs must map to a single segment */
6320      KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6321  
6322      /* release any existing TPA mbuf mapping */
6323      if (tpa_info->bd.m_map != NULL) {
6324          bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6325                          BUS_DMASYNC_POSTREAD);
6326          bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6327      }
6328  
6329      /* save the mbuf and mapping info for the TPA mbuf */
6330      map = tpa_info->bd.m_map;
6331      tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6332      fp->rx_tpa_info_mbuf_spare_map = map;
6333      bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6334                      BUS_DMASYNC_PREREAD);
6335      tpa_info->bd.m = m;
6336      tpa_info->seg = segs[0];
6337  
6338      return (rc);
6339  }
6340  
6341  /*
6342   * Allocate an mbuf and assign it to the receive scatter gather chain. The
6343   * caller must take care to save a copy of the existing mbuf in the SG mbuf
6344   * chain.
6345   */
6346  static int
bxe_alloc_rx_sge_mbuf(struct bxe_fastpath * fp,uint16_t index)6347  bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6348                        uint16_t            index)
6349  {
6350      struct bxe_sw_rx_bd *sge_buf;
6351      struct eth_rx_sge *sge;
6352      bus_dma_segment_t segs[1];
6353      bus_dmamap_t map;
6354      struct mbuf *m;
6355      int nsegs;
6356      int rc = 0;
6357  
6358      /* allocate a new SGE mbuf */
6359      m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6360      if (__predict_false(m == NULL)) {
6361          fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6362          return (ENOMEM);
6363      }
6364  
6365      fp->eth_q_stats.mbuf_alloc_sge++;
6366  
6367      /* initialize the mbuf buffer length */
6368      m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6369  
6370      /* map the SGE mbuf into non-paged pool */
6371      rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6372                                   fp->rx_sge_mbuf_spare_map,
6373                                   m, segs, &nsegs, BUS_DMA_NOWAIT);
6374      if (__predict_false(rc != 0)) {
6375          fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6376          m_freem(m);
6377          fp->eth_q_stats.mbuf_alloc_sge--;
6378          return (rc);
6379      }
6380  
6381      /* all mbufs must map to a single segment */
6382      KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6383  
6384      sge_buf = &fp->rx_sge_mbuf_chain[index];
6385  
6386      /* release any existing SGE mbuf mapping */
6387      if (sge_buf->m_map != NULL) {
6388          bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6389                          BUS_DMASYNC_POSTREAD);
6390          bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6391      }
6392  
6393      /* save the mbuf and mapping info for a future packet */
6394      map = sge_buf->m_map;
6395      sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6396      fp->rx_sge_mbuf_spare_map = map;
6397      bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6398                      BUS_DMASYNC_PREREAD);
6399      sge_buf->m = m;
6400  
6401      sge = &fp->rx_sge_chain[index];
6402      sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6403      sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6404  
6405      return (rc);
6406  }
6407  
6408  static __noinline int
bxe_alloc_fp_buffers(struct bxe_softc * sc)6409  bxe_alloc_fp_buffers(struct bxe_softc *sc)
6410  {
6411      struct bxe_fastpath *fp;
6412      int i, j, rc = 0;
6413      int ring_prod, cqe_ring_prod;
6414      int max_agg_queues;
6415  
6416      for (i = 0; i < sc->num_queues; i++) {
6417          fp = &sc->fp[i];
6418  
6419          ring_prod = cqe_ring_prod = 0;
6420          fp->rx_bd_cons = 0;
6421          fp->rx_cq_cons = 0;
6422  
6423          /* allocate buffers for the RX BDs in RX BD chain */
6424          for (j = 0; j < sc->max_rx_bufs; j++) {
6425              rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6426              if (rc != 0) {
6427                  BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6428                        i, rc);
6429                  goto bxe_alloc_fp_buffers_error;
6430              }
6431  
6432              ring_prod     = RX_BD_NEXT(ring_prod);
6433              cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6434          }
6435  
6436          fp->rx_bd_prod = ring_prod;
6437          fp->rx_cq_prod = cqe_ring_prod;
6438          fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6439  
6440          max_agg_queues = MAX_AGG_QS(sc);
6441  
6442          fp->tpa_enable = TRUE;
6443  
6444          /* fill the TPA pool */
6445          for (j = 0; j < max_agg_queues; j++) {
6446              rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6447              if (rc != 0) {
6448                  BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6449                            i, j);
6450                  fp->tpa_enable = FALSE;
6451                  goto bxe_alloc_fp_buffers_error;
6452              }
6453  
6454              fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6455          }
6456  
6457          if (fp->tpa_enable) {
6458              /* fill the RX SGE chain */
6459              ring_prod = 0;
6460              for (j = 0; j < RX_SGE_USABLE; j++) {
6461                  rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6462                  if (rc != 0) {
6463                      BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6464                                i, ring_prod);
6465                      fp->tpa_enable = FALSE;
6466                      ring_prod = 0;
6467                      goto bxe_alloc_fp_buffers_error;
6468                  }
6469  
6470                  ring_prod = RX_SGE_NEXT(ring_prod);
6471              }
6472  
6473              fp->rx_sge_prod = ring_prod;
6474          }
6475      }
6476  
6477      return (0);
6478  
6479  bxe_alloc_fp_buffers_error:
6480  
6481      /* unwind what was already allocated */
6482      bxe_free_rx_bd_chain(fp);
6483      bxe_free_tpa_pool(fp);
6484      bxe_free_sge_chain(fp);
6485  
6486      return (ENOBUFS);
6487  }
6488  
6489  static void
bxe_free_fw_stats_mem(struct bxe_softc * sc)6490  bxe_free_fw_stats_mem(struct bxe_softc *sc)
6491  {
6492      bxe_dma_free(sc, &sc->fw_stats_dma);
6493  
6494      sc->fw_stats_num = 0;
6495  
6496      sc->fw_stats_req_size = 0;
6497      sc->fw_stats_req = NULL;
6498      sc->fw_stats_req_mapping = 0;
6499  
6500      sc->fw_stats_data_size = 0;
6501      sc->fw_stats_data = NULL;
6502      sc->fw_stats_data_mapping = 0;
6503  }
6504  
6505  static int
bxe_alloc_fw_stats_mem(struct bxe_softc * sc)6506  bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6507  {
6508      uint8_t num_queue_stats;
6509      int num_groups;
6510  
6511      /* number of queues for statistics is number of eth queues */
6512      num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6513  
6514      /*
6515       * Total number of FW statistics requests =
6516       *   1 for port stats + 1 for PF stats + num of queues
6517       */
6518      sc->fw_stats_num = (2 + num_queue_stats);
6519  
6520      /*
6521       * Request is built from stats_query_header and an array of
6522       * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6523       * rules. The real number or requests is configured in the
6524       * stats_query_header.
6525       */
6526      num_groups =
6527          ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6528           ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6529  
6530      BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6531            sc->fw_stats_num, num_groups);
6532  
6533      sc->fw_stats_req_size =
6534          (sizeof(struct stats_query_header) +
6535           (num_groups * sizeof(struct stats_query_cmd_group)));
6536  
6537      /*
6538       * Data for statistics requests + stats_counter.
6539       * stats_counter holds per-STORM counters that are incremented when
6540       * STORM has finished with the current request. Memory for FCoE
6541       * offloaded statistics are counted anyway, even if they will not be sent.
6542       * VF stats are not accounted for here as the data of VF stats is stored
6543       * in memory allocated by the VF, not here.
6544       */
6545      sc->fw_stats_data_size =
6546          (sizeof(struct stats_counter) +
6547           sizeof(struct per_port_stats) +
6548           sizeof(struct per_pf_stats) +
6549           /* sizeof(struct fcoe_statistics_params) + */
6550           (sizeof(struct per_queue_stats) * num_queue_stats));
6551  
6552      if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6553                        &sc->fw_stats_dma, "fw stats") != 0) {
6554          bxe_free_fw_stats_mem(sc);
6555          return (-1);
6556      }
6557  
6558      /* set up the shortcuts */
6559  
6560      sc->fw_stats_req =
6561          (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6562      sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6563  
6564      sc->fw_stats_data =
6565          (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6566                                       sc->fw_stats_req_size);
6567      sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6568                                   sc->fw_stats_req_size);
6569  
6570      BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6571            (uintmax_t)sc->fw_stats_req_mapping);
6572  
6573      BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6574            (uintmax_t)sc->fw_stats_data_mapping);
6575  
6576      return (0);
6577  }
6578  
6579  /*
6580   * Bits map:
6581   * 0-7  - Engine0 load counter.
6582   * 8-15 - Engine1 load counter.
6583   * 16   - Engine0 RESET_IN_PROGRESS bit.
6584   * 17   - Engine1 RESET_IN_PROGRESS bit.
6585   * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6586   *        function on the engine
6587   * 19   - Engine1 ONE_IS_LOADED.
6588   * 20   - Chip reset flow bit. When set none-leader must wait for both engines
6589   *        leader to complete (check for both RESET_IN_PROGRESS bits and not
6590   *        for just the one belonging to its engine).
6591   */
6592  #define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6593  #define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6594  #define BXE_PATH0_LOAD_CNT_SHIFT  0
6595  #define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6596  #define BXE_PATH1_LOAD_CNT_SHIFT  8
6597  #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6598  #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6599  #define BXE_GLOBAL_RESET_BIT      0x00040000
6600  
6601  /* set the GLOBAL_RESET bit, should be run under rtnl lock */
6602  static void
bxe_set_reset_global(struct bxe_softc * sc)6603  bxe_set_reset_global(struct bxe_softc *sc)
6604  {
6605      uint32_t val;
6606      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6607      val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6608      REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6609      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6610  }
6611  
6612  /* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6613  static void
bxe_clear_reset_global(struct bxe_softc * sc)6614  bxe_clear_reset_global(struct bxe_softc *sc)
6615  {
6616      uint32_t val;
6617      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6618      val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6619      REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6620      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6621  }
6622  
6623  /* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6624  static uint8_t
bxe_reset_is_global(struct bxe_softc * sc)6625  bxe_reset_is_global(struct bxe_softc *sc)
6626  {
6627      uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6628      BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6629      return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6630  }
6631  
6632  /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6633  static void
bxe_set_reset_done(struct bxe_softc * sc)6634  bxe_set_reset_done(struct bxe_softc *sc)
6635  {
6636      uint32_t val;
6637      uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6638                                   BXE_PATH0_RST_IN_PROG_BIT;
6639  
6640      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6641  
6642      val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6643      /* Clear the bit */
6644      val &= ~bit;
6645      REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6646  
6647      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6648  }
6649  
6650  /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6651  static void
bxe_set_reset_in_progress(struct bxe_softc * sc)6652  bxe_set_reset_in_progress(struct bxe_softc *sc)
6653  {
6654      uint32_t val;
6655      uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6656                                   BXE_PATH0_RST_IN_PROG_BIT;
6657  
6658      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6659  
6660      val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6661      /* Set the bit */
6662      val |= bit;
6663      REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6664  
6665      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6666  }
6667  
6668  /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6669  static uint8_t
bxe_reset_is_done(struct bxe_softc * sc,int engine)6670  bxe_reset_is_done(struct bxe_softc *sc,
6671                    int              engine)
6672  {
6673      uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6674      uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6675                              BXE_PATH0_RST_IN_PROG_BIT;
6676  
6677      /* return false if bit is set */
6678      return (val & bit) ? FALSE : TRUE;
6679  }
6680  
6681  /* get the load status for an engine, should be run under rtnl lock */
6682  static uint8_t
bxe_get_load_status(struct bxe_softc * sc,int engine)6683  bxe_get_load_status(struct bxe_softc *sc,
6684                      int              engine)
6685  {
6686      uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6687                               BXE_PATH0_LOAD_CNT_MASK;
6688      uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6689                                BXE_PATH0_LOAD_CNT_SHIFT;
6690      uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6691  
6692      BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6693  
6694      val = ((val & mask) >> shift);
6695  
6696      BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6697  
6698      return (val != 0);
6699  }
6700  
6701  /* set pf load mark */
6702  /* XXX needs to be under rtnl lock */
6703  static void
bxe_set_pf_load(struct bxe_softc * sc)6704  bxe_set_pf_load(struct bxe_softc *sc)
6705  {
6706      uint32_t val;
6707      uint32_t val1;
6708      uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6709                                    BXE_PATH0_LOAD_CNT_MASK;
6710      uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6711                                     BXE_PATH0_LOAD_CNT_SHIFT;
6712  
6713      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6714  
6715      val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6716      BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6717  
6718      /* get the current counter value */
6719      val1 = ((val & mask) >> shift);
6720  
6721      /* set bit of this PF */
6722      val1 |= (1 << SC_ABS_FUNC(sc));
6723  
6724      /* clear the old value */
6725      val &= ~mask;
6726  
6727      /* set the new one */
6728      val |= ((val1 << shift) & mask);
6729  
6730      REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6731  
6732      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6733  }
6734  
6735  /* clear pf load mark */
6736  /* XXX needs to be under rtnl lock */
6737  static uint8_t
bxe_clear_pf_load(struct bxe_softc * sc)6738  bxe_clear_pf_load(struct bxe_softc *sc)
6739  {
6740      uint32_t val1, val;
6741      uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6742                                    BXE_PATH0_LOAD_CNT_MASK;
6743      uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6744                                     BXE_PATH0_LOAD_CNT_SHIFT;
6745  
6746      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6747      val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6748      BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6749  
6750      /* get the current counter value */
6751      val1 = (val & mask) >> shift;
6752  
6753      /* clear bit of that PF */
6754      val1 &= ~(1 << SC_ABS_FUNC(sc));
6755  
6756      /* clear the old value */
6757      val &= ~mask;
6758  
6759      /* set the new one */
6760      val |= ((val1 << shift) & mask);
6761  
6762      REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6763      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6764      return (val1 != 0);
6765  }
6766  
6767  /* send load requrest to mcp and analyze response */
6768  static int
bxe_nic_load_request(struct bxe_softc * sc,uint32_t * load_code)6769  bxe_nic_load_request(struct bxe_softc *sc,
6770                       uint32_t         *load_code)
6771  {
6772      /* init fw_seq */
6773      sc->fw_seq =
6774          (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6775           DRV_MSG_SEQ_NUMBER_MASK);
6776  
6777      BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6778  
6779      /* get the current FW pulse sequence */
6780      sc->fw_drv_pulse_wr_seq =
6781          (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6782           DRV_PULSE_SEQ_MASK);
6783  
6784      BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6785            sc->fw_drv_pulse_wr_seq);
6786  
6787      /* load request */
6788      (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6789                                    DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6790  
6791      /* if the MCP fails to respond we must abort */
6792      if (!(*load_code)) {
6793          BLOGE(sc, "MCP response failure!\n");
6794          return (-1);
6795      }
6796  
6797      /* if MCP refused then must abort */
6798      if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6799          BLOGE(sc, "MCP refused load request\n");
6800          return (-1);
6801      }
6802  
6803      return (0);
6804  }
6805  
6806  /*
6807   * Check whether another PF has already loaded FW to chip. In virtualized
6808   * environments a pf from anoth VM may have already initialized the device
6809   * including loading FW.
6810   */
6811  static int
bxe_nic_load_analyze_req(struct bxe_softc * sc,uint32_t load_code)6812  bxe_nic_load_analyze_req(struct bxe_softc *sc,
6813                           uint32_t         load_code)
6814  {
6815      uint32_t my_fw, loaded_fw;
6816  
6817      /* is another pf loaded on this engine? */
6818      if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6819          (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6820          /* build my FW version dword */
6821          my_fw = (BCM_5710_FW_MAJOR_VERSION +
6822                   (BCM_5710_FW_MINOR_VERSION << 8 ) +
6823                   (BCM_5710_FW_REVISION_VERSION << 16) +
6824                   (BCM_5710_FW_ENGINEERING_VERSION << 24));
6825  
6826          /* read loaded FW from chip */
6827          loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6828          BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6829                loaded_fw, my_fw);
6830  
6831          /* abort nic load if version mismatch */
6832          if (my_fw != loaded_fw) {
6833              BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6834                    loaded_fw, my_fw);
6835              return (-1);
6836          }
6837      }
6838  
6839      return (0);
6840  }
6841  
6842  /* mark PMF if applicable */
6843  static void
bxe_nic_load_pmf(struct bxe_softc * sc,uint32_t load_code)6844  bxe_nic_load_pmf(struct bxe_softc *sc,
6845                   uint32_t         load_code)
6846  {
6847      uint32_t ncsi_oem_data_addr;
6848  
6849      if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6850          (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6851          (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6852          /*
6853           * Barrier here for ordering between the writing to sc->port.pmf here
6854           * and reading it from the periodic task.
6855           */
6856          sc->port.pmf = 1;
6857          mb();
6858      } else {
6859          sc->port.pmf = 0;
6860      }
6861  
6862      BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6863  
6864      /* XXX needed? */
6865      if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6866          if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6867              ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6868              if (ncsi_oem_data_addr) {
6869                  REG_WR(sc,
6870                         (ncsi_oem_data_addr +
6871                          offsetof(struct glob_ncsi_oem_data, driver_version)),
6872                         0);
6873              }
6874          }
6875      }
6876  }
6877  
6878  static void
bxe_read_mf_cfg(struct bxe_softc * sc)6879  bxe_read_mf_cfg(struct bxe_softc *sc)
6880  {
6881      int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6882      int abs_func;
6883      int vn;
6884  
6885      if (BXE_NOMCP(sc)) {
6886          return; /* what should be the default bvalue in this case */
6887      }
6888  
6889      /*
6890       * The formula for computing the absolute function number is...
6891       * For 2 port configuration (4 functions per port):
6892       *   abs_func = 2 * vn + SC_PORT + SC_PATH
6893       * For 4 port configuration (2 functions per port):
6894       *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6895       */
6896      for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6897          abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6898          if (abs_func >= E1H_FUNC_MAX) {
6899              break;
6900          }
6901          sc->devinfo.mf_info.mf_config[vn] =
6902              MFCFG_RD(sc, func_mf_config[abs_func].config);
6903      }
6904  
6905      if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6906          FUNC_MF_CFG_FUNC_DISABLED) {
6907          BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6908          sc->flags |= BXE_MF_FUNC_DIS;
6909      } else {
6910          BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6911          sc->flags &= ~BXE_MF_FUNC_DIS;
6912      }
6913  }
6914  
6915  /* acquire split MCP access lock register */
bxe_acquire_alr(struct bxe_softc * sc)6916  static int bxe_acquire_alr(struct bxe_softc *sc)
6917  {
6918      uint32_t j, val;
6919  
6920      for (j = 0; j < 1000; j++) {
6921          val = (1UL << 31);
6922          REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6923          val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6924          if (val & (1L << 31))
6925              break;
6926  
6927          DELAY(5000);
6928      }
6929  
6930      if (!(val & (1L << 31))) {
6931          BLOGE(sc, "Cannot acquire MCP access lock register\n");
6932          return (-1);
6933      }
6934  
6935      return (0);
6936  }
6937  
6938  /* release split MCP access lock register */
bxe_release_alr(struct bxe_softc * sc)6939  static void bxe_release_alr(struct bxe_softc *sc)
6940  {
6941      REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6942  }
6943  
6944  static void
bxe_fan_failure(struct bxe_softc * sc)6945  bxe_fan_failure(struct bxe_softc *sc)
6946  {
6947      int port = SC_PORT(sc);
6948      uint32_t ext_phy_config;
6949  
6950      /* mark the failure */
6951      ext_phy_config =
6952          SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
6953  
6954      ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6955      ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
6956      SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
6957               ext_phy_config);
6958  
6959      /* log the failure */
6960      BLOGW(sc, "Fan Failure has caused the driver to shutdown "
6961                "the card to prevent permanent damage. "
6962                "Please contact OEM Support for assistance\n");
6963  
6964      /* XXX */
6965  #if 1
6966      bxe_panic(sc, ("Schedule task to handle fan failure\n"));
6967  #else
6968      /*
6969       * Schedule device reset (unload)
6970       * This is due to some boards consuming sufficient power when driver is
6971       * up to overheat if fan fails.
6972       */
6973      bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
6974      schedule_delayed_work(&sc->sp_rtnl_task, 0);
6975  #endif
6976  }
6977  
6978  /* this function is called upon a link interrupt */
6979  static void
bxe_link_attn(struct bxe_softc * sc)6980  bxe_link_attn(struct bxe_softc *sc)
6981  {
6982      uint32_t pause_enabled = 0;
6983      struct host_port_stats *pstats;
6984      int cmng_fns;
6985      struct bxe_fastpath *fp;
6986      int i;
6987  
6988      /* Make sure that we are synced with the current statistics */
6989      bxe_stats_handle(sc, STATS_EVENT_STOP);
6990      BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
6991      elink_link_update(&sc->link_params, &sc->link_vars);
6992  
6993      if (sc->link_vars.link_up) {
6994  
6995          /* dropless flow control */
6996          if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
6997              pause_enabled = 0;
6998  
6999              if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7000                  pause_enabled = 1;
7001              }
7002  
7003              REG_WR(sc,
7004                     (BAR_USTRORM_INTMEM +
7005                      USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7006                     pause_enabled);
7007          }
7008  
7009          if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7010              pstats = BXE_SP(sc, port_stats);
7011              /* reset old mac stats */
7012              memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7013          }
7014  
7015          if (sc->state == BXE_STATE_OPEN) {
7016              bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7017  	    /* Restart tx when the link comes back. */
7018  	    FOR_EACH_ETH_QUEUE(sc, i) {
7019  		fp = &sc->fp[i];
7020  		taskqueue_enqueue(fp->tq, &fp->tx_task);
7021  	    }
7022          }
7023  
7024      }
7025  
7026      if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7027          cmng_fns = bxe_get_cmng_fns_mode(sc);
7028  
7029          if (cmng_fns != CMNG_FNS_NONE) {
7030              bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7031              storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7032          } else {
7033              /* rate shaping and fairness are disabled */
7034              BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7035          }
7036      }
7037  
7038      bxe_link_report_locked(sc);
7039  
7040      if (IS_MF(sc)) {
7041          ; // XXX bxe_link_sync_notify(sc);
7042      }
7043  }
7044  
7045  static void
bxe_attn_int_asserted(struct bxe_softc * sc,uint32_t asserted)7046  bxe_attn_int_asserted(struct bxe_softc *sc,
7047                        uint32_t         asserted)
7048  {
7049      int port = SC_PORT(sc);
7050      uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7051                                 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7052      uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7053                                          NIG_REG_MASK_INTERRUPT_PORT0;
7054      uint32_t aeu_mask;
7055      uint32_t nig_mask = 0;
7056      uint32_t reg_addr;
7057      uint32_t igu_acked;
7058      uint32_t cnt;
7059  
7060      if (sc->attn_state & asserted) {
7061          BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7062      }
7063  
7064      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7065  
7066      aeu_mask = REG_RD(sc, aeu_addr);
7067  
7068      BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7069            aeu_mask, asserted);
7070  
7071      aeu_mask &= ~(asserted & 0x3ff);
7072  
7073      BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7074  
7075      REG_WR(sc, aeu_addr, aeu_mask);
7076  
7077      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7078  
7079      BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7080      sc->attn_state |= asserted;
7081      BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7082  
7083      if (asserted & ATTN_HARD_WIRED_MASK) {
7084          if (asserted & ATTN_NIG_FOR_FUNC) {
7085  
7086  	    bxe_acquire_phy_lock(sc);
7087              /* save nig interrupt mask */
7088              nig_mask = REG_RD(sc, nig_int_mask_addr);
7089  
7090              /* If nig_mask is not set, no need to call the update function */
7091              if (nig_mask) {
7092                  REG_WR(sc, nig_int_mask_addr, 0);
7093  
7094                  bxe_link_attn(sc);
7095              }
7096  
7097              /* handle unicore attn? */
7098          }
7099  
7100          if (asserted & ATTN_SW_TIMER_4_FUNC) {
7101              BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7102          }
7103  
7104          if (asserted & GPIO_2_FUNC) {
7105              BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7106          }
7107  
7108          if (asserted & GPIO_3_FUNC) {
7109              BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7110          }
7111  
7112          if (asserted & GPIO_4_FUNC) {
7113              BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7114          }
7115  
7116          if (port == 0) {
7117              if (asserted & ATTN_GENERAL_ATTN_1) {
7118                  BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7119                  REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7120              }
7121              if (asserted & ATTN_GENERAL_ATTN_2) {
7122                  BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7123                  REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7124              }
7125              if (asserted & ATTN_GENERAL_ATTN_3) {
7126                  BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7127                  REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7128              }
7129          } else {
7130              if (asserted & ATTN_GENERAL_ATTN_4) {
7131                  BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7132                  REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7133              }
7134              if (asserted & ATTN_GENERAL_ATTN_5) {
7135                  BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7136                  REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7137              }
7138              if (asserted & ATTN_GENERAL_ATTN_6) {
7139                  BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7140                  REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7141              }
7142          }
7143      } /* hardwired */
7144  
7145      if (sc->devinfo.int_block == INT_BLOCK_HC) {
7146          reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7147      } else {
7148          reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7149      }
7150  
7151      BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7152            asserted,
7153            (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7154      REG_WR(sc, reg_addr, asserted);
7155  
7156      /* now set back the mask */
7157      if (asserted & ATTN_NIG_FOR_FUNC) {
7158          /*
7159           * Verify that IGU ack through BAR was written before restoring
7160           * NIG mask. This loop should exit after 2-3 iterations max.
7161           */
7162          if (sc->devinfo.int_block != INT_BLOCK_HC) {
7163              cnt = 0;
7164  
7165              do {
7166                  igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7167              } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7168                       (++cnt < MAX_IGU_ATTN_ACK_TO));
7169  
7170              if (!igu_acked) {
7171                  BLOGE(sc, "Failed to verify IGU ack on time\n");
7172              }
7173  
7174              mb();
7175          }
7176  
7177          REG_WR(sc, nig_int_mask_addr, nig_mask);
7178  
7179  	bxe_release_phy_lock(sc);
7180      }
7181  }
7182  
7183  static void
bxe_print_next_block(struct bxe_softc * sc,int idx,const char * blk)7184  bxe_print_next_block(struct bxe_softc *sc,
7185                       int              idx,
7186                       const char       *blk)
7187  {
7188      BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7189  }
7190  
7191  static int
bxe_check_blocks_with_parity0(struct bxe_softc * sc,uint32_t sig,int par_num,uint8_t print)7192  bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7193                                uint32_t         sig,
7194                                int              par_num,
7195                                uint8_t          print)
7196  {
7197      uint32_t cur_bit = 0;
7198      int i = 0;
7199  
7200      for (i = 0; sig; i++) {
7201          cur_bit = ((uint32_t)0x1 << i);
7202          if (sig & cur_bit) {
7203              switch (cur_bit) {
7204              case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7205                  if (print)
7206                      bxe_print_next_block(sc, par_num++, "BRB");
7207                  break;
7208              case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7209                  if (print)
7210                      bxe_print_next_block(sc, par_num++, "PARSER");
7211                  break;
7212              case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7213                  if (print)
7214                      bxe_print_next_block(sc, par_num++, "TSDM");
7215                  break;
7216              case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7217                  if (print)
7218                      bxe_print_next_block(sc, par_num++, "SEARCHER");
7219                  break;
7220              case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7221                  if (print)
7222                      bxe_print_next_block(sc, par_num++, "TCM");
7223                  break;
7224              case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7225                  if (print)
7226                      bxe_print_next_block(sc, par_num++, "TSEMI");
7227                  break;
7228              case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7229                  if (print)
7230                      bxe_print_next_block(sc, par_num++, "XPB");
7231                  break;
7232              }
7233  
7234              /* Clear the bit */
7235              sig &= ~cur_bit;
7236          }
7237      }
7238  
7239      return (par_num);
7240  }
7241  
7242  static int
bxe_check_blocks_with_parity1(struct bxe_softc * sc,uint32_t sig,int par_num,uint8_t * global,uint8_t print)7243  bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7244                                uint32_t         sig,
7245                                int              par_num,
7246                                uint8_t          *global,
7247                                uint8_t          print)
7248  {
7249      int i = 0;
7250      uint32_t cur_bit = 0;
7251      for (i = 0; sig; i++) {
7252          cur_bit = ((uint32_t)0x1 << i);
7253          if (sig & cur_bit) {
7254              switch (cur_bit) {
7255              case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7256                  if (print)
7257                      bxe_print_next_block(sc, par_num++, "PBF");
7258                  break;
7259              case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7260                  if (print)
7261                      bxe_print_next_block(sc, par_num++, "QM");
7262                  break;
7263              case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7264                  if (print)
7265                      bxe_print_next_block(sc, par_num++, "TM");
7266                  break;
7267              case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7268                  if (print)
7269                      bxe_print_next_block(sc, par_num++, "XSDM");
7270                  break;
7271              case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7272                  if (print)
7273                      bxe_print_next_block(sc, par_num++, "XCM");
7274                  break;
7275              case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7276                  if (print)
7277                      bxe_print_next_block(sc, par_num++, "XSEMI");
7278                  break;
7279              case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7280                  if (print)
7281                      bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7282                  break;
7283              case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7284                  if (print)
7285                      bxe_print_next_block(sc, par_num++, "NIG");
7286                  break;
7287              case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7288                  if (print)
7289                      bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7290                  *global = TRUE;
7291                  break;
7292              case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7293                  if (print)
7294                      bxe_print_next_block(sc, par_num++, "DEBUG");
7295                  break;
7296              case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7297                  if (print)
7298                      bxe_print_next_block(sc, par_num++, "USDM");
7299                  break;
7300              case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7301                  if (print)
7302                      bxe_print_next_block(sc, par_num++, "UCM");
7303                  break;
7304              case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7305                  if (print)
7306                      bxe_print_next_block(sc, par_num++, "USEMI");
7307                  break;
7308              case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7309                  if (print)
7310                      bxe_print_next_block(sc, par_num++, "UPB");
7311                  break;
7312              case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7313                  if (print)
7314                      bxe_print_next_block(sc, par_num++, "CSDM");
7315                  break;
7316              case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7317                  if (print)
7318                      bxe_print_next_block(sc, par_num++, "CCM");
7319                  break;
7320              }
7321  
7322              /* Clear the bit */
7323              sig &= ~cur_bit;
7324          }
7325      }
7326  
7327      return (par_num);
7328  }
7329  
7330  static int
bxe_check_blocks_with_parity2(struct bxe_softc * sc,uint32_t sig,int par_num,uint8_t print)7331  bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7332                                uint32_t         sig,
7333                                int              par_num,
7334                                uint8_t          print)
7335  {
7336      uint32_t cur_bit = 0;
7337      int i = 0;
7338  
7339      for (i = 0; sig; i++) {
7340          cur_bit = ((uint32_t)0x1 << i);
7341          if (sig & cur_bit) {
7342              switch (cur_bit) {
7343              case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7344                  if (print)
7345                      bxe_print_next_block(sc, par_num++, "CSEMI");
7346                  break;
7347              case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7348                  if (print)
7349                      bxe_print_next_block(sc, par_num++, "PXP");
7350                  break;
7351              case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7352                  if (print)
7353                      bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7354                  break;
7355              case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7356                  if (print)
7357                      bxe_print_next_block(sc, par_num++, "CFC");
7358                  break;
7359              case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7360                  if (print)
7361                      bxe_print_next_block(sc, par_num++, "CDU");
7362                  break;
7363              case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7364                  if (print)
7365                      bxe_print_next_block(sc, par_num++, "DMAE");
7366                  break;
7367              case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7368                  if (print)
7369                      bxe_print_next_block(sc, par_num++, "IGU");
7370                  break;
7371              case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7372                  if (print)
7373                      bxe_print_next_block(sc, par_num++, "MISC");
7374                  break;
7375              }
7376  
7377              /* Clear the bit */
7378              sig &= ~cur_bit;
7379          }
7380      }
7381  
7382      return (par_num);
7383  }
7384  
7385  static int
bxe_check_blocks_with_parity3(struct bxe_softc * sc,uint32_t sig,int par_num,uint8_t * global,uint8_t print)7386  bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7387                                uint32_t         sig,
7388                                int              par_num,
7389                                uint8_t          *global,
7390                                uint8_t          print)
7391  {
7392      uint32_t cur_bit = 0;
7393      int i = 0;
7394  
7395      for (i = 0; sig; i++) {
7396          cur_bit = ((uint32_t)0x1 << i);
7397          if (sig & cur_bit) {
7398              switch (cur_bit) {
7399              case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7400                  if (print)
7401                      bxe_print_next_block(sc, par_num++, "MCP ROM");
7402                  *global = TRUE;
7403                  break;
7404              case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7405                  if (print)
7406                      bxe_print_next_block(sc, par_num++,
7407                                "MCP UMP RX");
7408                  *global = TRUE;
7409                  break;
7410              case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7411                  if (print)
7412                      bxe_print_next_block(sc, par_num++,
7413                                "MCP UMP TX");
7414                  *global = TRUE;
7415                  break;
7416              case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7417                  if (print)
7418                      bxe_print_next_block(sc, par_num++,
7419                                "MCP SCPAD");
7420                  *global = TRUE;
7421                  break;
7422              }
7423  
7424              /* Clear the bit */
7425              sig &= ~cur_bit;
7426          }
7427      }
7428  
7429      return (par_num);
7430  }
7431  
7432  static int
bxe_check_blocks_with_parity4(struct bxe_softc * sc,uint32_t sig,int par_num,uint8_t print)7433  bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7434                                uint32_t         sig,
7435                                int              par_num,
7436                                uint8_t          print)
7437  {
7438      uint32_t cur_bit = 0;
7439      int i = 0;
7440  
7441      for (i = 0; sig; i++) {
7442          cur_bit = ((uint32_t)0x1 << i);
7443          if (sig & cur_bit) {
7444              switch (cur_bit) {
7445              case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7446                  if (print)
7447                      bxe_print_next_block(sc, par_num++, "PGLUE_B");
7448                  break;
7449              case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7450                  if (print)
7451                      bxe_print_next_block(sc, par_num++, "ATC");
7452                  break;
7453              }
7454  
7455              /* Clear the bit */
7456              sig &= ~cur_bit;
7457          }
7458      }
7459  
7460      return (par_num);
7461  }
7462  
7463  static uint8_t
bxe_parity_attn(struct bxe_softc * sc,uint8_t * global,uint8_t print,uint32_t * sig)7464  bxe_parity_attn(struct bxe_softc *sc,
7465                  uint8_t          *global,
7466                  uint8_t          print,
7467                  uint32_t         *sig)
7468  {
7469      int par_num = 0;
7470  
7471      if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7472          (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7473          (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7474          (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7475          (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7476          BLOGE(sc, "Parity error: HW block parity attention:\n"
7477                    "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7478                (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7479                (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7480                (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7481                (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7482                (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7483  
7484          if (print)
7485              BLOGI(sc, "Parity errors detected in blocks: ");
7486  
7487          par_num =
7488              bxe_check_blocks_with_parity0(sc, sig[0] &
7489                                            HW_PRTY_ASSERT_SET_0,
7490                                            par_num, print);
7491          par_num =
7492              bxe_check_blocks_with_parity1(sc, sig[1] &
7493                                            HW_PRTY_ASSERT_SET_1,
7494                                            par_num, global, print);
7495          par_num =
7496              bxe_check_blocks_with_parity2(sc, sig[2] &
7497                                            HW_PRTY_ASSERT_SET_2,
7498                                            par_num, print);
7499          par_num =
7500              bxe_check_blocks_with_parity3(sc, sig[3] &
7501                                            HW_PRTY_ASSERT_SET_3,
7502                                            par_num, global, print);
7503          par_num =
7504              bxe_check_blocks_with_parity4(sc, sig[4] &
7505                                            HW_PRTY_ASSERT_SET_4,
7506                                            par_num, print);
7507  
7508          if (print)
7509              BLOGI(sc, "\n");
7510  
7511  	if( *global == TRUE ) {
7512                  BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
7513          }
7514  
7515          return (TRUE);
7516      }
7517  
7518      return (FALSE);
7519  }
7520  
7521  static uint8_t
bxe_chk_parity_attn(struct bxe_softc * sc,uint8_t * global,uint8_t print)7522  bxe_chk_parity_attn(struct bxe_softc *sc,
7523                      uint8_t          *global,
7524                      uint8_t          print)
7525  {
7526      struct attn_route attn = { {0} };
7527      int port = SC_PORT(sc);
7528  
7529      if(sc->state != BXE_STATE_OPEN)
7530          return FALSE;
7531  
7532      attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7533      attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7534      attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7535      attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7536  
7537      /*
7538       * Since MCP attentions can't be disabled inside the block, we need to
7539       * read AEU registers to see whether they're currently disabled
7540       */
7541      attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7542                                        : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7543                           MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7544                          ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7545  
7546  
7547      if (!CHIP_IS_E1x(sc))
7548          attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7549  
7550      return (bxe_parity_attn(sc, global, print, attn.sig));
7551  }
7552  
7553  static void
bxe_attn_int_deasserted4(struct bxe_softc * sc,uint32_t attn)7554  bxe_attn_int_deasserted4(struct bxe_softc *sc,
7555                           uint32_t         attn)
7556  {
7557      uint32_t val;
7558      bool err_flg = false;
7559  
7560      if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7561          val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7562          BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7563          err_flg = true;
7564          if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7565              BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7566          if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7567              BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7568          if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7569              BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7570          if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7571              BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7572          if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7573              BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7574          if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7575              BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7576          if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7577              BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7578          if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7579              BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7580          if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7581              BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7582      }
7583  
7584      if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7585          val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7586          BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7587  	err_flg = true;
7588          if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7589              BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7590          if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7591              BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7592          if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7593              BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7594          if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7595              BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7596          if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7597              BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7598          if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7599              BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7600      }
7601  
7602      if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7603                  AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7604          BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7605                (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7606                                   AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7607  	err_flg = true;
7608      }
7609      if (err_flg) {
7610  	BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
7611  	taskqueue_enqueue_timeout(taskqueue_thread,
7612  	    &sc->sp_err_timeout_task, hz/10);
7613      }
7614  
7615  }
7616  
7617  static void
bxe_e1h_disable(struct bxe_softc * sc)7618  bxe_e1h_disable(struct bxe_softc *sc)
7619  {
7620      int port = SC_PORT(sc);
7621  
7622      bxe_tx_disable(sc);
7623  
7624      REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7625  }
7626  
7627  static void
bxe_e1h_enable(struct bxe_softc * sc)7628  bxe_e1h_enable(struct bxe_softc *sc)
7629  {
7630      int port = SC_PORT(sc);
7631  
7632      REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7633  
7634      // XXX bxe_tx_enable(sc);
7635  }
7636  
7637  /*
7638   * called due to MCP event (on pmf):
7639   *   reread new bandwidth configuration
7640   *   configure FW
7641   *   notify others function about the change
7642   */
7643  static void
bxe_config_mf_bw(struct bxe_softc * sc)7644  bxe_config_mf_bw(struct bxe_softc *sc)
7645  {
7646      if (sc->link_vars.link_up) {
7647          bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7648          // XXX bxe_link_sync_notify(sc);
7649      }
7650  
7651      storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7652  }
7653  
7654  static void
bxe_set_mf_bw(struct bxe_softc * sc)7655  bxe_set_mf_bw(struct bxe_softc *sc)
7656  {
7657      bxe_config_mf_bw(sc);
7658      bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7659  }
7660  
7661  static void
bxe_handle_eee_event(struct bxe_softc * sc)7662  bxe_handle_eee_event(struct bxe_softc *sc)
7663  {
7664      BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7665      bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7666  }
7667  
7668  #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7669  
7670  static void
bxe_drv_info_ether_stat(struct bxe_softc * sc)7671  bxe_drv_info_ether_stat(struct bxe_softc *sc)
7672  {
7673      struct eth_stats_info *ether_stat =
7674          &sc->sp->drv_info_to_mcp.ether_stat;
7675  
7676      strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7677              ETH_STAT_INFO_VERSION_LEN);
7678  
7679      /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7680      sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7681                                            DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7682                                            ether_stat->mac_local + MAC_PAD,
7683                                            MAC_PAD, ETH_ALEN);
7684  
7685      ether_stat->mtu_size = sc->mtu;
7686  
7687      ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7688      if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7689          ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7690      }
7691  
7692      // XXX ether_stat->feature_flags |= ???;
7693  
7694      ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7695  
7696      ether_stat->txq_size = sc->tx_ring_size;
7697      ether_stat->rxq_size = sc->rx_ring_size;
7698  }
7699  
7700  static void
bxe_handle_drv_info_req(struct bxe_softc * sc)7701  bxe_handle_drv_info_req(struct bxe_softc *sc)
7702  {
7703      enum drv_info_opcode op_code;
7704      uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7705  
7706      /* if drv_info version supported by MFW doesn't match - send NACK */
7707      if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7708          bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7709          return;
7710      }
7711  
7712      op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7713                 DRV_INFO_CONTROL_OP_CODE_SHIFT);
7714  
7715      memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7716  
7717      switch (op_code) {
7718      case ETH_STATS_OPCODE:
7719          bxe_drv_info_ether_stat(sc);
7720          break;
7721      case FCOE_STATS_OPCODE:
7722      case ISCSI_STATS_OPCODE:
7723      default:
7724          /* if op code isn't supported - send NACK */
7725          bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7726          return;
7727      }
7728  
7729      /*
7730       * If we got drv_info attn from MFW then these fields are defined in
7731       * shmem2 for sure
7732       */
7733      SHMEM2_WR(sc, drv_info_host_addr_lo,
7734                U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7735      SHMEM2_WR(sc, drv_info_host_addr_hi,
7736                U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7737  
7738      bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7739  }
7740  
7741  static void
bxe_dcc_event(struct bxe_softc * sc,uint32_t dcc_event)7742  bxe_dcc_event(struct bxe_softc *sc,
7743                uint32_t         dcc_event)
7744  {
7745      BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7746  
7747      if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7748          /*
7749           * This is the only place besides the function initialization
7750           * where the sc->flags can change so it is done without any
7751           * locks
7752           */
7753          if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7754              BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7755              sc->flags |= BXE_MF_FUNC_DIS;
7756              bxe_e1h_disable(sc);
7757          } else {
7758              BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7759              sc->flags &= ~BXE_MF_FUNC_DIS;
7760              bxe_e1h_enable(sc);
7761          }
7762          dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7763      }
7764  
7765      if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7766          bxe_config_mf_bw(sc);
7767          dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7768      }
7769  
7770      /* Report results to MCP */
7771      if (dcc_event)
7772          bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7773      else
7774          bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7775  }
7776  
7777  static void
bxe_pmf_update(struct bxe_softc * sc)7778  bxe_pmf_update(struct bxe_softc *sc)
7779  {
7780      int port = SC_PORT(sc);
7781      uint32_t val;
7782  
7783      sc->port.pmf = 1;
7784      BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7785  
7786      /*
7787       * We need the mb() to ensure the ordering between the writing to
7788       * sc->port.pmf here and reading it from the bxe_periodic_task().
7789       */
7790      mb();
7791  
7792      /* queue a periodic task */
7793      // XXX schedule task...
7794  
7795      // XXX bxe_dcbx_pmf_update(sc);
7796  
7797      /* enable nig attention */
7798      val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7799      if (sc->devinfo.int_block == INT_BLOCK_HC) {
7800          REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7801          REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7802      } else if (!CHIP_IS_E1x(sc)) {
7803          REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7804          REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7805      }
7806  
7807      bxe_stats_handle(sc, STATS_EVENT_PMF);
7808  }
7809  
7810  static int
bxe_mc_assert(struct bxe_softc * sc)7811  bxe_mc_assert(struct bxe_softc *sc)
7812  {
7813      char last_idx;
7814      int i, rc = 0;
7815      uint32_t row0, row1, row2, row3;
7816  
7817      /* XSTORM */
7818      last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7819      if (last_idx)
7820          BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7821  
7822      /* print the asserts */
7823      for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7824  
7825          row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7826          row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7827          row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7828          row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7829  
7830          if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7831              BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7832                    i, row3, row2, row1, row0);
7833              rc++;
7834          } else {
7835              break;
7836          }
7837      }
7838  
7839      /* TSTORM */
7840      last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7841      if (last_idx) {
7842          BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7843      }
7844  
7845      /* print the asserts */
7846      for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7847  
7848          row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7849          row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7850          row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7851          row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7852  
7853          if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7854              BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7855                    i, row3, row2, row1, row0);
7856              rc++;
7857          } else {
7858              break;
7859          }
7860      }
7861  
7862      /* CSTORM */
7863      last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7864      if (last_idx) {
7865          BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7866      }
7867  
7868      /* print the asserts */
7869      for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7870  
7871          row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7872          row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7873          row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7874          row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7875  
7876          if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7877              BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7878                    i, row3, row2, row1, row0);
7879              rc++;
7880          } else {
7881              break;
7882          }
7883      }
7884  
7885      /* USTORM */
7886      last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7887      if (last_idx) {
7888          BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7889      }
7890  
7891      /* print the asserts */
7892      for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7893  
7894          row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7895          row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7896          row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7897          row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7898  
7899          if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7900              BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7901                    i, row3, row2, row1, row0);
7902              rc++;
7903          } else {
7904              break;
7905          }
7906      }
7907  
7908      return (rc);
7909  }
7910  
7911  static void
bxe_attn_int_deasserted3(struct bxe_softc * sc,uint32_t attn)7912  bxe_attn_int_deasserted3(struct bxe_softc *sc,
7913                           uint32_t         attn)
7914  {
7915      int func = SC_FUNC(sc);
7916      uint32_t val;
7917  
7918      if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7919  
7920          if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7921  
7922              REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7923              bxe_read_mf_cfg(sc);
7924              sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7925                  MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7926              val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7927  
7928              if (val & DRV_STATUS_DCC_EVENT_MASK)
7929                  bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7930  
7931              if (val & DRV_STATUS_SET_MF_BW)
7932                  bxe_set_mf_bw(sc);
7933  
7934              if (val & DRV_STATUS_DRV_INFO_REQ)
7935                  bxe_handle_drv_info_req(sc);
7936  
7937              if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7938                  bxe_pmf_update(sc);
7939  
7940              if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7941                  bxe_handle_eee_event(sc);
7942  
7943              if (sc->link_vars.periodic_flags &
7944                  ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7945                  /* sync with link */
7946  		bxe_acquire_phy_lock(sc);
7947                  sc->link_vars.periodic_flags &=
7948                      ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7949  		bxe_release_phy_lock(sc);
7950                  if (IS_MF(sc))
7951                      ; // XXX bxe_link_sync_notify(sc);
7952                  bxe_link_report(sc);
7953              }
7954  
7955              /*
7956               * Always call it here: bxe_link_report() will
7957               * prevent the link indication duplication.
7958               */
7959              bxe_link_status_update(sc);
7960  
7961          } else if (attn & BXE_MC_ASSERT_BITS) {
7962  
7963              BLOGE(sc, "MC assert!\n");
7964              bxe_mc_assert(sc);
7965              REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
7966              REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
7967              REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
7968              REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
7969              bxe_int_disable(sc);
7970              BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT);
7971              taskqueue_enqueue_timeout(taskqueue_thread,
7972                  &sc->sp_err_timeout_task, hz/10);
7973  
7974          } else if (attn & BXE_MCP_ASSERT) {
7975  
7976              BLOGE(sc, "MCP assert!\n");
7977              REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
7978              BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT);
7979              taskqueue_enqueue_timeout(taskqueue_thread,
7980                  &sc->sp_err_timeout_task, hz/10);
7981              bxe_int_disable(sc);  /*avoid repetive assert alert */
7982  
7983  
7984          } else {
7985              BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
7986          }
7987      }
7988  
7989      if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
7990          BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
7991          if (attn & BXE_GRC_TIMEOUT) {
7992              val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
7993              BLOGE(sc, "GRC time-out 0x%08x\n", val);
7994          }
7995          if (attn & BXE_GRC_RSV) {
7996              val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
7997              BLOGE(sc, "GRC reserved 0x%08x\n", val);
7998          }
7999          REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8000      }
8001  }
8002  
8003  static void
bxe_attn_int_deasserted2(struct bxe_softc * sc,uint32_t attn)8004  bxe_attn_int_deasserted2(struct bxe_softc *sc,
8005                           uint32_t         attn)
8006  {
8007      int port = SC_PORT(sc);
8008      int reg_offset;
8009      uint32_t val0, mask0, val1, mask1;
8010      uint32_t val;
8011      bool err_flg = false;
8012  
8013      if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8014          val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8015          BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8016          /* CFC error attention */
8017          if (val & 0x2) {
8018              BLOGE(sc, "FATAL error from CFC\n");
8019  	    err_flg = true;
8020          }
8021      }
8022  
8023      if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8024          val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8025          BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8026          /* RQ_USDMDP_FIFO_OVERFLOW */
8027          if (val & 0x18000) {
8028              BLOGE(sc, "FATAL error from PXP\n");
8029  	    err_flg = true;
8030          }
8031  
8032          if (!CHIP_IS_E1x(sc)) {
8033              val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8034              BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8035  	    err_flg = true;
8036          }
8037      }
8038  
8039  #define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8040  #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8041  
8042      if (attn & AEU_PXP2_HW_INT_BIT) {
8043          /*  CQ47854 workaround do not panic on
8044           *  PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8045           */
8046          if (!CHIP_IS_E1x(sc)) {
8047              mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8048              val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8049              mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8050              val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8051              /*
8052               * If the only PXP2_EOP_ERROR_BIT is set in
8053               * STS0 and STS1 - clear it
8054               *
8055               * probably we lose additional attentions between
8056               * STS0 and STS_CLR0, in this case user will not
8057               * be notified about them
8058               */
8059              if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8060                  !(val1 & mask1))
8061                  val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8062  
8063              /* print the register, since no one can restore it */
8064              BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8065  
8066              /*
8067               * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8068               * then notify
8069               */
8070              if (val0 & PXP2_EOP_ERROR_BIT) {
8071                  BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8072  		err_flg = true;
8073  
8074                  /*
8075                   * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8076                   * set then clear attention from PXP2 block without panic
8077                   */
8078                  if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8079                      ((val1 & mask1) == 0))
8080                      attn &= ~AEU_PXP2_HW_INT_BIT;
8081              }
8082          }
8083      }
8084  
8085      if (attn & HW_INTERRUT_ASSERT_SET_2) {
8086          reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8087                               MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8088  
8089          val = REG_RD(sc, reg_offset);
8090          val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8091          REG_WR(sc, reg_offset, val);
8092  
8093          BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8094                (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8095  	err_flg = true;
8096          bxe_panic(sc, ("HW block attention set2\n"));
8097      }
8098      if(err_flg) {
8099          BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
8100          taskqueue_enqueue_timeout(taskqueue_thread,
8101             &sc->sp_err_timeout_task, hz/10);
8102      }
8103  
8104  }
8105  
8106  static void
bxe_attn_int_deasserted1(struct bxe_softc * sc,uint32_t attn)8107  bxe_attn_int_deasserted1(struct bxe_softc *sc,
8108                           uint32_t         attn)
8109  {
8110      int port = SC_PORT(sc);
8111      int reg_offset;
8112      uint32_t val;
8113      bool err_flg = false;
8114  
8115      if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8116          val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8117          BLOGE(sc, "DB hw attention 0x%08x\n", val);
8118          /* DORQ discard attention */
8119          if (val & 0x2) {
8120              BLOGE(sc, "FATAL error from DORQ\n");
8121  	    err_flg = true;
8122          }
8123      }
8124  
8125      if (attn & HW_INTERRUT_ASSERT_SET_1) {
8126          reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8127                               MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8128  
8129          val = REG_RD(sc, reg_offset);
8130          val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8131          REG_WR(sc, reg_offset, val);
8132  
8133          BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8134                (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8135          err_flg = true;
8136          bxe_panic(sc, ("HW block attention set1\n"));
8137      }
8138      if(err_flg) {
8139          BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8140          taskqueue_enqueue_timeout(taskqueue_thread,
8141             &sc->sp_err_timeout_task, hz/10);
8142      }
8143  
8144  }
8145  
8146  static void
bxe_attn_int_deasserted0(struct bxe_softc * sc,uint32_t attn)8147  bxe_attn_int_deasserted0(struct bxe_softc *sc,
8148                           uint32_t         attn)
8149  {
8150      int port = SC_PORT(sc);
8151      int reg_offset;
8152      uint32_t val;
8153  
8154      reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8155                            MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8156  
8157      if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8158          val = REG_RD(sc, reg_offset);
8159          val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8160          REG_WR(sc, reg_offset, val);
8161  
8162          BLOGW(sc, "SPIO5 hw attention\n");
8163  
8164          /* Fan failure attention */
8165          elink_hw_reset_phy(&sc->link_params);
8166          bxe_fan_failure(sc);
8167      }
8168  
8169      if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8170  	bxe_acquire_phy_lock(sc);
8171          elink_handle_module_detect_int(&sc->link_params);
8172  	bxe_release_phy_lock(sc);
8173      }
8174  
8175      if (attn & HW_INTERRUT_ASSERT_SET_0) {
8176          val = REG_RD(sc, reg_offset);
8177          val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8178          REG_WR(sc, reg_offset, val);
8179  
8180  
8181          BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8182          taskqueue_enqueue_timeout(taskqueue_thread,
8183             &sc->sp_err_timeout_task, hz/10);
8184  
8185          bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8186                         (attn & HW_INTERRUT_ASSERT_SET_0)));
8187      }
8188  }
8189  
8190  static void
bxe_attn_int_deasserted(struct bxe_softc * sc,uint32_t deasserted)8191  bxe_attn_int_deasserted(struct bxe_softc *sc,
8192                          uint32_t         deasserted)
8193  {
8194      struct attn_route attn;
8195      struct attn_route *group_mask;
8196      int port = SC_PORT(sc);
8197      int index;
8198      uint32_t reg_addr;
8199      uint32_t val;
8200      uint32_t aeu_mask;
8201      uint8_t global = FALSE;
8202  
8203      /*
8204       * Need to take HW lock because MCP or other port might also
8205       * try to handle this event.
8206       */
8207      bxe_acquire_alr(sc);
8208  
8209      if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8210          /* XXX
8211           * In case of parity errors don't handle attentions so that
8212           * other function would "see" parity errors.
8213           */
8214          // XXX schedule a recovery task...
8215          /* disable HW interrupts */
8216          bxe_int_disable(sc);
8217          BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY);
8218          taskqueue_enqueue_timeout(taskqueue_thread,
8219             &sc->sp_err_timeout_task, hz/10);
8220          bxe_release_alr(sc);
8221          return;
8222      }
8223  
8224      attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8225      attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8226      attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8227      attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8228      if (!CHIP_IS_E1x(sc)) {
8229          attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8230      } else {
8231          attn.sig[4] = 0;
8232      }
8233  
8234      BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8235            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8236  
8237      for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8238          if (deasserted & (1 << index)) {
8239              group_mask = &sc->attn_group[index];
8240  
8241              BLOGD(sc, DBG_INTR,
8242                    "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8243                    group_mask->sig[0], group_mask->sig[1],
8244                    group_mask->sig[2], group_mask->sig[3],
8245                    group_mask->sig[4]);
8246  
8247              bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8248              bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8249              bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8250              bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8251              bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8252          }
8253      }
8254  
8255      bxe_release_alr(sc);
8256  
8257      if (sc->devinfo.int_block == INT_BLOCK_HC) {
8258          reg_addr = (HC_REG_COMMAND_REG + port*32 +
8259                      COMMAND_REG_ATTN_BITS_CLR);
8260      } else {
8261          reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8262      }
8263  
8264      val = ~deasserted;
8265      BLOGD(sc, DBG_INTR,
8266            "about to mask 0x%08x at %s addr 0x%08x\n", val,
8267            (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8268      REG_WR(sc, reg_addr, val);
8269  
8270      if (~sc->attn_state & deasserted) {
8271          BLOGE(sc, "IGU error\n");
8272      }
8273  
8274      reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8275                        MISC_REG_AEU_MASK_ATTN_FUNC_0;
8276  
8277      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8278  
8279      aeu_mask = REG_RD(sc, reg_addr);
8280  
8281      BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8282            aeu_mask, deasserted);
8283      aeu_mask |= (deasserted & 0x3ff);
8284      BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8285  
8286      REG_WR(sc, reg_addr, aeu_mask);
8287      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8288  
8289      BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8290      sc->attn_state &= ~deasserted;
8291      BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8292  }
8293  
8294  static void
bxe_attn_int(struct bxe_softc * sc)8295  bxe_attn_int(struct bxe_softc *sc)
8296  {
8297      /* read local copy of bits */
8298      uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8299      uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8300      uint32_t attn_state = sc->attn_state;
8301  
8302      /* look for changed bits */
8303      uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8304      uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
8305  
8306      BLOGD(sc, DBG_INTR,
8307            "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8308            attn_bits, attn_ack, asserted, deasserted);
8309  
8310      if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8311          BLOGE(sc, "BAD attention state\n");
8312      }
8313  
8314      /* handle bits that were raised */
8315      if (asserted) {
8316          bxe_attn_int_asserted(sc, asserted);
8317      }
8318  
8319      if (deasserted) {
8320          bxe_attn_int_deasserted(sc, deasserted);
8321      }
8322  }
8323  
8324  static uint16_t
bxe_update_dsb_idx(struct bxe_softc * sc)8325  bxe_update_dsb_idx(struct bxe_softc *sc)
8326  {
8327      struct host_sp_status_block *def_sb = sc->def_sb;
8328      uint16_t rc = 0;
8329  
8330      mb(); /* status block is written to by the chip */
8331  
8332      if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8333          sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8334          rc |= BXE_DEF_SB_ATT_IDX;
8335      }
8336  
8337      if (sc->def_idx != def_sb->sp_sb.running_index) {
8338          sc->def_idx = def_sb->sp_sb.running_index;
8339          rc |= BXE_DEF_SB_IDX;
8340      }
8341  
8342      mb();
8343  
8344      return (rc);
8345  }
8346  
8347  static inline struct ecore_queue_sp_obj *
bxe_cid_to_q_obj(struct bxe_softc * sc,uint32_t cid)8348  bxe_cid_to_q_obj(struct bxe_softc *sc,
8349                   uint32_t         cid)
8350  {
8351      BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8352      return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8353  }
8354  
8355  static void
bxe_handle_mcast_eqe(struct bxe_softc * sc)8356  bxe_handle_mcast_eqe(struct bxe_softc *sc)
8357  {
8358      struct ecore_mcast_ramrod_params rparam;
8359      int rc;
8360  
8361      memset(&rparam, 0, sizeof(rparam));
8362  
8363      rparam.mcast_obj = &sc->mcast_obj;
8364  
8365      BXE_MCAST_LOCK(sc);
8366  
8367      /* clear pending state for the last command */
8368      sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8369  
8370      /* if there are pending mcast commands - send them */
8371      if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8372          rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8373          if (rc < 0) {
8374              BLOGD(sc, DBG_SP,
8375                  "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8376          }
8377      }
8378  
8379      BXE_MCAST_UNLOCK(sc);
8380  }
8381  
8382  static void
bxe_handle_classification_eqe(struct bxe_softc * sc,union event_ring_elem * elem)8383  bxe_handle_classification_eqe(struct bxe_softc      *sc,
8384                                union event_ring_elem *elem)
8385  {
8386      unsigned long ramrod_flags = 0;
8387      int rc = 0;
8388      uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8389      struct ecore_vlan_mac_obj *vlan_mac_obj;
8390  
8391      /* always push next commands out, don't wait here */
8392      bit_set(&ramrod_flags, RAMROD_CONT);
8393  
8394      switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8395      case ECORE_FILTER_MAC_PENDING:
8396          BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8397          vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8398          break;
8399  
8400      case ECORE_FILTER_MCAST_PENDING:
8401          BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8402          /*
8403           * This is only relevant for 57710 where multicast MACs are
8404           * configured as unicast MACs using the same ramrod.
8405           */
8406          bxe_handle_mcast_eqe(sc);
8407          return;
8408  
8409      default:
8410          BLOGE(sc, "Unsupported classification command: %d\n",
8411                elem->message.data.eth_event.echo);
8412          return;
8413      }
8414  
8415      rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8416  
8417      if (rc < 0) {
8418          BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8419      } else if (rc > 0) {
8420          BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8421      }
8422  }
8423  
8424  static void
bxe_handle_rx_mode_eqe(struct bxe_softc * sc,union event_ring_elem * elem)8425  bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8426                         union event_ring_elem *elem)
8427  {
8428      bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8429  
8430      /* send rx_mode command again if was requested */
8431      if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8432                                 &sc->sp_state)) {
8433          bxe_set_storm_rx_mode(sc);
8434      }
8435  }
8436  
8437  static void
bxe_update_eq_prod(struct bxe_softc * sc,uint16_t prod)8438  bxe_update_eq_prod(struct bxe_softc *sc,
8439                     uint16_t         prod)
8440  {
8441      storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8442      wmb(); /* keep prod updates ordered */
8443  }
8444  
8445  static void
bxe_eq_int(struct bxe_softc * sc)8446  bxe_eq_int(struct bxe_softc *sc)
8447  {
8448      uint16_t hw_cons, sw_cons, sw_prod;
8449      union event_ring_elem *elem;
8450      uint8_t echo;
8451      uint32_t cid;
8452      uint8_t opcode;
8453      int spqe_cnt = 0;
8454      struct ecore_queue_sp_obj *q_obj;
8455      struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8456      struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8457  
8458      hw_cons = le16toh(*sc->eq_cons_sb);
8459  
8460      /*
8461       * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
8462       * when we get to the next-page we need to adjust so the loop
8463       * condition below will be met. The next element is the size of a
8464       * regular element and hence incrementing by 1
8465       */
8466      if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8467          hw_cons++;
8468      }
8469  
8470      /*
8471       * This function may never run in parallel with itself for a
8472       * specific sc and no need for a read memory barrier here.
8473       */
8474      sw_cons = sc->eq_cons;
8475      sw_prod = sc->eq_prod;
8476  
8477      BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8478            hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8479  
8480      for (;
8481           sw_cons != hw_cons;
8482           sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8483  
8484          elem = &sc->eq[EQ_DESC(sw_cons)];
8485  
8486          /* elem CID originates from FW, actually LE */
8487          cid = SW_CID(elem->message.data.cfc_del_event.cid);
8488          opcode = elem->message.opcode;
8489  
8490          /* handle eq element */
8491          switch (opcode) {
8492  
8493          case EVENT_RING_OPCODE_STAT_QUERY:
8494              BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8495                    sc->stats_comp++);
8496              /* nothing to do with stats comp */
8497              goto next_spqe;
8498  
8499          case EVENT_RING_OPCODE_CFC_DEL:
8500              /* handle according to cid range */
8501              /* we may want to verify here that the sc state is HALTING */
8502              BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8503              q_obj = bxe_cid_to_q_obj(sc, cid);
8504              if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8505                  break;
8506              }
8507              goto next_spqe;
8508  
8509          case EVENT_RING_OPCODE_STOP_TRAFFIC:
8510              BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8511              if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8512                  break;
8513              }
8514              // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8515              goto next_spqe;
8516  
8517          case EVENT_RING_OPCODE_START_TRAFFIC:
8518              BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8519              if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8520                  break;
8521              }
8522              // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8523              goto next_spqe;
8524  
8525          case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8526              echo = elem->message.data.function_update_event.echo;
8527              if (echo == SWITCH_UPDATE) {
8528                  BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8529                  if (f_obj->complete_cmd(sc, f_obj,
8530                                          ECORE_F_CMD_SWITCH_UPDATE)) {
8531                      break;
8532                  }
8533              }
8534              else {
8535                  BLOGD(sc, DBG_SP,
8536                        "AFEX: ramrod completed FUNCTION_UPDATE\n");
8537              }
8538              goto next_spqe;
8539  
8540          case EVENT_RING_OPCODE_FORWARD_SETUP:
8541              q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8542              if (q_obj->complete_cmd(sc, q_obj,
8543                                      ECORE_Q_CMD_SETUP_TX_ONLY)) {
8544                  break;
8545              }
8546              goto next_spqe;
8547  
8548          case EVENT_RING_OPCODE_FUNCTION_START:
8549              BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8550              if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8551                  break;
8552              }
8553              goto next_spqe;
8554  
8555          case EVENT_RING_OPCODE_FUNCTION_STOP:
8556              BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8557              if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8558                  break;
8559              }
8560              goto next_spqe;
8561          }
8562  
8563          switch (opcode | sc->state) {
8564          case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8565          case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8566              cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8567              BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8568              rss_raw->clear_pending(rss_raw);
8569              break;
8570  
8571          case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8572          case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8573          case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8574          case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8575          case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8576          case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8577              BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8578              bxe_handle_classification_eqe(sc, elem);
8579              break;
8580  
8581          case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8582          case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8583          case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8584              BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8585              bxe_handle_mcast_eqe(sc);
8586              break;
8587  
8588          case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8589          case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8590          case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8591              BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8592              bxe_handle_rx_mode_eqe(sc, elem);
8593              break;
8594  
8595          default:
8596              /* unknown event log error and continue */
8597              BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8598                    elem->message.opcode, sc->state);
8599          }
8600  
8601  next_spqe:
8602          spqe_cnt++;
8603      } /* for */
8604  
8605      mb();
8606      atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8607  
8608      sc->eq_cons = sw_cons;
8609      sc->eq_prod = sw_prod;
8610  
8611      /* make sure that above mem writes were issued towards the memory */
8612      wmb();
8613  
8614      /* update producer */
8615      bxe_update_eq_prod(sc, sc->eq_prod);
8616  }
8617  
8618  static void
bxe_handle_sp_tq(void * context,int pending)8619  bxe_handle_sp_tq(void *context,
8620                   int  pending)
8621  {
8622      struct bxe_softc *sc = (struct bxe_softc *)context;
8623      uint16_t status;
8624  
8625      BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8626  
8627      /* what work needs to be performed? */
8628      status = bxe_update_dsb_idx(sc);
8629  
8630      BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8631  
8632      /* HW attentions */
8633      if (status & BXE_DEF_SB_ATT_IDX) {
8634          BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8635          bxe_attn_int(sc);
8636          status &= ~BXE_DEF_SB_ATT_IDX;
8637      }
8638  
8639      /* SP events: STAT_QUERY and others */
8640      if (status & BXE_DEF_SB_IDX) {
8641          /* handle EQ completions */
8642          BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8643          bxe_eq_int(sc);
8644          bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8645                     le16toh(sc->def_idx), IGU_INT_NOP, 1);
8646          status &= ~BXE_DEF_SB_IDX;
8647      }
8648  
8649      /* if status is non zero then something went wrong */
8650      if (__predict_false(status)) {
8651          BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8652      }
8653  
8654      /* ack status block only if something was actually handled */
8655      bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8656                 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8657  
8658      /*
8659       * Must be called after the EQ processing (since eq leads to sriov
8660       * ramrod completion flows).
8661       * This flow may have been scheduled by the arrival of a ramrod
8662       * completion, or by the sriov code rescheduling itself.
8663       */
8664      // XXX bxe_iov_sp_task(sc);
8665  
8666  }
8667  
8668  static void
bxe_handle_fp_tq(void * context,int pending)8669  bxe_handle_fp_tq(void *context,
8670                   int  pending)
8671  {
8672      struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8673      struct bxe_softc *sc = fp->sc;
8674      /* uint8_t more_tx = FALSE; */
8675      uint8_t more_rx = FALSE;
8676  
8677      BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8678  
8679      /* XXX
8680       * IFF_DRV_RUNNING state can't be checked here since we process
8681       * slowpath events on a client queue during setup. Instead
8682       * we need to add a "process/continue" flag here that the driver
8683       * can use to tell the task here not to do anything.
8684       */
8685  #if 0
8686      if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8687          return;
8688      }
8689  #endif
8690  
8691      /* update the fastpath index */
8692      bxe_update_fp_sb_idx(fp);
8693  
8694      /* XXX add loop here if ever support multiple tx CoS */
8695      /* fp->txdata[cos] */
8696      if (bxe_has_tx_work(fp)) {
8697          BXE_FP_TX_LOCK(fp);
8698          /* more_tx = */ bxe_txeof(sc, fp);
8699          BXE_FP_TX_UNLOCK(fp);
8700      }
8701  
8702      if (bxe_has_rx_work(fp)) {
8703          more_rx = bxe_rxeof(sc, fp);
8704      }
8705  
8706      if (more_rx /*|| more_tx*/) {
8707          /* still more work to do */
8708          taskqueue_enqueue(fp->tq, &fp->tq_task);
8709          return;
8710      }
8711  
8712      bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8713                 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8714  }
8715  
8716  static void
bxe_task_fp(struct bxe_fastpath * fp)8717  bxe_task_fp(struct bxe_fastpath *fp)
8718  {
8719      struct bxe_softc *sc = fp->sc;
8720      /* uint8_t more_tx = FALSE; */
8721      uint8_t more_rx = FALSE;
8722  
8723      BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8724  
8725      /* update the fastpath index */
8726      bxe_update_fp_sb_idx(fp);
8727  
8728      /* XXX add loop here if ever support multiple tx CoS */
8729      /* fp->txdata[cos] */
8730      if (bxe_has_tx_work(fp)) {
8731          BXE_FP_TX_LOCK(fp);
8732          /* more_tx = */ bxe_txeof(sc, fp);
8733          BXE_FP_TX_UNLOCK(fp);
8734      }
8735  
8736      if (bxe_has_rx_work(fp)) {
8737          more_rx = bxe_rxeof(sc, fp);
8738      }
8739  
8740      if (more_rx /*|| more_tx*/) {
8741          /* still more work to do, bail out if this ISR and process later */
8742          taskqueue_enqueue(fp->tq, &fp->tq_task);
8743          return;
8744      }
8745  
8746      /*
8747       * Here we write the fastpath index taken before doing any tx or rx work.
8748       * It is very well possible other hw events occurred up to this point and
8749       * they were actually processed accordingly above. Since we're going to
8750       * write an older fastpath index, an interrupt is coming which we might
8751       * not do any work in.
8752       */
8753      bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8754                 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8755  }
8756  
8757  /*
8758   * Legacy interrupt entry point.
8759   *
8760   * Verifies that the controller generated the interrupt and
8761   * then calls a separate routine to handle the various
8762   * interrupt causes: link, RX, and TX.
8763   */
8764  static void
bxe_intr_legacy(void * xsc)8765  bxe_intr_legacy(void *xsc)
8766  {
8767      struct bxe_softc *sc = (struct bxe_softc *)xsc;
8768      struct bxe_fastpath *fp;
8769      uint16_t status, mask;
8770      int i;
8771  
8772      BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8773  
8774      /*
8775       * 0 for ustorm, 1 for cstorm
8776       * the bits returned from ack_int() are 0-15
8777       * bit 0 = attention status block
8778       * bit 1 = fast path status block
8779       * a mask of 0x2 or more = tx/rx event
8780       * a mask of 1 = slow path event
8781       */
8782  
8783      status = bxe_ack_int(sc);
8784  
8785      /* the interrupt is not for us */
8786      if (__predict_false(status == 0)) {
8787          BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8788          return;
8789      }
8790  
8791      BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8792  
8793      FOR_EACH_ETH_QUEUE(sc, i) {
8794          fp = &sc->fp[i];
8795          mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8796          if (status & mask) {
8797              /* acknowledge and disable further fastpath interrupts */
8798              bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8799              bxe_task_fp(fp);
8800              status &= ~mask;
8801          }
8802      }
8803  
8804      if (__predict_false(status & 0x1)) {
8805          /* acknowledge and disable further slowpath interrupts */
8806          bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8807  
8808          /* schedule slowpath handler */
8809          taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8810  
8811          status &= ~0x1;
8812      }
8813  
8814      if (__predict_false(status)) {
8815          BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8816      }
8817  }
8818  
8819  /* slowpath interrupt entry point */
8820  static void
bxe_intr_sp(void * xsc)8821  bxe_intr_sp(void *xsc)
8822  {
8823      struct bxe_softc *sc = (struct bxe_softc *)xsc;
8824  
8825      BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8826  
8827      /* acknowledge and disable further slowpath interrupts */
8828      bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8829  
8830      /* schedule slowpath handler */
8831      taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8832  }
8833  
8834  /* fastpath interrupt entry point */
8835  static void
bxe_intr_fp(void * xfp)8836  bxe_intr_fp(void *xfp)
8837  {
8838      struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8839      struct bxe_softc *sc = fp->sc;
8840  
8841      BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8842  
8843      BLOGD(sc, DBG_INTR,
8844            "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8845            curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8846  
8847      /* acknowledge and disable further fastpath interrupts */
8848      bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8849  
8850      bxe_task_fp(fp);
8851  }
8852  
8853  /* Release all interrupts allocated by the driver. */
8854  static void
bxe_interrupt_free(struct bxe_softc * sc)8855  bxe_interrupt_free(struct bxe_softc *sc)
8856  {
8857      int i;
8858  
8859      switch (sc->interrupt_mode) {
8860      case INTR_MODE_INTX:
8861          BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8862          if (sc->intr[0].resource != NULL) {
8863              bus_release_resource(sc->dev,
8864                                   SYS_RES_IRQ,
8865                                   sc->intr[0].rid,
8866                                   sc->intr[0].resource);
8867          }
8868          break;
8869      case INTR_MODE_MSI:
8870          for (i = 0; i < sc->intr_count; i++) {
8871              BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8872              if (sc->intr[i].resource && sc->intr[i].rid) {
8873                  bus_release_resource(sc->dev,
8874                                       SYS_RES_IRQ,
8875                                       sc->intr[i].rid,
8876                                       sc->intr[i].resource);
8877              }
8878          }
8879          pci_release_msi(sc->dev);
8880          break;
8881      case INTR_MODE_MSIX:
8882          for (i = 0; i < sc->intr_count; i++) {
8883              BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8884              if (sc->intr[i].resource && sc->intr[i].rid) {
8885                  bus_release_resource(sc->dev,
8886                                       SYS_RES_IRQ,
8887                                       sc->intr[i].rid,
8888                                       sc->intr[i].resource);
8889              }
8890          }
8891          pci_release_msi(sc->dev);
8892          break;
8893      default:
8894          /* nothing to do as initial allocation failed */
8895          break;
8896      }
8897  }
8898  
8899  /*
8900   * This function determines and allocates the appropriate
8901   * interrupt based on system capabilites and user request.
8902   *
8903   * The user may force a particular interrupt mode, specify
8904   * the number of receive queues, specify the method for
8905   * distribuitng received frames to receive queues, or use
8906   * the default settings which will automatically select the
8907   * best supported combination.  In addition, the OS may or
8908   * may not support certain combinations of these settings.
8909   * This routine attempts to reconcile the settings requested
8910   * by the user with the capabilites available from the system
8911   * to select the optimal combination of features.
8912   *
8913   * Returns:
8914   *   0 = Success, !0 = Failure.
8915   */
8916  static int
bxe_interrupt_alloc(struct bxe_softc * sc)8917  bxe_interrupt_alloc(struct bxe_softc *sc)
8918  {
8919      int msix_count = 0;
8920      int msi_count = 0;
8921      int num_requested = 0;
8922      int num_allocated = 0;
8923      int rid, i, j;
8924      int rc;
8925  
8926      /* get the number of available MSI/MSI-X interrupts from the OS */
8927      if (sc->interrupt_mode > 0) {
8928          if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8929              msix_count = pci_msix_count(sc->dev);
8930          }
8931  
8932          if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8933              msi_count = pci_msi_count(sc->dev);
8934          }
8935  
8936          BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8937                msi_count, msix_count);
8938      }
8939  
8940      do { /* try allocating MSI-X interrupt resources (at least 2) */
8941          if (sc->interrupt_mode != INTR_MODE_MSIX) {
8942              break;
8943          }
8944  
8945          if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8946              (msix_count < 2)) {
8947              sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8948              break;
8949          }
8950  
8951          /* ask for the necessary number of MSI-X vectors */
8952          num_requested = min((sc->num_queues + 1), msix_count);
8953  
8954          BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8955  
8956          num_allocated = num_requested;
8957          if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8958              BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8959              sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8960              break;
8961          }
8962  
8963          if (num_allocated < 2) { /* possible? */
8964              BLOGE(sc, "MSI-X allocation less than 2!\n");
8965              sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8966              pci_release_msi(sc->dev);
8967              break;
8968          }
8969  
8970          BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8971                num_requested, num_allocated);
8972  
8973          /* best effort so use the number of vectors allocated to us */
8974          sc->intr_count = num_allocated;
8975          sc->num_queues = num_allocated - 1;
8976  
8977          rid = 1; /* initial resource identifier */
8978  
8979          /* allocate the MSI-X vectors */
8980          for (i = 0; i < num_allocated; i++) {
8981              sc->intr[i].rid = (rid + i);
8982  
8983              if ((sc->intr[i].resource =
8984                   bus_alloc_resource_any(sc->dev,
8985                                          SYS_RES_IRQ,
8986                                          &sc->intr[i].rid,
8987                                          RF_ACTIVE)) == NULL) {
8988                  BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
8989                        i, (rid + i));
8990  
8991                  for (j = (i - 1); j >= 0; j--) {
8992                      bus_release_resource(sc->dev,
8993                                           SYS_RES_IRQ,
8994                                           sc->intr[j].rid,
8995                                           sc->intr[j].resource);
8996                  }
8997  
8998                  sc->intr_count = 0;
8999                  sc->num_queues = 0;
9000                  sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9001                  pci_release_msi(sc->dev);
9002                  break;
9003              }
9004  
9005              BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9006          }
9007      } while (0);
9008  
9009      do { /* try allocating MSI vector resources (at least 2) */
9010          if (sc->interrupt_mode != INTR_MODE_MSI) {
9011              break;
9012          }
9013  
9014          if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9015              (msi_count < 1)) {
9016              sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9017              break;
9018          }
9019  
9020          /* ask for a single MSI vector */
9021          num_requested = 1;
9022  
9023          BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9024  
9025          num_allocated = num_requested;
9026          if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9027              BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9028              sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9029              break;
9030          }
9031  
9032          if (num_allocated != 1) { /* possible? */
9033              BLOGE(sc, "MSI allocation is not 1!\n");
9034              sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9035              pci_release_msi(sc->dev);
9036              break;
9037          }
9038  
9039          BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9040                num_requested, num_allocated);
9041  
9042          /* best effort so use the number of vectors allocated to us */
9043          sc->intr_count = num_allocated;
9044          sc->num_queues = num_allocated;
9045  
9046          rid = 1; /* initial resource identifier */
9047  
9048          sc->intr[0].rid = rid;
9049  
9050          if ((sc->intr[0].resource =
9051               bus_alloc_resource_any(sc->dev,
9052                                      SYS_RES_IRQ,
9053                                      &sc->intr[0].rid,
9054                                      RF_ACTIVE)) == NULL) {
9055              BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9056              sc->intr_count = 0;
9057              sc->num_queues = 0;
9058              sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9059              pci_release_msi(sc->dev);
9060              break;
9061          }
9062  
9063          BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9064      } while (0);
9065  
9066      do { /* try allocating INTx vector resources */
9067          if (sc->interrupt_mode != INTR_MODE_INTX) {
9068              break;
9069          }
9070  
9071          BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9072  
9073          /* only one vector for INTx */
9074          sc->intr_count = 1;
9075          sc->num_queues = 1;
9076  
9077          rid = 0; /* initial resource identifier */
9078  
9079          sc->intr[0].rid = rid;
9080  
9081          if ((sc->intr[0].resource =
9082               bus_alloc_resource_any(sc->dev,
9083                                      SYS_RES_IRQ,
9084                                      &sc->intr[0].rid,
9085                                      (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9086              BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9087              sc->intr_count = 0;
9088              sc->num_queues = 0;
9089              sc->interrupt_mode = -1; /* Failed! */
9090              break;
9091          }
9092  
9093          BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9094      } while (0);
9095  
9096      if (sc->interrupt_mode == -1) {
9097          BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9098          rc = 1;
9099      } else {
9100          BLOGD(sc, DBG_LOAD,
9101                "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9102                sc->interrupt_mode, sc->num_queues);
9103          rc = 0;
9104      }
9105  
9106      return (rc);
9107  }
9108  
9109  static void
bxe_interrupt_detach(struct bxe_softc * sc)9110  bxe_interrupt_detach(struct bxe_softc *sc)
9111  {
9112      struct bxe_fastpath *fp;
9113      int i;
9114  
9115      /* release interrupt resources */
9116      for (i = 0; i < sc->intr_count; i++) {
9117          if (sc->intr[i].resource && sc->intr[i].tag) {
9118              BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9119              bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9120          }
9121      }
9122  
9123      for (i = 0; i < sc->num_queues; i++) {
9124          fp = &sc->fp[i];
9125          if (fp->tq) {
9126              taskqueue_drain(fp->tq, &fp->tq_task);
9127              taskqueue_drain(fp->tq, &fp->tx_task);
9128              while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
9129                  NULL))
9130                  taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
9131          }
9132  
9133          for (i = 0; i < sc->num_queues; i++) {
9134              fp = &sc->fp[i];
9135              if (fp->tq != NULL) {
9136                  taskqueue_free(fp->tq);
9137                  fp->tq = NULL;
9138              }
9139          }
9140      }
9141  
9142      if (sc->sp_tq) {
9143          taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9144          taskqueue_free(sc->sp_tq);
9145          sc->sp_tq = NULL;
9146      }
9147  }
9148  
9149  /*
9150   * Enables interrupts and attach to the ISR.
9151   *
9152   * When using multiple MSI/MSI-X vectors the first vector
9153   * is used for slowpath operations while all remaining
9154   * vectors are used for fastpath operations.  If only a
9155   * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9156   * ISR must look for both slowpath and fastpath completions.
9157   */
9158  static int
bxe_interrupt_attach(struct bxe_softc * sc)9159  bxe_interrupt_attach(struct bxe_softc *sc)
9160  {
9161      struct bxe_fastpath *fp;
9162      int rc = 0;
9163      int i;
9164  
9165      snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9166               "bxe%d_sp_tq", sc->unit);
9167      TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9168      sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9169                                   taskqueue_thread_enqueue,
9170                                   &sc->sp_tq);
9171      taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9172                              "%s", sc->sp_tq_name);
9173  
9174  
9175      for (i = 0; i < sc->num_queues; i++) {
9176          fp = &sc->fp[i];
9177          snprintf(fp->tq_name, sizeof(fp->tq_name),
9178                   "bxe%d_fp%d_tq", sc->unit, i);
9179          NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9180          TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9181          fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9182                                    taskqueue_thread_enqueue,
9183                                    &fp->tq);
9184          TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9185                            bxe_tx_mq_start_deferred, fp);
9186          taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9187                                  "%s", fp->tq_name);
9188      }
9189  
9190      /* setup interrupt handlers */
9191      if (sc->interrupt_mode == INTR_MODE_MSIX) {
9192          BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9193  
9194          /*
9195           * Setup the interrupt handler. Note that we pass the driver instance
9196           * to the interrupt handler for the slowpath.
9197           */
9198          if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9199                                   (INTR_TYPE_NET | INTR_MPSAFE),
9200                                   NULL, bxe_intr_sp, sc,
9201                                   &sc->intr[0].tag)) != 0) {
9202              BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9203              goto bxe_interrupt_attach_exit;
9204          }
9205  
9206          bus_describe_intr(sc->dev, sc->intr[0].resource,
9207                            sc->intr[0].tag, "sp");
9208  
9209          /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9210  
9211          /* initialize the fastpath vectors (note the first was used for sp) */
9212          for (i = 0; i < sc->num_queues; i++) {
9213              fp = &sc->fp[i];
9214              BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9215  
9216              /*
9217               * Setup the interrupt handler. Note that we pass the
9218               * fastpath context to the interrupt handler in this
9219               * case.
9220               */
9221              if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9222                                       (INTR_TYPE_NET | INTR_MPSAFE),
9223                                       NULL, bxe_intr_fp, fp,
9224                                       &sc->intr[i + 1].tag)) != 0) {
9225                  BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9226                        (i + 1), rc);
9227                  goto bxe_interrupt_attach_exit;
9228              }
9229  
9230              bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9231                                sc->intr[i + 1].tag, "fp%02d", i);
9232  
9233              /* bind the fastpath instance to a cpu */
9234              if (sc->num_queues > 1) {
9235                  bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9236              }
9237  
9238              fp->state = BXE_FP_STATE_IRQ;
9239          }
9240      } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9241          BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9242  
9243          /*
9244           * Setup the interrupt handler. Note that we pass the
9245           * driver instance to the interrupt handler which
9246           * will handle both the slowpath and fastpath.
9247           */
9248          if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9249                                   (INTR_TYPE_NET | INTR_MPSAFE),
9250                                   NULL, bxe_intr_legacy, sc,
9251                                   &sc->intr[0].tag)) != 0) {
9252              BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9253              goto bxe_interrupt_attach_exit;
9254          }
9255  
9256      } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9257          BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9258  
9259          /*
9260           * Setup the interrupt handler. Note that we pass the
9261           * driver instance to the interrupt handler which
9262           * will handle both the slowpath and fastpath.
9263           */
9264          if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9265                                   (INTR_TYPE_NET | INTR_MPSAFE),
9266                                   NULL, bxe_intr_legacy, sc,
9267                                   &sc->intr[0].tag)) != 0) {
9268              BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9269              goto bxe_interrupt_attach_exit;
9270          }
9271      }
9272  
9273  bxe_interrupt_attach_exit:
9274  
9275      return (rc);
9276  }
9277  
9278  static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9279  static int  bxe_init_hw_common(struct bxe_softc *sc);
9280  static int  bxe_init_hw_port(struct bxe_softc *sc);
9281  static int  bxe_init_hw_func(struct bxe_softc *sc);
9282  static void bxe_reset_common(struct bxe_softc *sc);
9283  static void bxe_reset_port(struct bxe_softc *sc);
9284  static void bxe_reset_func(struct bxe_softc *sc);
9285  static int  bxe_gunzip_init(struct bxe_softc *sc);
9286  static void bxe_gunzip_end(struct bxe_softc *sc);
9287  static int  bxe_init_firmware(struct bxe_softc *sc);
9288  static void bxe_release_firmware(struct bxe_softc *sc);
9289  
9290  static struct
9291  ecore_func_sp_drv_ops bxe_func_sp_drv = {
9292      .init_hw_cmn_chip = bxe_init_hw_common_chip,
9293      .init_hw_cmn      = bxe_init_hw_common,
9294      .init_hw_port     = bxe_init_hw_port,
9295      .init_hw_func     = bxe_init_hw_func,
9296  
9297      .reset_hw_cmn     = bxe_reset_common,
9298      .reset_hw_port    = bxe_reset_port,
9299      .reset_hw_func    = bxe_reset_func,
9300  
9301      .gunzip_init      = bxe_gunzip_init,
9302      .gunzip_end       = bxe_gunzip_end,
9303  
9304      .init_fw          = bxe_init_firmware,
9305      .release_fw       = bxe_release_firmware,
9306  };
9307  
9308  static void
bxe_init_func_obj(struct bxe_softc * sc)9309  bxe_init_func_obj(struct bxe_softc *sc)
9310  {
9311      sc->dmae_ready = 0;
9312  
9313      ecore_init_func_obj(sc,
9314                          &sc->func_obj,
9315                          BXE_SP(sc, func_rdata),
9316                          BXE_SP_MAPPING(sc, func_rdata),
9317                          BXE_SP(sc, func_afex_rdata),
9318                          BXE_SP_MAPPING(sc, func_afex_rdata),
9319                          &bxe_func_sp_drv);
9320  }
9321  
9322  static int
bxe_init_hw(struct bxe_softc * sc,uint32_t load_code)9323  bxe_init_hw(struct bxe_softc *sc,
9324              uint32_t         load_code)
9325  {
9326      struct ecore_func_state_params func_params = { NULL };
9327      int rc;
9328  
9329      /* prepare the parameters for function state transitions */
9330      bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9331  
9332      func_params.f_obj = &sc->func_obj;
9333      func_params.cmd = ECORE_F_CMD_HW_INIT;
9334  
9335      func_params.params.hw_init.load_phase = load_code;
9336  
9337      /*
9338       * Via a plethora of function pointers, we will eventually reach
9339       * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9340       */
9341      rc = ecore_func_state_change(sc, &func_params);
9342  
9343      return (rc);
9344  }
9345  
9346  static void
bxe_fill(struct bxe_softc * sc,uint32_t addr,int fill,uint32_t len)9347  bxe_fill(struct bxe_softc *sc,
9348           uint32_t         addr,
9349           int              fill,
9350           uint32_t         len)
9351  {
9352      uint32_t i;
9353  
9354      if (!(len % 4) && !(addr % 4)) {
9355          for (i = 0; i < len; i += 4) {
9356              REG_WR(sc, (addr + i), fill);
9357          }
9358      } else {
9359          for (i = 0; i < len; i++) {
9360              REG_WR8(sc, (addr + i), fill);
9361          }
9362      }
9363  }
9364  
9365  /* writes FP SP data to FW - data_size in dwords */
9366  static void
bxe_wr_fp_sb_data(struct bxe_softc * sc,int fw_sb_id,uint32_t * sb_data_p,uint32_t data_size)9367  bxe_wr_fp_sb_data(struct bxe_softc *sc,
9368                    int              fw_sb_id,
9369                    uint32_t         *sb_data_p,
9370                    uint32_t         data_size)
9371  {
9372      int index;
9373  
9374      for (index = 0; index < data_size; index++) {
9375          REG_WR(sc,
9376                 (BAR_CSTRORM_INTMEM +
9377                  CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9378                  (sizeof(uint32_t) * index)),
9379                 *(sb_data_p + index));
9380      }
9381  }
9382  
9383  static void
bxe_zero_fp_sb(struct bxe_softc * sc,int fw_sb_id)9384  bxe_zero_fp_sb(struct bxe_softc *sc,
9385                 int              fw_sb_id)
9386  {
9387      struct hc_status_block_data_e2 sb_data_e2;
9388      struct hc_status_block_data_e1x sb_data_e1x;
9389      uint32_t *sb_data_p;
9390      uint32_t data_size = 0;
9391  
9392      if (!CHIP_IS_E1x(sc)) {
9393          memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9394          sb_data_e2.common.state = SB_DISABLED;
9395          sb_data_e2.common.p_func.vf_valid = FALSE;
9396          sb_data_p = (uint32_t *)&sb_data_e2;
9397          data_size = (sizeof(struct hc_status_block_data_e2) /
9398                       sizeof(uint32_t));
9399      } else {
9400          memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9401          sb_data_e1x.common.state = SB_DISABLED;
9402          sb_data_e1x.common.p_func.vf_valid = FALSE;
9403          sb_data_p = (uint32_t *)&sb_data_e1x;
9404          data_size = (sizeof(struct hc_status_block_data_e1x) /
9405                       sizeof(uint32_t));
9406      }
9407  
9408      bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9409  
9410      bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9411               0, CSTORM_STATUS_BLOCK_SIZE);
9412      bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9413               0, CSTORM_SYNC_BLOCK_SIZE);
9414  }
9415  
9416  static void
bxe_wr_sp_sb_data(struct bxe_softc * sc,struct hc_sp_status_block_data * sp_sb_data)9417  bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9418                    struct hc_sp_status_block_data *sp_sb_data)
9419  {
9420      int i;
9421  
9422      for (i = 0;
9423           i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9424           i++) {
9425          REG_WR(sc,
9426                 (BAR_CSTRORM_INTMEM +
9427                  CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9428                  (i * sizeof(uint32_t))),
9429                 *((uint32_t *)sp_sb_data + i));
9430      }
9431  }
9432  
9433  static void
bxe_zero_sp_sb(struct bxe_softc * sc)9434  bxe_zero_sp_sb(struct bxe_softc *sc)
9435  {
9436      struct hc_sp_status_block_data sp_sb_data;
9437  
9438      memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9439  
9440      sp_sb_data.state           = SB_DISABLED;
9441      sp_sb_data.p_func.vf_valid = FALSE;
9442  
9443      bxe_wr_sp_sb_data(sc, &sp_sb_data);
9444  
9445      bxe_fill(sc,
9446               (BAR_CSTRORM_INTMEM +
9447                CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9448                0, CSTORM_SP_STATUS_BLOCK_SIZE);
9449      bxe_fill(sc,
9450               (BAR_CSTRORM_INTMEM +
9451                CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9452                0, CSTORM_SP_SYNC_BLOCK_SIZE);
9453  }
9454  
9455  static void
bxe_setup_ndsb_state_machine(struct hc_status_block_sm * hc_sm,int igu_sb_id,int igu_seg_id)9456  bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9457                               int                       igu_sb_id,
9458                               int                       igu_seg_id)
9459  {
9460      hc_sm->igu_sb_id      = igu_sb_id;
9461      hc_sm->igu_seg_id     = igu_seg_id;
9462      hc_sm->timer_value    = 0xFF;
9463      hc_sm->time_to_expire = 0xFFFFFFFF;
9464  }
9465  
9466  static void
bxe_map_sb_state_machines(struct hc_index_data * index_data)9467  bxe_map_sb_state_machines(struct hc_index_data *index_data)
9468  {
9469      /* zero out state machine indices */
9470  
9471      /* rx indices */
9472      index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9473  
9474      /* tx indices */
9475      index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9476      index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9477      index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9478      index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9479  
9480      /* map indices */
9481  
9482      /* rx indices */
9483      index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9484          (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9485  
9486      /* tx indices */
9487      index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9488          (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9489      index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9490          (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9491      index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9492          (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9493      index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9494          (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9495  }
9496  
9497  static void
bxe_init_sb(struct bxe_softc * sc,bus_addr_t busaddr,int vfid,uint8_t vf_valid,int fw_sb_id,int igu_sb_id)9498  bxe_init_sb(struct bxe_softc *sc,
9499              bus_addr_t       busaddr,
9500              int              vfid,
9501              uint8_t          vf_valid,
9502              int              fw_sb_id,
9503              int              igu_sb_id)
9504  {
9505      struct hc_status_block_data_e2  sb_data_e2;
9506      struct hc_status_block_data_e1x sb_data_e1x;
9507      struct hc_status_block_sm       *hc_sm_p;
9508      uint32_t *sb_data_p;
9509      int igu_seg_id;
9510      int data_size;
9511  
9512      if (CHIP_INT_MODE_IS_BC(sc)) {
9513          igu_seg_id = HC_SEG_ACCESS_NORM;
9514      } else {
9515          igu_seg_id = IGU_SEG_ACCESS_NORM;
9516      }
9517  
9518      bxe_zero_fp_sb(sc, fw_sb_id);
9519  
9520      if (!CHIP_IS_E1x(sc)) {
9521          memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9522          sb_data_e2.common.state = SB_ENABLED;
9523          sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9524          sb_data_e2.common.p_func.vf_id = vfid;
9525          sb_data_e2.common.p_func.vf_valid = vf_valid;
9526          sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9527          sb_data_e2.common.same_igu_sb_1b = TRUE;
9528          sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9529          sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9530          hc_sm_p = sb_data_e2.common.state_machine;
9531          sb_data_p = (uint32_t *)&sb_data_e2;
9532          data_size = (sizeof(struct hc_status_block_data_e2) /
9533                       sizeof(uint32_t));
9534          bxe_map_sb_state_machines(sb_data_e2.index_data);
9535      } else {
9536          memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9537          sb_data_e1x.common.state = SB_ENABLED;
9538          sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9539          sb_data_e1x.common.p_func.vf_id = 0xff;
9540          sb_data_e1x.common.p_func.vf_valid = FALSE;
9541          sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9542          sb_data_e1x.common.same_igu_sb_1b = TRUE;
9543          sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9544          sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9545          hc_sm_p = sb_data_e1x.common.state_machine;
9546          sb_data_p = (uint32_t *)&sb_data_e1x;
9547          data_size = (sizeof(struct hc_status_block_data_e1x) /
9548                       sizeof(uint32_t));
9549          bxe_map_sb_state_machines(sb_data_e1x.index_data);
9550      }
9551  
9552      bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9553      bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9554  
9555      BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9556  
9557      /* write indices to HW - PCI guarantees endianity of regpairs */
9558      bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9559  }
9560  
9561  static inline uint8_t
bxe_fp_qzone_id(struct bxe_fastpath * fp)9562  bxe_fp_qzone_id(struct bxe_fastpath *fp)
9563  {
9564      if (CHIP_IS_E1x(fp->sc)) {
9565          return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9566      } else {
9567          return (fp->cl_id);
9568      }
9569  }
9570  
9571  static inline uint32_t
bxe_rx_ustorm_prods_offset(struct bxe_softc * sc,struct bxe_fastpath * fp)9572  bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9573                             struct bxe_fastpath *fp)
9574  {
9575      uint32_t offset = BAR_USTRORM_INTMEM;
9576  
9577      if (!CHIP_IS_E1x(sc)) {
9578          offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9579      } else {
9580          offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9581      }
9582  
9583      return (offset);
9584  }
9585  
9586  static void
bxe_init_eth_fp(struct bxe_softc * sc,int idx)9587  bxe_init_eth_fp(struct bxe_softc *sc,
9588                  int              idx)
9589  {
9590      struct bxe_fastpath *fp = &sc->fp[idx];
9591      uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9592      unsigned long q_type = 0;
9593      int cos;
9594  
9595      fp->sc    = sc;
9596      fp->index = idx;
9597  
9598      fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9599      fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9600  
9601      fp->cl_id = (CHIP_IS_E1x(sc)) ?
9602                      (SC_L_ID(sc) + idx) :
9603                      /* want client ID same as IGU SB ID for non-E1 */
9604                      fp->igu_sb_id;
9605      fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9606  
9607      /* setup sb indices */
9608      if (!CHIP_IS_E1x(sc)) {
9609          fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9610          fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9611      } else {
9612          fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9613          fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9614      }
9615  
9616      /* init shortcut */
9617      fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9618  
9619      fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9620  
9621      /*
9622       * XXX If multiple CoS is ever supported then each fastpath structure
9623       * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9624       */
9625      for (cos = 0; cos < sc->max_cos; cos++) {
9626          cids[cos] = idx;
9627      }
9628      fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9629  
9630      /* nothing more for a VF to do */
9631      if (IS_VF(sc)) {
9632          return;
9633      }
9634  
9635      bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9636                  fp->fw_sb_id, fp->igu_sb_id);
9637  
9638      bxe_update_fp_sb_idx(fp);
9639  
9640      /* Configure Queue State object */
9641      bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9642      bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9643  
9644      ecore_init_queue_obj(sc,
9645                           &sc->sp_objs[idx].q_obj,
9646                           fp->cl_id,
9647                           cids,
9648                           sc->max_cos,
9649                           SC_FUNC(sc),
9650                           BXE_SP(sc, q_rdata),
9651                           BXE_SP_MAPPING(sc, q_rdata),
9652                           q_type);
9653  
9654      /* configure classification DBs */
9655      ecore_init_mac_obj(sc,
9656                         &sc->sp_objs[idx].mac_obj,
9657                         fp->cl_id,
9658                         idx,
9659                         SC_FUNC(sc),
9660                         BXE_SP(sc, mac_rdata),
9661                         BXE_SP_MAPPING(sc, mac_rdata),
9662                         ECORE_FILTER_MAC_PENDING,
9663                         &sc->sp_state,
9664                         ECORE_OBJ_TYPE_RX_TX,
9665                         &sc->macs_pool);
9666  
9667      BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9668            idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9669  }
9670  
9671  static inline void
bxe_update_rx_prod(struct bxe_softc * sc,struct bxe_fastpath * fp,uint16_t rx_bd_prod,uint16_t rx_cq_prod,uint16_t rx_sge_prod)9672  bxe_update_rx_prod(struct bxe_softc    *sc,
9673                     struct bxe_fastpath *fp,
9674                     uint16_t            rx_bd_prod,
9675                     uint16_t            rx_cq_prod,
9676                     uint16_t            rx_sge_prod)
9677  {
9678      struct ustorm_eth_rx_producers rx_prods = { 0 };
9679      uint32_t i;
9680  
9681      /* update producers */
9682      rx_prods.bd_prod  = rx_bd_prod;
9683      rx_prods.cqe_prod = rx_cq_prod;
9684      rx_prods.sge_prod = rx_sge_prod;
9685  
9686      /*
9687       * Make sure that the BD and SGE data is updated before updating the
9688       * producers since FW might read the BD/SGE right after the producer
9689       * is updated.
9690       * This is only applicable for weak-ordered memory model archs such
9691       * as IA-64. The following barrier is also mandatory since FW will
9692       * assumes BDs must have buffers.
9693       */
9694      wmb();
9695  
9696      for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9697          REG_WR(sc,
9698                 (fp->ustorm_rx_prods_offset + (i * 4)),
9699                 ((uint32_t *)&rx_prods)[i]);
9700      }
9701  
9702      wmb(); /* keep prod updates ordered */
9703  
9704      BLOGD(sc, DBG_RX,
9705            "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9706            fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9707  }
9708  
9709  static void
bxe_init_rx_rings(struct bxe_softc * sc)9710  bxe_init_rx_rings(struct bxe_softc *sc)
9711  {
9712      struct bxe_fastpath *fp;
9713      int i;
9714  
9715      for (i = 0; i < sc->num_queues; i++) {
9716          fp = &sc->fp[i];
9717  
9718          fp->rx_bd_cons = 0;
9719  
9720          /*
9721           * Activate the BD ring...
9722           * Warning, this will generate an interrupt (to the TSTORM)
9723           * so this can only be done after the chip is initialized
9724           */
9725          bxe_update_rx_prod(sc, fp,
9726                             fp->rx_bd_prod,
9727                             fp->rx_cq_prod,
9728                             fp->rx_sge_prod);
9729  
9730          if (i != 0) {
9731              continue;
9732          }
9733  
9734          if (CHIP_IS_E1(sc)) {
9735              REG_WR(sc,
9736                     (BAR_USTRORM_INTMEM +
9737                      USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9738                     U64_LO(fp->rcq_dma.paddr));
9739              REG_WR(sc,
9740                     (BAR_USTRORM_INTMEM +
9741                      USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9742                     U64_HI(fp->rcq_dma.paddr));
9743          }
9744      }
9745  }
9746  
9747  static void
bxe_init_tx_ring_one(struct bxe_fastpath * fp)9748  bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9749  {
9750      SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9751      fp->tx_db.data.zero_fill1 = 0;
9752      fp->tx_db.data.prod = 0;
9753  
9754      fp->tx_pkt_prod = 0;
9755      fp->tx_pkt_cons = 0;
9756      fp->tx_bd_prod = 0;
9757      fp->tx_bd_cons = 0;
9758      fp->eth_q_stats.tx_pkts = 0;
9759  }
9760  
9761  static inline void
bxe_init_tx_rings(struct bxe_softc * sc)9762  bxe_init_tx_rings(struct bxe_softc *sc)
9763  {
9764      int i;
9765  
9766      for (i = 0; i < sc->num_queues; i++) {
9767          bxe_init_tx_ring_one(&sc->fp[i]);
9768      }
9769  }
9770  
9771  static void
bxe_init_def_sb(struct bxe_softc * sc)9772  bxe_init_def_sb(struct bxe_softc *sc)
9773  {
9774      struct host_sp_status_block *def_sb = sc->def_sb;
9775      bus_addr_t mapping = sc->def_sb_dma.paddr;
9776      int igu_sp_sb_index;
9777      int igu_seg_id;
9778      int port = SC_PORT(sc);
9779      int func = SC_FUNC(sc);
9780      int reg_offset, reg_offset_en5;
9781      uint64_t section;
9782      int index, sindex;
9783      struct hc_sp_status_block_data sp_sb_data;
9784  
9785      memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9786  
9787      if (CHIP_INT_MODE_IS_BC(sc)) {
9788          igu_sp_sb_index = DEF_SB_IGU_ID;
9789          igu_seg_id = HC_SEG_ACCESS_DEF;
9790      } else {
9791          igu_sp_sb_index = sc->igu_dsb_id;
9792          igu_seg_id = IGU_SEG_ACCESS_DEF;
9793      }
9794  
9795      /* attentions */
9796      section = ((uint64_t)mapping +
9797                 offsetof(struct host_sp_status_block, atten_status_block));
9798      def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9799      sc->attn_state = 0;
9800  
9801      reg_offset = (port) ?
9802                       MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9803                       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9804      reg_offset_en5 = (port) ?
9805                           MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9806                           MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9807  
9808      for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9809          /* take care of sig[0]..sig[4] */
9810          for (sindex = 0; sindex < 4; sindex++) {
9811              sc->attn_group[index].sig[sindex] =
9812                  REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9813          }
9814  
9815          if (!CHIP_IS_E1x(sc)) {
9816              /*
9817               * enable5 is separate from the rest of the registers,
9818               * and the address skip is 4 and not 16 between the
9819               * different groups
9820               */
9821              sc->attn_group[index].sig[4] =
9822                  REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9823          } else {
9824              sc->attn_group[index].sig[4] = 0;
9825          }
9826      }
9827  
9828      if (sc->devinfo.int_block == INT_BLOCK_HC) {
9829          reg_offset = (port) ?
9830                           HC_REG_ATTN_MSG1_ADDR_L :
9831                           HC_REG_ATTN_MSG0_ADDR_L;
9832          REG_WR(sc, reg_offset, U64_LO(section));
9833          REG_WR(sc, (reg_offset + 4), U64_HI(section));
9834      } else if (!CHIP_IS_E1x(sc)) {
9835          REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9836          REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9837      }
9838  
9839      section = ((uint64_t)mapping +
9840                 offsetof(struct host_sp_status_block, sp_sb));
9841  
9842      bxe_zero_sp_sb(sc);
9843  
9844      /* PCI guarantees endianity of regpair */
9845      sp_sb_data.state           = SB_ENABLED;
9846      sp_sb_data.host_sb_addr.lo = U64_LO(section);
9847      sp_sb_data.host_sb_addr.hi = U64_HI(section);
9848      sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9849      sp_sb_data.igu_seg_id      = igu_seg_id;
9850      sp_sb_data.p_func.pf_id    = func;
9851      sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9852      sp_sb_data.p_func.vf_id    = 0xff;
9853  
9854      bxe_wr_sp_sb_data(sc, &sp_sb_data);
9855  
9856      bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9857  }
9858  
9859  static void
bxe_init_sp_ring(struct bxe_softc * sc)9860  bxe_init_sp_ring(struct bxe_softc *sc)
9861  {
9862      atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9863      sc->spq_prod_idx = 0;
9864      sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9865      sc->spq_prod_bd = sc->spq;
9866      sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9867  }
9868  
9869  static void
bxe_init_eq_ring(struct bxe_softc * sc)9870  bxe_init_eq_ring(struct bxe_softc *sc)
9871  {
9872      union event_ring_elem *elem;
9873      int i;
9874  
9875      for (i = 1; i <= NUM_EQ_PAGES; i++) {
9876          elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9877  
9878          elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9879                                                   BCM_PAGE_SIZE *
9880                                                   (i % NUM_EQ_PAGES)));
9881          elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9882                                                   BCM_PAGE_SIZE *
9883                                                   (i % NUM_EQ_PAGES)));
9884      }
9885  
9886      sc->eq_cons    = 0;
9887      sc->eq_prod    = NUM_EQ_DESC;
9888      sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9889  
9890      atomic_store_rel_long(&sc->eq_spq_left,
9891                            (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9892                                 NUM_EQ_DESC) - 1));
9893  }
9894  
9895  static void
bxe_init_internal_common(struct bxe_softc * sc)9896  bxe_init_internal_common(struct bxe_softc *sc)
9897  {
9898      int i;
9899  
9900      /*
9901       * Zero this manually as its initialization is currently missing
9902       * in the initTool.
9903       */
9904      for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9905          REG_WR(sc,
9906                 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9907                 0);
9908      }
9909  
9910      if (!CHIP_IS_E1x(sc)) {
9911          REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9912                  CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9913      }
9914  }
9915  
9916  static void
bxe_init_internal(struct bxe_softc * sc,uint32_t load_code)9917  bxe_init_internal(struct bxe_softc *sc,
9918                    uint32_t         load_code)
9919  {
9920      switch (load_code) {
9921      case FW_MSG_CODE_DRV_LOAD_COMMON:
9922      case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9923          bxe_init_internal_common(sc);
9924          /* no break */
9925  
9926      case FW_MSG_CODE_DRV_LOAD_PORT:
9927          /* nothing to do */
9928          /* no break */
9929  
9930      case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9931          /* internal memory per function is initialized inside bxe_pf_init */
9932          break;
9933  
9934      default:
9935          BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9936          break;
9937      }
9938  }
9939  
9940  static void
storm_memset_func_cfg(struct bxe_softc * sc,struct tstorm_eth_function_common_config * tcfg,uint16_t abs_fid)9941  storm_memset_func_cfg(struct bxe_softc                         *sc,
9942                        struct tstorm_eth_function_common_config *tcfg,
9943                        uint16_t                                  abs_fid)
9944  {
9945      uint32_t addr;
9946      size_t size;
9947  
9948      addr = (BAR_TSTRORM_INTMEM +
9949              TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9950      size = sizeof(struct tstorm_eth_function_common_config);
9951      ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9952  }
9953  
9954  static void
bxe_func_init(struct bxe_softc * sc,struct bxe_func_init_params * p)9955  bxe_func_init(struct bxe_softc            *sc,
9956                struct bxe_func_init_params *p)
9957  {
9958      struct tstorm_eth_function_common_config tcfg = { 0 };
9959  
9960      if (CHIP_IS_E1x(sc)) {
9961          storm_memset_func_cfg(sc, &tcfg, p->func_id);
9962      }
9963  
9964      /* Enable the function in the FW */
9965      storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9966      storm_memset_func_en(sc, p->func_id, 1);
9967  
9968      /* spq */
9969      if (p->func_flgs & FUNC_FLG_SPQ) {
9970          storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9971          REG_WR(sc,
9972                 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9973                 p->spq_prod);
9974      }
9975  }
9976  
9977  /*
9978   * Calculates the sum of vn_min_rates.
9979   * It's needed for further normalizing of the min_rates.
9980   * Returns:
9981   *   sum of vn_min_rates.
9982   *     or
9983   *   0 - if all the min_rates are 0.
9984   * In the later case fainess algorithm should be deactivated.
9985   * If all min rates are not zero then those that are zeroes will be set to 1.
9986   */
9987  static void
bxe_calc_vn_min(struct bxe_softc * sc,struct cmng_init_input * input)9988  bxe_calc_vn_min(struct bxe_softc       *sc,
9989                  struct cmng_init_input *input)
9990  {
9991      uint32_t vn_cfg;
9992      uint32_t vn_min_rate;
9993      int all_zero = 1;
9994      int vn;
9995  
9996      for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
9997          vn_cfg = sc->devinfo.mf_info.mf_config[vn];
9998          vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
9999                          FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10000  
10001          if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10002              /* skip hidden VNs */
10003              vn_min_rate = 0;
10004          } else if (!vn_min_rate) {
10005              /* If min rate is zero - set it to 100 */
10006              vn_min_rate = DEF_MIN_RATE;
10007          } else {
10008              all_zero = 0;
10009          }
10010  
10011          input->vnic_min_rate[vn] = vn_min_rate;
10012      }
10013  
10014      /* if ETS or all min rates are zeros - disable fairness */
10015      if (BXE_IS_ETS_ENABLED(sc)) {
10016          input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10017          BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10018      } else if (all_zero) {
10019          input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10020          BLOGD(sc, DBG_LOAD,
10021                "Fariness disabled (all MIN values are zeroes)\n");
10022      } else {
10023          input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10024      }
10025  }
10026  
10027  static inline uint16_t
bxe_extract_max_cfg(struct bxe_softc * sc,uint32_t mf_cfg)10028  bxe_extract_max_cfg(struct bxe_softc *sc,
10029                      uint32_t         mf_cfg)
10030  {
10031      uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10032                          FUNC_MF_CFG_MAX_BW_SHIFT);
10033  
10034      if (!max_cfg) {
10035          BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10036          max_cfg = 100;
10037      }
10038  
10039      return (max_cfg);
10040  }
10041  
10042  static void
bxe_calc_vn_max(struct bxe_softc * sc,int vn,struct cmng_init_input * input)10043  bxe_calc_vn_max(struct bxe_softc       *sc,
10044                  int                    vn,
10045                  struct cmng_init_input *input)
10046  {
10047      uint16_t vn_max_rate;
10048      uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10049      uint32_t max_cfg;
10050  
10051      if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10052          vn_max_rate = 0;
10053      } else {
10054          max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10055  
10056          if (IS_MF_SI(sc)) {
10057              /* max_cfg in percents of linkspeed */
10058              vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10059          } else { /* SD modes */
10060              /* max_cfg is absolute in 100Mb units */
10061              vn_max_rate = (max_cfg * 100);
10062          }
10063      }
10064  
10065      BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10066  
10067      input->vnic_max_rate[vn] = vn_max_rate;
10068  }
10069  
10070  static void
bxe_cmng_fns_init(struct bxe_softc * sc,uint8_t read_cfg,uint8_t cmng_type)10071  bxe_cmng_fns_init(struct bxe_softc *sc,
10072                    uint8_t          read_cfg,
10073                    uint8_t          cmng_type)
10074  {
10075      struct cmng_init_input input;
10076      int vn;
10077  
10078      memset(&input, 0, sizeof(struct cmng_init_input));
10079  
10080      input.port_rate = sc->link_vars.line_speed;
10081  
10082      if (cmng_type == CMNG_FNS_MINMAX) {
10083          /* read mf conf from shmem */
10084          if (read_cfg) {
10085              bxe_read_mf_cfg(sc);
10086          }
10087  
10088          /* get VN min rate and enable fairness if not 0 */
10089          bxe_calc_vn_min(sc, &input);
10090  
10091          /* get VN max rate */
10092          if (sc->port.pmf) {
10093              for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10094                  bxe_calc_vn_max(sc, vn, &input);
10095              }
10096          }
10097  
10098          /* always enable rate shaping and fairness */
10099          input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10100  
10101          ecore_init_cmng(&input, &sc->cmng);
10102          return;
10103      }
10104  
10105      /* rate shaping and fairness are disabled */
10106      BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10107  }
10108  
10109  static int
bxe_get_cmng_fns_mode(struct bxe_softc * sc)10110  bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10111  {
10112      if (CHIP_REV_IS_SLOW(sc)) {
10113          return (CMNG_FNS_NONE);
10114      }
10115  
10116      if (IS_MF(sc)) {
10117          return (CMNG_FNS_MINMAX);
10118      }
10119  
10120      return (CMNG_FNS_NONE);
10121  }
10122  
10123  static void
storm_memset_cmng(struct bxe_softc * sc,struct cmng_init * cmng,uint8_t port)10124  storm_memset_cmng(struct bxe_softc *sc,
10125                    struct cmng_init *cmng,
10126                    uint8_t          port)
10127  {
10128      int vn;
10129      int func;
10130      uint32_t addr;
10131      size_t size;
10132  
10133      addr = (BAR_XSTRORM_INTMEM +
10134              XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10135      size = sizeof(struct cmng_struct_per_port);
10136      ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10137  
10138      for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10139          func = func_by_vn(sc, vn);
10140  
10141          addr = (BAR_XSTRORM_INTMEM +
10142                  XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10143          size = sizeof(struct rate_shaping_vars_per_vn);
10144          ecore_storm_memset_struct(sc, addr, size,
10145                                    (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10146  
10147          addr = (BAR_XSTRORM_INTMEM +
10148                  XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10149          size = sizeof(struct fairness_vars_per_vn);
10150          ecore_storm_memset_struct(sc, addr, size,
10151                                    (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10152      }
10153  }
10154  
10155  static void
bxe_pf_init(struct bxe_softc * sc)10156  bxe_pf_init(struct bxe_softc *sc)
10157  {
10158      struct bxe_func_init_params func_init = { 0 };
10159      struct event_ring_data eq_data = { { 0 } };
10160      uint16_t flags;
10161  
10162      if (!CHIP_IS_E1x(sc)) {
10163          /* reset IGU PF statistics: MSIX + ATTN */
10164          /* PF */
10165          REG_WR(sc,
10166                 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10167                  (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10168                  ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10169                 0);
10170          /* ATTN */
10171          REG_WR(sc,
10172                 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10173                  (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10174                  (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10175                  ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10176                 0);
10177      }
10178  
10179      /* function setup flags */
10180      flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10181  
10182      /*
10183       * This flag is relevant for E1x only.
10184       * E2 doesn't have a TPA configuration in a function level.
10185       */
10186      flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10187  
10188      func_init.func_flgs = flags;
10189      func_init.pf_id     = SC_FUNC(sc);
10190      func_init.func_id   = SC_FUNC(sc);
10191      func_init.spq_map   = sc->spq_dma.paddr;
10192      func_init.spq_prod  = sc->spq_prod_idx;
10193  
10194      bxe_func_init(sc, &func_init);
10195  
10196      memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10197  
10198      /*
10199       * Congestion management values depend on the link rate.
10200       * There is no active link so initial link rate is set to 10Gbps.
10201       * When the link comes up the congestion management values are
10202       * re-calculated according to the actual link rate.
10203       */
10204      sc->link_vars.line_speed = SPEED_10000;
10205      bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10206  
10207      /* Only the PMF sets the HW */
10208      if (sc->port.pmf) {
10209          storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10210      }
10211  
10212      /* init Event Queue - PCI bus guarantees correct endainity */
10213      eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10214      eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10215      eq_data.producer     = sc->eq_prod;
10216      eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10217      eq_data.sb_id        = DEF_SB_ID;
10218      storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10219  }
10220  
10221  static void
bxe_hc_int_enable(struct bxe_softc * sc)10222  bxe_hc_int_enable(struct bxe_softc *sc)
10223  {
10224      int port = SC_PORT(sc);
10225      uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10226      uint32_t val = REG_RD(sc, addr);
10227      uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10228      uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10229                             (sc->intr_count == 1)) ? TRUE : FALSE;
10230      uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10231  
10232      if (msix) {
10233          val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10234                   HC_CONFIG_0_REG_INT_LINE_EN_0);
10235          val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10236                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10237          if (single_msix) {
10238              val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10239          }
10240      } else if (msi) {
10241          val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10242          val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10243                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10244                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10245      } else {
10246          val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10247                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10248                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
10249                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10250  
10251          if (!CHIP_IS_E1(sc)) {
10252              BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10253                    val, port, addr);
10254  
10255              REG_WR(sc, addr, val);
10256  
10257              val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10258          }
10259      }
10260  
10261      if (CHIP_IS_E1(sc)) {
10262          REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10263      }
10264  
10265      BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10266            val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10267  
10268      REG_WR(sc, addr, val);
10269  
10270      /* ensure that HC_CONFIG is written before leading/trailing edge config */
10271      mb();
10272  
10273      if (!CHIP_IS_E1(sc)) {
10274          /* init leading/trailing edge */
10275          if (IS_MF(sc)) {
10276              val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10277              if (sc->port.pmf) {
10278                  /* enable nig and gpio3 attention */
10279                  val |= 0x1100;
10280              }
10281          } else {
10282              val = 0xffff;
10283          }
10284  
10285          REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10286          REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10287      }
10288  
10289      /* make sure that interrupts are indeed enabled from here on */
10290      mb();
10291  }
10292  
10293  static void
bxe_igu_int_enable(struct bxe_softc * sc)10294  bxe_igu_int_enable(struct bxe_softc *sc)
10295  {
10296      uint32_t val;
10297      uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10298      uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10299                             (sc->intr_count == 1)) ? TRUE : FALSE;
10300      uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10301  
10302      val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10303  
10304      if (msix) {
10305          val &= ~(IGU_PF_CONF_INT_LINE_EN |
10306                   IGU_PF_CONF_SINGLE_ISR_EN);
10307          val |= (IGU_PF_CONF_MSI_MSIX_EN |
10308                  IGU_PF_CONF_ATTN_BIT_EN);
10309          if (single_msix) {
10310              val |= IGU_PF_CONF_SINGLE_ISR_EN;
10311          }
10312      } else if (msi) {
10313          val &= ~IGU_PF_CONF_INT_LINE_EN;
10314          val |= (IGU_PF_CONF_MSI_MSIX_EN |
10315                  IGU_PF_CONF_ATTN_BIT_EN |
10316                  IGU_PF_CONF_SINGLE_ISR_EN);
10317      } else {
10318          val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10319          val |= (IGU_PF_CONF_INT_LINE_EN |
10320                  IGU_PF_CONF_ATTN_BIT_EN |
10321                  IGU_PF_CONF_SINGLE_ISR_EN);
10322      }
10323  
10324      /* clean previous status - need to configure igu prior to ack*/
10325      if ((!msix) || single_msix) {
10326          REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10327          bxe_ack_int(sc);
10328      }
10329  
10330      val |= IGU_PF_CONF_FUNC_EN;
10331  
10332      BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10333            val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10334  
10335      REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10336  
10337      mb();
10338  
10339      /* init leading/trailing edge */
10340      if (IS_MF(sc)) {
10341          val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10342          if (sc->port.pmf) {
10343              /* enable nig and gpio3 attention */
10344              val |= 0x1100;
10345          }
10346      } else {
10347          val = 0xffff;
10348      }
10349  
10350      REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10351      REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10352  
10353      /* make sure that interrupts are indeed enabled from here on */
10354      mb();
10355  }
10356  
10357  static void
bxe_int_enable(struct bxe_softc * sc)10358  bxe_int_enable(struct bxe_softc *sc)
10359  {
10360      if (sc->devinfo.int_block == INT_BLOCK_HC) {
10361          bxe_hc_int_enable(sc);
10362      } else {
10363          bxe_igu_int_enable(sc);
10364      }
10365  }
10366  
10367  static void
bxe_hc_int_disable(struct bxe_softc * sc)10368  bxe_hc_int_disable(struct bxe_softc *sc)
10369  {
10370      int port = SC_PORT(sc);
10371      uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10372      uint32_t val = REG_RD(sc, addr);
10373  
10374      /*
10375       * In E1 we must use only PCI configuration space to disable MSI/MSIX
10376       * capablility. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
10377       * block
10378       */
10379      if (CHIP_IS_E1(sc)) {
10380          /*
10381           * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register
10382           * to prevent from HC sending interrupts after we exit the function
10383           */
10384          REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10385  
10386          val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10387                   HC_CONFIG_0_REG_INT_LINE_EN_0 |
10388                   HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10389      } else {
10390          val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10391                   HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10392                   HC_CONFIG_0_REG_INT_LINE_EN_0 |
10393                   HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10394      }
10395  
10396      BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10397  
10398      /* flush all outstanding writes */
10399      mb();
10400  
10401      REG_WR(sc, addr, val);
10402      if (REG_RD(sc, addr) != val) {
10403          BLOGE(sc, "proper val not read from HC IGU!\n");
10404      }
10405  }
10406  
10407  static void
bxe_igu_int_disable(struct bxe_softc * sc)10408  bxe_igu_int_disable(struct bxe_softc *sc)
10409  {
10410      uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10411  
10412      val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10413               IGU_PF_CONF_INT_LINE_EN |
10414               IGU_PF_CONF_ATTN_BIT_EN);
10415  
10416      BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10417  
10418      /* flush all outstanding writes */
10419      mb();
10420  
10421      REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10422      if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10423          BLOGE(sc, "proper val not read from IGU!\n");
10424      }
10425  }
10426  
10427  static void
bxe_int_disable(struct bxe_softc * sc)10428  bxe_int_disable(struct bxe_softc *sc)
10429  {
10430      if (sc->devinfo.int_block == INT_BLOCK_HC) {
10431          bxe_hc_int_disable(sc);
10432      } else {
10433          bxe_igu_int_disable(sc);
10434      }
10435  }
10436  
10437  static void
bxe_nic_init(struct bxe_softc * sc,int load_code)10438  bxe_nic_init(struct bxe_softc *sc,
10439               int              load_code)
10440  {
10441      int i;
10442  
10443      for (i = 0; i < sc->num_queues; i++) {
10444          bxe_init_eth_fp(sc, i);
10445      }
10446  
10447      rmb(); /* ensure status block indices were read */
10448  
10449      bxe_init_rx_rings(sc);
10450      bxe_init_tx_rings(sc);
10451  
10452      if (IS_VF(sc)) {
10453          return;
10454      }
10455  
10456      /* initialize MOD_ABS interrupts */
10457      elink_init_mod_abs_int(sc, &sc->link_vars,
10458                             sc->devinfo.chip_id,
10459                             sc->devinfo.shmem_base,
10460                             sc->devinfo.shmem2_base,
10461                             SC_PORT(sc));
10462  
10463      bxe_init_def_sb(sc);
10464      bxe_update_dsb_idx(sc);
10465      bxe_init_sp_ring(sc);
10466      bxe_init_eq_ring(sc);
10467      bxe_init_internal(sc, load_code);
10468      bxe_pf_init(sc);
10469      bxe_stats_init(sc);
10470  
10471      /* flush all before enabling interrupts */
10472      mb();
10473  
10474      bxe_int_enable(sc);
10475  
10476      /* check for SPIO5 */
10477      bxe_attn_int_deasserted0(sc,
10478                               REG_RD(sc,
10479                                      (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10480                                       SC_PORT(sc)*4)) &
10481                               AEU_INPUTS_ATTN_BITS_SPIO5);
10482  }
10483  
10484  static inline void
bxe_init_objs(struct bxe_softc * sc)10485  bxe_init_objs(struct bxe_softc *sc)
10486  {
10487      /* mcast rules must be added to tx if tx switching is enabled */
10488      ecore_obj_type o_type =
10489          (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10490                                           ECORE_OBJ_TYPE_RX;
10491  
10492      /* RX_MODE controlling object */
10493      ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10494  
10495      /* multicast configuration controlling object */
10496      ecore_init_mcast_obj(sc,
10497                           &sc->mcast_obj,
10498                           sc->fp[0].cl_id,
10499                           sc->fp[0].index,
10500                           SC_FUNC(sc),
10501                           SC_FUNC(sc),
10502                           BXE_SP(sc, mcast_rdata),
10503                           BXE_SP_MAPPING(sc, mcast_rdata),
10504                           ECORE_FILTER_MCAST_PENDING,
10505                           &sc->sp_state,
10506                           o_type);
10507  
10508      /* Setup CAM credit pools */
10509      ecore_init_mac_credit_pool(sc,
10510                                 &sc->macs_pool,
10511                                 SC_FUNC(sc),
10512                                 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10513                                                   VNICS_PER_PATH(sc));
10514  
10515      ecore_init_vlan_credit_pool(sc,
10516                                  &sc->vlans_pool,
10517                                  SC_ABS_FUNC(sc) >> 1,
10518                                  CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10519                                                    VNICS_PER_PATH(sc));
10520  
10521      /* RSS configuration object */
10522      ecore_init_rss_config_obj(sc,
10523                                &sc->rss_conf_obj,
10524                                sc->fp[0].cl_id,
10525                                sc->fp[0].index,
10526                                SC_FUNC(sc),
10527                                SC_FUNC(sc),
10528                                BXE_SP(sc, rss_rdata),
10529                                BXE_SP_MAPPING(sc, rss_rdata),
10530                                ECORE_FILTER_RSS_CONF_PENDING,
10531                                &sc->sp_state, ECORE_OBJ_TYPE_RX);
10532  }
10533  
10534  /*
10535   * Initialize the function. This must be called before sending CLIENT_SETUP
10536   * for the first client.
10537   */
10538  static inline int
bxe_func_start(struct bxe_softc * sc)10539  bxe_func_start(struct bxe_softc *sc)
10540  {
10541      struct ecore_func_state_params func_params = { NULL };
10542      struct ecore_func_start_params *start_params = &func_params.params.start;
10543  
10544      /* Prepare parameters for function state transitions */
10545      bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10546  
10547      func_params.f_obj = &sc->func_obj;
10548      func_params.cmd = ECORE_F_CMD_START;
10549  
10550      /* Function parameters */
10551      start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10552      start_params->sd_vlan_tag = OVLAN(sc);
10553  
10554      if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10555          start_params->network_cos_mode = STATIC_COS;
10556      } else { /* CHIP_IS_E1X */
10557          start_params->network_cos_mode = FW_WRR;
10558      }
10559  
10560      //start_params->gre_tunnel_mode = 0;
10561      //start_params->gre_tunnel_rss  = 0;
10562  
10563      return (ecore_func_state_change(sc, &func_params));
10564  }
10565  
10566  static int
bxe_set_power_state(struct bxe_softc * sc,uint8_t state)10567  bxe_set_power_state(struct bxe_softc *sc,
10568                      uint8_t          state)
10569  {
10570      uint16_t pmcsr;
10571  
10572      /* If there is no power capability, silently succeed */
10573      if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10574          BLOGW(sc, "No power capability\n");
10575          return (0);
10576      }
10577  
10578      pmcsr = pci_read_config(sc->dev,
10579                              (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10580                              2);
10581  
10582      switch (state) {
10583      case PCI_PM_D0:
10584          pci_write_config(sc->dev,
10585                           (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10586                           ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10587  
10588          if (pmcsr & PCIM_PSTAT_DMASK) {
10589              /* delay required during transition out of D3hot */
10590              DELAY(20000);
10591          }
10592  
10593          break;
10594  
10595      case PCI_PM_D3hot:
10596          /* XXX if there are other clients above don't shut down the power */
10597  
10598          /* don't shut down the power for emulation and FPGA */
10599          if (CHIP_REV_IS_SLOW(sc)) {
10600              return (0);
10601          }
10602  
10603          pmcsr &= ~PCIM_PSTAT_DMASK;
10604          pmcsr |= PCIM_PSTAT_D3;
10605  
10606          if (sc->wol) {
10607              pmcsr |= PCIM_PSTAT_PMEENABLE;
10608          }
10609  
10610          pci_write_config(sc->dev,
10611                           (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10612                           pmcsr, 4);
10613  
10614          /*
10615           * No more memory access after this point until device is brought back
10616           * to D0 state.
10617           */
10618          break;
10619  
10620      default:
10621          BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10622              state, pmcsr);
10623          return (-1);
10624      }
10625  
10626      return (0);
10627  }
10628  
10629  
10630  /* return true if succeeded to acquire the lock */
10631  static uint8_t
bxe_trylock_hw_lock(struct bxe_softc * sc,uint32_t resource)10632  bxe_trylock_hw_lock(struct bxe_softc *sc,
10633                      uint32_t         resource)
10634  {
10635      uint32_t lock_status;
10636      uint32_t resource_bit = (1 << resource);
10637      int func = SC_FUNC(sc);
10638      uint32_t hw_lock_control_reg;
10639  
10640      BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10641  
10642      /* Validating that the resource is within range */
10643      if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10644          BLOGD(sc, DBG_LOAD,
10645                "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10646                resource, HW_LOCK_MAX_RESOURCE_VALUE);
10647          return (FALSE);
10648      }
10649  
10650      if (func <= 5) {
10651          hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10652      } else {
10653          hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10654      }
10655  
10656      /* try to acquire the lock */
10657      REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10658      lock_status = REG_RD(sc, hw_lock_control_reg);
10659      if (lock_status & resource_bit) {
10660          return (TRUE);
10661      }
10662  
10663      BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10664          "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10665          lock_status, resource_bit);
10666  
10667      return (FALSE);
10668  }
10669  
10670  /*
10671   * Get the recovery leader resource id according to the engine this function
10672   * belongs to. Currently only only 2 engines is supported.
10673   */
10674  static int
bxe_get_leader_lock_resource(struct bxe_softc * sc)10675  bxe_get_leader_lock_resource(struct bxe_softc *sc)
10676  {
10677      if (SC_PATH(sc)) {
10678          return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10679      } else {
10680          return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10681      }
10682  }
10683  
10684  /* try to acquire a leader lock for current engine */
10685  static uint8_t
bxe_trylock_leader_lock(struct bxe_softc * sc)10686  bxe_trylock_leader_lock(struct bxe_softc *sc)
10687  {
10688      return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10689  }
10690  
10691  static int
bxe_release_leader_lock(struct bxe_softc * sc)10692  bxe_release_leader_lock(struct bxe_softc *sc)
10693  {
10694      return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10695  }
10696  
10697  /* close gates #2, #3 and #4 */
10698  static void
bxe_set_234_gates(struct bxe_softc * sc,uint8_t close)10699  bxe_set_234_gates(struct bxe_softc *sc,
10700                    uint8_t          close)
10701  {
10702      uint32_t val;
10703  
10704      /* gates #2 and #4a are closed/opened for "not E1" only */
10705      if (!CHIP_IS_E1(sc)) {
10706          /* #4 */
10707          REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10708          /* #2 */
10709          REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10710      }
10711  
10712      /* #3 */
10713      if (CHIP_IS_E1x(sc)) {
10714          /* prevent interrupts from HC on both ports */
10715          val = REG_RD(sc, HC_REG_CONFIG_1);
10716          REG_WR(sc, HC_REG_CONFIG_1,
10717                 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10718                 (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10719  
10720          val = REG_RD(sc, HC_REG_CONFIG_0);
10721          REG_WR(sc, HC_REG_CONFIG_0,
10722                 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10723                 (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10724      } else {
10725          /* Prevent incoming interrupts in IGU */
10726          val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10727  
10728          REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10729                 (!close) ?
10730                 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10731                 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10732      }
10733  
10734      BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10735            close ? "closing" : "opening");
10736  
10737      wmb();
10738  }
10739  
10740  /* poll for pending writes bit, it should get cleared in no more than 1s */
10741  static int
bxe_er_poll_igu_vq(struct bxe_softc * sc)10742  bxe_er_poll_igu_vq(struct bxe_softc *sc)
10743  {
10744      uint32_t cnt = 1000;
10745      uint32_t pend_bits = 0;
10746  
10747      do {
10748          pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10749  
10750          if (pend_bits == 0) {
10751              break;
10752          }
10753  
10754          DELAY(1000);
10755      } while (--cnt > 0);
10756  
10757      if (cnt == 0) {
10758          BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10759          return (-1);
10760      }
10761  
10762      return (0);
10763  }
10764  
10765  #define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10766  
10767  static void
bxe_clp_reset_prep(struct bxe_softc * sc,uint32_t * magic_val)10768  bxe_clp_reset_prep(struct bxe_softc *sc,
10769                     uint32_t         *magic_val)
10770  {
10771      /* Do some magic... */
10772      uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10773      *magic_val = val & SHARED_MF_CLP_MAGIC;
10774      MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10775  }
10776  
10777  /* restore the value of the 'magic' bit */
10778  static void
bxe_clp_reset_done(struct bxe_softc * sc,uint32_t magic_val)10779  bxe_clp_reset_done(struct bxe_softc *sc,
10780                     uint32_t         magic_val)
10781  {
10782      /* Restore the 'magic' bit value... */
10783      uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10784      MFCFG_WR(sc, shared_mf_config.clp_mb,
10785                (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10786  }
10787  
10788  /* prepare for MCP reset, takes care of CLP configurations */
10789  static void
bxe_reset_mcp_prep(struct bxe_softc * sc,uint32_t * magic_val)10790  bxe_reset_mcp_prep(struct bxe_softc *sc,
10791                     uint32_t         *magic_val)
10792  {
10793      uint32_t shmem;
10794      uint32_t validity_offset;
10795  
10796      /* set `magic' bit in order to save MF config */
10797      if (!CHIP_IS_E1(sc)) {
10798          bxe_clp_reset_prep(sc, magic_val);
10799      }
10800  
10801      /* get shmem offset */
10802      shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10803      validity_offset =
10804          offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10805  
10806      /* Clear validity map flags */
10807      if (shmem > 0) {
10808          REG_WR(sc, shmem + validity_offset, 0);
10809      }
10810  }
10811  
10812  #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10813  #define MCP_ONE_TIMEOUT  100    /* 100 ms */
10814  
10815  static void
bxe_mcp_wait_one(struct bxe_softc * sc)10816  bxe_mcp_wait_one(struct bxe_softc *sc)
10817  {
10818      /* special handling for emulation and FPGA (10 times longer) */
10819      if (CHIP_REV_IS_SLOW(sc)) {
10820          DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10821      } else {
10822          DELAY((MCP_ONE_TIMEOUT) * 1000);
10823      }
10824  }
10825  
10826  /* initialize shmem_base and waits for validity signature to appear */
10827  static int
bxe_init_shmem(struct bxe_softc * sc)10828  bxe_init_shmem(struct bxe_softc *sc)
10829  {
10830      int cnt = 0;
10831      uint32_t val = 0;
10832  
10833      do {
10834          sc->devinfo.shmem_base     =
10835          sc->link_params.shmem_base =
10836              REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10837  
10838          if (sc->devinfo.shmem_base) {
10839              val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10840              if (val & SHR_MEM_VALIDITY_MB)
10841                  return (0);
10842          }
10843  
10844          bxe_mcp_wait_one(sc);
10845  
10846      } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10847  
10848      BLOGE(sc, "BAD MCP validity signature\n");
10849  
10850      return (-1);
10851  }
10852  
10853  static int
bxe_reset_mcp_comp(struct bxe_softc * sc,uint32_t magic_val)10854  bxe_reset_mcp_comp(struct bxe_softc *sc,
10855                     uint32_t         magic_val)
10856  {
10857      int rc = bxe_init_shmem(sc);
10858  
10859      /* Restore the `magic' bit value */
10860      if (!CHIP_IS_E1(sc)) {
10861          bxe_clp_reset_done(sc, magic_val);
10862      }
10863  
10864      return (rc);
10865  }
10866  
10867  static void
bxe_pxp_prep(struct bxe_softc * sc)10868  bxe_pxp_prep(struct bxe_softc *sc)
10869  {
10870      if (!CHIP_IS_E1(sc)) {
10871          REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10872          REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10873          wmb();
10874      }
10875  }
10876  
10877  /*
10878   * Reset the whole chip except for:
10879   *      - PCIE core
10880   *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10881   *      - IGU
10882   *      - MISC (including AEU)
10883   *      - GRC
10884   *      - RBCN, RBCP
10885   */
10886  static void
bxe_process_kill_chip_reset(struct bxe_softc * sc,uint8_t global)10887  bxe_process_kill_chip_reset(struct bxe_softc *sc,
10888                              uint8_t          global)
10889  {
10890      uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10891      uint32_t global_bits2, stay_reset2;
10892  
10893      /*
10894       * Bits that have to be set in reset_mask2 if we want to reset 'global'
10895       * (per chip) blocks.
10896       */
10897      global_bits2 =
10898          MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10899          MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10900  
10901      /*
10902       * Don't reset the following blocks.
10903       * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10904       *            reset, as in 4 port device they might still be owned
10905       *            by the MCP (there is only one leader per path).
10906       */
10907      not_reset_mask1 =
10908          MISC_REGISTERS_RESET_REG_1_RST_HC |
10909          MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10910          MISC_REGISTERS_RESET_REG_1_RST_PXP;
10911  
10912      not_reset_mask2 =
10913          MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10914          MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10915          MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10916          MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10917          MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10918          MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10919          MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10920          MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10921          MISC_REGISTERS_RESET_REG_2_RST_ATC |
10922          MISC_REGISTERS_RESET_REG_2_PGLC |
10923          MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10924          MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10925          MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10926          MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10927          MISC_REGISTERS_RESET_REG_2_UMAC0 |
10928          MISC_REGISTERS_RESET_REG_2_UMAC1;
10929  
10930      /*
10931       * Keep the following blocks in reset:
10932       *  - all xxMACs are handled by the elink code.
10933       */
10934      stay_reset2 =
10935          MISC_REGISTERS_RESET_REG_2_XMAC |
10936          MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10937  
10938      /* Full reset masks according to the chip */
10939      reset_mask1 = 0xffffffff;
10940  
10941      if (CHIP_IS_E1(sc))
10942          reset_mask2 = 0xffff;
10943      else if (CHIP_IS_E1H(sc))
10944          reset_mask2 = 0x1ffff;
10945      else if (CHIP_IS_E2(sc))
10946          reset_mask2 = 0xfffff;
10947      else /* CHIP_IS_E3 */
10948          reset_mask2 = 0x3ffffff;
10949  
10950      /* Don't reset global blocks unless we need to */
10951      if (!global)
10952          reset_mask2 &= ~global_bits2;
10953  
10954      /*
10955       * In case of attention in the QM, we need to reset PXP
10956       * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10957       * because otherwise QM reset would release 'close the gates' shortly
10958       * before resetting the PXP, then the PSWRQ would send a write
10959       * request to PGLUE. Then when PXP is reset, PGLUE would try to
10960       * read the payload data from PSWWR, but PSWWR would not
10961       * respond. The write queue in PGLUE would stuck, dmae commands
10962       * would not return. Therefore it's important to reset the second
10963       * reset register (containing the
10964       * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10965       * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10966       * bit).
10967       */
10968      REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10969             reset_mask2 & (~not_reset_mask2));
10970  
10971      REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10972             reset_mask1 & (~not_reset_mask1));
10973  
10974      mb();
10975      wmb();
10976  
10977      REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10978             reset_mask2 & (~stay_reset2));
10979  
10980      mb();
10981      wmb();
10982  
10983      REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
10984      wmb();
10985  }
10986  
10987  static int
bxe_process_kill(struct bxe_softc * sc,uint8_t global)10988  bxe_process_kill(struct bxe_softc *sc,
10989                   uint8_t          global)
10990  {
10991      int cnt = 1000;
10992      uint32_t val = 0;
10993      uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
10994      uint32_t tags_63_32 = 0;
10995  
10996      /* Empty the Tetris buffer, wait for 1s */
10997      do {
10998          sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
10999          blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11000          port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11001          port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11002          pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11003          if (CHIP_IS_E3(sc)) {
11004              tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11005          }
11006  
11007          if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11008              ((port_is_idle_0 & 0x1) == 0x1) &&
11009              ((port_is_idle_1 & 0x1) == 0x1) &&
11010              (pgl_exp_rom2 == 0xffffffff) &&
11011              (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11012              break;
11013          DELAY(1000);
11014      } while (cnt-- > 0);
11015  
11016      if (cnt <= 0) {
11017          BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11018                    "are still outstanding read requests after 1s! "
11019                    "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11020                    "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11021                sr_cnt, blk_cnt, port_is_idle_0,
11022                port_is_idle_1, pgl_exp_rom2);
11023          return (-1);
11024      }
11025  
11026      mb();
11027  
11028      /* Close gates #2, #3 and #4 */
11029      bxe_set_234_gates(sc, TRUE);
11030  
11031      /* Poll for IGU VQs for 57712 and newer chips */
11032      if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11033          return (-1);
11034      }
11035  
11036      /* XXX indicate that "process kill" is in progress to MCP */
11037  
11038      /* clear "unprepared" bit */
11039      REG_WR(sc, MISC_REG_UNPREPARED, 0);
11040      mb();
11041  
11042      /* Make sure all is written to the chip before the reset */
11043      wmb();
11044  
11045      /*
11046       * Wait for 1ms to empty GLUE and PCI-E core queues,
11047       * PSWHST, GRC and PSWRD Tetris buffer.
11048       */
11049      DELAY(1000);
11050  
11051      /* Prepare to chip reset: */
11052      /* MCP */
11053      if (global) {
11054          bxe_reset_mcp_prep(sc, &val);
11055      }
11056  
11057      /* PXP */
11058      bxe_pxp_prep(sc);
11059      mb();
11060  
11061      /* reset the chip */
11062      bxe_process_kill_chip_reset(sc, global);
11063      mb();
11064  
11065      /* clear errors in PGB */
11066      if (!CHIP_IS_E1(sc))
11067          REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11068  
11069      /* Recover after reset: */
11070      /* MCP */
11071      if (global && bxe_reset_mcp_comp(sc, val)) {
11072          return (-1);
11073      }
11074  
11075      /* XXX add resetting the NO_MCP mode DB here */
11076  
11077      /* Open the gates #2, #3 and #4 */
11078      bxe_set_234_gates(sc, FALSE);
11079  
11080      /* XXX
11081       * IGU/AEU preparation bring back the AEU/IGU to a reset state
11082       * re-enable attentions
11083       */
11084  
11085      return (0);
11086  }
11087  
11088  static int
bxe_leader_reset(struct bxe_softc * sc)11089  bxe_leader_reset(struct bxe_softc *sc)
11090  {
11091      int rc = 0;
11092      uint8_t global = bxe_reset_is_global(sc);
11093      uint32_t load_code;
11094  
11095      /*
11096       * If not going to reset MCP, load "fake" driver to reset HW while
11097       * driver is owner of the HW.
11098       */
11099      if (!global && !BXE_NOMCP(sc)) {
11100          load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11101                                     DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11102          if (!load_code) {
11103              BLOGE(sc, "MCP response failure, aborting\n");
11104              rc = -1;
11105              goto exit_leader_reset;
11106          }
11107  
11108          if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11109              (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11110              BLOGE(sc, "MCP unexpected response, aborting\n");
11111              rc = -1;
11112              goto exit_leader_reset2;
11113          }
11114  
11115          load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11116          if (!load_code) {
11117              BLOGE(sc, "MCP response failure, aborting\n");
11118              rc = -1;
11119              goto exit_leader_reset2;
11120          }
11121      }
11122  
11123      /* try to recover after the failure */
11124      if (bxe_process_kill(sc, global)) {
11125          BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11126          rc = -1;
11127          goto exit_leader_reset2;
11128      }
11129  
11130      /*
11131       * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11132       * state.
11133       */
11134      bxe_set_reset_done(sc);
11135      if (global) {
11136          bxe_clear_reset_global(sc);
11137      }
11138  
11139  exit_leader_reset2:
11140  
11141      /* unload "fake driver" if it was loaded */
11142      if (!global && !BXE_NOMCP(sc)) {
11143          bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11144          bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11145      }
11146  
11147  exit_leader_reset:
11148  
11149      sc->is_leader = 0;
11150      bxe_release_leader_lock(sc);
11151  
11152      mb();
11153      return (rc);
11154  }
11155  
11156  /*
11157   * prepare INIT transition, parameters configured:
11158   *   - HC configuration
11159   *   - Queue's CDU context
11160   */
11161  static void
bxe_pf_q_prep_init(struct bxe_softc * sc,struct bxe_fastpath * fp,struct ecore_queue_init_params * init_params)11162  bxe_pf_q_prep_init(struct bxe_softc               *sc,
11163                     struct bxe_fastpath            *fp,
11164                     struct ecore_queue_init_params *init_params)
11165  {
11166      uint8_t cos;
11167      int cxt_index, cxt_offset;
11168  
11169      bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11170      bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11171  
11172      bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11173      bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11174  
11175      /* HC rate */
11176      init_params->rx.hc_rate =
11177          sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11178      init_params->tx.hc_rate =
11179          sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11180  
11181      /* FW SB ID */
11182      init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11183  
11184      /* CQ index among the SB indices */
11185      init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11186      init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11187  
11188      /* set maximum number of COSs supported by this queue */
11189      init_params->max_cos = sc->max_cos;
11190  
11191      BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11192            fp->index, init_params->max_cos);
11193  
11194      /* set the context pointers queue object */
11195      for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11196          /* XXX change index/cid here if ever support multiple tx CoS */
11197          /* fp->txdata[cos]->cid */
11198          cxt_index = fp->index / ILT_PAGE_CIDS;
11199          cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11200          init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11201      }
11202  }
11203  
11204  /* set flags that are common for the Tx-only and not normal connections */
11205  static unsigned long
bxe_get_common_flags(struct bxe_softc * sc,struct bxe_fastpath * fp,uint8_t zero_stats)11206  bxe_get_common_flags(struct bxe_softc    *sc,
11207                       struct bxe_fastpath *fp,
11208                       uint8_t             zero_stats)
11209  {
11210      unsigned long flags = 0;
11211  
11212      /* PF driver will always initialize the Queue to an ACTIVE state */
11213      bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11214  
11215      /*
11216       * tx only connections collect statistics (on the same index as the
11217       * parent connection). The statistics are zeroed when the parent
11218       * connection is initialized.
11219       */
11220  
11221      bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11222      if (zero_stats) {
11223          bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11224      }
11225  
11226      /*
11227       * tx only connections can support tx-switching, though their
11228       * CoS-ness doesn't survive the loopback
11229       */
11230      if (sc->flags & BXE_TX_SWITCHING) {
11231          bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11232      }
11233  
11234      bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11235  
11236      return (flags);
11237  }
11238  
11239  static unsigned long
bxe_get_q_flags(struct bxe_softc * sc,struct bxe_fastpath * fp,uint8_t leading)11240  bxe_get_q_flags(struct bxe_softc    *sc,
11241                  struct bxe_fastpath *fp,
11242                  uint8_t             leading)
11243  {
11244      unsigned long flags = 0;
11245  
11246      if (IS_MF_SD(sc)) {
11247          bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11248      }
11249  
11250      if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11251          bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11252          bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11253      }
11254  
11255      if (leading) {
11256          bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11257          bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11258      }
11259  
11260      bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11261  
11262      /* merge with common flags */
11263      return (flags | bxe_get_common_flags(sc, fp, TRUE));
11264  }
11265  
11266  static void
bxe_pf_q_prep_general(struct bxe_softc * sc,struct bxe_fastpath * fp,struct ecore_general_setup_params * gen_init,uint8_t cos)11267  bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11268                        struct bxe_fastpath               *fp,
11269                        struct ecore_general_setup_params *gen_init,
11270                        uint8_t                           cos)
11271  {
11272      gen_init->stat_id = bxe_stats_id(fp);
11273      gen_init->spcl_id = fp->cl_id;
11274      gen_init->mtu = sc->mtu;
11275      gen_init->cos = cos;
11276  }
11277  
11278  static void
bxe_pf_rx_q_prep(struct bxe_softc * sc,struct bxe_fastpath * fp,struct rxq_pause_params * pause,struct ecore_rxq_setup_params * rxq_init)11279  bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11280                   struct bxe_fastpath           *fp,
11281                   struct rxq_pause_params       *pause,
11282                   struct ecore_rxq_setup_params *rxq_init)
11283  {
11284      uint8_t max_sge = 0;
11285      uint16_t sge_sz = 0;
11286      uint16_t tpa_agg_size = 0;
11287  
11288      pause->sge_th_lo = SGE_TH_LO(sc);
11289      pause->sge_th_hi = SGE_TH_HI(sc);
11290  
11291      /* validate SGE ring has enough to cross high threshold */
11292      if (sc->dropless_fc &&
11293              (pause->sge_th_hi + FW_PREFETCH_CNT) >
11294              (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11295          BLOGW(sc, "sge ring threshold limit\n");
11296      }
11297  
11298      /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11299      tpa_agg_size = (2 * sc->mtu);
11300      if (tpa_agg_size < sc->max_aggregation_size) {
11301          tpa_agg_size = sc->max_aggregation_size;
11302      }
11303  
11304      max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11305      max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11306                     (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11307      sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11308  
11309      /* pause - not for e1 */
11310      if (!CHIP_IS_E1(sc)) {
11311          pause->bd_th_lo = BD_TH_LO(sc);
11312          pause->bd_th_hi = BD_TH_HI(sc);
11313  
11314          pause->rcq_th_lo = RCQ_TH_LO(sc);
11315          pause->rcq_th_hi = RCQ_TH_HI(sc);
11316  
11317          /* validate rings have enough entries to cross high thresholds */
11318          if (sc->dropless_fc &&
11319              pause->bd_th_hi + FW_PREFETCH_CNT >
11320              sc->rx_ring_size) {
11321              BLOGW(sc, "rx bd ring threshold limit\n");
11322          }
11323  
11324          if (sc->dropless_fc &&
11325              pause->rcq_th_hi + FW_PREFETCH_CNT >
11326              RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11327              BLOGW(sc, "rcq ring threshold limit\n");
11328          }
11329  
11330          pause->pri_map = 1;
11331      }
11332  
11333      /* rxq setup */
11334      rxq_init->dscr_map   = fp->rx_dma.paddr;
11335      rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11336      rxq_init->rcq_map    = fp->rcq_dma.paddr;
11337      rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11338  
11339      /*
11340       * This should be a maximum number of data bytes that may be
11341       * placed on the BD (not including paddings).
11342       */
11343      rxq_init->buf_sz = (fp->rx_buf_size -
11344                          IP_HEADER_ALIGNMENT_PADDING);
11345  
11346      rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11347      rxq_init->tpa_agg_sz      = tpa_agg_size;
11348      rxq_init->sge_buf_sz      = sge_sz;
11349      rxq_init->max_sges_pkt    = max_sge;
11350      rxq_init->rss_engine_id   = SC_FUNC(sc);
11351      rxq_init->mcast_engine_id = SC_FUNC(sc);
11352  
11353      /*
11354       * Maximum number or simultaneous TPA aggregation for this Queue.
11355       * For PF Clients it should be the maximum available number.
11356       * VF driver(s) may want to define it to a smaller value.
11357       */
11358      rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11359  
11360      rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11361      rxq_init->fw_sb_id = fp->fw_sb_id;
11362  
11363      rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11364  
11365      /*
11366       * configure silent vlan removal
11367       * if multi function mode is afex, then mask default vlan
11368       */
11369      if (IS_MF_AFEX(sc)) {
11370          rxq_init->silent_removal_value =
11371              sc->devinfo.mf_info.afex_def_vlan_tag;
11372          rxq_init->silent_removal_mask = EVL_VLID_MASK;
11373      }
11374  }
11375  
11376  static void
bxe_pf_tx_q_prep(struct bxe_softc * sc,struct bxe_fastpath * fp,struct ecore_txq_setup_params * txq_init,uint8_t cos)11377  bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11378                   struct bxe_fastpath           *fp,
11379                   struct ecore_txq_setup_params *txq_init,
11380                   uint8_t                       cos)
11381  {
11382      /*
11383       * XXX If multiple CoS is ever supported then each fastpath structure
11384       * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11385       * fp->txdata[cos]->tx_dma.paddr;
11386       */
11387      txq_init->dscr_map     = fp->tx_dma.paddr;
11388      txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11389      txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11390      txq_init->fw_sb_id     = fp->fw_sb_id;
11391  
11392      /*
11393       * set the TSS leading client id for TX classfication to the
11394       * leading RSS client id
11395       */
11396      txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11397  }
11398  
11399  /*
11400   * This function performs 2 steps in a queue state machine:
11401   *   1) RESET->INIT
11402   *   2) INIT->SETUP
11403   */
11404  static int
bxe_setup_queue(struct bxe_softc * sc,struct bxe_fastpath * fp,uint8_t leading)11405  bxe_setup_queue(struct bxe_softc    *sc,
11406                  struct bxe_fastpath *fp,
11407                  uint8_t             leading)
11408  {
11409      struct ecore_queue_state_params q_params = { NULL };
11410      struct ecore_queue_setup_params *setup_params =
11411                          &q_params.params.setup;
11412      int rc;
11413  
11414      BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11415  
11416      bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11417  
11418      q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11419  
11420      /* we want to wait for completion in this context */
11421      bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11422  
11423      /* prepare the INIT parameters */
11424      bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11425  
11426      /* Set the command */
11427      q_params.cmd = ECORE_Q_CMD_INIT;
11428  
11429      /* Change the state to INIT */
11430      rc = ecore_queue_state_change(sc, &q_params);
11431      if (rc) {
11432          BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11433          return (rc);
11434      }
11435  
11436      BLOGD(sc, DBG_LOAD, "init complete\n");
11437  
11438      /* now move the Queue to the SETUP state */
11439      memset(setup_params, 0, sizeof(*setup_params));
11440  
11441      /* set Queue flags */
11442      setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11443  
11444      /* set general SETUP parameters */
11445      bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11446                            FIRST_TX_COS_INDEX);
11447  
11448      bxe_pf_rx_q_prep(sc, fp,
11449                       &setup_params->pause_params,
11450                       &setup_params->rxq_params);
11451  
11452      bxe_pf_tx_q_prep(sc, fp,
11453                       &setup_params->txq_params,
11454                       FIRST_TX_COS_INDEX);
11455  
11456      /* Set the command */
11457      q_params.cmd = ECORE_Q_CMD_SETUP;
11458  
11459      /* change the state to SETUP */
11460      rc = ecore_queue_state_change(sc, &q_params);
11461      if (rc) {
11462          BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11463          return (rc);
11464      }
11465  
11466      return (rc);
11467  }
11468  
11469  static int
bxe_setup_leading(struct bxe_softc * sc)11470  bxe_setup_leading(struct bxe_softc *sc)
11471  {
11472      return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11473  }
11474  
11475  static int
bxe_config_rss_pf(struct bxe_softc * sc,struct ecore_rss_config_obj * rss_obj,uint8_t config_hash)11476  bxe_config_rss_pf(struct bxe_softc            *sc,
11477                    struct ecore_rss_config_obj *rss_obj,
11478                    uint8_t                     config_hash)
11479  {
11480      struct ecore_config_rss_params params = { NULL };
11481      int i;
11482  
11483      /*
11484       * Although RSS is meaningless when there is a single HW queue we
11485       * still need it enabled in order to have HW Rx hash generated.
11486       */
11487  
11488      params.rss_obj = rss_obj;
11489  
11490      bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11491  
11492      bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11493  
11494      /* RSS configuration */
11495      bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11496      bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11497      bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11498      bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11499      if (rss_obj->udp_rss_v4) {
11500          bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11501      }
11502      if (rss_obj->udp_rss_v6) {
11503          bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11504      }
11505  
11506      /* Hash bits */
11507      params.rss_result_mask = MULTI_MASK;
11508  
11509      memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11510  
11511      if (config_hash) {
11512          /* RSS keys */
11513          for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11514              params.rss_key[i] = arc4random();
11515          }
11516  
11517          bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11518      }
11519  
11520      return (ecore_config_rss(sc, &params));
11521  }
11522  
11523  static int
bxe_config_rss_eth(struct bxe_softc * sc,uint8_t config_hash)11524  bxe_config_rss_eth(struct bxe_softc *sc,
11525                     uint8_t          config_hash)
11526  {
11527      return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11528  }
11529  
11530  static int
bxe_init_rss_pf(struct bxe_softc * sc)11531  bxe_init_rss_pf(struct bxe_softc *sc)
11532  {
11533      uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11534      int i;
11535  
11536      /*
11537       * Prepare the initial contents of the indirection table if
11538       * RSS is enabled
11539       */
11540      for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11541          sc->rss_conf_obj.ind_table[i] =
11542              (sc->fp->cl_id + (i % num_eth_queues));
11543      }
11544  
11545      if (sc->udp_rss) {
11546          sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11547      }
11548  
11549      /*
11550       * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11551       * per-port, so if explicit configuration is needed, do it only
11552       * for a PMF.
11553       *
11554       * For 57712 and newer it's a per-function configuration.
11555       */
11556      return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11557  }
11558  
11559  static int
bxe_set_mac_one(struct bxe_softc * sc,uint8_t * mac,struct ecore_vlan_mac_obj * obj,uint8_t set,int mac_type,unsigned long * ramrod_flags)11560  bxe_set_mac_one(struct bxe_softc          *sc,
11561                  uint8_t                   *mac,
11562                  struct ecore_vlan_mac_obj *obj,
11563                  uint8_t                   set,
11564                  int                       mac_type,
11565                  unsigned long             *ramrod_flags)
11566  {
11567      struct ecore_vlan_mac_ramrod_params ramrod_param;
11568      int rc;
11569  
11570      memset(&ramrod_param, 0, sizeof(ramrod_param));
11571  
11572      /* fill in general parameters */
11573      ramrod_param.vlan_mac_obj = obj;
11574      ramrod_param.ramrod_flags = *ramrod_flags;
11575  
11576      /* fill a user request section if needed */
11577      if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11578          memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11579  
11580          bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11581  
11582          /* Set the command: ADD or DEL */
11583          ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11584                                              ECORE_VLAN_MAC_DEL;
11585      }
11586  
11587      rc = ecore_config_vlan_mac(sc, &ramrod_param);
11588  
11589      if (rc == ECORE_EXISTS) {
11590          BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11591          /* do not treat adding same MAC as error */
11592          rc = 0;
11593      } else if (rc < 0) {
11594          BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11595      }
11596  
11597      return (rc);
11598  }
11599  
11600  static int
bxe_set_eth_mac(struct bxe_softc * sc,uint8_t set)11601  bxe_set_eth_mac(struct bxe_softc *sc,
11602                  uint8_t          set)
11603  {
11604      unsigned long ramrod_flags = 0;
11605  
11606      BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11607  
11608      bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11609  
11610      /* Eth MAC is set on RSS leading client (fp[0]) */
11611      return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11612                              &sc->sp_objs->mac_obj,
11613                              set, ECORE_ETH_MAC, &ramrod_flags));
11614  }
11615  
11616  static int
bxe_get_cur_phy_idx(struct bxe_softc * sc)11617  bxe_get_cur_phy_idx(struct bxe_softc *sc)
11618  {
11619      uint32_t sel_phy_idx = 0;
11620  
11621      if (sc->link_params.num_phys <= 1) {
11622          return (ELINK_INT_PHY);
11623      }
11624  
11625      if (sc->link_vars.link_up) {
11626          sel_phy_idx = ELINK_EXT_PHY1;
11627          /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11628          if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11629              (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11630               ELINK_SUPPORTED_FIBRE))
11631              sel_phy_idx = ELINK_EXT_PHY2;
11632      } else {
11633          switch (elink_phy_selection(&sc->link_params)) {
11634          case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11635          case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11636          case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11637                 sel_phy_idx = ELINK_EXT_PHY1;
11638                 break;
11639          case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11640          case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11641                 sel_phy_idx = ELINK_EXT_PHY2;
11642                 break;
11643          }
11644      }
11645  
11646      return (sel_phy_idx);
11647  }
11648  
11649  static int
bxe_get_link_cfg_idx(struct bxe_softc * sc)11650  bxe_get_link_cfg_idx(struct bxe_softc *sc)
11651  {
11652      uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11653  
11654      /*
11655       * The selected activated PHY is always after swapping (in case PHY
11656       * swapping is enabled). So when swapping is enabled, we need to reverse
11657       * the configuration
11658       */
11659  
11660      if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11661          if (sel_phy_idx == ELINK_EXT_PHY1)
11662              sel_phy_idx = ELINK_EXT_PHY2;
11663          else if (sel_phy_idx == ELINK_EXT_PHY2)
11664              sel_phy_idx = ELINK_EXT_PHY1;
11665      }
11666  
11667      return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11668  }
11669  
11670  static void
bxe_set_requested_fc(struct bxe_softc * sc)11671  bxe_set_requested_fc(struct bxe_softc *sc)
11672  {
11673      /*
11674       * Initialize link parameters structure variables
11675       * It is recommended to turn off RX FC for jumbo frames
11676       * for better performance
11677       */
11678      if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11679          sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11680      } else {
11681          sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11682      }
11683  }
11684  
11685  static void
bxe_calc_fc_adv(struct bxe_softc * sc)11686  bxe_calc_fc_adv(struct bxe_softc *sc)
11687  {
11688      uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11689  
11690  
11691      sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11692                                             ADVERTISED_Pause);
11693  
11694      switch (sc->link_vars.ieee_fc &
11695              MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11696  
11697      case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11698          sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11699                                            ADVERTISED_Pause);
11700          break;
11701  
11702      case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11703          sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11704          break;
11705  
11706      default:
11707          break;
11708  
11709      }
11710  }
11711  
11712  static uint16_t
bxe_get_mf_speed(struct bxe_softc * sc)11713  bxe_get_mf_speed(struct bxe_softc *sc)
11714  {
11715      uint16_t line_speed = sc->link_vars.line_speed;
11716      if (IS_MF(sc)) {
11717          uint16_t maxCfg =
11718              bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11719  
11720          /* calculate the current MAX line speed limit for the MF devices */
11721          if (IS_MF_SI(sc)) {
11722              line_speed = (line_speed * maxCfg) / 100;
11723          } else { /* SD mode */
11724              uint16_t vn_max_rate = maxCfg * 100;
11725  
11726              if (vn_max_rate < line_speed) {
11727                  line_speed = vn_max_rate;
11728              }
11729          }
11730      }
11731  
11732      return (line_speed);
11733  }
11734  
11735  static void
bxe_fill_report_data(struct bxe_softc * sc,struct bxe_link_report_data * data)11736  bxe_fill_report_data(struct bxe_softc            *sc,
11737                       struct bxe_link_report_data *data)
11738  {
11739      uint16_t line_speed = bxe_get_mf_speed(sc);
11740  
11741      memset(data, 0, sizeof(*data));
11742  
11743      /* fill the report data with the effective line speed */
11744      data->line_speed = line_speed;
11745  
11746      /* Link is down */
11747      if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11748          bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11749      }
11750  
11751      /* Full DUPLEX */
11752      if (sc->link_vars.duplex == DUPLEX_FULL) {
11753          bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11754      }
11755  
11756      /* Rx Flow Control is ON */
11757      if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11758          bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11759      }
11760  
11761      /* Tx Flow Control is ON */
11762      if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11763          bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11764      }
11765  }
11766  
11767  /* report link status to OS, should be called under phy_lock */
11768  static void
bxe_link_report_locked(struct bxe_softc * sc)11769  bxe_link_report_locked(struct bxe_softc *sc)
11770  {
11771      struct bxe_link_report_data cur_data;
11772  
11773      /* reread mf_cfg */
11774      if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11775          bxe_read_mf_cfg(sc);
11776      }
11777  
11778      /* Read the current link report info */
11779      bxe_fill_report_data(sc, &cur_data);
11780  
11781      /* Don't report link down or exactly the same link status twice */
11782      if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11783          (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11784                        &sc->last_reported_link.link_report_flags) &&
11785           bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11786                        &cur_data.link_report_flags))) {
11787          return;
11788      }
11789  
11790  	ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
11791  					cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11792      sc->link_cnt++;
11793  
11794  	ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11795      /* report new link params and remember the state for the next time */
11796      memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11797  
11798      if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11799                       &cur_data.link_report_flags)) {
11800          if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11801      } else {
11802          const char *duplex;
11803          const char *flow;
11804  
11805          if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11806                                     &cur_data.link_report_flags)) {
11807              duplex = "full";
11808  			ELINK_DEBUG_P0(sc, "link set to full duplex\n");
11809          } else {
11810              duplex = "half";
11811  			ELINK_DEBUG_P0(sc, "link set to half duplex\n");
11812          }
11813  
11814          /*
11815           * Handle the FC at the end so that only these flags would be
11816           * possibly set. This way we may easily check if there is no FC
11817           * enabled.
11818           */
11819          if (cur_data.link_report_flags) {
11820              if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11821                               &cur_data.link_report_flags) &&
11822                  bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11823                               &cur_data.link_report_flags)) {
11824                  flow = "ON - receive & transmit";
11825              } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11826                                      &cur_data.link_report_flags) &&
11827                         !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11828                                       &cur_data.link_report_flags)) {
11829                  flow = "ON - receive";
11830              } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11831                                       &cur_data.link_report_flags) &&
11832                         bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11833                                      &cur_data.link_report_flags)) {
11834                  flow = "ON - transmit";
11835              } else {
11836                  flow = "none"; /* possible? */
11837              }
11838          } else {
11839              flow = "none";
11840          }
11841  
11842          if_link_state_change(sc->ifp, LINK_STATE_UP);
11843          BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11844                cur_data.line_speed, duplex, flow);
11845      }
11846  }
11847  
11848  static void
bxe_link_report(struct bxe_softc * sc)11849  bxe_link_report(struct bxe_softc *sc)
11850  {
11851      bxe_acquire_phy_lock(sc);
11852      bxe_link_report_locked(sc);
11853      bxe_release_phy_lock(sc);
11854  }
11855  
11856  static void
bxe_link_status_update(struct bxe_softc * sc)11857  bxe_link_status_update(struct bxe_softc *sc)
11858  {
11859      if (sc->state != BXE_STATE_OPEN) {
11860          return;
11861      }
11862  
11863      if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11864          elink_link_status_update(&sc->link_params, &sc->link_vars);
11865      } else {
11866          sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11867                                    ELINK_SUPPORTED_10baseT_Full |
11868                                    ELINK_SUPPORTED_100baseT_Half |
11869                                    ELINK_SUPPORTED_100baseT_Full |
11870                                    ELINK_SUPPORTED_1000baseT_Full |
11871                                    ELINK_SUPPORTED_2500baseX_Full |
11872                                    ELINK_SUPPORTED_10000baseT_Full |
11873                                    ELINK_SUPPORTED_TP |
11874                                    ELINK_SUPPORTED_FIBRE |
11875                                    ELINK_SUPPORTED_Autoneg |
11876                                    ELINK_SUPPORTED_Pause |
11877                                    ELINK_SUPPORTED_Asym_Pause);
11878          sc->port.advertising[0] = sc->port.supported[0];
11879  
11880          sc->link_params.sc                = sc;
11881          sc->link_params.port              = SC_PORT(sc);
11882          sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11883          sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11884          sc->link_params.req_line_speed[0] = SPEED_10000;
11885          sc->link_params.speed_cap_mask[0] = 0x7f0000;
11886          sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11887  
11888          if (CHIP_REV_IS_FPGA(sc)) {
11889              sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11890              sc->link_vars.line_speed  = ELINK_SPEED_1000;
11891              sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11892                                           LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11893          } else {
11894              sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11895              sc->link_vars.line_speed  = ELINK_SPEED_10000;
11896              sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11897                                           LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11898          }
11899  
11900          sc->link_vars.link_up = 1;
11901  
11902          sc->link_vars.duplex    = DUPLEX_FULL;
11903          sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11904  
11905          if (IS_PF(sc)) {
11906              REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11907              bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11908              bxe_link_report(sc);
11909          }
11910      }
11911  
11912      if (IS_PF(sc)) {
11913          if (sc->link_vars.link_up) {
11914              bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11915          } else {
11916              bxe_stats_handle(sc, STATS_EVENT_STOP);
11917          }
11918          bxe_link_report(sc);
11919      } else {
11920          bxe_link_report(sc);
11921          bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11922      }
11923  }
11924  
11925  static int
bxe_initial_phy_init(struct bxe_softc * sc,int load_mode)11926  bxe_initial_phy_init(struct bxe_softc *sc,
11927                       int              load_mode)
11928  {
11929      int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11930      uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11931      struct elink_params *lp = &sc->link_params;
11932  
11933      bxe_set_requested_fc(sc);
11934  
11935      if (CHIP_REV_IS_SLOW(sc)) {
11936          uint32_t bond = CHIP_BOND_ID(sc);
11937          uint32_t feat = 0;
11938  
11939          if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11940              feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11941          } else if (bond & 0x4) {
11942              if (CHIP_IS_E3(sc)) {
11943                  feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11944              } else {
11945                  feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11946              }
11947          } else if (bond & 0x8) {
11948              if (CHIP_IS_E3(sc)) {
11949                  feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11950              } else {
11951                  feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11952              }
11953          }
11954  
11955          /* disable EMAC for E3 and above */
11956          if (bond & 0x2) {
11957              feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11958          }
11959  
11960          sc->link_params.feature_config_flags |= feat;
11961      }
11962  
11963      bxe_acquire_phy_lock(sc);
11964  
11965      if (load_mode == LOAD_DIAG) {
11966          lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11967          /* Prefer doing PHY loopback at 10G speed, if possible */
11968          if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11969              if (lp->speed_cap_mask[cfg_idx] &
11970                  PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11971                  lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11972              } else {
11973                  lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11974              }
11975          }
11976      }
11977  
11978      if (load_mode == LOAD_LOOPBACK_EXT) {
11979          lp->loopback_mode = ELINK_LOOPBACK_EXT;
11980      }
11981  
11982      rc = elink_phy_init(&sc->link_params, &sc->link_vars);
11983  
11984      bxe_release_phy_lock(sc);
11985  
11986      bxe_calc_fc_adv(sc);
11987  
11988      if (sc->link_vars.link_up) {
11989          bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11990          bxe_link_report(sc);
11991      }
11992  
11993      if (!CHIP_REV_IS_SLOW(sc)) {
11994          bxe_periodic_start(sc);
11995      }
11996  
11997      sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
11998      return (rc);
11999  }
12000  
12001  static u_int
bxe_push_maddr(void * arg,struct sockaddr_dl * sdl,u_int cnt)12002  bxe_push_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12003  {
12004      struct ecore_mcast_list_elem *mc_mac = arg;
12005  
12006      mc_mac += cnt;
12007      mc_mac->mac = (uint8_t *)LLADDR(sdl);
12008  
12009      return (1);
12010  }
12011  
12012  static int
bxe_init_mcast_macs_list(struct bxe_softc * sc,struct ecore_mcast_ramrod_params * p)12013  bxe_init_mcast_macs_list(struct bxe_softc                 *sc,
12014                           struct ecore_mcast_ramrod_params *p)
12015  {
12016      if_t ifp = sc->ifp;
12017      int mc_count;
12018      struct ecore_mcast_list_elem *mc_mac;
12019  
12020      ECORE_LIST_INIT(&p->mcast_list);
12021      p->mcast_list_len = 0;
12022  
12023      /* XXXGL: multicast count may change later */
12024      mc_count = if_llmaddr_count(ifp);
12025  
12026      if (!mc_count) {
12027          return (0);
12028      }
12029  
12030      mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12031                      (M_NOWAIT | M_ZERO));
12032      if (!mc_mac) {
12033          BLOGE(sc, "Failed to allocate temp mcast list\n");
12034          return (-1);
12035      }
12036      bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12037      if_foreach_llmaddr(ifp, bxe_push_maddr, mc_mac);
12038  
12039      for (int i = 0; i < mc_count; i ++) {
12040          ECORE_LIST_PUSH_TAIL(&mc_mac[i].link, &p->mcast_list);
12041          BLOGD(sc, DBG_LOAD,
12042                "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n",
12043                mc_mac[i].mac[0], mc_mac[i].mac[1], mc_mac[i].mac[2],
12044                mc_mac[i].mac[3], mc_mac[i].mac[4], mc_mac[i].mac[5],
12045                mc_count);
12046      }
12047  
12048      p->mcast_list_len = mc_count;
12049  
12050      return (0);
12051  }
12052  
12053  static void
bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params * p)12054  bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12055  {
12056      struct ecore_mcast_list_elem *mc_mac =
12057          ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12058                                 struct ecore_mcast_list_elem,
12059                                 link);
12060  
12061      if (mc_mac) {
12062          /* only a single free as all mc_macs are in the same heap array */
12063          free(mc_mac, M_DEVBUF);
12064      }
12065  }
12066  static int
bxe_set_mc_list(struct bxe_softc * sc)12067  bxe_set_mc_list(struct bxe_softc *sc)
12068  {
12069      struct ecore_mcast_ramrod_params rparam = { NULL };
12070      int rc = 0;
12071  
12072      rparam.mcast_obj = &sc->mcast_obj;
12073  
12074      BXE_MCAST_LOCK(sc);
12075  
12076      /* first, clear all configured multicast MACs */
12077      rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12078      if (rc < 0) {
12079          BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12080          /* Manual backport parts of FreeBSD upstream r284470. */
12081          BXE_MCAST_UNLOCK(sc);
12082          return (rc);
12083      }
12084  
12085      /* configure a new MACs list */
12086      rc = bxe_init_mcast_macs_list(sc, &rparam);
12087      if (rc) {
12088          BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12089          BXE_MCAST_UNLOCK(sc);
12090          return (rc);
12091      }
12092  
12093      /* Now add the new MACs */
12094      rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12095      if (rc < 0) {
12096          BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12097      }
12098  
12099      bxe_free_mcast_macs_list(&rparam);
12100  
12101      BXE_MCAST_UNLOCK(sc);
12102  
12103      return (rc);
12104  }
12105  
12106  struct bxe_set_addr_ctx {
12107     struct bxe_softc *sc;
12108     unsigned long ramrod_flags;
12109     int rc;
12110  };
12111  
12112  static u_int
bxe_set_addr(void * arg,struct sockaddr_dl * sdl,u_int cnt)12113  bxe_set_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12114  {
12115      struct bxe_set_addr_ctx *ctx = arg;
12116      struct ecore_vlan_mac_obj *mac_obj = &ctx->sc->sp_objs->mac_obj;
12117      int rc;
12118  
12119      if (ctx->rc < 0)
12120  	return (0);
12121  
12122      rc = bxe_set_mac_one(ctx->sc, (uint8_t *)LLADDR(sdl), mac_obj, TRUE,
12123                           ECORE_UC_LIST_MAC, &ctx->ramrod_flags);
12124  
12125      /* do not treat adding same MAC as an error */
12126      if (rc == -EEXIST)
12127  	BLOGD(ctx->sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12128      else if (rc < 0) {
12129              BLOGE(ctx->sc, "Failed to schedule ADD operations (%d)\n", rc);
12130              ctx->rc = rc;
12131      }
12132  
12133      return (1);
12134  }
12135  
12136  static int
bxe_set_uc_list(struct bxe_softc * sc)12137  bxe_set_uc_list(struct bxe_softc *sc)
12138  {
12139      if_t ifp = sc->ifp;
12140      struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12141      struct bxe_set_addr_ctx ctx = { sc, 0, 0 };
12142      int rc;
12143  
12144      /* first schedule a cleanup up of old configuration */
12145      rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12146      if (rc < 0) {
12147          BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12148          return (rc);
12149      }
12150  
12151      if_foreach_lladdr(ifp, bxe_set_addr, &ctx);
12152      if (ctx.rc < 0)
12153  	return (ctx.rc);
12154  
12155      /* Execute the pending commands */
12156      bit_set(&ctx.ramrod_flags, RAMROD_CONT);
12157      return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12158                              ECORE_UC_LIST_MAC, &ctx.ramrod_flags));
12159  }
12160  
12161  static void
bxe_set_rx_mode(struct bxe_softc * sc)12162  bxe_set_rx_mode(struct bxe_softc *sc)
12163  {
12164      if_t ifp = sc->ifp;
12165      uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12166  
12167      if (sc->state != BXE_STATE_OPEN) {
12168          BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12169          return;
12170      }
12171  
12172      BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12173  
12174      if (if_getflags(ifp) & IFF_PROMISC) {
12175          rx_mode = BXE_RX_MODE_PROMISC;
12176      } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12177                 (if_llmaddr_count(ifp) > BXE_MAX_MULTICAST &&
12178                  CHIP_IS_E1(sc))) {
12179          rx_mode = BXE_RX_MODE_ALLMULTI;
12180      } else {
12181          if (IS_PF(sc)) {
12182              /* some multicasts */
12183              if (bxe_set_mc_list(sc) < 0) {
12184                  rx_mode = BXE_RX_MODE_ALLMULTI;
12185              }
12186              if (bxe_set_uc_list(sc) < 0) {
12187                  rx_mode = BXE_RX_MODE_PROMISC;
12188              }
12189          }
12190      }
12191  
12192      sc->rx_mode = rx_mode;
12193  
12194      /* schedule the rx_mode command */
12195      if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12196          BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12197          bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12198          return;
12199      }
12200  
12201      if (IS_PF(sc)) {
12202          bxe_set_storm_rx_mode(sc);
12203      }
12204  }
12205  
12206  
12207  /* update flags in shmem */
12208  static void
bxe_update_drv_flags(struct bxe_softc * sc,uint32_t flags,uint32_t set)12209  bxe_update_drv_flags(struct bxe_softc *sc,
12210                       uint32_t         flags,
12211                       uint32_t         set)
12212  {
12213      uint32_t drv_flags;
12214  
12215      if (SHMEM2_HAS(sc, drv_flags)) {
12216          bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12217          drv_flags = SHMEM2_RD(sc, drv_flags);
12218  
12219          if (set) {
12220              SET_FLAGS(drv_flags, flags);
12221          } else {
12222              RESET_FLAGS(drv_flags, flags);
12223          }
12224  
12225          SHMEM2_WR(sc, drv_flags, drv_flags);
12226          BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12227  
12228          bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12229      }
12230  }
12231  
12232  /* periodic timer callout routine, only runs when the interface is up */
12233  
12234  static void
bxe_periodic_callout_func(void * xsc)12235  bxe_periodic_callout_func(void *xsc)
12236  {
12237      struct bxe_softc *sc = (struct bxe_softc *)xsc;
12238      int i;
12239  
12240      if (!BXE_CORE_TRYLOCK(sc)) {
12241          /* just bail and try again next time */
12242  
12243          if ((sc->state == BXE_STATE_OPEN) &&
12244              (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12245              /* schedule the next periodic callout */
12246              callout_reset(&sc->periodic_callout, hz,
12247                            bxe_periodic_callout_func, sc);
12248          }
12249  
12250          return;
12251      }
12252  
12253      if ((sc->state != BXE_STATE_OPEN) ||
12254          (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12255          BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12256          BXE_CORE_UNLOCK(sc);
12257          return;
12258          }
12259  
12260  
12261      /* Check for TX timeouts on any fastpath. */
12262      FOR_EACH_QUEUE(sc, i) {
12263          if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12264              /* Ruh-Roh, chip was reset! */
12265              break;
12266          }
12267      }
12268  
12269      if (!CHIP_REV_IS_SLOW(sc)) {
12270          /*
12271           * This barrier is needed to ensure the ordering between the writing
12272           * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12273           * the reading here.
12274           */
12275          mb();
12276          if (sc->port.pmf) {
12277  	    bxe_acquire_phy_lock(sc);
12278              elink_period_func(&sc->link_params, &sc->link_vars);
12279  	    bxe_release_phy_lock(sc);
12280          }
12281      }
12282  
12283      if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12284          int mb_idx = SC_FW_MB_IDX(sc);
12285          uint32_t drv_pulse;
12286          uint32_t mcp_pulse;
12287  
12288          ++sc->fw_drv_pulse_wr_seq;
12289          sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12290  
12291          drv_pulse = sc->fw_drv_pulse_wr_seq;
12292          bxe_drv_pulse(sc);
12293  
12294          mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12295                       MCP_PULSE_SEQ_MASK);
12296  
12297          /*
12298           * The delta between driver pulse and mcp response should
12299           * be 1 (before mcp response) or 0 (after mcp response).
12300           */
12301          if ((drv_pulse != mcp_pulse) &&
12302              (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12303              /* someone lost a heartbeat... */
12304              BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12305                    drv_pulse, mcp_pulse);
12306          }
12307      }
12308  
12309      /* state is BXE_STATE_OPEN */
12310      bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12311  
12312      BXE_CORE_UNLOCK(sc);
12313  
12314      if ((sc->state == BXE_STATE_OPEN) &&
12315          (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12316          /* schedule the next periodic callout */
12317          callout_reset(&sc->periodic_callout, hz,
12318                        bxe_periodic_callout_func, sc);
12319      }
12320  }
12321  
12322  static void
bxe_periodic_start(struct bxe_softc * sc)12323  bxe_periodic_start(struct bxe_softc *sc)
12324  {
12325      atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12326      callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12327  }
12328  
12329  static void
bxe_periodic_stop(struct bxe_softc * sc)12330  bxe_periodic_stop(struct bxe_softc *sc)
12331  {
12332      atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12333      callout_drain(&sc->periodic_callout);
12334  }
12335  
12336  void
bxe_parity_recover(struct bxe_softc * sc)12337  bxe_parity_recover(struct bxe_softc *sc)
12338  {
12339      uint8_t global = FALSE;
12340      uint32_t error_recovered, error_unrecovered;
12341  
12342  
12343      if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12344          (sc->state == BXE_STATE_ERROR)) {
12345          BLOGE(sc, "RECOVERY failed, "
12346              "stack notified driver is NOT running! "
12347              "Please reboot/power cycle the system.\n");
12348          return;
12349      }
12350  
12351      while (1) {
12352          BLOGD(sc, DBG_SP,
12353             "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n",
12354              __func__, sc, sc->state, sc->recovery_state, sc->error_status);
12355  
12356          switch(sc->recovery_state) {
12357  
12358          case BXE_RECOVERY_INIT:
12359              bxe_chk_parity_attn(sc, &global, FALSE);
12360  
12361              if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ||
12362                  (sc->error_status & BXE_ERR_MCP_ASSERT) ||
12363                  (sc->error_status & BXE_ERR_GLOBAL)) {
12364  
12365                  BXE_CORE_LOCK(sc);
12366                  if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12367                      bxe_periodic_stop(sc);
12368                  }
12369                  bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
12370                  sc->state = BXE_STATE_ERROR;
12371                  sc->recovery_state = BXE_RECOVERY_FAILED;
12372                  BLOGE(sc, " No Recovery tried for error 0x%x"
12373                      " stack notified driver is NOT running!"
12374                      " Please reboot/power cycle the system.\n",
12375                      sc->error_status);
12376                  BXE_CORE_UNLOCK(sc);
12377                  return;
12378              }
12379  
12380  
12381             /* Try to get a LEADER_LOCK HW lock */
12382              if (bxe_trylock_leader_lock(sc)) {
12383  
12384                  bxe_set_reset_in_progress(sc);
12385                  /*
12386                   * Check if there is a global attention and if
12387                   * there was a global attention, set the global
12388                   * reset bit.
12389                   */
12390                  if (global) {
12391                      bxe_set_reset_global(sc);
12392                  }
12393                  sc->is_leader = 1;
12394              }
12395  
12396              /* If interface has been removed - break */
12397  
12398              if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12399                  bxe_periodic_stop(sc);
12400              }
12401  
12402              BXE_CORE_LOCK(sc);
12403              bxe_nic_unload(sc,UNLOAD_RECOVERY, false);
12404              sc->recovery_state = BXE_RECOVERY_WAIT;
12405              BXE_CORE_UNLOCK(sc);
12406  
12407              /*
12408               * Ensure "is_leader", MCP command sequence and
12409               * "recovery_state" update values are seen on other
12410               * CPUs.
12411               */
12412              mb();
12413              break;
12414          case BXE_RECOVERY_WAIT:
12415  
12416              if (sc->is_leader) {
12417                  int other_engine = SC_PATH(sc) ? 0 : 1;
12418                  bool other_load_status =
12419                      bxe_get_load_status(sc, other_engine);
12420                  bool load_status =
12421                      bxe_get_load_status(sc, SC_PATH(sc));
12422                  global = bxe_reset_is_global(sc);
12423  
12424                  /*
12425                   * In case of a parity in a global block, let
12426                   * the first leader that performs a
12427                   * leader_reset() reset the global blocks in
12428                   * order to clear global attentions. Otherwise
12429                   * the gates will remain closed for that
12430                   * engine.
12431                   */
12432                  if (load_status ||
12433                      (global && other_load_status)) {
12434                      /*
12435                       * Wait until all other functions get
12436                       * down.
12437                       */
12438                      taskqueue_enqueue_timeout(taskqueue_thread,
12439                          &sc->sp_err_timeout_task, hz/10);
12440                      return;
12441                  } else {
12442                      /*
12443                       * If all other functions got down
12444                       * try to bring the chip back to
12445                       * normal. In any case it's an exit
12446                       * point for a leader.
12447                       */
12448                      if (bxe_leader_reset(sc)) {
12449                          BLOGE(sc, "RECOVERY failed, "
12450                              "stack notified driver is NOT running!\n");
12451                          sc->recovery_state = BXE_RECOVERY_FAILED;
12452                          sc->state = BXE_STATE_ERROR;
12453                          mb();
12454                          return;
12455                      }
12456  
12457                      /*
12458                       * If we are here, means that the
12459                       * leader has succeeded and doesn't
12460                       * want to be a leader any more. Try
12461                       * to continue as a none-leader.
12462                       */
12463                  break;
12464                  }
12465  
12466              } else { /* non-leader */
12467                  if (!bxe_reset_is_done(sc, SC_PATH(sc))) {
12468                      /*
12469                       * Try to get a LEADER_LOCK HW lock as
12470                       * long as a former leader may have
12471                       * been unloaded by the user or
12472                       * released a leadership by another
12473                       * reason.
12474                       */
12475                      if (bxe_trylock_leader_lock(sc)) {
12476                          /*
12477                           * I'm a leader now! Restart a
12478                           * switch case.
12479                           */
12480                          sc->is_leader = 1;
12481                          break;
12482                      }
12483  
12484                      taskqueue_enqueue_timeout(taskqueue_thread,
12485                          &sc->sp_err_timeout_task, hz/10);
12486                      return;
12487  
12488                  } else {
12489                      /*
12490                       * If there was a global attention, wait
12491                       * for it to be cleared.
12492                       */
12493                      if (bxe_reset_is_global(sc)) {
12494                          taskqueue_enqueue_timeout(taskqueue_thread,
12495                              &sc->sp_err_timeout_task, hz/10);
12496                          return;
12497                       }
12498  
12499                       error_recovered =
12500                           sc->eth_stats.recoverable_error;
12501                       error_unrecovered =
12502                           sc->eth_stats.unrecoverable_error;
12503                       BXE_CORE_LOCK(sc);
12504                       sc->recovery_state =
12505                           BXE_RECOVERY_NIC_LOADING;
12506                       if (bxe_nic_load(sc, LOAD_NORMAL)) {
12507                           error_unrecovered++;
12508                           sc->recovery_state = BXE_RECOVERY_FAILED;
12509                           sc->state = BXE_STATE_ERROR;
12510                           BLOGE(sc, "Recovery is NOT successful, "
12511                              " state=0x%x recovery_state=0x%x error=%x\n",
12512                              sc->state, sc->recovery_state, sc->error_status);
12513                           sc->error_status = 0;
12514                       } else {
12515                           sc->recovery_state =
12516                               BXE_RECOVERY_DONE;
12517                           error_recovered++;
12518                           BLOGI(sc, "Recovery is successful from errors %x,"
12519                              " state=0x%x"
12520                              " recovery_state=0x%x \n", sc->error_status,
12521                              sc->state, sc->recovery_state);
12522                           mb();
12523                       }
12524                       sc->error_status = 0;
12525                       BXE_CORE_UNLOCK(sc);
12526                       sc->eth_stats.recoverable_error =
12527                           error_recovered;
12528                       sc->eth_stats.unrecoverable_error =
12529                           error_unrecovered;
12530  
12531                       return;
12532                   }
12533               }
12534           default:
12535               return;
12536           }
12537      }
12538  }
12539  void
bxe_handle_error(struct bxe_softc * sc)12540  bxe_handle_error(struct bxe_softc * sc)
12541  {
12542  
12543      if(sc->recovery_state == BXE_RECOVERY_WAIT) {
12544          return;
12545      }
12546      if(sc->error_status) {
12547          if (sc->state == BXE_STATE_OPEN)  {
12548              bxe_int_disable(sc);
12549          }
12550          if (sc->link_vars.link_up) {
12551              if_link_state_change(sc->ifp, LINK_STATE_DOWN);
12552          }
12553          sc->recovery_state = BXE_RECOVERY_INIT;
12554          BLOGI(sc, "bxe%d: Recovery started errors 0x%x recovery state 0x%x\n",
12555              sc->unit, sc->error_status, sc->recovery_state);
12556          bxe_parity_recover(sc);
12557     }
12558  }
12559  
12560  static void
bxe_sp_err_timeout_task(void * arg,int pending)12561  bxe_sp_err_timeout_task(void *arg, int pending)
12562  {
12563  
12564      struct bxe_softc *sc = (struct bxe_softc *)arg;
12565  
12566      BLOGD(sc, DBG_SP,
12567          "%s state = 0x%x rec state=0x%x error_status=%x\n",
12568          __func__, sc->state, sc->recovery_state, sc->error_status);
12569  
12570      if((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12571         (sc->state == BXE_STATE_ERROR)) {
12572          return;
12573      }
12574      /* if can be taken */
12575      if ((sc->error_status) && (sc->trigger_grcdump)) {
12576          bxe_grc_dump(sc);
12577      }
12578      if (sc->recovery_state != BXE_RECOVERY_DONE) {
12579          bxe_handle_error(sc);
12580          bxe_parity_recover(sc);
12581      } else if (sc->error_status) {
12582          bxe_handle_error(sc);
12583      }
12584  
12585      return;
12586  }
12587  
12588  /* start the controller */
12589  static __noinline int
bxe_nic_load(struct bxe_softc * sc,int load_mode)12590  bxe_nic_load(struct bxe_softc *sc,
12591               int              load_mode)
12592  {
12593      uint32_t val;
12594      int load_code = 0;
12595      int i, rc = 0;
12596  
12597      BXE_CORE_LOCK_ASSERT(sc);
12598  
12599      BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12600  
12601      sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12602  
12603      if (IS_PF(sc)) {
12604          /* must be called before memory allocation and HW init */
12605          bxe_ilt_set_info(sc);
12606      }
12607  
12608      sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12609  
12610      bxe_set_fp_rx_buf_size(sc);
12611  
12612      if (bxe_alloc_fp_buffers(sc) != 0) {
12613          BLOGE(sc, "Failed to allocate fastpath memory\n");
12614          sc->state = BXE_STATE_CLOSED;
12615          rc = ENOMEM;
12616          goto bxe_nic_load_error0;
12617      }
12618  
12619      if (bxe_alloc_mem(sc) != 0) {
12620          sc->state = BXE_STATE_CLOSED;
12621          rc = ENOMEM;
12622          goto bxe_nic_load_error0;
12623      }
12624  
12625      if (bxe_alloc_fw_stats_mem(sc) != 0) {
12626          sc->state = BXE_STATE_CLOSED;
12627          rc = ENOMEM;
12628          goto bxe_nic_load_error0;
12629      }
12630  
12631      if (IS_PF(sc)) {
12632          /* set pf load just before approaching the MCP */
12633          bxe_set_pf_load(sc);
12634  
12635          /* if MCP exists send load request and analyze response */
12636          if (!BXE_NOMCP(sc)) {
12637              /* attempt to load pf */
12638              if (bxe_nic_load_request(sc, &load_code) != 0) {
12639                  sc->state = BXE_STATE_CLOSED;
12640                  rc = ENXIO;
12641                  goto bxe_nic_load_error1;
12642              }
12643  
12644              /* what did the MCP say? */
12645              if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12646                  bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12647                  sc->state = BXE_STATE_CLOSED;
12648                  rc = ENXIO;
12649                  goto bxe_nic_load_error2;
12650              }
12651          } else {
12652              BLOGI(sc, "Device has no MCP!\n");
12653              load_code = bxe_nic_load_no_mcp(sc);
12654          }
12655  
12656          /* mark PMF if applicable */
12657          bxe_nic_load_pmf(sc, load_code);
12658  
12659          /* Init Function state controlling object */
12660          bxe_init_func_obj(sc);
12661  
12662          /* Initialize HW */
12663          if (bxe_init_hw(sc, load_code) != 0) {
12664              BLOGE(sc, "HW init failed\n");
12665              bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12666              sc->state = BXE_STATE_CLOSED;
12667              rc = ENXIO;
12668              goto bxe_nic_load_error2;
12669          }
12670      }
12671  
12672      /* set ALWAYS_ALIVE bit in shmem */
12673      sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12674      bxe_drv_pulse(sc);
12675      sc->flags |= BXE_NO_PULSE;
12676  
12677      /* attach interrupts */
12678      if (bxe_interrupt_attach(sc) != 0) {
12679          sc->state = BXE_STATE_CLOSED;
12680          rc = ENXIO;
12681          goto bxe_nic_load_error2;
12682      }
12683  
12684      bxe_nic_init(sc, load_code);
12685  
12686      /* Init per-function objects */
12687      if (IS_PF(sc)) {
12688          bxe_init_objs(sc);
12689          // XXX bxe_iov_nic_init(sc);
12690  
12691          /* set AFEX default VLAN tag to an invalid value */
12692          sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12693          // XXX bxe_nic_load_afex_dcc(sc, load_code);
12694  
12695          sc->state = BXE_STATE_OPENING_WAITING_PORT;
12696          rc = bxe_func_start(sc);
12697          if (rc) {
12698              BLOGE(sc, "Function start failed! rc = %d\n", rc);
12699              bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12700              sc->state = BXE_STATE_ERROR;
12701              goto bxe_nic_load_error3;
12702          }
12703  
12704          /* send LOAD_DONE command to MCP */
12705          if (!BXE_NOMCP(sc)) {
12706              load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12707              if (!load_code) {
12708                  BLOGE(sc, "MCP response failure, aborting\n");
12709                  sc->state = BXE_STATE_ERROR;
12710                  rc = ENXIO;
12711                  goto bxe_nic_load_error3;
12712              }
12713          }
12714  
12715          rc = bxe_setup_leading(sc);
12716          if (rc) {
12717              BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12718              sc->state = BXE_STATE_ERROR;
12719              goto bxe_nic_load_error3;
12720          }
12721  
12722          FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12723              rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12724              if (rc) {
12725                  BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12726                  sc->state = BXE_STATE_ERROR;
12727                  goto bxe_nic_load_error3;
12728              }
12729          }
12730  
12731          rc = bxe_init_rss_pf(sc);
12732          if (rc) {
12733              BLOGE(sc, "PF RSS init failed\n");
12734              sc->state = BXE_STATE_ERROR;
12735              goto bxe_nic_load_error3;
12736          }
12737      }
12738      /* XXX VF */
12739  
12740      /* now when Clients are configured we are ready to work */
12741      sc->state = BXE_STATE_OPEN;
12742  
12743      /* Configure a ucast MAC */
12744      if (IS_PF(sc)) {
12745          rc = bxe_set_eth_mac(sc, TRUE);
12746      }
12747      if (rc) {
12748          BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12749          sc->state = BXE_STATE_ERROR;
12750          goto bxe_nic_load_error3;
12751      }
12752  
12753      if (sc->port.pmf) {
12754          rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12755          if (rc) {
12756              sc->state = BXE_STATE_ERROR;
12757              goto bxe_nic_load_error3;
12758          }
12759      }
12760  
12761      sc->link_params.feature_config_flags &=
12762          ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12763  
12764      /* start fast path */
12765  
12766      /* Initialize Rx filter */
12767      bxe_set_rx_mode(sc);
12768  
12769      /* start the Tx */
12770      switch (/* XXX load_mode */LOAD_OPEN) {
12771      case LOAD_NORMAL:
12772      case LOAD_OPEN:
12773          break;
12774  
12775      case LOAD_DIAG:
12776      case LOAD_LOOPBACK_EXT:
12777          sc->state = BXE_STATE_DIAG;
12778          break;
12779  
12780      default:
12781          break;
12782      }
12783  
12784      if (sc->port.pmf) {
12785          bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12786      } else {
12787          bxe_link_status_update(sc);
12788      }
12789  
12790      /* start the periodic timer callout */
12791      bxe_periodic_start(sc);
12792  
12793      if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12794          /* mark driver is loaded in shmem2 */
12795          val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12796          SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12797                    (val |
12798                     DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12799                     DRV_FLAGS_CAPABILITIES_LOADED_L2));
12800      }
12801  
12802      /* wait for all pending SP commands to complete */
12803      if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12804          BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12805          bxe_periodic_stop(sc);
12806          bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12807          return (ENXIO);
12808      }
12809  
12810      /* Tell the stack the driver is running! */
12811      if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12812  
12813      BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12814  
12815      return (0);
12816  
12817  bxe_nic_load_error3:
12818  
12819      if (IS_PF(sc)) {
12820          bxe_int_disable_sync(sc, 1);
12821  
12822          /* clean out queued objects */
12823          bxe_squeeze_objects(sc);
12824      }
12825  
12826      bxe_interrupt_detach(sc);
12827  
12828  bxe_nic_load_error2:
12829  
12830      if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12831          bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12832          bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12833      }
12834  
12835      sc->port.pmf = 0;
12836  
12837  bxe_nic_load_error1:
12838  
12839      /* clear pf_load status, as it was already set */
12840      if (IS_PF(sc)) {
12841          bxe_clear_pf_load(sc);
12842      }
12843  
12844  bxe_nic_load_error0:
12845  
12846      bxe_free_fw_stats_mem(sc);
12847      bxe_free_fp_buffers(sc);
12848      bxe_free_mem(sc);
12849  
12850      return (rc);
12851  }
12852  
12853  static int
bxe_init_locked(struct bxe_softc * sc)12854  bxe_init_locked(struct bxe_softc *sc)
12855  {
12856      int other_engine = SC_PATH(sc) ? 0 : 1;
12857      uint8_t other_load_status, load_status;
12858      uint8_t global = FALSE;
12859      int rc;
12860  
12861      BXE_CORE_LOCK_ASSERT(sc);
12862  
12863      /* check if the driver is already running */
12864      if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12865          BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12866          return (0);
12867      }
12868  
12869      if((sc->state == BXE_STATE_ERROR) &&
12870          (sc->recovery_state == BXE_RECOVERY_FAILED)) {
12871          BLOGE(sc, "Initialization not done, "
12872                    "as previous recovery failed."
12873                    "Reboot/Power-cycle the system\n" );
12874          return (ENXIO);
12875      }
12876  
12877  
12878      bxe_set_power_state(sc, PCI_PM_D0);
12879  
12880      /*
12881       * If parity occurred during the unload, then attentions and/or
12882       * RECOVERY_IN_PROGRES may still be set. If so we want the first function
12883       * loaded on the current engine to complete the recovery. Parity recovery
12884       * is only relevant for PF driver.
12885       */
12886      if (IS_PF(sc)) {
12887          other_load_status = bxe_get_load_status(sc, other_engine);
12888          load_status = bxe_get_load_status(sc, SC_PATH(sc));
12889  
12890          if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12891              bxe_chk_parity_attn(sc, &global, TRUE)) {
12892              do {
12893                  /*
12894                   * If there are attentions and they are in global blocks, set
12895                   * the GLOBAL_RESET bit regardless whether it will be this
12896                   * function that will complete the recovery or not.
12897                   */
12898                  if (global) {
12899                      bxe_set_reset_global(sc);
12900                  }
12901  
12902                  /*
12903                   * Only the first function on the current engine should try
12904                   * to recover in open. In case of attentions in global blocks
12905                   * only the first in the chip should try to recover.
12906                   */
12907                  if ((!load_status && (!global || !other_load_status)) &&
12908                      bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12909                      BLOGI(sc, "Recovered during init\n");
12910                      break;
12911                  }
12912  
12913                  /* recovery has failed... */
12914                  bxe_set_power_state(sc, PCI_PM_D3hot);
12915                  sc->recovery_state = BXE_RECOVERY_FAILED;
12916  
12917                  BLOGE(sc, "Recovery flow hasn't properly "
12918                            "completed yet, try again later. "
12919                            "If you still see this message after a "
12920                            "few retries then power cycle is required.\n");
12921  
12922                  rc = ENXIO;
12923                  goto bxe_init_locked_done;
12924              } while (0);
12925          }
12926      }
12927  
12928      sc->recovery_state = BXE_RECOVERY_DONE;
12929  
12930      rc = bxe_nic_load(sc, LOAD_OPEN);
12931  
12932  bxe_init_locked_done:
12933  
12934      if (rc) {
12935          /* Tell the stack the driver is NOT running! */
12936          BLOGE(sc, "Initialization failed, "
12937                    "stack notified driver is NOT running!\n");
12938  	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12939      }
12940  
12941      return (rc);
12942  }
12943  
12944  static int
bxe_stop_locked(struct bxe_softc * sc)12945  bxe_stop_locked(struct bxe_softc *sc)
12946  {
12947      BXE_CORE_LOCK_ASSERT(sc);
12948      return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12949  }
12950  
12951  /*
12952   * Handles controller initialization when called from an unlocked routine.
12953   * ifconfig calls this function.
12954   *
12955   * Returns:
12956   *   void
12957   */
12958  static void
bxe_init(void * xsc)12959  bxe_init(void *xsc)
12960  {
12961      struct bxe_softc *sc = (struct bxe_softc *)xsc;
12962  
12963      BXE_CORE_LOCK(sc);
12964      bxe_init_locked(sc);
12965      BXE_CORE_UNLOCK(sc);
12966  }
12967  
12968  static void
bxe_init_ifnet(struct bxe_softc * sc)12969  bxe_init_ifnet(struct bxe_softc *sc)
12970  {
12971      if_t ifp;
12972      int capabilities;
12973  
12974      /* ifconfig entrypoint for media type/status reporting */
12975      ifmedia_init(&sc->ifmedia, IFM_IMASK,
12976                   bxe_ifmedia_update,
12977                   bxe_ifmedia_status);
12978  
12979      /* set the default interface values */
12980      ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12981      ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12982      ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12983  
12984      sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12985  	BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
12986  
12987      /* allocate the ifnet structure */
12988      ifp = if_gethandle(IFT_ETHER);
12989  
12990      if_setsoftc(ifp, sc);
12991      if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12992      if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
12993      if_setioctlfn(ifp, bxe_ioctl);
12994      if_setstartfn(ifp, bxe_tx_start);
12995      if_setgetcounterfn(ifp, bxe_get_counter);
12996      if_settransmitfn(ifp, bxe_tx_mq_start);
12997      if_setqflushfn(ifp, bxe_mq_flush);
12998      if_setinitfn(ifp, bxe_init);
12999      if_setmtu(ifp, sc->mtu);
13000      if_sethwassist(ifp, (CSUM_IP      |
13001                          CSUM_TCP      |
13002                          CSUM_UDP      |
13003                          CSUM_TSO      |
13004                          CSUM_TCP_IPV6 |
13005                          CSUM_UDP_IPV6));
13006  
13007      capabilities =
13008          (IFCAP_VLAN_MTU       |
13009           IFCAP_VLAN_HWTAGGING |
13010           IFCAP_VLAN_HWTSO     |
13011           IFCAP_VLAN_HWFILTER  |
13012           IFCAP_VLAN_HWCSUM    |
13013           IFCAP_HWCSUM         |
13014           IFCAP_JUMBO_MTU      |
13015           IFCAP_LRO            |
13016           IFCAP_TSO4           |
13017           IFCAP_TSO6           |
13018           IFCAP_WOL_MAGIC);
13019      if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
13020      if_setcapenable(ifp, if_getcapabilities(ifp));
13021      if_setbaudrate(ifp, IF_Gbps(10));
13022  /* XXX */
13023      if_setsendqlen(ifp, sc->tx_ring_size);
13024      if_setsendqready(ifp);
13025  /* XXX */
13026  
13027      sc->ifp = ifp;
13028  
13029      /* attach to the Ethernet interface list */
13030      ether_ifattach(ifp, sc->link_params.mac_addr);
13031  
13032      /* Attach driver debugnet methods. */
13033      DEBUGNET_SET(ifp, bxe);
13034  }
13035  
13036  static void
bxe_deallocate_bars(struct bxe_softc * sc)13037  bxe_deallocate_bars(struct bxe_softc *sc)
13038  {
13039      int i;
13040  
13041      for (i = 0; i < MAX_BARS; i++) {
13042          if (sc->bar[i].resource != NULL) {
13043              bus_release_resource(sc->dev,
13044                                   SYS_RES_MEMORY,
13045                                   sc->bar[i].rid,
13046                                   sc->bar[i].resource);
13047              BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
13048                    i, PCIR_BAR(i));
13049          }
13050      }
13051  }
13052  
13053  static int
bxe_allocate_bars(struct bxe_softc * sc)13054  bxe_allocate_bars(struct bxe_softc *sc)
13055  {
13056      u_int flags;
13057      int i;
13058  
13059      memset(sc->bar, 0, sizeof(sc->bar));
13060  
13061      for (i = 0; i < MAX_BARS; i++) {
13062  
13063          /* memory resources reside at BARs 0, 2, 4 */
13064          /* Run `pciconf -lb` to see mappings */
13065          if ((i != 0) && (i != 2) && (i != 4)) {
13066              continue;
13067          }
13068  
13069          sc->bar[i].rid = PCIR_BAR(i);
13070  
13071          flags = RF_ACTIVE;
13072          if (i == 0) {
13073              flags |= RF_SHAREABLE;
13074          }
13075  
13076          if ((sc->bar[i].resource =
13077               bus_alloc_resource_any(sc->dev,
13078                                      SYS_RES_MEMORY,
13079                                      &sc->bar[i].rid,
13080                                      flags)) == NULL) {
13081              return (0);
13082          }
13083  
13084          sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
13085          sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
13086          sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
13087  
13088          BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
13089                i, PCIR_BAR(i),
13090                rman_get_start(sc->bar[i].resource),
13091                rman_get_end(sc->bar[i].resource),
13092                rman_get_size(sc->bar[i].resource),
13093                (uintmax_t)sc->bar[i].kva);
13094      }
13095  
13096      return (0);
13097  }
13098  
13099  static void
bxe_get_function_num(struct bxe_softc * sc)13100  bxe_get_function_num(struct bxe_softc *sc)
13101  {
13102      uint32_t val = 0;
13103  
13104      /*
13105       * Read the ME register to get the function number. The ME register
13106       * holds the relative-function number and absolute-function number. The
13107       * absolute-function number appears only in E2 and above. Before that
13108       * these bits always contained zero, therefore we cannot blindly use them.
13109       */
13110  
13111      val = REG_RD(sc, BAR_ME_REGISTER);
13112  
13113      sc->pfunc_rel =
13114          (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
13115      sc->path_id =
13116          (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
13117  
13118      if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13119          sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
13120      } else {
13121          sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
13122      }
13123  
13124      BLOGD(sc, DBG_LOAD,
13125            "Relative function %d, Absolute function %d, Path %d\n",
13126            sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
13127  }
13128  
13129  static uint32_t
bxe_get_shmem_mf_cfg_base(struct bxe_softc * sc)13130  bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
13131  {
13132      uint32_t shmem2_size;
13133      uint32_t offset;
13134      uint32_t mf_cfg_offset_value;
13135  
13136      /* Non 57712 */
13137      offset = (SHMEM_RD(sc, func_mb) +
13138                (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
13139  
13140      /* 57712 plus */
13141      if (sc->devinfo.shmem2_base != 0) {
13142          shmem2_size = SHMEM2_RD(sc, size);
13143          if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
13144              mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
13145              if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
13146                  offset = mf_cfg_offset_value;
13147              }
13148          }
13149      }
13150  
13151      return (offset);
13152  }
13153  
13154  static uint32_t
bxe_pcie_capability_read(struct bxe_softc * sc,int reg,int width)13155  bxe_pcie_capability_read(struct bxe_softc *sc,
13156                           int    reg,
13157                           int    width)
13158  {
13159      int pcie_reg;
13160  
13161      /* ensure PCIe capability is enabled */
13162      if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
13163          if (pcie_reg != 0) {
13164              BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
13165              return (pci_read_config(sc->dev, (pcie_reg + reg), width));
13166          }
13167      }
13168  
13169      BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
13170  
13171      return (0);
13172  }
13173  
13174  static uint8_t
bxe_is_pcie_pending(struct bxe_softc * sc)13175  bxe_is_pcie_pending(struct bxe_softc *sc)
13176  {
13177      return (bxe_pcie_capability_read(sc, PCIER_DEVICE_STA, 2) &
13178              PCIEM_STA_TRANSACTION_PND);
13179  }
13180  
13181  /*
13182   * Walk the PCI capabiites list for the device to find what features are
13183   * supported. These capabilites may be enabled/disabled by firmware so it's
13184   * best to walk the list rather than make assumptions.
13185   */
13186  static void
bxe_probe_pci_caps(struct bxe_softc * sc)13187  bxe_probe_pci_caps(struct bxe_softc *sc)
13188  {
13189      uint16_t link_status;
13190      int reg;
13191  
13192      /* check if PCI Power Management is enabled */
13193      if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
13194          if (reg != 0) {
13195              BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
13196  
13197              sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
13198              sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
13199          }
13200      }
13201  
13202      link_status = bxe_pcie_capability_read(sc, PCIER_LINK_STA, 2);
13203  
13204      /* handle PCIe 2.0 workarounds for 57710 */
13205      if (CHIP_IS_E1(sc)) {
13206          /* workaround for 57710 errata E4_57710_27462 */
13207          sc->devinfo.pcie_link_speed =
13208              (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
13209  
13210          /* workaround for 57710 errata E4_57710_27488 */
13211          sc->devinfo.pcie_link_width =
13212              ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13213          if (sc->devinfo.pcie_link_speed > 1) {
13214              sc->devinfo.pcie_link_width =
13215                  ((link_status & PCIEM_LINK_STA_WIDTH) >> 4) >> 1;
13216          }
13217      } else {
13218          sc->devinfo.pcie_link_speed =
13219              (link_status & PCIEM_LINK_STA_SPEED);
13220          sc->devinfo.pcie_link_width =
13221              ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13222      }
13223  
13224      BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
13225            sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
13226  
13227      sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13228      sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13229  
13230      /* check if MSI capability is enabled */
13231      if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13232          if (reg != 0) {
13233              BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13234  
13235              sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13236              sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13237          }
13238      }
13239  
13240      /* check if MSI-X capability is enabled */
13241      if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13242          if (reg != 0) {
13243              BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13244  
13245              sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13246              sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13247          }
13248      }
13249  }
13250  
13251  static int
bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc * sc)13252  bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13253  {
13254      struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13255      uint32_t val;
13256  
13257      /* get the outer vlan if we're in switch-dependent mode */
13258  
13259      val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13260      mf_info->ext_id = (uint16_t)val;
13261  
13262      mf_info->multi_vnics_mode = 1;
13263  
13264      if (!VALID_OVLAN(mf_info->ext_id)) {
13265          BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13266          return (1);
13267      }
13268  
13269      /* get the capabilities */
13270      if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13271          FUNC_MF_CFG_PROTOCOL_ISCSI) {
13272          mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13273      } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13274                 FUNC_MF_CFG_PROTOCOL_FCOE) {
13275          mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13276      } else {
13277          mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13278      }
13279  
13280      mf_info->vnics_per_port =
13281          (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13282  
13283      return (0);
13284  }
13285  
13286  static uint32_t
bxe_get_shmem_ext_proto_support_flags(struct bxe_softc * sc)13287  bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13288  {
13289      uint32_t retval = 0;
13290      uint32_t val;
13291  
13292      val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13293  
13294      if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13295          if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13296              retval |= MF_PROTO_SUPPORT_ETHERNET;
13297          }
13298          if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13299              retval |= MF_PROTO_SUPPORT_ISCSI;
13300          }
13301          if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13302              retval |= MF_PROTO_SUPPORT_FCOE;
13303          }
13304      }
13305  
13306      return (retval);
13307  }
13308  
13309  static int
bxe_get_shmem_mf_cfg_info_si(struct bxe_softc * sc)13310  bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13311  {
13312      struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13313      uint32_t val;
13314  
13315      /*
13316       * There is no outer vlan if we're in switch-independent mode.
13317       * If the mac is valid then assume multi-function.
13318       */
13319  
13320      val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13321  
13322      mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13323  
13324      mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13325  
13326      mf_info->vnics_per_port =
13327          (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13328  
13329      return (0);
13330  }
13331  
13332  static int
bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc * sc)13333  bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13334  {
13335      struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13336      uint32_t e1hov_tag;
13337      uint32_t func_config;
13338      uint32_t niv_config;
13339  
13340      mf_info->multi_vnics_mode = 1;
13341  
13342      e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13343      func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13344      niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13345  
13346      mf_info->ext_id =
13347          (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13348                     FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13349  
13350      mf_info->default_vlan =
13351          (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13352                     FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13353  
13354      mf_info->niv_allowed_priorities =
13355          (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13356                    FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13357  
13358      mf_info->niv_default_cos =
13359          (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13360                    FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13361  
13362      mf_info->afex_vlan_mode =
13363          ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13364           FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13365  
13366      mf_info->niv_mba_enabled =
13367          ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13368           FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13369  
13370      mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13371  
13372      mf_info->vnics_per_port =
13373          (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13374  
13375      return (0);
13376  }
13377  
13378  static int
bxe_check_valid_mf_cfg(struct bxe_softc * sc)13379  bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13380  {
13381      struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13382      uint32_t mf_cfg1;
13383      uint32_t mf_cfg2;
13384      uint32_t ovlan1;
13385      uint32_t ovlan2;
13386      uint8_t i, j;
13387  
13388      BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13389            SC_PORT(sc));
13390      BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13391            mf_info->mf_config[SC_VN(sc)]);
13392      BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13393            mf_info->multi_vnics_mode);
13394      BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13395            mf_info->vnics_per_port);
13396      BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13397            mf_info->ext_id);
13398      BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13399            mf_info->min_bw[0], mf_info->min_bw[1],
13400            mf_info->min_bw[2], mf_info->min_bw[3]);
13401      BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13402            mf_info->max_bw[0], mf_info->max_bw[1],
13403            mf_info->max_bw[2], mf_info->max_bw[3]);
13404      BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13405            sc->mac_addr_str);
13406  
13407      /* various MF mode sanity checks... */
13408  
13409      if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13410          BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13411                SC_PORT(sc));
13412          return (1);
13413      }
13414  
13415      if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13416          BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13417                mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13418          return (1);
13419      }
13420  
13421      if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13422          /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13423          if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13424              BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13425                    SC_VN(sc), OVLAN(sc));
13426              return (1);
13427          }
13428  
13429          if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13430              BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13431                    mf_info->multi_vnics_mode, OVLAN(sc));
13432              return (1);
13433          }
13434  
13435          /*
13436           * Verify all functions are either MF or SF mode. If MF, make sure
13437           * sure that all non-hidden functions have a valid ovlan. If SF,
13438           * make sure that all non-hidden functions have an invalid ovlan.
13439           */
13440          FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13441              mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13442              ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13443              if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13444                  (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13445                   ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13446                  BLOGE(sc, "mf_mode=SD function %d MF config "
13447                            "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13448                        i, mf_info->multi_vnics_mode, ovlan1);
13449                  return (1);
13450              }
13451          }
13452  
13453          /* Verify all funcs on the same port each have a different ovlan. */
13454          FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13455              mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13456              ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13457              /* iterate from the next function on the port to the max func */
13458              for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13459                  mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13460                  ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13461                  if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13462                      VALID_OVLAN(ovlan1) &&
13463                      !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13464                      VALID_OVLAN(ovlan2) &&
13465                      (ovlan1 == ovlan2)) {
13466                      BLOGE(sc, "mf_mode=SD functions %d and %d "
13467                                "have the same ovlan (%d)\n",
13468                            i, j, ovlan1);
13469                      return (1);
13470                  }
13471              }
13472          }
13473      } /* MULTI_FUNCTION_SD */
13474  
13475      return (0);
13476  }
13477  
13478  static int
bxe_get_mf_cfg_info(struct bxe_softc * sc)13479  bxe_get_mf_cfg_info(struct bxe_softc *sc)
13480  {
13481      struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13482      uint32_t val, mac_upper;
13483      uint8_t i, vnic;
13484  
13485      /* initialize mf_info defaults */
13486      mf_info->vnics_per_port   = 1;
13487      mf_info->multi_vnics_mode = FALSE;
13488      mf_info->path_has_ovlan   = FALSE;
13489      mf_info->mf_mode          = SINGLE_FUNCTION;
13490  
13491      if (!CHIP_IS_MF_CAP(sc)) {
13492          return (0);
13493      }
13494  
13495      if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13496          BLOGE(sc, "Invalid mf_cfg_base!\n");
13497          return (1);
13498      }
13499  
13500      /* get the MF mode (switch dependent / independent / single-function) */
13501  
13502      val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13503  
13504      switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13505      {
13506      case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13507  
13508          mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13509  
13510          /* check for legal upper mac bytes */
13511          if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13512              mf_info->mf_mode = MULTI_FUNCTION_SI;
13513          } else {
13514              BLOGE(sc, "Invalid config for Switch Independent mode\n");
13515          }
13516  
13517          break;
13518  
13519      case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13520      case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13521  
13522          /* get outer vlan configuration */
13523          val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13524  
13525          if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13526              FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13527              mf_info->mf_mode = MULTI_FUNCTION_SD;
13528          } else {
13529              BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13530          }
13531  
13532          break;
13533  
13534      case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13535  
13536          /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13537          return (0);
13538  
13539      case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13540  
13541          /*
13542           * Mark MF mode as NIV if MCP version includes NPAR-SD support
13543           * and the MAC address is valid.
13544           */
13545          mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13546  
13547          if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13548              (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13549              mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13550          } else {
13551              BLOGE(sc, "Invalid config for AFEX mode\n");
13552          }
13553  
13554          break;
13555  
13556      default:
13557  
13558          BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13559                (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13560  
13561          return (1);
13562      }
13563  
13564      /* set path mf_mode (which could be different than function mf_mode) */
13565      if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13566          mf_info->path_has_ovlan = TRUE;
13567      } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13568          /*
13569           * Decide on path multi vnics mode. If we're not in MF mode and in
13570           * 4-port mode, this is good enough to check vnic-0 of the other port
13571           * on the same path
13572           */
13573          if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13574              uint8_t other_port = !(PORT_ID(sc) & 1);
13575              uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13576  
13577              val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13578  
13579              mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13580          }
13581      }
13582  
13583      if (mf_info->mf_mode == SINGLE_FUNCTION) {
13584          /* invalid MF config */
13585          if (SC_VN(sc) >= 1) {
13586              BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13587              return (1);
13588          }
13589  
13590          return (0);
13591      }
13592  
13593      /* get the MF configuration */
13594      mf_info->mf_config[SC_VN(sc)] =
13595          MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13596  
13597      switch(mf_info->mf_mode)
13598      {
13599      case MULTI_FUNCTION_SD:
13600  
13601          bxe_get_shmem_mf_cfg_info_sd(sc);
13602          break;
13603  
13604      case MULTI_FUNCTION_SI:
13605  
13606          bxe_get_shmem_mf_cfg_info_si(sc);
13607          break;
13608  
13609      case MULTI_FUNCTION_AFEX:
13610  
13611          bxe_get_shmem_mf_cfg_info_niv(sc);
13612          break;
13613  
13614      default:
13615  
13616          BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13617                mf_info->mf_mode);
13618          return (1);
13619      }
13620  
13621      /* get the congestion management parameters */
13622  
13623      vnic = 0;
13624      FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13625          /* get min/max bw */
13626          val = MFCFG_RD(sc, func_mf_config[i].config);
13627          mf_info->min_bw[vnic] =
13628              ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13629          mf_info->max_bw[vnic] =
13630              ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13631          vnic++;
13632      }
13633  
13634      return (bxe_check_valid_mf_cfg(sc));
13635  }
13636  
13637  static int
bxe_get_shmem_info(struct bxe_softc * sc)13638  bxe_get_shmem_info(struct bxe_softc *sc)
13639  {
13640      int port;
13641      uint32_t mac_hi, mac_lo, val;
13642  
13643      port = SC_PORT(sc);
13644      mac_hi = mac_lo = 0;
13645  
13646      sc->link_params.sc   = sc;
13647      sc->link_params.port = port;
13648  
13649      /* get the hardware config info */
13650      sc->devinfo.hw_config =
13651          SHMEM_RD(sc, dev_info.shared_hw_config.config);
13652      sc->devinfo.hw_config2 =
13653          SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13654  
13655      sc->link_params.hw_led_mode =
13656          ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13657           SHARED_HW_CFG_LED_MODE_SHIFT);
13658  
13659      /* get the port feature config */
13660      sc->port.config =
13661          SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13662  
13663      /* get the link params */
13664      sc->link_params.speed_cap_mask[0] =
13665          SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13666      sc->link_params.speed_cap_mask[1] =
13667          SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13668  
13669      /* get the lane config */
13670      sc->link_params.lane_config =
13671          SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13672  
13673      /* get the link config */
13674      val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13675      sc->port.link_config[ELINK_INT_PHY] = val;
13676      sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13677      sc->port.link_config[ELINK_EXT_PHY1] =
13678          SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13679  
13680      /* get the override preemphasis flag and enable it or turn it off */
13681      val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13682      if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13683          sc->link_params.feature_config_flags |=
13684              ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13685      } else {
13686          sc->link_params.feature_config_flags &=
13687              ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13688      }
13689  
13690      /* get the initial value of the link params */
13691      sc->link_params.multi_phy_config =
13692          SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13693  
13694      /* get external phy info */
13695      sc->port.ext_phy_config =
13696          SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13697  
13698      /* get the multifunction configuration */
13699      bxe_get_mf_cfg_info(sc);
13700  
13701      /* get the mac address */
13702      if (IS_MF(sc)) {
13703          mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13704          mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13705      } else {
13706          mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13707          mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13708      }
13709  
13710      if ((mac_lo == 0) && (mac_hi == 0)) {
13711          *sc->mac_addr_str = 0;
13712          BLOGE(sc, "No Ethernet address programmed!\n");
13713      } else {
13714          sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13715          sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13716          sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13717          sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13718          sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13719          sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13720          snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13721                   "%02x:%02x:%02x:%02x:%02x:%02x",
13722                   sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13723                   sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13724                   sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13725          BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13726      }
13727  
13728      return (0);
13729  }
13730  
13731  static void
bxe_get_tunable_params(struct bxe_softc * sc)13732  bxe_get_tunable_params(struct bxe_softc *sc)
13733  {
13734      /* sanity checks */
13735  
13736      if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13737          (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13738          (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13739          BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13740          bxe_interrupt_mode = INTR_MODE_MSIX;
13741      }
13742  
13743      if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13744          BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13745          bxe_queue_count = 0;
13746      }
13747  
13748      if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13749          if (bxe_max_rx_bufs == 0) {
13750              bxe_max_rx_bufs = RX_BD_USABLE;
13751          } else {
13752              BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13753              bxe_max_rx_bufs = 2048;
13754          }
13755      }
13756  
13757      if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13758          BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13759          bxe_hc_rx_ticks = 25;
13760      }
13761  
13762      if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13763          BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13764          bxe_hc_tx_ticks = 50;
13765      }
13766  
13767      if (bxe_max_aggregation_size == 0) {
13768          bxe_max_aggregation_size = TPA_AGG_SIZE;
13769      }
13770  
13771      if (bxe_max_aggregation_size > 0xffff) {
13772          BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13773                bxe_max_aggregation_size);
13774          bxe_max_aggregation_size = TPA_AGG_SIZE;
13775      }
13776  
13777      if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13778          BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13779          bxe_mrrs = -1;
13780      }
13781  
13782      if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13783          BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13784          bxe_autogreeen = 0;
13785      }
13786  
13787      if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13788          BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13789          bxe_udp_rss = 0;
13790      }
13791  
13792      /* pull in user settings */
13793  
13794      sc->interrupt_mode       = bxe_interrupt_mode;
13795      sc->max_rx_bufs          = bxe_max_rx_bufs;
13796      sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13797      sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13798      sc->max_aggregation_size = bxe_max_aggregation_size;
13799      sc->mrrs                 = bxe_mrrs;
13800      sc->autogreeen           = bxe_autogreeen;
13801      sc->udp_rss              = bxe_udp_rss;
13802  
13803      if (bxe_interrupt_mode == INTR_MODE_INTX) {
13804          sc->num_queues = 1;
13805      } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13806          sc->num_queues =
13807              min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13808                  MAX_RSS_CHAINS);
13809          if (sc->num_queues > mp_ncpus) {
13810              sc->num_queues = mp_ncpus;
13811          }
13812      }
13813  
13814      BLOGD(sc, DBG_LOAD,
13815            "User Config: "
13816            "debug=0x%lx "
13817            "interrupt_mode=%d "
13818            "queue_count=%d "
13819            "hc_rx_ticks=%d "
13820            "hc_tx_ticks=%d "
13821            "rx_budget=%d "
13822            "max_aggregation_size=%d "
13823            "mrrs=%d "
13824            "autogreeen=%d "
13825            "udp_rss=%d\n",
13826            bxe_debug,
13827            sc->interrupt_mode,
13828            sc->num_queues,
13829            sc->hc_rx_ticks,
13830            sc->hc_tx_ticks,
13831            bxe_rx_budget,
13832            sc->max_aggregation_size,
13833            sc->mrrs,
13834            sc->autogreeen,
13835            sc->udp_rss);
13836  }
13837  
13838  static int
bxe_media_detect(struct bxe_softc * sc)13839  bxe_media_detect(struct bxe_softc *sc)
13840  {
13841      int port_type;
13842      uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13843  
13844      switch (sc->link_params.phy[phy_idx].media_type) {
13845      case ELINK_ETH_PHY_SFPP_10G_FIBER:
13846      case ELINK_ETH_PHY_XFP_FIBER:
13847          BLOGI(sc, "Found 10Gb Fiber media.\n");
13848          sc->media = IFM_10G_SR;
13849          port_type = PORT_FIBRE;
13850          break;
13851      case ELINK_ETH_PHY_SFP_1G_FIBER:
13852          BLOGI(sc, "Found 1Gb Fiber media.\n");
13853          sc->media = IFM_1000_SX;
13854          port_type = PORT_FIBRE;
13855          break;
13856      case ELINK_ETH_PHY_KR:
13857      case ELINK_ETH_PHY_CX4:
13858          BLOGI(sc, "Found 10GBase-CX4 media.\n");
13859          sc->media = IFM_10G_CX4;
13860          port_type = PORT_FIBRE;
13861          break;
13862      case ELINK_ETH_PHY_DA_TWINAX:
13863          BLOGI(sc, "Found 10Gb Twinax media.\n");
13864          sc->media = IFM_10G_TWINAX;
13865          port_type = PORT_DA;
13866          break;
13867      case ELINK_ETH_PHY_BASE_T:
13868          if (sc->link_params.speed_cap_mask[0] &
13869              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13870              BLOGI(sc, "Found 10GBase-T media.\n");
13871              sc->media = IFM_10G_T;
13872              port_type = PORT_TP;
13873          } else {
13874              BLOGI(sc, "Found 1000Base-T media.\n");
13875              sc->media = IFM_1000_T;
13876              port_type = PORT_TP;
13877          }
13878          break;
13879      case ELINK_ETH_PHY_NOT_PRESENT:
13880          BLOGI(sc, "Media not present.\n");
13881          sc->media = 0;
13882          port_type = PORT_OTHER;
13883          break;
13884      case ELINK_ETH_PHY_UNSPECIFIED:
13885      default:
13886          BLOGI(sc, "Unknown media!\n");
13887          sc->media = 0;
13888          port_type = PORT_OTHER;
13889          break;
13890      }
13891      return port_type;
13892  }
13893  
13894  #define GET_FIELD(value, fname)                     \
13895      (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13896  #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13897  #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
13898  
13899  static int
bxe_get_igu_cam_info(struct bxe_softc * sc)13900  bxe_get_igu_cam_info(struct bxe_softc *sc)
13901  {
13902      int pfid = SC_FUNC(sc);
13903      int igu_sb_id;
13904      uint32_t val;
13905      uint8_t fid, igu_sb_cnt = 0;
13906  
13907      sc->igu_base_sb = 0xff;
13908  
13909      if (CHIP_INT_MODE_IS_BC(sc)) {
13910          int vn = SC_VN(sc);
13911          igu_sb_cnt = sc->igu_sb_cnt;
13912          sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13913                             FP_SB_MAX_E1x);
13914          sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13915                            (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13916          return (0);
13917      }
13918  
13919      /* IGU in normal mode - read CAM */
13920      for (igu_sb_id = 0;
13921           igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13922           igu_sb_id++) {
13923          val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13924          if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13925              continue;
13926          }
13927          fid = IGU_FID(val);
13928          if ((fid & IGU_FID_ENCODE_IS_PF)) {
13929              if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13930                  continue;
13931              }
13932              if (IGU_VEC(val) == 0) {
13933                  /* default status block */
13934                  sc->igu_dsb_id = igu_sb_id;
13935              } else {
13936                  if (sc->igu_base_sb == 0xff) {
13937                      sc->igu_base_sb = igu_sb_id;
13938                  }
13939                  igu_sb_cnt++;
13940              }
13941          }
13942      }
13943  
13944      /*
13945       * Due to new PF resource allocation by MFW T7.4 and above, it's optional
13946       * that number of CAM entries will not be equal to the value advertised in
13947       * PCI. Driver should use the minimal value of both as the actual status
13948       * block count
13949       */
13950      sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13951  
13952      if (igu_sb_cnt == 0) {
13953          BLOGE(sc, "CAM configuration error\n");
13954          return (-1);
13955      }
13956  
13957      return (0);
13958  }
13959  
13960  /*
13961   * Gather various information from the device config space, the device itself,
13962   * shmem, and the user input.
13963   */
13964  static int
bxe_get_device_info(struct bxe_softc * sc)13965  bxe_get_device_info(struct bxe_softc *sc)
13966  {
13967      uint32_t val;
13968      int rc;
13969  
13970      /* Get the data for the device */
13971      sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13972      sc->devinfo.device_id    = pci_get_device(sc->dev);
13973      sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13974      sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13975  
13976      /* get the chip revision (chip metal comes from pci config space) */
13977      sc->devinfo.chip_id     =
13978      sc->link_params.chip_id =
13979          (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
13980           ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
13981           (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
13982           ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
13983  
13984      /* force 57811 according to MISC register */
13985      if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
13986          if (CHIP_IS_57810(sc)) {
13987              sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13988                                     (sc->devinfo.chip_id & 0x0000ffff));
13989          } else if (CHIP_IS_57810_MF(sc)) {
13990              sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13991                                     (sc->devinfo.chip_id & 0x0000ffff));
13992          }
13993          sc->devinfo.chip_id |= 0x1;
13994      }
13995  
13996      BLOGD(sc, DBG_LOAD,
13997            "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
13998            sc->devinfo.chip_id,
13999            ((sc->devinfo.chip_id >> 16) & 0xffff),
14000            ((sc->devinfo.chip_id >> 12) & 0xf),
14001            ((sc->devinfo.chip_id >>  4) & 0xff),
14002            ((sc->devinfo.chip_id >>  0) & 0xf));
14003  
14004      val = (REG_RD(sc, 0x2874) & 0x55);
14005      if ((sc->devinfo.chip_id & 0x1) ||
14006          (CHIP_IS_E1(sc) && val) ||
14007          (CHIP_IS_E1H(sc) && (val == 0x55))) {
14008          sc->flags |= BXE_ONE_PORT_FLAG;
14009          BLOGD(sc, DBG_LOAD, "single port device\n");
14010      }
14011  
14012      /* set the doorbell size */
14013      sc->doorbell_size = (1 << BXE_DB_SHIFT);
14014  
14015      /* determine whether the device is in 2 port or 4 port mode */
14016      sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
14017      if (CHIP_IS_E2E3(sc)) {
14018          /*
14019           * Read port4mode_en_ovwr[0]:
14020           *   If 1, four port mode is in port4mode_en_ovwr[1].
14021           *   If 0, four port mode is in port4mode_en[0].
14022           */
14023          val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
14024          if (val & 1) {
14025              val = ((val >> 1) & 1);
14026          } else {
14027              val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
14028          }
14029  
14030          sc->devinfo.chip_port_mode =
14031              (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
14032  
14033          BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
14034      }
14035  
14036      /* get the function and path info for the device */
14037      bxe_get_function_num(sc);
14038  
14039      /* get the shared memory base address */
14040      sc->devinfo.shmem_base     =
14041      sc->link_params.shmem_base =
14042          REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
14043      sc->devinfo.shmem2_base =
14044          REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
14045                                    MISC_REG_GENERIC_CR_0));
14046  
14047      BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
14048            sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
14049  
14050      if (!sc->devinfo.shmem_base) {
14051          /* this should ONLY prevent upcoming shmem reads */
14052          BLOGI(sc, "MCP not active\n");
14053          sc->flags |= BXE_NO_MCP_FLAG;
14054          return (0);
14055      }
14056  
14057      /* make sure the shared memory contents are valid */
14058      val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
14059      if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
14060          (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
14061          BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
14062          return (0);
14063      }
14064      BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
14065  
14066      /* get the bootcode version */
14067      sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
14068      snprintf(sc->devinfo.bc_ver_str,
14069               sizeof(sc->devinfo.bc_ver_str),
14070               "%d.%d.%d",
14071               ((sc->devinfo.bc_ver >> 24) & 0xff),
14072               ((sc->devinfo.bc_ver >> 16) & 0xff),
14073               ((sc->devinfo.bc_ver >>  8) & 0xff));
14074      BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
14075  
14076      /* get the bootcode shmem address */
14077      sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
14078      BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x08%x \n", sc->devinfo.mf_cfg_base);
14079  
14080      /* clean indirect addresses as they're not used */
14081      pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
14082      if (IS_PF(sc)) {
14083          REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
14084          REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
14085          REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
14086          REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
14087          if (CHIP_IS_E1x(sc)) {
14088              REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
14089              REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
14090              REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
14091              REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
14092          }
14093  
14094          /*
14095           * Enable internal target-read (in case we are probed after PF
14096           * FLR). Must be done prior to any BAR read access. Only for
14097           * 57712 and up
14098           */
14099          if (!CHIP_IS_E1x(sc)) {
14100              REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
14101          }
14102      }
14103  
14104      /* get the nvram size */
14105      val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
14106      sc->devinfo.flash_size =
14107          (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
14108      BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
14109  
14110      /* get PCI capabilites */
14111      bxe_probe_pci_caps(sc);
14112  
14113      bxe_set_power_state(sc, PCI_PM_D0);
14114  
14115      /* get various configuration parameters from shmem */
14116      bxe_get_shmem_info(sc);
14117  
14118      if (sc->devinfo.pcie_msix_cap_reg != 0) {
14119          val = pci_read_config(sc->dev,
14120                                (sc->devinfo.pcie_msix_cap_reg +
14121                                 PCIR_MSIX_CTRL),
14122                                2);
14123          sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
14124      } else {
14125          sc->igu_sb_cnt = 1;
14126      }
14127  
14128      sc->igu_base_addr = BAR_IGU_INTMEM;
14129  
14130      /* initialize IGU parameters */
14131      if (CHIP_IS_E1x(sc)) {
14132          sc->devinfo.int_block = INT_BLOCK_HC;
14133          sc->igu_dsb_id = DEF_SB_IGU_ID;
14134          sc->igu_base_sb = 0;
14135      } else {
14136          sc->devinfo.int_block = INT_BLOCK_IGU;
14137  
14138          /* do not allow device reset during IGU info preocessing */
14139          bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14140  
14141          val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
14142  
14143          if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14144              int tout = 5000;
14145  
14146              BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
14147  
14148              val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
14149              REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
14150              REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
14151  
14152              while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14153                  tout--;
14154                  DELAY(1000);
14155              }
14156  
14157              if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14158                  BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
14159                  bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14160                  return (-1);
14161              }
14162          }
14163  
14164          if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14165              BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
14166              sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
14167          } else {
14168              BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
14169          }
14170  
14171          rc = bxe_get_igu_cam_info(sc);
14172  
14173          bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14174  
14175          if (rc) {
14176              return (rc);
14177          }
14178      }
14179  
14180      /*
14181       * Get base FW non-default (fast path) status block ID. This value is
14182       * used to initialize the fw_sb_id saved on the fp/queue structure to
14183       * determine the id used by the FW.
14184       */
14185      if (CHIP_IS_E1x(sc)) {
14186          sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
14187      } else {
14188          /*
14189           * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
14190           * the same queue are indicated on the same IGU SB). So we prefer
14191           * FW and IGU SBs to be the same value.
14192           */
14193          sc->base_fw_ndsb = sc->igu_base_sb;
14194      }
14195  
14196      BLOGD(sc, DBG_LOAD,
14197            "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
14198            sc->igu_dsb_id, sc->igu_base_sb,
14199            sc->igu_sb_cnt, sc->base_fw_ndsb);
14200  
14201      elink_phy_probe(&sc->link_params);
14202  
14203      return (0);
14204  }
14205  
14206  static void
bxe_link_settings_supported(struct bxe_softc * sc,uint32_t switch_cfg)14207  bxe_link_settings_supported(struct bxe_softc *sc,
14208                              uint32_t         switch_cfg)
14209  {
14210      uint32_t cfg_size = 0;
14211      uint32_t idx;
14212      uint8_t port = SC_PORT(sc);
14213  
14214      /* aggregation of supported attributes of all external phys */
14215      sc->port.supported[0] = 0;
14216      sc->port.supported[1] = 0;
14217  
14218      switch (sc->link_params.num_phys) {
14219      case 1:
14220          sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
14221          cfg_size = 1;
14222          break;
14223      case 2:
14224          sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
14225          cfg_size = 1;
14226          break;
14227      case 3:
14228          if (sc->link_params.multi_phy_config &
14229              PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14230              sc->port.supported[1] =
14231                  sc->link_params.phy[ELINK_EXT_PHY1].supported;
14232              sc->port.supported[0] =
14233                  sc->link_params.phy[ELINK_EXT_PHY2].supported;
14234          } else {
14235              sc->port.supported[0] =
14236                  sc->link_params.phy[ELINK_EXT_PHY1].supported;
14237              sc->port.supported[1] =
14238                  sc->link_params.phy[ELINK_EXT_PHY2].supported;
14239          }
14240          cfg_size = 2;
14241          break;
14242      }
14243  
14244      if (!(sc->port.supported[0] || sc->port.supported[1])) {
14245          BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14246                SHMEM_RD(sc,
14247                         dev_info.port_hw_config[port].external_phy_config),
14248                SHMEM_RD(sc,
14249                         dev_info.port_hw_config[port].external_phy_config2));
14250          return;
14251      }
14252  
14253      if (CHIP_IS_E3(sc))
14254          sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14255      else {
14256          switch (switch_cfg) {
14257          case ELINK_SWITCH_CFG_1G:
14258              sc->port.phy_addr =
14259                  REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14260              break;
14261          case ELINK_SWITCH_CFG_10G:
14262              sc->port.phy_addr =
14263                  REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14264              break;
14265          default:
14266              BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14267                    sc->port.link_config[0]);
14268              return;
14269          }
14270      }
14271  
14272      BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14273  
14274      /* mask what we support according to speed_cap_mask per configuration */
14275      for (idx = 0; idx < cfg_size; idx++) {
14276          if (!(sc->link_params.speed_cap_mask[idx] &
14277                PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14278              sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14279          }
14280  
14281          if (!(sc->link_params.speed_cap_mask[idx] &
14282                PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14283              sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14284          }
14285  
14286          if (!(sc->link_params.speed_cap_mask[idx] &
14287                PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14288              sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14289          }
14290  
14291          if (!(sc->link_params.speed_cap_mask[idx] &
14292                PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14293              sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14294          }
14295  
14296          if (!(sc->link_params.speed_cap_mask[idx] &
14297                PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14298              sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14299          }
14300  
14301          if (!(sc->link_params.speed_cap_mask[idx] &
14302                PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14303              sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14304          }
14305  
14306          if (!(sc->link_params.speed_cap_mask[idx] &
14307                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14308              sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14309          }
14310  
14311          if (!(sc->link_params.speed_cap_mask[idx] &
14312                PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14313              sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14314          }
14315      }
14316  
14317      BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14318            sc->port.supported[0], sc->port.supported[1]);
14319  	ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
14320  					sc->port.supported[0], sc->port.supported[1]);
14321  }
14322  
14323  static void
bxe_link_settings_requested(struct bxe_softc * sc)14324  bxe_link_settings_requested(struct bxe_softc *sc)
14325  {
14326      uint32_t link_config;
14327      uint32_t idx;
14328      uint32_t cfg_size = 0;
14329  
14330      sc->port.advertising[0] = 0;
14331      sc->port.advertising[1] = 0;
14332  
14333      switch (sc->link_params.num_phys) {
14334      case 1:
14335      case 2:
14336          cfg_size = 1;
14337          break;
14338      case 3:
14339          cfg_size = 2;
14340          break;
14341      }
14342  
14343      for (idx = 0; idx < cfg_size; idx++) {
14344          sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14345          link_config = sc->port.link_config[idx];
14346  
14347          switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14348          case PORT_FEATURE_LINK_SPEED_AUTO:
14349              if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14350                  sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14351                  sc->port.advertising[idx] |= sc->port.supported[idx];
14352                  if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14353                      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14354                      sc->port.advertising[idx] |=
14355                          (ELINK_SUPPORTED_100baseT_Half |
14356                           ELINK_SUPPORTED_100baseT_Full);
14357              } else {
14358                  /* force 10G, no AN */
14359                  sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14360                  sc->port.advertising[idx] |=
14361                      (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14362                  continue;
14363              }
14364              break;
14365  
14366          case PORT_FEATURE_LINK_SPEED_10M_FULL:
14367              if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14368                  sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14369                  sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14370                                                ADVERTISED_TP);
14371              } else {
14372                  BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14373                            "speed_cap_mask=0x%08x\n",
14374                        link_config, sc->link_params.speed_cap_mask[idx]);
14375                  return;
14376              }
14377              break;
14378  
14379          case PORT_FEATURE_LINK_SPEED_10M_HALF:
14380              if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14381                  sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14382                  sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14383                  sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14384                                                ADVERTISED_TP);
14385  				ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
14386  								sc->link_params.req_duplex[idx]);
14387              } else {
14388                  BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14389                            "speed_cap_mask=0x%08x\n",
14390                        link_config, sc->link_params.speed_cap_mask[idx]);
14391                  return;
14392              }
14393              break;
14394  
14395          case PORT_FEATURE_LINK_SPEED_100M_FULL:
14396              if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14397                  sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14398                  sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14399                                                ADVERTISED_TP);
14400              } else {
14401                  BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14402                            "speed_cap_mask=0x%08x\n",
14403                        link_config, sc->link_params.speed_cap_mask[idx]);
14404                  return;
14405              }
14406              break;
14407  
14408          case PORT_FEATURE_LINK_SPEED_100M_HALF:
14409              if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14410                  sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14411                  sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14412                  sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14413                                                ADVERTISED_TP);
14414              } else {
14415                  BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14416                            "speed_cap_mask=0x%08x\n",
14417                        link_config, sc->link_params.speed_cap_mask[idx]);
14418                  return;
14419              }
14420              break;
14421  
14422          case PORT_FEATURE_LINK_SPEED_1G:
14423              if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14424                  sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14425                  sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14426                                                ADVERTISED_TP);
14427              } else {
14428                  BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14429                            "speed_cap_mask=0x%08x\n",
14430                        link_config, sc->link_params.speed_cap_mask[idx]);
14431                  return;
14432              }
14433              break;
14434  
14435          case PORT_FEATURE_LINK_SPEED_2_5G:
14436              if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14437                  sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14438                  sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14439                                                ADVERTISED_TP);
14440              } else {
14441                  BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14442                            "speed_cap_mask=0x%08x\n",
14443                        link_config, sc->link_params.speed_cap_mask[idx]);
14444                  return;
14445              }
14446              break;
14447  
14448          case PORT_FEATURE_LINK_SPEED_10G_CX4:
14449              if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14450                  sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14451                  sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14452                                                ADVERTISED_FIBRE);
14453              } else {
14454                  BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14455                            "speed_cap_mask=0x%08x\n",
14456                        link_config, sc->link_params.speed_cap_mask[idx]);
14457                  return;
14458              }
14459              break;
14460  
14461          case PORT_FEATURE_LINK_SPEED_20G:
14462              sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14463              break;
14464  
14465          default:
14466              BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14467                        "speed_cap_mask=0x%08x\n",
14468                    link_config, sc->link_params.speed_cap_mask[idx]);
14469              sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14470              sc->port.advertising[idx] = sc->port.supported[idx];
14471              break;
14472          }
14473  
14474          sc->link_params.req_flow_ctrl[idx] =
14475              (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14476  
14477          if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14478              if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14479                  sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14480              } else {
14481                  bxe_set_requested_fc(sc);
14482              }
14483          }
14484  
14485          BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14486                              "req_flow_ctrl=0x%x advertising=0x%x\n",
14487                sc->link_params.req_line_speed[idx],
14488                sc->link_params.req_duplex[idx],
14489                sc->link_params.req_flow_ctrl[idx],
14490                sc->port.advertising[idx]);
14491  		ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
14492  						"advertising=0x%x\n",
14493  						sc->link_params.req_line_speed[idx],
14494  						sc->link_params.req_duplex[idx],
14495  						sc->port.advertising[idx]);
14496      }
14497  }
14498  
14499  static void
bxe_get_phy_info(struct bxe_softc * sc)14500  bxe_get_phy_info(struct bxe_softc *sc)
14501  {
14502      uint8_t port = SC_PORT(sc);
14503      uint32_t config = sc->port.config;
14504      uint32_t eee_mode;
14505  
14506      /* shmem data already read in bxe_get_shmem_info() */
14507  
14508      ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14509                          "link_config0=0x%08x\n",
14510                 sc->link_params.lane_config,
14511                 sc->link_params.speed_cap_mask[0],
14512                 sc->port.link_config[0]);
14513  
14514  
14515      bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14516      bxe_link_settings_requested(sc);
14517  
14518      if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14519          sc->link_params.feature_config_flags |=
14520              ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14521      } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14522          sc->link_params.feature_config_flags &=
14523              ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14524      } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14525          sc->link_params.feature_config_flags |=
14526              ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14527      }
14528  
14529      /* configure link feature according to nvram value */
14530      eee_mode =
14531          (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14532            PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14533           PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14534      if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14535          sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14536                                      ELINK_EEE_MODE_ENABLE_LPI |
14537                                      ELINK_EEE_MODE_OUTPUT_TIME);
14538      } else {
14539          sc->link_params.eee_mode = 0;
14540      }
14541  
14542      /* get the media type */
14543      bxe_media_detect(sc);
14544  	ELINK_DEBUG_P1(sc, "detected media type\n", sc->media);
14545  }
14546  
14547  static void
bxe_get_params(struct bxe_softc * sc)14548  bxe_get_params(struct bxe_softc *sc)
14549  {
14550      /* get user tunable params */
14551      bxe_get_tunable_params(sc);
14552  
14553      /* select the RX and TX ring sizes */
14554      sc->tx_ring_size = TX_BD_USABLE;
14555      sc->rx_ring_size = RX_BD_USABLE;
14556  
14557      /* XXX disable WoL */
14558      sc->wol = 0;
14559  }
14560  
14561  static void
bxe_set_modes_bitmap(struct bxe_softc * sc)14562  bxe_set_modes_bitmap(struct bxe_softc *sc)
14563  {
14564      uint32_t flags = 0;
14565  
14566      if (CHIP_REV_IS_FPGA(sc)) {
14567          SET_FLAGS(flags, MODE_FPGA);
14568      } else if (CHIP_REV_IS_EMUL(sc)) {
14569          SET_FLAGS(flags, MODE_EMUL);
14570      } else {
14571          SET_FLAGS(flags, MODE_ASIC);
14572      }
14573  
14574      if (CHIP_IS_MODE_4_PORT(sc)) {
14575          SET_FLAGS(flags, MODE_PORT4);
14576      } else {
14577          SET_FLAGS(flags, MODE_PORT2);
14578      }
14579  
14580      if (CHIP_IS_E2(sc)) {
14581          SET_FLAGS(flags, MODE_E2);
14582      } else if (CHIP_IS_E3(sc)) {
14583          SET_FLAGS(flags, MODE_E3);
14584          if (CHIP_REV(sc) == CHIP_REV_Ax) {
14585              SET_FLAGS(flags, MODE_E3_A0);
14586          } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14587              SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14588          }
14589      }
14590  
14591      if (IS_MF(sc)) {
14592          SET_FLAGS(flags, MODE_MF);
14593          switch (sc->devinfo.mf_info.mf_mode) {
14594          case MULTI_FUNCTION_SD:
14595              SET_FLAGS(flags, MODE_MF_SD);
14596              break;
14597          case MULTI_FUNCTION_SI:
14598              SET_FLAGS(flags, MODE_MF_SI);
14599              break;
14600          case MULTI_FUNCTION_AFEX:
14601              SET_FLAGS(flags, MODE_MF_AFEX);
14602              break;
14603          }
14604      } else {
14605          SET_FLAGS(flags, MODE_SF);
14606      }
14607  
14608  #if defined(__LITTLE_ENDIAN)
14609      SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14610  #else /* __BIG_ENDIAN */
14611      SET_FLAGS(flags, MODE_BIG_ENDIAN);
14612  #endif
14613  
14614      INIT_MODE_FLAGS(sc) = flags;
14615  }
14616  
14617  static int
bxe_alloc_hsi_mem(struct bxe_softc * sc)14618  bxe_alloc_hsi_mem(struct bxe_softc *sc)
14619  {
14620      struct bxe_fastpath *fp;
14621      bus_addr_t busaddr;
14622      int max_agg_queues;
14623      int max_segments;
14624      bus_size_t max_size;
14625      bus_size_t max_seg_size;
14626      char buf[32];
14627      int rc;
14628      int i, j;
14629  
14630      /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */
14631  
14632      /* allocate the parent bus DMA tag */
14633      rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14634                              1,                        /* alignment */
14635                              0,                        /* boundary limit */
14636                              BUS_SPACE_MAXADDR,        /* restricted low */
14637                              BUS_SPACE_MAXADDR,        /* restricted hi */
14638                              NULL,                     /* addr filter() */
14639                              NULL,                     /* addr filter() arg */
14640                              BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14641                              BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14642                              BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14643                              0,                        /* flags */
14644                              NULL,                     /* lock() */
14645                              NULL,                     /* lock() arg */
14646                              &sc->parent_dma_tag);     /* returned dma tag */
14647      if (rc != 0) {
14648          BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14649          return (1);
14650      }
14651  
14652      /************************/
14653      /* DEFAULT STATUS BLOCK */
14654      /************************/
14655  
14656      if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14657                        &sc->def_sb_dma, "default status block") != 0) {
14658          /* XXX */
14659          bus_dma_tag_destroy(sc->parent_dma_tag);
14660          return (1);
14661      }
14662  
14663      sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14664  
14665      /***************/
14666      /* EVENT QUEUE */
14667      /***************/
14668  
14669      if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14670                        &sc->eq_dma, "event queue") != 0) {
14671          /* XXX */
14672          bxe_dma_free(sc, &sc->def_sb_dma);
14673          sc->def_sb = NULL;
14674          bus_dma_tag_destroy(sc->parent_dma_tag);
14675          return (1);
14676      }
14677  
14678      sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
14679  
14680      /*************/
14681      /* SLOW PATH */
14682      /*************/
14683  
14684      if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14685                        &sc->sp_dma, "slow path") != 0) {
14686          /* XXX */
14687          bxe_dma_free(sc, &sc->eq_dma);
14688          sc->eq = NULL;
14689          bxe_dma_free(sc, &sc->def_sb_dma);
14690          sc->def_sb = NULL;
14691          bus_dma_tag_destroy(sc->parent_dma_tag);
14692          return (1);
14693      }
14694  
14695      sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14696  
14697      /*******************/
14698      /* SLOW PATH QUEUE */
14699      /*******************/
14700  
14701      if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14702                        &sc->spq_dma, "slow path queue") != 0) {
14703          /* XXX */
14704          bxe_dma_free(sc, &sc->sp_dma);
14705          sc->sp = NULL;
14706          bxe_dma_free(sc, &sc->eq_dma);
14707          sc->eq = NULL;
14708          bxe_dma_free(sc, &sc->def_sb_dma);
14709          sc->def_sb = NULL;
14710          bus_dma_tag_destroy(sc->parent_dma_tag);
14711          return (1);
14712      }
14713  
14714      sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14715  
14716      /***************************/
14717      /* FW DECOMPRESSION BUFFER */
14718      /***************************/
14719  
14720      if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14721                        "fw decompression buffer") != 0) {
14722          /* XXX */
14723          bxe_dma_free(sc, &sc->spq_dma);
14724          sc->spq = NULL;
14725          bxe_dma_free(sc, &sc->sp_dma);
14726          sc->sp = NULL;
14727          bxe_dma_free(sc, &sc->eq_dma);
14728          sc->eq = NULL;
14729          bxe_dma_free(sc, &sc->def_sb_dma);
14730          sc->def_sb = NULL;
14731          bus_dma_tag_destroy(sc->parent_dma_tag);
14732          return (1);
14733      }
14734  
14735      sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14736  
14737      if ((sc->gz_strm =
14738           malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14739          /* XXX */
14740          bxe_dma_free(sc, &sc->gz_buf_dma);
14741          sc->gz_buf = NULL;
14742          bxe_dma_free(sc, &sc->spq_dma);
14743          sc->spq = NULL;
14744          bxe_dma_free(sc, &sc->sp_dma);
14745          sc->sp = NULL;
14746          bxe_dma_free(sc, &sc->eq_dma);
14747          sc->eq = NULL;
14748          bxe_dma_free(sc, &sc->def_sb_dma);
14749          sc->def_sb = NULL;
14750          bus_dma_tag_destroy(sc->parent_dma_tag);
14751          return (1);
14752      }
14753  
14754      /*************/
14755      /* FASTPATHS */
14756      /*************/
14757  
14758      /* allocate DMA memory for each fastpath structure */
14759      for (i = 0; i < sc->num_queues; i++) {
14760          fp = &sc->fp[i];
14761          fp->sc    = sc;
14762          fp->index = i;
14763  
14764          /*******************/
14765          /* FP STATUS BLOCK */
14766          /*******************/
14767  
14768          snprintf(buf, sizeof(buf), "fp %d status block", i);
14769          if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14770                            &fp->sb_dma, buf) != 0) {
14771              /* XXX unwind and free previous fastpath allocations */
14772              BLOGE(sc, "Failed to alloc %s\n", buf);
14773              return (1);
14774          } else {
14775              if (CHIP_IS_E2E3(sc)) {
14776                  fp->status_block.e2_sb =
14777                      (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14778              } else {
14779                  fp->status_block.e1x_sb =
14780                      (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14781              }
14782          }
14783  
14784          /******************/
14785          /* FP TX BD CHAIN */
14786          /******************/
14787  
14788          snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14789          if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14790                            &fp->tx_dma, buf) != 0) {
14791              /* XXX unwind and free previous fastpath allocations */
14792              BLOGE(sc, "Failed to alloc %s\n", buf);
14793              return (1);
14794          } else {
14795              fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14796          }
14797  
14798          /* link together the tx bd chain pages */
14799          for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14800              /* index into the tx bd chain array to last entry per page */
14801              struct eth_tx_next_bd *tx_next_bd =
14802                  &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14803              /* point to the next page and wrap from last page */
14804              busaddr = (fp->tx_dma.paddr +
14805                         (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14806              tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14807              tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14808          }
14809  
14810          /******************/
14811          /* FP RX BD CHAIN */
14812          /******************/
14813  
14814          snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14815          if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14816                            &fp->rx_dma, buf) != 0) {
14817              /* XXX unwind and free previous fastpath allocations */
14818              BLOGE(sc, "Failed to alloc %s\n", buf);
14819              return (1);
14820          } else {
14821              fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14822          }
14823  
14824          /* link together the rx bd chain pages */
14825          for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14826              /* index into the rx bd chain array to last entry per page */
14827              struct eth_rx_bd *rx_bd =
14828                  &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14829              /* point to the next page and wrap from last page */
14830              busaddr = (fp->rx_dma.paddr +
14831                         (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14832              rx_bd->addr_hi = htole32(U64_HI(busaddr));
14833              rx_bd->addr_lo = htole32(U64_LO(busaddr));
14834          }
14835  
14836          /*******************/
14837          /* FP RX RCQ CHAIN */
14838          /*******************/
14839  
14840          snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14841          if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14842                            &fp->rcq_dma, buf) != 0) {
14843              /* XXX unwind and free previous fastpath allocations */
14844              BLOGE(sc, "Failed to alloc %s\n", buf);
14845              return (1);
14846          } else {
14847              fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14848          }
14849  
14850          /* link together the rcq chain pages */
14851          for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14852              /* index into the rcq chain array to last entry per page */
14853              struct eth_rx_cqe_next_page *rx_cqe_next =
14854                  (struct eth_rx_cqe_next_page *)
14855                  &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14856              /* point to the next page and wrap from last page */
14857              busaddr = (fp->rcq_dma.paddr +
14858                         (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14859              rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14860              rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14861          }
14862  
14863          /*******************/
14864          /* FP RX SGE CHAIN */
14865          /*******************/
14866  
14867          snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14868          if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14869                            &fp->rx_sge_dma, buf) != 0) {
14870              /* XXX unwind and free previous fastpath allocations */
14871              BLOGE(sc, "Failed to alloc %s\n", buf);
14872              return (1);
14873          } else {
14874              fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14875          }
14876  
14877          /* link together the sge chain pages */
14878          for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14879              /* index into the rcq chain array to last entry per page */
14880              struct eth_rx_sge *rx_sge =
14881                  &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14882              /* point to the next page and wrap from last page */
14883              busaddr = (fp->rx_sge_dma.paddr +
14884                         (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14885              rx_sge->addr_hi = htole32(U64_HI(busaddr));
14886              rx_sge->addr_lo = htole32(U64_LO(busaddr));
14887          }
14888  
14889          /***********************/
14890          /* FP TX MBUF DMA MAPS */
14891          /***********************/
14892  
14893          /* set required sizes before mapping to conserve resources */
14894          if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14895              max_size     = BXE_TSO_MAX_SIZE;
14896              max_segments = BXE_TSO_MAX_SEGMENTS;
14897              max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14898          } else {
14899              max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14900              max_segments = BXE_MAX_SEGMENTS;
14901              max_seg_size = MCLBYTES;
14902          }
14903  
14904          /* create a dma tag for the tx mbufs */
14905          rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14906                                  1,                  /* alignment */
14907                                  0,                  /* boundary limit */
14908                                  BUS_SPACE_MAXADDR,  /* restricted low */
14909                                  BUS_SPACE_MAXADDR,  /* restricted hi */
14910                                  NULL,               /* addr filter() */
14911                                  NULL,               /* addr filter() arg */
14912                                  max_size,           /* max map size */
14913                                  max_segments,       /* num discontinuous */
14914                                  max_seg_size,       /* max seg size */
14915                                  0,                  /* flags */
14916                                  NULL,               /* lock() */
14917                                  NULL,               /* lock() arg */
14918                                  &fp->tx_mbuf_tag);  /* returned dma tag */
14919          if (rc != 0) {
14920              /* XXX unwind and free previous fastpath allocations */
14921              BLOGE(sc, "Failed to create dma tag for "
14922                        "'fp %d tx mbufs' (%d)\n", i, rc);
14923              return (1);
14924          }
14925  
14926          /* create dma maps for each of the tx mbuf clusters */
14927          for (j = 0; j < TX_BD_TOTAL; j++) {
14928              if (bus_dmamap_create(fp->tx_mbuf_tag,
14929                                    BUS_DMA_NOWAIT,
14930                                    &fp->tx_mbuf_chain[j].m_map)) {
14931                  /* XXX unwind and free previous fastpath allocations */
14932                  BLOGE(sc, "Failed to create dma map for "
14933                            "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14934                  return (1);
14935              }
14936          }
14937  
14938          /***********************/
14939          /* FP RX MBUF DMA MAPS */
14940          /***********************/
14941  
14942          /* create a dma tag for the rx mbufs */
14943          rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14944                                  1,                  /* alignment */
14945                                  0,                  /* boundary limit */
14946                                  BUS_SPACE_MAXADDR,  /* restricted low */
14947                                  BUS_SPACE_MAXADDR,  /* restricted hi */
14948                                  NULL,               /* addr filter() */
14949                                  NULL,               /* addr filter() arg */
14950                                  MJUM9BYTES,         /* max map size */
14951                                  1,                  /* num discontinuous */
14952                                  MJUM9BYTES,         /* max seg size */
14953                                  0,                  /* flags */
14954                                  NULL,               /* lock() */
14955                                  NULL,               /* lock() arg */
14956                                  &fp->rx_mbuf_tag);  /* returned dma tag */
14957          if (rc != 0) {
14958              /* XXX unwind and free previous fastpath allocations */
14959              BLOGE(sc, "Failed to create dma tag for "
14960                        "'fp %d rx mbufs' (%d)\n", i, rc);
14961              return (1);
14962          }
14963  
14964          /* create dma maps for each of the rx mbuf clusters */
14965          for (j = 0; j < RX_BD_TOTAL; j++) {
14966              if (bus_dmamap_create(fp->rx_mbuf_tag,
14967                                    BUS_DMA_NOWAIT,
14968                                    &fp->rx_mbuf_chain[j].m_map)) {
14969                  /* XXX unwind and free previous fastpath allocations */
14970                  BLOGE(sc, "Failed to create dma map for "
14971                            "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14972                  return (1);
14973              }
14974          }
14975  
14976          /* create dma map for the spare rx mbuf cluster */
14977          if (bus_dmamap_create(fp->rx_mbuf_tag,
14978                                BUS_DMA_NOWAIT,
14979                                &fp->rx_mbuf_spare_map)) {
14980              /* XXX unwind and free previous fastpath allocations */
14981              BLOGE(sc, "Failed to create dma map for "
14982                        "'fp %d spare rx mbuf' (%d)\n", i, rc);
14983              return (1);
14984          }
14985  
14986          /***************************/
14987          /* FP RX SGE MBUF DMA MAPS */
14988          /***************************/
14989  
14990          /* create a dma tag for the rx sge mbufs */
14991          rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14992                                  1,                  /* alignment */
14993                                  0,                  /* boundary limit */
14994                                  BUS_SPACE_MAXADDR,  /* restricted low */
14995                                  BUS_SPACE_MAXADDR,  /* restricted hi */
14996                                  NULL,               /* addr filter() */
14997                                  NULL,               /* addr filter() arg */
14998                                  BCM_PAGE_SIZE,      /* max map size */
14999                                  1,                  /* num discontinuous */
15000                                  BCM_PAGE_SIZE,      /* max seg size */
15001                                  0,                  /* flags */
15002                                  NULL,               /* lock() */
15003                                  NULL,               /* lock() arg */
15004                                  &fp->rx_sge_mbuf_tag); /* returned dma tag */
15005          if (rc != 0) {
15006              /* XXX unwind and free previous fastpath allocations */
15007              BLOGE(sc, "Failed to create dma tag for "
15008                        "'fp %d rx sge mbufs' (%d)\n", i, rc);
15009              return (1);
15010          }
15011  
15012          /* create dma maps for the rx sge mbuf clusters */
15013          for (j = 0; j < RX_SGE_TOTAL; j++) {
15014              if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15015                                    BUS_DMA_NOWAIT,
15016                                    &fp->rx_sge_mbuf_chain[j].m_map)) {
15017                  /* XXX unwind and free previous fastpath allocations */
15018                  BLOGE(sc, "Failed to create dma map for "
15019                            "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
15020                  return (1);
15021              }
15022          }
15023  
15024          /* create dma map for the spare rx sge mbuf cluster */
15025          if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15026                                BUS_DMA_NOWAIT,
15027                                &fp->rx_sge_mbuf_spare_map)) {
15028              /* XXX unwind and free previous fastpath allocations */
15029              BLOGE(sc, "Failed to create dma map for "
15030                        "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
15031              return (1);
15032          }
15033  
15034          /***************************/
15035          /* FP RX TPA MBUF DMA MAPS */
15036          /***************************/
15037  
15038          /* create dma maps for the rx tpa mbuf clusters */
15039          max_agg_queues = MAX_AGG_QS(sc);
15040  
15041          for (j = 0; j < max_agg_queues; j++) {
15042              if (bus_dmamap_create(fp->rx_mbuf_tag,
15043                                    BUS_DMA_NOWAIT,
15044                                    &fp->rx_tpa_info[j].bd.m_map)) {
15045                  /* XXX unwind and free previous fastpath allocations */
15046                  BLOGE(sc, "Failed to create dma map for "
15047                            "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
15048                  return (1);
15049              }
15050          }
15051  
15052          /* create dma map for the spare rx tpa mbuf cluster */
15053          if (bus_dmamap_create(fp->rx_mbuf_tag,
15054                                BUS_DMA_NOWAIT,
15055                                &fp->rx_tpa_info_mbuf_spare_map)) {
15056              /* XXX unwind and free previous fastpath allocations */
15057              BLOGE(sc, "Failed to create dma map for "
15058                        "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
15059              return (1);
15060          }
15061  
15062          bxe_init_sge_ring_bit_mask(fp);
15063      }
15064  
15065      return (0);
15066  }
15067  
15068  static void
bxe_free_hsi_mem(struct bxe_softc * sc)15069  bxe_free_hsi_mem(struct bxe_softc *sc)
15070  {
15071      struct bxe_fastpath *fp;
15072      int max_agg_queues;
15073      int i, j;
15074  
15075      if (sc->parent_dma_tag == NULL) {
15076          return; /* assume nothing was allocated */
15077      }
15078  
15079      for (i = 0; i < sc->num_queues; i++) {
15080          fp = &sc->fp[i];
15081  
15082          /*******************/
15083          /* FP STATUS BLOCK */
15084          /*******************/
15085  
15086          bxe_dma_free(sc, &fp->sb_dma);
15087          memset(&fp->status_block, 0, sizeof(fp->status_block));
15088  
15089          /******************/
15090          /* FP TX BD CHAIN */
15091          /******************/
15092  
15093          bxe_dma_free(sc, &fp->tx_dma);
15094          fp->tx_chain = NULL;
15095  
15096          /******************/
15097          /* FP RX BD CHAIN */
15098          /******************/
15099  
15100          bxe_dma_free(sc, &fp->rx_dma);
15101          fp->rx_chain = NULL;
15102  
15103          /*******************/
15104          /* FP RX RCQ CHAIN */
15105          /*******************/
15106  
15107          bxe_dma_free(sc, &fp->rcq_dma);
15108          fp->rcq_chain = NULL;
15109  
15110          /*******************/
15111          /* FP RX SGE CHAIN */
15112          /*******************/
15113  
15114          bxe_dma_free(sc, &fp->rx_sge_dma);
15115          fp->rx_sge_chain = NULL;
15116  
15117          /***********************/
15118          /* FP TX MBUF DMA MAPS */
15119          /***********************/
15120  
15121          if (fp->tx_mbuf_tag != NULL) {
15122              for (j = 0; j < TX_BD_TOTAL; j++) {
15123                  if (fp->tx_mbuf_chain[j].m_map != NULL) {
15124                      bus_dmamap_unload(fp->tx_mbuf_tag,
15125                                        fp->tx_mbuf_chain[j].m_map);
15126                      bus_dmamap_destroy(fp->tx_mbuf_tag,
15127                                         fp->tx_mbuf_chain[j].m_map);
15128                  }
15129              }
15130  
15131              bus_dma_tag_destroy(fp->tx_mbuf_tag);
15132              fp->tx_mbuf_tag = NULL;
15133          }
15134  
15135          /***********************/
15136          /* FP RX MBUF DMA MAPS */
15137          /***********************/
15138  
15139          if (fp->rx_mbuf_tag != NULL) {
15140              for (j = 0; j < RX_BD_TOTAL; j++) {
15141                  if (fp->rx_mbuf_chain[j].m_map != NULL) {
15142                      bus_dmamap_unload(fp->rx_mbuf_tag,
15143                                        fp->rx_mbuf_chain[j].m_map);
15144                      bus_dmamap_destroy(fp->rx_mbuf_tag,
15145                                         fp->rx_mbuf_chain[j].m_map);
15146                  }
15147              }
15148  
15149              if (fp->rx_mbuf_spare_map != NULL) {
15150                  bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15151                  bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15152              }
15153  
15154              /***************************/
15155              /* FP RX TPA MBUF DMA MAPS */
15156              /***************************/
15157  
15158              max_agg_queues = MAX_AGG_QS(sc);
15159  
15160              for (j = 0; j < max_agg_queues; j++) {
15161                  if (fp->rx_tpa_info[j].bd.m_map != NULL) {
15162                      bus_dmamap_unload(fp->rx_mbuf_tag,
15163                                        fp->rx_tpa_info[j].bd.m_map);
15164                      bus_dmamap_destroy(fp->rx_mbuf_tag,
15165                                         fp->rx_tpa_info[j].bd.m_map);
15166                  }
15167              }
15168  
15169              if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
15170                  bus_dmamap_unload(fp->rx_mbuf_tag,
15171                                    fp->rx_tpa_info_mbuf_spare_map);
15172                  bus_dmamap_destroy(fp->rx_mbuf_tag,
15173                                     fp->rx_tpa_info_mbuf_spare_map);
15174              }
15175  
15176              bus_dma_tag_destroy(fp->rx_mbuf_tag);
15177              fp->rx_mbuf_tag = NULL;
15178          }
15179  
15180          /***************************/
15181          /* FP RX SGE MBUF DMA MAPS */
15182          /***************************/
15183  
15184          if (fp->rx_sge_mbuf_tag != NULL) {
15185              for (j = 0; j < RX_SGE_TOTAL; j++) {
15186                  if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
15187                      bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15188                                        fp->rx_sge_mbuf_chain[j].m_map);
15189                      bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15190                                         fp->rx_sge_mbuf_chain[j].m_map);
15191                  }
15192              }
15193  
15194              if (fp->rx_sge_mbuf_spare_map != NULL) {
15195                  bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15196                                    fp->rx_sge_mbuf_spare_map);
15197                  bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15198                                     fp->rx_sge_mbuf_spare_map);
15199              }
15200  
15201              bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
15202              fp->rx_sge_mbuf_tag = NULL;
15203          }
15204      }
15205  
15206      /***************************/
15207      /* FW DECOMPRESSION BUFFER */
15208      /***************************/
15209  
15210      bxe_dma_free(sc, &sc->gz_buf_dma);
15211      sc->gz_buf = NULL;
15212      free(sc->gz_strm, M_DEVBUF);
15213      sc->gz_strm = NULL;
15214  
15215      /*******************/
15216      /* SLOW PATH QUEUE */
15217      /*******************/
15218  
15219      bxe_dma_free(sc, &sc->spq_dma);
15220      sc->spq = NULL;
15221  
15222      /*************/
15223      /* SLOW PATH */
15224      /*************/
15225  
15226      bxe_dma_free(sc, &sc->sp_dma);
15227      sc->sp = NULL;
15228  
15229      /***************/
15230      /* EVENT QUEUE */
15231      /***************/
15232  
15233      bxe_dma_free(sc, &sc->eq_dma);
15234      sc->eq = NULL;
15235  
15236      /************************/
15237      /* DEFAULT STATUS BLOCK */
15238      /************************/
15239  
15240      bxe_dma_free(sc, &sc->def_sb_dma);
15241      sc->def_sb = NULL;
15242  
15243      bus_dma_tag_destroy(sc->parent_dma_tag);
15244      sc->parent_dma_tag = NULL;
15245  }
15246  
15247  /*
15248   * Previous driver DMAE transaction may have occurred when pre-boot stage
15249   * ended and boot began. This would invalidate the addresses of the
15250   * transaction, resulting in was-error bit set in the PCI causing all
15251   * hw-to-host PCIe transactions to timeout. If this happened we want to clear
15252   * the interrupt which detected this from the pglueb and the was-done bit
15253   */
15254  static void
bxe_prev_interrupted_dmae(struct bxe_softc * sc)15255  bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15256  {
15257      uint32_t val;
15258  
15259      if (!CHIP_IS_E1x(sc)) {
15260          val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15261          if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15262              BLOGD(sc, DBG_LOAD,
15263                    "Clearing 'was-error' bit that was set in pglueb");
15264              REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15265          }
15266      }
15267  }
15268  
15269  static int
bxe_prev_mcp_done(struct bxe_softc * sc)15270  bxe_prev_mcp_done(struct bxe_softc *sc)
15271  {
15272      uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15273                                   DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15274      if (!rc) {
15275          BLOGE(sc, "MCP response failure, aborting\n");
15276          return (-1);
15277      }
15278  
15279      return (0);
15280  }
15281  
15282  static struct bxe_prev_list_node *
bxe_prev_path_get_entry(struct bxe_softc * sc)15283  bxe_prev_path_get_entry(struct bxe_softc *sc)
15284  {
15285      struct bxe_prev_list_node *tmp;
15286  
15287      LIST_FOREACH(tmp, &bxe_prev_list, node) {
15288          if ((sc->pcie_bus == tmp->bus) &&
15289              (sc->pcie_device == tmp->slot) &&
15290              (SC_PATH(sc) == tmp->path)) {
15291              return (tmp);
15292          }
15293      }
15294  
15295      return (NULL);
15296  }
15297  
15298  static uint8_t
bxe_prev_is_path_marked(struct bxe_softc * sc)15299  bxe_prev_is_path_marked(struct bxe_softc *sc)
15300  {
15301      struct bxe_prev_list_node *tmp;
15302      int rc = FALSE;
15303  
15304      mtx_lock(&bxe_prev_mtx);
15305  
15306      tmp = bxe_prev_path_get_entry(sc);
15307      if (tmp) {
15308          if (tmp->aer) {
15309              BLOGD(sc, DBG_LOAD,
15310                    "Path %d/%d/%d was marked by AER\n",
15311                    sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15312          } else {
15313              rc = TRUE;
15314              BLOGD(sc, DBG_LOAD,
15315                    "Path %d/%d/%d was already cleaned from previous drivers\n",
15316                    sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15317          }
15318      }
15319  
15320      mtx_unlock(&bxe_prev_mtx);
15321  
15322      return (rc);
15323  }
15324  
15325  static int
bxe_prev_mark_path(struct bxe_softc * sc,uint8_t after_undi)15326  bxe_prev_mark_path(struct bxe_softc *sc,
15327                     uint8_t          after_undi)
15328  {
15329      struct bxe_prev_list_node *tmp;
15330  
15331      mtx_lock(&bxe_prev_mtx);
15332  
15333      /* Check whether the entry for this path already exists */
15334      tmp = bxe_prev_path_get_entry(sc);
15335      if (tmp) {
15336          if (!tmp->aer) {
15337              BLOGD(sc, DBG_LOAD,
15338                    "Re-marking AER in path %d/%d/%d\n",
15339                    sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15340          } else {
15341              BLOGD(sc, DBG_LOAD,
15342                    "Removing AER indication from path %d/%d/%d\n",
15343                    sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15344              tmp->aer = 0;
15345          }
15346  
15347          mtx_unlock(&bxe_prev_mtx);
15348          return (0);
15349      }
15350  
15351      mtx_unlock(&bxe_prev_mtx);
15352  
15353      /* Create an entry for this path and add it */
15354      tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15355                   (M_NOWAIT | M_ZERO));
15356      if (!tmp) {
15357          BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15358          return (-1);
15359      }
15360  
15361      tmp->bus  = sc->pcie_bus;
15362      tmp->slot = sc->pcie_device;
15363      tmp->path = SC_PATH(sc);
15364      tmp->aer  = 0;
15365      tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15366  
15367      mtx_lock(&bxe_prev_mtx);
15368  
15369      BLOGD(sc, DBG_LOAD,
15370            "Marked path %d/%d/%d - finished previous unload\n",
15371            sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15372      LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15373  
15374      mtx_unlock(&bxe_prev_mtx);
15375  
15376      return (0);
15377  }
15378  
15379  static int
bxe_do_flr(struct bxe_softc * sc)15380  bxe_do_flr(struct bxe_softc *sc)
15381  {
15382      int i;
15383  
15384      /* only E2 and onwards support FLR */
15385      if (CHIP_IS_E1x(sc)) {
15386          BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15387          return (-1);
15388      }
15389  
15390      /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15391      if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15392          BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15393                sc->devinfo.bc_ver);
15394          return (-1);
15395      }
15396  
15397      /* Wait for Transaction Pending bit clean */
15398      for (i = 0; i < 4; i++) {
15399          if (i) {
15400              DELAY(((1 << (i - 1)) * 100) * 1000);
15401          }
15402  
15403          if (!bxe_is_pcie_pending(sc)) {
15404              goto clear;
15405          }
15406      }
15407  
15408      BLOGE(sc, "PCIE transaction is not cleared, "
15409                "proceeding with reset anyway\n");
15410  
15411  clear:
15412  
15413      BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15414      bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15415  
15416      return (0);
15417  }
15418  
15419  struct bxe_mac_vals {
15420      uint32_t xmac_addr;
15421      uint32_t xmac_val;
15422      uint32_t emac_addr;
15423      uint32_t emac_val;
15424      uint32_t umac_addr;
15425      uint32_t umac_val;
15426      uint32_t bmac_addr;
15427      uint32_t bmac_val[2];
15428  };
15429  
15430  static void
bxe_prev_unload_close_mac(struct bxe_softc * sc,struct bxe_mac_vals * vals)15431  bxe_prev_unload_close_mac(struct bxe_softc *sc,
15432                            struct bxe_mac_vals *vals)
15433  {
15434      uint32_t val, base_addr, offset, mask, reset_reg;
15435      uint8_t mac_stopped = FALSE;
15436      uint8_t port = SC_PORT(sc);
15437      uint32_t wb_data[2];
15438  
15439      /* reset addresses as they also mark which values were changed */
15440      vals->bmac_addr = 0;
15441      vals->umac_addr = 0;
15442      vals->xmac_addr = 0;
15443      vals->emac_addr = 0;
15444  
15445      reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15446  
15447      if (!CHIP_IS_E3(sc)) {
15448          val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15449          mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15450          if ((mask & reset_reg) && val) {
15451              BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15452              base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15453                                      : NIG_REG_INGRESS_BMAC0_MEM;
15454              offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15455                                      : BIGMAC_REGISTER_BMAC_CONTROL;
15456  
15457              /*
15458               * use rd/wr since we cannot use dmae. This is safe
15459               * since MCP won't access the bus due to the request
15460               * to unload, and no function on the path can be
15461               * loaded at this time.
15462               */
15463              wb_data[0] = REG_RD(sc, base_addr + offset);
15464              wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15465              vals->bmac_addr = base_addr + offset;
15466              vals->bmac_val[0] = wb_data[0];
15467              vals->bmac_val[1] = wb_data[1];
15468              wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15469              REG_WR(sc, vals->bmac_addr, wb_data[0]);
15470              REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15471          }
15472  
15473          BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15474          vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15475          vals->emac_val = REG_RD(sc, vals->emac_addr);
15476          REG_WR(sc, vals->emac_addr, 0);
15477          mac_stopped = TRUE;
15478      } else {
15479          if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15480              BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15481              base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15482              val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15483              REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15484              REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15485              vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15486              vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15487              REG_WR(sc, vals->xmac_addr, 0);
15488              mac_stopped = TRUE;
15489          }
15490  
15491          mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15492          if (mask & reset_reg) {
15493              BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15494              base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15495              vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15496              vals->umac_val = REG_RD(sc, vals->umac_addr);
15497              REG_WR(sc, vals->umac_addr, 0);
15498              mac_stopped = TRUE;
15499          }
15500      }
15501  
15502      if (mac_stopped) {
15503          DELAY(20000);
15504      }
15505  }
15506  
15507  #define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15508  #define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15509  #define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15510  #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
15511  
15512  static void
bxe_prev_unload_undi_inc(struct bxe_softc * sc,uint8_t port,uint8_t inc)15513  bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15514                           uint8_t          port,
15515                           uint8_t          inc)
15516  {
15517      uint16_t rcq, bd;
15518      uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15519  
15520      rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15521      bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15522  
15523      tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15524      REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15525  
15526      BLOGD(sc, DBG_LOAD,
15527            "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15528            port, bd, rcq);
15529  }
15530  
15531  static int
bxe_prev_unload_common(struct bxe_softc * sc)15532  bxe_prev_unload_common(struct bxe_softc *sc)
15533  {
15534      uint32_t reset_reg, tmp_reg = 0, rc;
15535      uint8_t prev_undi = FALSE;
15536      struct bxe_mac_vals mac_vals;
15537      uint32_t timer_count = 1000;
15538      uint32_t prev_brb;
15539  
15540      /*
15541       * It is possible a previous function received 'common' answer,
15542       * but hasn't loaded yet, therefore creating a scenario of
15543       * multiple functions receiving 'common' on the same path.
15544       */
15545      BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15546  
15547      memset(&mac_vals, 0, sizeof(mac_vals));
15548  
15549      if (bxe_prev_is_path_marked(sc)) {
15550          return (bxe_prev_mcp_done(sc));
15551      }
15552  
15553      reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15554  
15555      /* Reset should be performed after BRB is emptied */
15556      if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15557          /* Close the MAC Rx to prevent BRB from filling up */
15558          bxe_prev_unload_close_mac(sc, &mac_vals);
15559  
15560          /* close LLH filters towards the BRB */
15561          elink_set_rx_filter(&sc->link_params, 0);
15562  
15563          /*
15564           * Check if the UNDI driver was previously loaded.
15565           * UNDI driver initializes CID offset for normal bell to 0x7
15566           */
15567          if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15568              tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15569              if (tmp_reg == 0x7) {
15570                  BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15571                  prev_undi = TRUE;
15572                  /* clear the UNDI indication */
15573                  REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15574                  /* clear possible idle check errors */
15575                  REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15576              }
15577          }
15578  
15579          /* wait until BRB is empty */
15580          tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15581          while (timer_count) {
15582              prev_brb = tmp_reg;
15583  
15584              tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15585              if (!tmp_reg) {
15586                  break;
15587              }
15588  
15589              BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15590  
15591              /* reset timer as long as BRB actually gets emptied */
15592              if (prev_brb > tmp_reg) {
15593                  timer_count = 1000;
15594              } else {
15595                  timer_count--;
15596              }
15597  
15598              /* If UNDI resides in memory, manually increment it */
15599              if (prev_undi) {
15600                  bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15601              }
15602  
15603              DELAY(10);
15604          }
15605  
15606          if (!timer_count) {
15607              BLOGE(sc, "Failed to empty BRB\n");
15608          }
15609      }
15610  
15611      /* No packets are in the pipeline, path is ready for reset */
15612      bxe_reset_common(sc);
15613  
15614      if (mac_vals.xmac_addr) {
15615          REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15616      }
15617      if (mac_vals.umac_addr) {
15618          REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15619      }
15620      if (mac_vals.emac_addr) {
15621          REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15622      }
15623      if (mac_vals.bmac_addr) {
15624          REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15625          REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15626      }
15627  
15628      rc = bxe_prev_mark_path(sc, prev_undi);
15629      if (rc) {
15630          bxe_prev_mcp_done(sc);
15631          return (rc);
15632      }
15633  
15634      return (bxe_prev_mcp_done(sc));
15635  }
15636  
15637  static int
bxe_prev_unload_uncommon(struct bxe_softc * sc)15638  bxe_prev_unload_uncommon(struct bxe_softc *sc)
15639  {
15640      int rc;
15641  
15642      BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15643  
15644      /* Test if previous unload process was already finished for this path */
15645      if (bxe_prev_is_path_marked(sc)) {
15646          return (bxe_prev_mcp_done(sc));
15647      }
15648  
15649      BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15650  
15651      /*
15652       * If function has FLR capabilities, and existing FW version matches
15653       * the one required, then FLR will be sufficient to clean any residue
15654       * left by previous driver
15655       */
15656      rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15657      if (!rc) {
15658          /* fw version is good */
15659          BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15660          rc = bxe_do_flr(sc);
15661      }
15662  
15663      if (!rc) {
15664          /* FLR was performed */
15665          BLOGD(sc, DBG_LOAD, "FLR successful\n");
15666          return (0);
15667      }
15668  
15669      BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15670  
15671      /* Close the MCP request, return failure*/
15672      rc = bxe_prev_mcp_done(sc);
15673      if (!rc) {
15674          rc = BXE_PREV_WAIT_NEEDED;
15675      }
15676  
15677      return (rc);
15678  }
15679  
15680  static int
bxe_prev_unload(struct bxe_softc * sc)15681  bxe_prev_unload(struct bxe_softc *sc)
15682  {
15683      int time_counter = 10;
15684      uint32_t fw, hw_lock_reg, hw_lock_val;
15685      uint32_t rc = 0;
15686  
15687      /*
15688       * Clear HW from errors which may have resulted from an interrupted
15689       * DMAE transaction.
15690       */
15691      bxe_prev_interrupted_dmae(sc);
15692  
15693      /* Release previously held locks */
15694      hw_lock_reg =
15695          (SC_FUNC(sc) <= 5) ?
15696              (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15697              (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15698  
15699      hw_lock_val = (REG_RD(sc, hw_lock_reg));
15700      if (hw_lock_val) {
15701          if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15702              BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15703              REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15704                     (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15705          }
15706          BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15707          REG_WR(sc, hw_lock_reg, 0xffffffff);
15708      } else {
15709          BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15710      }
15711  
15712      if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15713          BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15714          REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15715      }
15716  
15717      do {
15718          /* Lock MCP using an unload request */
15719          fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15720          if (!fw) {
15721              BLOGE(sc, "MCP response failure, aborting\n");
15722              rc = -1;
15723              break;
15724          }
15725  
15726          if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15727              rc = bxe_prev_unload_common(sc);
15728              break;
15729          }
15730  
15731          /* non-common reply from MCP night require looping */
15732          rc = bxe_prev_unload_uncommon(sc);
15733          if (rc != BXE_PREV_WAIT_NEEDED) {
15734              break;
15735          }
15736  
15737          DELAY(20000);
15738      } while (--time_counter);
15739  
15740      if (!time_counter || rc) {
15741          BLOGE(sc, "Failed to unload previous driver!"
15742              " time_counter %d rc %d\n", time_counter, rc);
15743          rc = -1;
15744      }
15745  
15746      return (rc);
15747  }
15748  
15749  void
bxe_dcbx_set_state(struct bxe_softc * sc,uint8_t dcb_on,uint32_t dcbx_enabled)15750  bxe_dcbx_set_state(struct bxe_softc *sc,
15751                     uint8_t          dcb_on,
15752                     uint32_t         dcbx_enabled)
15753  {
15754      if (!CHIP_IS_E1x(sc)) {
15755          sc->dcb_state = dcb_on;
15756          sc->dcbx_enabled = dcbx_enabled;
15757      } else {
15758          sc->dcb_state = FALSE;
15759          sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15760      }
15761      BLOGD(sc, DBG_LOAD,
15762            "DCB state [%s:%s]\n",
15763            dcb_on ? "ON" : "OFF",
15764            (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15765            (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15766            (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15767            "on-chip with negotiation" : "invalid");
15768  }
15769  
15770  /* must be called after sriov-enable */
15771  static int
bxe_set_qm_cid_count(struct bxe_softc * sc)15772  bxe_set_qm_cid_count(struct bxe_softc *sc)
15773  {
15774      int cid_count = BXE_L2_MAX_CID(sc);
15775  
15776      if (IS_SRIOV(sc)) {
15777          cid_count += BXE_VF_CIDS;
15778      }
15779  
15780      if (CNIC_SUPPORT(sc)) {
15781          cid_count += CNIC_CID_MAX;
15782      }
15783  
15784      return (roundup(cid_count, QM_CID_ROUND));
15785  }
15786  
15787  static void
bxe_init_multi_cos(struct bxe_softc * sc)15788  bxe_init_multi_cos(struct bxe_softc *sc)
15789  {
15790      int pri, cos;
15791  
15792      uint32_t pri_map = 0; /* XXX change to user config */
15793  
15794      for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15795          cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15796          if (cos < sc->max_cos) {
15797              sc->prio_to_cos[pri] = cos;
15798          } else {
15799              BLOGW(sc, "Invalid COS %d for priority %d "
15800                        "(max COS is %d), setting to 0\n",
15801                    cos, pri, (sc->max_cos - 1));
15802              sc->prio_to_cos[pri] = 0;
15803          }
15804      }
15805  }
15806  
15807  static int
bxe_sysctl_state(SYSCTL_HANDLER_ARGS)15808  bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15809  {
15810      struct bxe_softc *sc;
15811      int error, result;
15812  
15813      result = 0;
15814      error = sysctl_handle_int(oidp, &result, 0, req);
15815  
15816      if (error || !req->newptr) {
15817          return (error);
15818      }
15819  
15820      if (result == 1) {
15821          uint32_t  temp;
15822          sc = (struct bxe_softc *)arg1;
15823  
15824          BLOGI(sc, "... dumping driver state ...\n");
15825          temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15826          BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15827      }
15828  
15829      return (error);
15830  }
15831  
15832  static int
bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)15833  bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15834  {
15835      struct bxe_softc *sc = (struct bxe_softc *)arg1;
15836      uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15837      uint32_t *offset;
15838      uint64_t value = 0;
15839      int index = (int)arg2;
15840  
15841      if (index >= BXE_NUM_ETH_STATS) {
15842          BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15843          return (-1);
15844      }
15845  
15846      offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15847  
15848      switch (bxe_eth_stats_arr[index].size) {
15849      case 4:
15850          value = (uint64_t)*offset;
15851          break;
15852      case 8:
15853          value = HILO_U64(*offset, *(offset + 1));
15854          break;
15855      default:
15856          BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15857                index, bxe_eth_stats_arr[index].size);
15858          return (-1);
15859      }
15860  
15861      return (sysctl_handle_64(oidp, &value, 0, req));
15862  }
15863  
15864  static int
bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)15865  bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15866  {
15867      struct bxe_softc *sc = (struct bxe_softc *)arg1;
15868      uint32_t *eth_stats;
15869      uint32_t *offset;
15870      uint64_t value = 0;
15871      uint32_t q_stat = (uint32_t)arg2;
15872      uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15873      uint32_t index = (q_stat & 0xffff);
15874  
15875      eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15876  
15877      if (index >= BXE_NUM_ETH_Q_STATS) {
15878          BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15879          return (-1);
15880      }
15881  
15882      offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15883  
15884      switch (bxe_eth_q_stats_arr[index].size) {
15885      case 4:
15886          value = (uint64_t)*offset;
15887          break;
15888      case 8:
15889          value = HILO_U64(*offset, *(offset + 1));
15890          break;
15891      default:
15892          BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15893                index, bxe_eth_q_stats_arr[index].size);
15894          return (-1);
15895      }
15896  
15897      return (sysctl_handle_64(oidp, &value, 0, req));
15898  }
15899  
bxe_force_link_reset(struct bxe_softc * sc)15900  static void bxe_force_link_reset(struct bxe_softc *sc)
15901  {
15902  
15903          bxe_acquire_phy_lock(sc);
15904          elink_link_reset(&sc->link_params, &sc->link_vars, 1);
15905          bxe_release_phy_lock(sc);
15906  }
15907  
15908  static int
bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)15909  bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
15910  {
15911          struct bxe_softc *sc = (struct bxe_softc *)arg1;
15912          uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
15913          int rc = 0;
15914          int error;
15915          int result;
15916  
15917  
15918          error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
15919  
15920          if (error || !req->newptr) {
15921                  return (error);
15922          }
15923          if ((sc->bxe_pause_param < 0) ||  (sc->bxe_pause_param > 8)) {
15924                  BLOGW(sc, "invalid pause param (%d) - use integers between 1 & 8\n",sc->bxe_pause_param);
15925                  sc->bxe_pause_param = 8;
15926          }
15927  
15928          result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
15929  
15930  
15931          if((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg))  {
15932                          BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param);
15933                          return -EINVAL;
15934          }
15935  
15936          if(IS_MF(sc))
15937                  return 0;
15938         sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
15939          if(result & ELINK_FLOW_CTRL_RX)
15940                  sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
15941  
15942          if(result & ELINK_FLOW_CTRL_TX)
15943                  sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
15944          if(sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
15945                  sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
15946  
15947          if(result & 0x400) {
15948                  if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
15949                          sc->link_params.req_flow_ctrl[cfg_idx] =
15950                                  ELINK_FLOW_CTRL_AUTO;
15951                  }
15952                  sc->link_params.req_fc_auto_adv = 0;
15953                  if (result & ELINK_FLOW_CTRL_RX)
15954                          sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
15955  
15956                  if (result & ELINK_FLOW_CTRL_TX)
15957                          sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
15958                  if (!sc->link_params.req_fc_auto_adv)
15959                          sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
15960          }
15961           if (IS_PF(sc)) {
15962                          if (sc->link_vars.link_up) {
15963                                  bxe_stats_handle(sc, STATS_EVENT_STOP);
15964                          }
15965  			if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
15966                          bxe_force_link_reset(sc);
15967                          bxe_acquire_phy_lock(sc);
15968  
15969                          rc = elink_phy_init(&sc->link_params, &sc->link_vars);
15970  
15971                          bxe_release_phy_lock(sc);
15972  
15973                          bxe_calc_fc_adv(sc);
15974                          }
15975          }
15976          return rc;
15977  }
15978  
15979  
15980  static void
bxe_add_sysctls(struct bxe_softc * sc)15981  bxe_add_sysctls(struct bxe_softc *sc)
15982  {
15983      struct sysctl_ctx_list *ctx;
15984      struct sysctl_oid_list *children;
15985      struct sysctl_oid *queue_top, *queue;
15986      struct sysctl_oid_list *queue_top_children, *queue_children;
15987      char queue_num_buf[32];
15988      uint32_t q_stat;
15989      int i, j;
15990  
15991      ctx = device_get_sysctl_ctx(sc->dev);
15992      children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15993  
15994      SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
15995                        CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
15996                        "version");
15997  
15998      snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
15999               BCM_5710_FW_MAJOR_VERSION,
16000               BCM_5710_FW_MINOR_VERSION,
16001               BCM_5710_FW_REVISION_VERSION,
16002               BCM_5710_FW_ENGINEERING_VERSION);
16003  
16004      snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
16005          ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
16006           (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
16007           (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
16008           (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
16009                                                                  "Unknown"));
16010      SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
16011                      CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
16012                      "multifunction vnics per port");
16013  
16014      snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
16015          ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
16016           (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
16017           (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
16018                                                "???GT/s"),
16019          sc->devinfo.pcie_link_width);
16020  
16021      sc->debug = bxe_debug;
16022  
16023      SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
16024                        CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
16025                        "bootcode version");
16026      SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
16027                        CTLFLAG_RD, sc->fw_ver_str, 0,
16028                        "firmware version");
16029      SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
16030                        CTLFLAG_RD, sc->mf_mode_str, 0,
16031                        "multifunction mode");
16032      SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
16033                        CTLFLAG_RD, sc->mac_addr_str, 0,
16034                        "mac address");
16035      SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
16036                        CTLFLAG_RD, sc->pci_link_str, 0,
16037                        "pci link status");
16038      SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
16039                      CTLFLAG_RW, &sc->debug,
16040                      "debug logging mode");
16041  
16042      sc->trigger_grcdump = 0;
16043      SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
16044                     CTLFLAG_RW, &sc->trigger_grcdump, 0,
16045                     "trigger grcdump should be invoked"
16046                     "  before collecting grcdump");
16047  
16048      sc->grcdump_started = 0;
16049      sc->grcdump_done = 0;
16050      SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
16051                     CTLFLAG_RD, &sc->grcdump_done, 0,
16052                     "set by driver when grcdump is done");
16053  
16054      sc->rx_budget = bxe_rx_budget;
16055      SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
16056                      CTLFLAG_RW, &sc->rx_budget, 0,
16057                      "rx processing budget");
16058  
16059      SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
16060          CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16061          bxe_sysctl_pauseparam, "IU",
16062          "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
16063  
16064  
16065      SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
16066          CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16067          bxe_sysctl_state, "IU", "dump driver state");
16068  
16069      for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
16070          SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
16071              bxe_eth_stats_arr[i].string,
16072              CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
16073              bxe_sysctl_eth_stat, "LU", bxe_eth_stats_arr[i].string);
16074      }
16075  
16076      /* add a new parent node for all queues "dev.bxe.#.queue" */
16077      queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
16078          CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "queue");
16079      queue_top_children = SYSCTL_CHILDREN(queue_top);
16080  
16081      for (i = 0; i < sc->num_queues; i++) {
16082          /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
16083          snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
16084          queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
16085              queue_num_buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "single queue");
16086          queue_children = SYSCTL_CHILDREN(queue);
16087  
16088          for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
16089              q_stat = ((i << 16) | j);
16090              SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
16091                   bxe_eth_q_stats_arr[j].string,
16092                   CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, q_stat,
16093                   bxe_sysctl_eth_q_stat, "LU", bxe_eth_q_stats_arr[j].string);
16094          }
16095      }
16096  }
16097  
16098  static int
bxe_alloc_buf_rings(struct bxe_softc * sc)16099  bxe_alloc_buf_rings(struct bxe_softc *sc)
16100  {
16101      int i;
16102      struct bxe_fastpath *fp;
16103  
16104      for (i = 0; i < sc->num_queues; i++) {
16105  
16106          fp = &sc->fp[i];
16107  
16108          fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
16109                                     M_NOWAIT, &fp->tx_mtx);
16110          if (fp->tx_br == NULL)
16111              return (-1);
16112      }
16113  
16114      return (0);
16115  }
16116  
16117  static void
bxe_free_buf_rings(struct bxe_softc * sc)16118  bxe_free_buf_rings(struct bxe_softc *sc)
16119  {
16120      int i;
16121      struct bxe_fastpath *fp;
16122  
16123      for (i = 0; i < sc->num_queues; i++) {
16124  
16125          fp = &sc->fp[i];
16126  
16127          if (fp->tx_br) {
16128              buf_ring_free(fp->tx_br, M_DEVBUF);
16129              fp->tx_br = NULL;
16130          }
16131      }
16132  }
16133  
16134  static void
bxe_init_fp_mutexs(struct bxe_softc * sc)16135  bxe_init_fp_mutexs(struct bxe_softc *sc)
16136  {
16137      int i;
16138      struct bxe_fastpath *fp;
16139  
16140      for (i = 0; i < sc->num_queues; i++) {
16141  
16142          fp = &sc->fp[i];
16143  
16144          snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
16145              "bxe%d_fp%d_tx_lock", sc->unit, i);
16146          mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
16147  
16148          snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
16149              "bxe%d_fp%d_rx_lock", sc->unit, i);
16150          mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
16151      }
16152  }
16153  
16154  static void
bxe_destroy_fp_mutexs(struct bxe_softc * sc)16155  bxe_destroy_fp_mutexs(struct bxe_softc *sc)
16156  {
16157      int i;
16158      struct bxe_fastpath *fp;
16159  
16160      for (i = 0; i < sc->num_queues; i++) {
16161  
16162          fp = &sc->fp[i];
16163  
16164          if (mtx_initialized(&fp->tx_mtx)) {
16165              mtx_destroy(&fp->tx_mtx);
16166          }
16167  
16168          if (mtx_initialized(&fp->rx_mtx)) {
16169              mtx_destroy(&fp->rx_mtx);
16170          }
16171      }
16172  }
16173  
16174  
16175  /*
16176   * Device attach function.
16177   *
16178   * Allocates device resources, performs secondary chip identification, and
16179   * initializes driver instance variables. This function is called from driver
16180   * load after a successful probe.
16181   *
16182   * Returns:
16183   *   0 = Success, >0 = Failure
16184   */
16185  static int
bxe_attach(device_t dev)16186  bxe_attach(device_t dev)
16187  {
16188      struct bxe_softc *sc;
16189  
16190      sc = device_get_softc(dev);
16191  
16192      BLOGD(sc, DBG_LOAD, "Starting attach...\n");
16193  
16194      sc->state = BXE_STATE_CLOSED;
16195  
16196      sc->dev  = dev;
16197      sc->unit = device_get_unit(dev);
16198  
16199      BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
16200  
16201      sc->pcie_bus    = pci_get_bus(dev);
16202      sc->pcie_device = pci_get_slot(dev);
16203      sc->pcie_func   = pci_get_function(dev);
16204  
16205      /* enable bus master capability */
16206      pci_enable_busmaster(dev);
16207  
16208      /* get the BARs */
16209      if (bxe_allocate_bars(sc) != 0) {
16210          return (ENXIO);
16211      }
16212  
16213      /* initialize the mutexes */
16214      bxe_init_mutexes(sc);
16215  
16216      /* prepare the periodic callout */
16217      callout_init(&sc->periodic_callout, 1);
16218  
16219      /* prepare the chip taskqueue */
16220      sc->chip_tq_flags = CHIP_TQ_NONE;
16221      snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
16222               "bxe%d_chip_tq", sc->unit);
16223      TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
16224      sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
16225                                     taskqueue_thread_enqueue,
16226                                     &sc->chip_tq);
16227      taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
16228                              "%s", sc->chip_tq_name);
16229  
16230      TIMEOUT_TASK_INIT(taskqueue_thread,
16231          &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task,  sc);
16232  
16233  
16234      /* get device info and set params */
16235      if (bxe_get_device_info(sc) != 0) {
16236          BLOGE(sc, "getting device info\n");
16237          bxe_deallocate_bars(sc);
16238          pci_disable_busmaster(dev);
16239          return (ENXIO);
16240      }
16241  
16242      /* get final misc params */
16243      bxe_get_params(sc);
16244  
16245      /* set the default MTU (changed via ifconfig) */
16246      sc->mtu = ETHERMTU;
16247  
16248      bxe_set_modes_bitmap(sc);
16249  
16250      /* XXX
16251       * If in AFEX mode and the function is configured for FCoE
16252       * then bail... no L2 allowed.
16253       */
16254  
16255      /* get phy settings from shmem and 'and' against admin settings */
16256      bxe_get_phy_info(sc);
16257  
16258      /* initialize the FreeBSD ifnet interface */
16259      bxe_init_ifnet(sc);
16260  
16261      if (bxe_add_cdev(sc) != 0) {
16262          if (sc->ifp != NULL) {
16263              ether_ifdetach(sc->ifp);
16264          }
16265          ifmedia_removeall(&sc->ifmedia);
16266          bxe_release_mutexes(sc);
16267          bxe_deallocate_bars(sc);
16268          pci_disable_busmaster(dev);
16269          return (ENXIO);
16270      }
16271  
16272      /* allocate device interrupts */
16273      if (bxe_interrupt_alloc(sc) != 0) {
16274          bxe_del_cdev(sc);
16275          if (sc->ifp != NULL) {
16276              ether_ifdetach(sc->ifp);
16277          }
16278          ifmedia_removeall(&sc->ifmedia);
16279          bxe_release_mutexes(sc);
16280          bxe_deallocate_bars(sc);
16281          pci_disable_busmaster(dev);
16282          return (ENXIO);
16283      }
16284  
16285      bxe_init_fp_mutexs(sc);
16286  
16287      if (bxe_alloc_buf_rings(sc) != 0) {
16288  	bxe_free_buf_rings(sc);
16289          bxe_interrupt_free(sc);
16290          bxe_del_cdev(sc);
16291          if (sc->ifp != NULL) {
16292              ether_ifdetach(sc->ifp);
16293          }
16294          ifmedia_removeall(&sc->ifmedia);
16295          bxe_release_mutexes(sc);
16296          bxe_deallocate_bars(sc);
16297          pci_disable_busmaster(dev);
16298          return (ENXIO);
16299      }
16300  
16301      /* allocate ilt */
16302      if (bxe_alloc_ilt_mem(sc) != 0) {
16303  	bxe_free_buf_rings(sc);
16304          bxe_interrupt_free(sc);
16305          bxe_del_cdev(sc);
16306          if (sc->ifp != NULL) {
16307              ether_ifdetach(sc->ifp);
16308          }
16309          ifmedia_removeall(&sc->ifmedia);
16310          bxe_release_mutexes(sc);
16311          bxe_deallocate_bars(sc);
16312          pci_disable_busmaster(dev);
16313          return (ENXIO);
16314      }
16315  
16316      /* allocate the host hardware/software hsi structures */
16317      if (bxe_alloc_hsi_mem(sc) != 0) {
16318          bxe_free_ilt_mem(sc);
16319  	bxe_free_buf_rings(sc);
16320          bxe_interrupt_free(sc);
16321          bxe_del_cdev(sc);
16322          if (sc->ifp != NULL) {
16323              ether_ifdetach(sc->ifp);
16324          }
16325          ifmedia_removeall(&sc->ifmedia);
16326          bxe_release_mutexes(sc);
16327          bxe_deallocate_bars(sc);
16328          pci_disable_busmaster(dev);
16329          return (ENXIO);
16330      }
16331  
16332      /* need to reset chip if UNDI was active */
16333      if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16334          /* init fw_seq */
16335          sc->fw_seq =
16336              (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16337               DRV_MSG_SEQ_NUMBER_MASK);
16338          BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16339          bxe_prev_unload(sc);
16340      }
16341  
16342  #if 1
16343      /* XXX */
16344      bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16345  #else
16346      if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16347          SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16348          SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16349          SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16350          bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16351          bxe_dcbx_init_params(sc);
16352      } else {
16353          bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16354      }
16355  #endif
16356  
16357      /* calculate qm_cid_count */
16358      sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16359      BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16360  
16361      sc->max_cos = 1;
16362      bxe_init_multi_cos(sc);
16363  
16364      bxe_add_sysctls(sc);
16365  
16366      return (0);
16367  }
16368  
16369  /*
16370   * Device detach function.
16371   *
16372   * Stops the controller, resets the controller, and releases resources.
16373   *
16374   * Returns:
16375   *   0 = Success, >0 = Failure
16376   */
16377  static int
bxe_detach(device_t dev)16378  bxe_detach(device_t dev)
16379  {
16380      struct bxe_softc *sc;
16381      if_t ifp;
16382  
16383      sc = device_get_softc(dev);
16384  
16385      BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16386  
16387      ifp = sc->ifp;
16388      if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16389          BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16390          return(EBUSY);
16391      }
16392  
16393      bxe_del_cdev(sc);
16394  
16395      /* stop the periodic callout */
16396      bxe_periodic_stop(sc);
16397  
16398      /* stop the chip taskqueue */
16399      atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16400      if (sc->chip_tq) {
16401          taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16402          taskqueue_free(sc->chip_tq);
16403          sc->chip_tq = NULL;
16404          taskqueue_drain_timeout(taskqueue_thread,
16405              &sc->sp_err_timeout_task);
16406      }
16407  
16408      /* stop and reset the controller if it was open */
16409      if (sc->state != BXE_STATE_CLOSED) {
16410          BXE_CORE_LOCK(sc);
16411          bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16412          sc->state = BXE_STATE_DISABLED;
16413          BXE_CORE_UNLOCK(sc);
16414      }
16415  
16416      /* release the network interface */
16417      if (ifp != NULL) {
16418          ether_ifdetach(ifp);
16419      }
16420      ifmedia_removeall(&sc->ifmedia);
16421  
16422      /* XXX do the following based on driver state... */
16423  
16424      /* free the host hardware/software hsi structures */
16425      bxe_free_hsi_mem(sc);
16426  
16427      /* free ilt */
16428      bxe_free_ilt_mem(sc);
16429  
16430      bxe_free_buf_rings(sc);
16431  
16432      /* release the interrupts */
16433      bxe_interrupt_free(sc);
16434  
16435      /* Release the mutexes*/
16436      bxe_destroy_fp_mutexs(sc);
16437      bxe_release_mutexes(sc);
16438  
16439  
16440      /* Release the PCIe BAR mapped memory */
16441      bxe_deallocate_bars(sc);
16442  
16443      /* Release the FreeBSD interface. */
16444      if (sc->ifp != NULL) {
16445          if_free(sc->ifp);
16446      }
16447  
16448      pci_disable_busmaster(dev);
16449  
16450      return (0);
16451  }
16452  
16453  /*
16454   * Device shutdown function.
16455   *
16456   * Stops and resets the controller.
16457   *
16458   * Returns:
16459   *   Nothing
16460   */
16461  static int
bxe_shutdown(device_t dev)16462  bxe_shutdown(device_t dev)
16463  {
16464      struct bxe_softc *sc;
16465  
16466      sc = device_get_softc(dev);
16467  
16468      BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16469  
16470      /* stop the periodic callout */
16471      bxe_periodic_stop(sc);
16472  
16473      if (sc->state != BXE_STATE_CLOSED) {
16474      	BXE_CORE_LOCK(sc);
16475      	bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16476      	BXE_CORE_UNLOCK(sc);
16477      }
16478  
16479      return (0);
16480  }
16481  
16482  void
bxe_igu_ack_sb(struct bxe_softc * sc,uint8_t igu_sb_id,uint8_t segment,uint16_t index,uint8_t op,uint8_t update)16483  bxe_igu_ack_sb(struct bxe_softc *sc,
16484                 uint8_t          igu_sb_id,
16485                 uint8_t          segment,
16486                 uint16_t         index,
16487                 uint8_t          op,
16488                 uint8_t          update)
16489  {
16490      uint32_t igu_addr = sc->igu_base_addr;
16491      igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16492      bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16493  }
16494  
16495  static void
bxe_igu_clear_sb_gen(struct bxe_softc * sc,uint8_t func,uint8_t idu_sb_id,uint8_t is_pf)16496  bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16497                       uint8_t          func,
16498                       uint8_t          idu_sb_id,
16499                       uint8_t          is_pf)
16500  {
16501      uint32_t data, ctl, cnt = 100;
16502      uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16503      uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16504      uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16505      uint32_t sb_bit =  1 << (idu_sb_id%32);
16506      uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16507      uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16508  
16509      /* Not supported in BC mode */
16510      if (CHIP_INT_MODE_IS_BC(sc)) {
16511          return;
16512      }
16513  
16514      data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16515               IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16516              IGU_REGULAR_CLEANUP_SET |
16517              IGU_REGULAR_BCLEANUP);
16518  
16519      ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16520             (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16521             (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16522  
16523      BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16524              data, igu_addr_data);
16525      REG_WR(sc, igu_addr_data, data);
16526  
16527      bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16528                        BUS_SPACE_BARRIER_WRITE);
16529      mb();
16530  
16531      BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16532              ctl, igu_addr_ctl);
16533      REG_WR(sc, igu_addr_ctl, ctl);
16534  
16535      bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16536                        BUS_SPACE_BARRIER_WRITE);
16537      mb();
16538  
16539      /* wait for clean up to finish */
16540      while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16541          DELAY(20000);
16542      }
16543  
16544      if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16545          BLOGD(sc, DBG_LOAD,
16546                "Unable to finish IGU cleanup: "
16547                "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16548                idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16549      }
16550  }
16551  
16552  static void
bxe_igu_clear_sb(struct bxe_softc * sc,uint8_t idu_sb_id)16553  bxe_igu_clear_sb(struct bxe_softc *sc,
16554                   uint8_t          idu_sb_id)
16555  {
16556      bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16557  }
16558  
16559  
16560  
16561  
16562  
16563  
16564  
16565  /*******************/
16566  /* ECORE CALLBACKS */
16567  /*******************/
16568  
16569  static void
bxe_reset_common(struct bxe_softc * sc)16570  bxe_reset_common(struct bxe_softc *sc)
16571  {
16572      uint32_t val = 0x1400;
16573  
16574      /* reset_common */
16575      REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16576  
16577      if (CHIP_IS_E3(sc)) {
16578          val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16579          val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16580      }
16581  
16582      REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16583  }
16584  
16585  static void
bxe_common_init_phy(struct bxe_softc * sc)16586  bxe_common_init_phy(struct bxe_softc *sc)
16587  {
16588      uint32_t shmem_base[2];
16589      uint32_t shmem2_base[2];
16590  
16591      /* Avoid common init in case MFW supports LFA */
16592      if (SHMEM2_RD(sc, size) >
16593          (uint32_t)offsetof(struct shmem2_region,
16594                             lfa_host_addr[SC_PORT(sc)])) {
16595          return;
16596      }
16597  
16598      shmem_base[0]  = sc->devinfo.shmem_base;
16599      shmem2_base[0] = sc->devinfo.shmem2_base;
16600  
16601      if (!CHIP_IS_E1x(sc)) {
16602          shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16603          shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16604      }
16605  
16606      bxe_acquire_phy_lock(sc);
16607      elink_common_init_phy(sc, shmem_base, shmem2_base,
16608                            sc->devinfo.chip_id, 0);
16609      bxe_release_phy_lock(sc);
16610  }
16611  
16612  static void
bxe_pf_disable(struct bxe_softc * sc)16613  bxe_pf_disable(struct bxe_softc *sc)
16614  {
16615      uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16616  
16617      val &= ~IGU_PF_CONF_FUNC_EN;
16618  
16619      REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16620      REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16621      REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16622  }
16623  
16624  static void
bxe_init_pxp(struct bxe_softc * sc)16625  bxe_init_pxp(struct bxe_softc *sc)
16626  {
16627      uint16_t devctl;
16628      int r_order, w_order;
16629  
16630      devctl = bxe_pcie_capability_read(sc, PCIER_DEVICE_CTL, 2);
16631  
16632      BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16633  
16634      w_order = ((devctl & PCIEM_CTL_MAX_PAYLOAD) >> 5);
16635  
16636      if (sc->mrrs == -1) {
16637          r_order = ((devctl & PCIEM_CTL_MAX_READ_REQUEST) >> 12);
16638      } else {
16639          BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16640          r_order = sc->mrrs;
16641      }
16642  
16643      ecore_init_pxp_arb(sc, r_order, w_order);
16644  }
16645  
16646  static uint32_t
bxe_get_pretend_reg(struct bxe_softc * sc)16647  bxe_get_pretend_reg(struct bxe_softc *sc)
16648  {
16649      uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16650      uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16651      return (base + (SC_ABS_FUNC(sc)) * stride);
16652  }
16653  
16654  /*
16655   * Called only on E1H or E2.
16656   * When pretending to be PF, the pretend value is the function number 0..7.
16657   * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
16658   * combination.
16659   */
16660  static int
bxe_pretend_func(struct bxe_softc * sc,uint16_t pretend_func_val)16661  bxe_pretend_func(struct bxe_softc *sc,
16662                   uint16_t         pretend_func_val)
16663  {
16664      uint32_t pretend_reg;
16665  
16666      if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16667          return (-1);
16668      }
16669  
16670      /* get my own pretend register */
16671      pretend_reg = bxe_get_pretend_reg(sc);
16672      REG_WR(sc, pretend_reg, pretend_func_val);
16673      REG_RD(sc, pretend_reg);
16674      return (0);
16675  }
16676  
16677  static void
bxe_iov_init_dmae(struct bxe_softc * sc)16678  bxe_iov_init_dmae(struct bxe_softc *sc)
16679  {
16680      return;
16681  }
16682  
16683  static void
bxe_iov_init_dq(struct bxe_softc * sc)16684  bxe_iov_init_dq(struct bxe_softc *sc)
16685  {
16686      return;
16687  }
16688  
16689  /* send a NIG loopback debug packet */
16690  static void
bxe_lb_pckt(struct bxe_softc * sc)16691  bxe_lb_pckt(struct bxe_softc *sc)
16692  {
16693      uint32_t wb_write[3];
16694  
16695      /* Ethernet source and destination addresses */
16696      wb_write[0] = 0x55555555;
16697      wb_write[1] = 0x55555555;
16698      wb_write[2] = 0x20;     /* SOP */
16699      REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16700  
16701      /* NON-IP protocol */
16702      wb_write[0] = 0x09000000;
16703      wb_write[1] = 0x55555555;
16704      wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16705      REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16706  }
16707  
16708  /*
16709   * Some of the internal memories are not directly readable from the driver.
16710   * To test them we send debug packets.
16711   */
16712  static int
bxe_int_mem_test(struct bxe_softc * sc)16713  bxe_int_mem_test(struct bxe_softc *sc)
16714  {
16715      int factor;
16716      int count, i;
16717      uint32_t val = 0;
16718  
16719      if (CHIP_REV_IS_FPGA(sc)) {
16720          factor = 120;
16721      } else if (CHIP_REV_IS_EMUL(sc)) {
16722          factor = 200;
16723      } else {
16724          factor = 1;
16725      }
16726  
16727      /* disable inputs of parser neighbor blocks */
16728      REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16729      REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16730      REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16731      REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16732  
16733      /*  write 0 to parser credits for CFC search request */
16734      REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16735  
16736      /* send Ethernet packet */
16737      bxe_lb_pckt(sc);
16738  
16739      /* TODO do i reset NIG statistic? */
16740      /* Wait until NIG register shows 1 packet of size 0x10 */
16741      count = 1000 * factor;
16742      while (count) {
16743          bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16744          val = *BXE_SP(sc, wb_data[0]);
16745          if (val == 0x10) {
16746              break;
16747          }
16748  
16749          DELAY(10000);
16750          count--;
16751      }
16752  
16753      if (val != 0x10) {
16754          BLOGE(sc, "NIG timeout val=0x%x\n", val);
16755          return (-1);
16756      }
16757  
16758      /* wait until PRS register shows 1 packet */
16759      count = (1000 * factor);
16760      while (count) {
16761          val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16762          if (val == 1) {
16763              break;
16764          }
16765  
16766          DELAY(10000);
16767          count--;
16768      }
16769  
16770      if (val != 0x1) {
16771          BLOGE(sc, "PRS timeout val=0x%x\n", val);
16772          return (-2);
16773      }
16774  
16775      /* Reset and init BRB, PRS */
16776      REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16777      DELAY(50000);
16778      REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16779      DELAY(50000);
16780      ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16781      ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16782  
16783      /* Disable inputs of parser neighbor blocks */
16784      REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16785      REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16786      REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16787      REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16788  
16789      /* Write 0 to parser credits for CFC search request */
16790      REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16791  
16792      /* send 10 Ethernet packets */
16793      for (i = 0; i < 10; i++) {
16794          bxe_lb_pckt(sc);
16795      }
16796  
16797      /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16798      count = (1000 * factor);
16799      while (count) {
16800          bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16801          val = *BXE_SP(sc, wb_data[0]);
16802          if (val == 0xb0) {
16803              break;
16804          }
16805  
16806          DELAY(10000);
16807          count--;
16808      }
16809  
16810      if (val != 0xb0) {
16811          BLOGE(sc, "NIG timeout val=0x%x\n", val);
16812          return (-3);
16813      }
16814  
16815      /* Wait until PRS register shows 2 packets */
16816      val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16817      if (val != 2) {
16818          BLOGE(sc, "PRS timeout val=0x%x\n", val);
16819      }
16820  
16821      /* Write 1 to parser credits for CFC search request */
16822      REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16823  
16824      /* Wait until PRS register shows 3 packets */
16825      DELAY(10000 * factor);
16826  
16827      /* Wait until NIG register shows 1 packet of size 0x10 */
16828      val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16829      if (val != 3) {
16830          BLOGE(sc, "PRS timeout val=0x%x\n", val);
16831      }
16832  
16833      /* clear NIG EOP FIFO */
16834      for (i = 0; i < 11; i++) {
16835          REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16836      }
16837  
16838      val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16839      if (val != 1) {
16840          BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16841          return (-4);
16842      }
16843  
16844      /* Reset and init BRB, PRS, NIG */
16845      REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16846      DELAY(50000);
16847      REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16848      DELAY(50000);
16849      ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16850      ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16851      if (!CNIC_SUPPORT(sc)) {
16852          /* set NIC mode */
16853          REG_WR(sc, PRS_REG_NIC_MODE, 1);
16854      }
16855  
16856      /* Enable inputs of parser neighbor blocks */
16857      REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16858      REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16859      REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16860      REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16861  
16862      return (0);
16863  }
16864  
16865  static void
bxe_setup_fan_failure_detection(struct bxe_softc * sc)16866  bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16867  {
16868      int is_required;
16869      uint32_t val;
16870      int port;
16871  
16872      is_required = 0;
16873      val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16874             SHARED_HW_CFG_FAN_FAILURE_MASK);
16875  
16876      if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16877          is_required = 1;
16878      }
16879      /*
16880       * The fan failure mechanism is usually related to the PHY type since
16881       * the power consumption of the board is affected by the PHY. Currently,
16882       * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16883       */
16884      else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16885          for (port = PORT_0; port < PORT_MAX; port++) {
16886              is_required |= elink_fan_failure_det_req(sc,
16887                                                       sc->devinfo.shmem_base,
16888                                                       sc->devinfo.shmem2_base,
16889                                                       port);
16890          }
16891      }
16892  
16893      BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16894  
16895      if (is_required == 0) {
16896          return;
16897      }
16898  
16899      /* Fan failure is indicated by SPIO 5 */
16900      bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16901  
16902      /* set to active low mode */
16903      val = REG_RD(sc, MISC_REG_SPIO_INT);
16904      val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16905      REG_WR(sc, MISC_REG_SPIO_INT, val);
16906  
16907      /* enable interrupt to signal the IGU */
16908      val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16909      val |= MISC_SPIO_SPIO5;
16910      REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16911  }
16912  
16913  static void
bxe_enable_blocks_attention(struct bxe_softc * sc)16914  bxe_enable_blocks_attention(struct bxe_softc *sc)
16915  {
16916      uint32_t val;
16917  
16918      REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16919      if (!CHIP_IS_E1x(sc)) {
16920          REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16921      } else {
16922          REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16923      }
16924      REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16925      REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16926      /*
16927       * mask read length error interrupts in brb for parser
16928       * (parsing unit and 'checksum and crc' unit)
16929       * these errors are legal (PU reads fixed length and CAC can cause
16930       * read length error on truncated packets)
16931       */
16932      REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16933      REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16934      REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16935      REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16936      REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16937      REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16938  /*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16939  /*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16940      REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16941      REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16942      REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16943  /*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16944  /*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16945      REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16946      REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16947      REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16948      REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16949  /*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16950  /*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16951  
16952      val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16953             PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16954             PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16955      if (!CHIP_IS_E1x(sc)) {
16956          val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16957                  PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16958      }
16959      REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16960  
16961      REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16962      REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16963      REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16964  /*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16965  
16966      if (!CHIP_IS_E1x(sc)) {
16967          /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16968          REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16969      }
16970  
16971      REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16972      REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16973  /*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16974      REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
16975  }
16976  
16977  /**
16978   * bxe_init_hw_common - initialize the HW at the COMMON phase.
16979   *
16980   * @sc:     driver handle
16981   */
16982  static int
bxe_init_hw_common(struct bxe_softc * sc)16983  bxe_init_hw_common(struct bxe_softc *sc)
16984  {
16985      uint8_t abs_func_id;
16986      uint32_t val;
16987  
16988      BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
16989            SC_ABS_FUNC(sc));
16990  
16991      /*
16992       * take the RESET lock to protect undi_unload flow from accessing
16993       * registers while we are resetting the chip
16994       */
16995      bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16996  
16997      bxe_reset_common(sc);
16998  
16999      REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
17000  
17001      val = 0xfffc;
17002      if (CHIP_IS_E3(sc)) {
17003          val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
17004          val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
17005      }
17006  
17007      REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
17008  
17009      bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17010  
17011      ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
17012      BLOGD(sc, DBG_LOAD, "after misc block init\n");
17013  
17014      if (!CHIP_IS_E1x(sc)) {
17015          /*
17016           * 4-port mode or 2-port mode we need to turn off master-enable for
17017           * everyone. After that we turn it back on for self. So, we disregard
17018           * multi-function, and always disable all functions on the given path,
17019           * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
17020           */
17021          for (abs_func_id = SC_PATH(sc);
17022               abs_func_id < (E2_FUNC_MAX * 2);
17023               abs_func_id += 2) {
17024              if (abs_func_id == SC_ABS_FUNC(sc)) {
17025                  REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17026                  continue;
17027              }
17028  
17029              bxe_pretend_func(sc, abs_func_id);
17030  
17031              /* clear pf enable */
17032              bxe_pf_disable(sc);
17033  
17034              bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17035          }
17036      }
17037  
17038      BLOGD(sc, DBG_LOAD, "after pf disable\n");
17039  
17040      ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
17041  
17042      if (CHIP_IS_E1(sc)) {
17043          /*
17044           * enable HW interrupt from PXP on USDM overflow
17045           * bit 16 on INT_MASK_0
17046           */
17047          REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
17048      }
17049  
17050      ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
17051      bxe_init_pxp(sc);
17052  
17053  #ifdef __BIG_ENDIAN
17054      REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
17055      REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
17056      REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
17057      REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
17058      REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
17059      /* make sure this value is 0 */
17060      REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
17061  
17062      //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
17063      REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
17064      REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
17065      REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
17066      REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
17067  #endif
17068  
17069      ecore_ilt_init_page_size(sc, INITOP_SET);
17070  
17071      if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
17072          REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
17073      }
17074  
17075      /* let the HW do it's magic... */
17076      DELAY(100000);
17077  
17078      /* finish PXP init */
17079      val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
17080      if (val != 1) {
17081          BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
17082              val);
17083          return (-1);
17084      }
17085      val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
17086      if (val != 1) {
17087          BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
17088          return (-1);
17089      }
17090  
17091      BLOGD(sc, DBG_LOAD, "after pxp init\n");
17092  
17093      /*
17094       * Timer bug workaround for E2 only. We need to set the entire ILT to have
17095       * entries with value "0" and valid bit on. This needs to be done by the
17096       * first PF that is loaded in a path (i.e. common phase)
17097       */
17098      if (!CHIP_IS_E1x(sc)) {
17099  /*
17100   * In E2 there is a bug in the timers block that can cause function 6 / 7
17101   * (i.e. vnic3) to start even if it is marked as "scan-off".
17102   * This occurs when a different function (func2,3) is being marked
17103   * as "scan-off". Real-life scenario for example: if a driver is being
17104   * load-unloaded while func6,7 are down. This will cause the timer to access
17105   * the ilt, translate to a logical address and send a request to read/write.
17106   * Since the ilt for the function that is down is not valid, this will cause
17107   * a translation error which is unrecoverable.
17108   * The Workaround is intended to make sure that when this happens nothing
17109   * fatal will occur. The workaround:
17110   *  1.  First PF driver which loads on a path will:
17111   *      a.  After taking the chip out of reset, by using pretend,
17112   *          it will write "0" to the following registers of
17113   *          the other vnics.
17114   *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
17115   *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
17116   *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
17117   *          And for itself it will write '1' to
17118   *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
17119   *          dmae-operations (writing to pram for example.)
17120   *          note: can be done for only function 6,7 but cleaner this
17121   *            way.
17122   *      b.  Write zero+valid to the entire ILT.
17123   *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
17124   *          VNIC3 (of that port). The range allocated will be the
17125   *          entire ILT. This is needed to prevent  ILT range error.
17126   *  2.  Any PF driver load flow:
17127   *      a.  ILT update with the physical addresses of the allocated
17128   *          logical pages.
17129   *      b.  Wait 20msec. - note that this timeout is needed to make
17130   *          sure there are no requests in one of the PXP internal
17131   *          queues with "old" ILT addresses.
17132   *      c.  PF enable in the PGLC.
17133   *      d.  Clear the was_error of the PF in the PGLC. (could have
17134   *          occurred while driver was down)
17135   *      e.  PF enable in the CFC (WEAK + STRONG)
17136   *      f.  Timers scan enable
17137   *  3.  PF driver unload flow:
17138   *      a.  Clear the Timers scan_en.
17139   *      b.  Polling for scan_on=0 for that PF.
17140   *      c.  Clear the PF enable bit in the PXP.
17141   *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
17142   *      e.  Write zero+valid to all ILT entries (The valid bit must
17143   *          stay set)
17144   *      f.  If this is VNIC 3 of a port then also init
17145   *          first_timers_ilt_entry to zero and last_timers_ilt_entry
17146   *          to the last entry in the ILT.
17147   *
17148   *      Notes:
17149   *      Currently the PF error in the PGLC is non recoverable.
17150   *      In the future the there will be a recovery routine for this error.
17151   *      Currently attention is masked.
17152   *      Having an MCP lock on the load/unload process does not guarantee that
17153   *      there is no Timer disable during Func6/7 enable. This is because the
17154   *      Timers scan is currently being cleared by the MCP on FLR.
17155   *      Step 2.d can be done only for PF6/7 and the driver can also check if
17156   *      there is error before clearing it. But the flow above is simpler and
17157   *      more general.
17158   *      All ILT entries are written by zero+valid and not just PF6/7
17159   *      ILT entries since in the future the ILT entries allocation for
17160   *      PF-s might be dynamic.
17161   */
17162          struct ilt_client_info ilt_cli;
17163          struct ecore_ilt ilt;
17164  
17165          memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
17166          memset(&ilt, 0, sizeof(struct ecore_ilt));
17167  
17168          /* initialize dummy TM client */
17169          ilt_cli.start      = 0;
17170          ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
17171          ilt_cli.client_num = ILT_CLIENT_TM;
17172  
17173          /*
17174           * Step 1: set zeroes to all ilt page entries with valid bit on
17175           * Step 2: set the timers first/last ilt entry to point
17176           * to the entire range to prevent ILT range error for 3rd/4th
17177           * vnic (this code assumes existence of the vnic)
17178           *
17179           * both steps performed by call to ecore_ilt_client_init_op()
17180           * with dummy TM client
17181           *
17182           * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
17183           * and his brother are split registers
17184           */
17185  
17186          bxe_pretend_func(sc, (SC_PATH(sc) + 6));
17187          ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
17188          bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17189  
17190          REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
17191          REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
17192          REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
17193      }
17194  
17195      REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
17196      REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
17197  
17198      if (!CHIP_IS_E1x(sc)) {
17199          int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
17200                       (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
17201  
17202          ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
17203          ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
17204  
17205          /* let the HW do it's magic... */
17206          do {
17207              DELAY(200000);
17208              val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
17209          } while (factor-- && (val != 1));
17210  
17211          if (val != 1) {
17212              BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
17213              return (-1);
17214          }
17215      }
17216  
17217      BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
17218  
17219      ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
17220  
17221      bxe_iov_init_dmae(sc);
17222  
17223      /* clean the DMAE memory */
17224      sc->dmae_ready = 1;
17225      ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
17226  
17227      ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
17228  
17229      ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
17230  
17231      ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
17232  
17233      ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
17234  
17235      bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
17236      bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
17237      bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
17238      bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
17239  
17240      ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
17241  
17242      /* QM queues pointers table */
17243      ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17244  
17245      /* soft reset pulse */
17246      REG_WR(sc, QM_REG_SOFT_RESET, 1);
17247      REG_WR(sc, QM_REG_SOFT_RESET, 0);
17248  
17249      if (CNIC_SUPPORT(sc))
17250          ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17251  
17252      ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17253      REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17254      if (!CHIP_REV_IS_SLOW(sc)) {
17255          /* enable hw interrupt from doorbell Q */
17256          REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17257      }
17258  
17259      ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17260  
17261      ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17262      REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17263  
17264      if (!CHIP_IS_E1(sc)) {
17265          REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17266      }
17267  
17268      if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17269          if (IS_MF_AFEX(sc)) {
17270              /*
17271               * configure that AFEX and VLAN headers must be
17272               * received in AFEX mode
17273               */
17274              REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17275              REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17276              REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17277              REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17278              REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17279          } else {
17280              /*
17281               * Bit-map indicating which L2 hdrs may appear
17282               * after the basic Ethernet header
17283               */
17284              REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17285                     sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17286          }
17287      }
17288  
17289      ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17290      ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17291      ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17292      ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17293  
17294      if (!CHIP_IS_E1x(sc)) {
17295          /* reset VFC memories */
17296          REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17297                 VFC_MEMORIES_RST_REG_CAM_RST |
17298                 VFC_MEMORIES_RST_REG_RAM_RST);
17299          REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17300                 VFC_MEMORIES_RST_REG_CAM_RST |
17301                 VFC_MEMORIES_RST_REG_RAM_RST);
17302  
17303          DELAY(20000);
17304      }
17305  
17306      ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17307      ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17308      ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17309      ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17310  
17311      /* sync semi rtc */
17312      REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17313             0x80000000);
17314      REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17315             0x80000000);
17316  
17317      ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17318      ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17319      ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17320  
17321      if (!CHIP_IS_E1x(sc)) {
17322          if (IS_MF_AFEX(sc)) {
17323              /*
17324               * configure that AFEX and VLAN headers must be
17325               * sent in AFEX mode
17326               */
17327              REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17328              REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17329              REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17330              REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17331              REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17332          } else {
17333              REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17334                     sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17335          }
17336      }
17337  
17338      REG_WR(sc, SRC_REG_SOFT_RST, 1);
17339  
17340      ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17341  
17342      if (CNIC_SUPPORT(sc)) {
17343          REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17344          REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17345          REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17346          REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17347          REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17348          REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17349          REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17350          REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17351          REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17352          REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17353      }
17354      REG_WR(sc, SRC_REG_SOFT_RST, 0);
17355  
17356      if (sizeof(union cdu_context) != 1024) {
17357          /* we currently assume that a context is 1024 bytes */
17358          BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17359                (long)sizeof(union cdu_context));
17360      }
17361  
17362      ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
17363      val = (4 << 24) + (0 << 12) + 1024;
17364      REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17365  
17366      ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17367  
17368      REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17369      /* enable context validation interrupt from CFC */
17370      REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17371  
17372      /* set the thresholds to prevent CFC/CDU race */
17373      REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17374      ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17375  
17376      if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17377          REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17378      }
17379  
17380      ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17381      ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17382  
17383      /* Reset PCIE errors for debug */
17384      REG_WR(sc, 0x2814, 0xffffffff);
17385      REG_WR(sc, 0x3820, 0xffffffff);
17386  
17387      if (!CHIP_IS_E1x(sc)) {
17388          REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17389                 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17390                  PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17391          REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17392                 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17393                  PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17394                  PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17395          REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17396                 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17397                  PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17398                  PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17399      }
17400  
17401      ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17402  
17403      if (!CHIP_IS_E1(sc)) {
17404          /* in E3 this done in per-port section */
17405          if (!CHIP_IS_E3(sc))
17406              REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17407      }
17408  
17409      if (CHIP_IS_E1H(sc)) {
17410          /* not applicable for E2 (and above ...) */
17411          REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17412      }
17413  
17414      if (CHIP_REV_IS_SLOW(sc)) {
17415          DELAY(200000);
17416      }
17417  
17418      /* finish CFC init */
17419      val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17420      if (val != 1) {
17421          BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17422          return (-1);
17423      }
17424      val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17425      if (val != 1) {
17426          BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17427          return (-1);
17428      }
17429      val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17430      if (val != 1) {
17431          BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17432          return (-1);
17433      }
17434      REG_WR(sc, CFC_REG_DEBUG0, 0);
17435  
17436      if (CHIP_IS_E1(sc)) {
17437          /* read NIG statistic to see if this is our first up since powerup */
17438          bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17439          val = *BXE_SP(sc, wb_data[0]);
17440  
17441          /* do internal memory self test */
17442          if ((val == 0) && bxe_int_mem_test(sc)) {
17443              BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17444              return (-1);
17445          }
17446      }
17447  
17448      bxe_setup_fan_failure_detection(sc);
17449  
17450      /* clear PXP2 attentions */
17451      REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17452  
17453      bxe_enable_blocks_attention(sc);
17454  
17455      if (!CHIP_REV_IS_SLOW(sc)) {
17456          ecore_enable_blocks_parity(sc);
17457      }
17458  
17459      if (!BXE_NOMCP(sc)) {
17460          if (CHIP_IS_E1x(sc)) {
17461              bxe_common_init_phy(sc);
17462          }
17463      }
17464  
17465      return (0);
17466  }
17467  
17468  /**
17469   * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17470   *
17471   * @sc:     driver handle
17472   */
17473  static int
bxe_init_hw_common_chip(struct bxe_softc * sc)17474  bxe_init_hw_common_chip(struct bxe_softc *sc)
17475  {
17476      int rc = bxe_init_hw_common(sc);
17477  
17478      if (rc) {
17479          BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17480          return (rc);
17481      }
17482  
17483      /* In E2 2-PORT mode, same ext phy is used for the two paths */
17484      if (!BXE_NOMCP(sc)) {
17485          bxe_common_init_phy(sc);
17486      }
17487  
17488      return (0);
17489  }
17490  
17491  static int
bxe_init_hw_port(struct bxe_softc * sc)17492  bxe_init_hw_port(struct bxe_softc *sc)
17493  {
17494      int port = SC_PORT(sc);
17495      int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17496      uint32_t low, high;
17497      uint32_t val;
17498  
17499      BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17500  
17501      REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17502  
17503      ecore_init_block(sc, BLOCK_MISC, init_phase);
17504      ecore_init_block(sc, BLOCK_PXP, init_phase);
17505      ecore_init_block(sc, BLOCK_PXP2, init_phase);
17506  
17507      /*
17508       * Timers bug workaround: disables the pf_master bit in pglue at
17509       * common phase, we need to enable it here before any dmae access are
17510       * attempted. Therefore we manually added the enable-master to the
17511       * port phase (it also happens in the function phase)
17512       */
17513      if (!CHIP_IS_E1x(sc)) {
17514          REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17515      }
17516  
17517      ecore_init_block(sc, BLOCK_ATC, init_phase);
17518      ecore_init_block(sc, BLOCK_DMAE, init_phase);
17519      ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17520      ecore_init_block(sc, BLOCK_QM, init_phase);
17521  
17522      ecore_init_block(sc, BLOCK_TCM, init_phase);
17523      ecore_init_block(sc, BLOCK_UCM, init_phase);
17524      ecore_init_block(sc, BLOCK_CCM, init_phase);
17525      ecore_init_block(sc, BLOCK_XCM, init_phase);
17526  
17527      /* QM cid (connection) count */
17528      ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17529  
17530      if (CNIC_SUPPORT(sc)) {
17531          ecore_init_block(sc, BLOCK_TM, init_phase);
17532          REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17533          REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17534      }
17535  
17536      ecore_init_block(sc, BLOCK_DORQ, init_phase);
17537  
17538      ecore_init_block(sc, BLOCK_BRB1, init_phase);
17539  
17540      if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17541          if (IS_MF(sc)) {
17542              low = (BXE_ONE_PORT(sc) ? 160 : 246);
17543          } else if (sc->mtu > 4096) {
17544              if (BXE_ONE_PORT(sc)) {
17545                  low = 160;
17546              } else {
17547                  val = sc->mtu;
17548                  /* (24*1024 + val*4)/256 */
17549                  low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17550              }
17551          } else {
17552              low = (BXE_ONE_PORT(sc) ? 80 : 160);
17553          }
17554          high = (low + 56); /* 14*1024/256 */
17555          REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17556          REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17557      }
17558  
17559      if (CHIP_IS_MODE_4_PORT(sc)) {
17560          REG_WR(sc, SC_PORT(sc) ?
17561                 BRB1_REG_MAC_GUARANTIED_1 :
17562                 BRB1_REG_MAC_GUARANTIED_0, 40);
17563      }
17564  
17565      ecore_init_block(sc, BLOCK_PRS, init_phase);
17566      if (CHIP_IS_E3B0(sc)) {
17567          if (IS_MF_AFEX(sc)) {
17568              /* configure headers for AFEX mode */
17569              REG_WR(sc, SC_PORT(sc) ?
17570                     PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17571                     PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17572              REG_WR(sc, SC_PORT(sc) ?
17573                     PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17574                     PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17575              REG_WR(sc, SC_PORT(sc) ?
17576                     PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17577                     PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17578          } else {
17579              /* Ovlan exists only if we are in multi-function +
17580               * switch-dependent mode, in switch-independent there
17581               * is no ovlan headers
17582               */
17583              REG_WR(sc, SC_PORT(sc) ?
17584                     PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17585                     PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17586                     (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17587          }
17588      }
17589  
17590      ecore_init_block(sc, BLOCK_TSDM, init_phase);
17591      ecore_init_block(sc, BLOCK_CSDM, init_phase);
17592      ecore_init_block(sc, BLOCK_USDM, init_phase);
17593      ecore_init_block(sc, BLOCK_XSDM, init_phase);
17594  
17595      ecore_init_block(sc, BLOCK_TSEM, init_phase);
17596      ecore_init_block(sc, BLOCK_USEM, init_phase);
17597      ecore_init_block(sc, BLOCK_CSEM, init_phase);
17598      ecore_init_block(sc, BLOCK_XSEM, init_phase);
17599  
17600      ecore_init_block(sc, BLOCK_UPB, init_phase);
17601      ecore_init_block(sc, BLOCK_XPB, init_phase);
17602  
17603      ecore_init_block(sc, BLOCK_PBF, init_phase);
17604  
17605      if (CHIP_IS_E1x(sc)) {
17606          /* configure PBF to work without PAUSE mtu 9000 */
17607          REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17608  
17609          /* update threshold */
17610          REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17611          /* update init credit */
17612          REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17613  
17614          /* probe changes */
17615          REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17616          DELAY(50);
17617          REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17618      }
17619  
17620      if (CNIC_SUPPORT(sc)) {
17621          ecore_init_block(sc, BLOCK_SRC, init_phase);
17622      }
17623  
17624      ecore_init_block(sc, BLOCK_CDU, init_phase);
17625      ecore_init_block(sc, BLOCK_CFC, init_phase);
17626  
17627      if (CHIP_IS_E1(sc)) {
17628          REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17629          REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17630      }
17631      ecore_init_block(sc, BLOCK_HC, init_phase);
17632  
17633      ecore_init_block(sc, BLOCK_IGU, init_phase);
17634  
17635      ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17636      /* init aeu_mask_attn_func_0/1:
17637       *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17638       *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17639       *             bits 4-7 are used for "per vn group attention" */
17640      val = IS_MF(sc) ? 0xF7 : 0x7;
17641      /* Enable DCBX attention for all but E1 */
17642      val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17643      REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17644  
17645      ecore_init_block(sc, BLOCK_NIG, init_phase);
17646  
17647      if (!CHIP_IS_E1x(sc)) {
17648          /* Bit-map indicating which L2 hdrs may appear after the
17649           * basic Ethernet header
17650           */
17651          if (IS_MF_AFEX(sc)) {
17652              REG_WR(sc, SC_PORT(sc) ?
17653                     NIG_REG_P1_HDRS_AFTER_BASIC :
17654                     NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17655          } else {
17656              REG_WR(sc, SC_PORT(sc) ?
17657                     NIG_REG_P1_HDRS_AFTER_BASIC :
17658                     NIG_REG_P0_HDRS_AFTER_BASIC,
17659                     IS_MF_SD(sc) ? 7 : 6);
17660          }
17661  
17662          if (CHIP_IS_E3(sc)) {
17663              REG_WR(sc, SC_PORT(sc) ?
17664                     NIG_REG_LLH1_MF_MODE :
17665                     NIG_REG_LLH_MF_MODE, IS_MF(sc));
17666          }
17667      }
17668      if (!CHIP_IS_E3(sc)) {
17669          REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17670      }
17671  
17672      if (!CHIP_IS_E1(sc)) {
17673          /* 0x2 disable mf_ov, 0x1 enable */
17674          REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17675                 (IS_MF_SD(sc) ? 0x1 : 0x2));
17676  
17677          if (!CHIP_IS_E1x(sc)) {
17678              val = 0;
17679              switch (sc->devinfo.mf_info.mf_mode) {
17680              case MULTI_FUNCTION_SD:
17681                  val = 1;
17682                  break;
17683              case MULTI_FUNCTION_SI:
17684              case MULTI_FUNCTION_AFEX:
17685                  val = 2;
17686                  break;
17687              }
17688  
17689              REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17690                          NIG_REG_LLH0_CLS_TYPE), val);
17691          }
17692          REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17693          REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17694          REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17695      }
17696  
17697      /* If SPIO5 is set to generate interrupts, enable it for this port */
17698      val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17699      if (val & MISC_SPIO_SPIO5) {
17700          uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17701                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17702          val = REG_RD(sc, reg_addr);
17703          val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17704          REG_WR(sc, reg_addr, val);
17705      }
17706  
17707      return (0);
17708  }
17709  
17710  static uint32_t
bxe_flr_clnup_reg_poll(struct bxe_softc * sc,uint32_t reg,uint32_t expected,uint32_t poll_count)17711  bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17712                         uint32_t         reg,
17713                         uint32_t         expected,
17714                         uint32_t         poll_count)
17715  {
17716      uint32_t cur_cnt = poll_count;
17717      uint32_t val;
17718  
17719      while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17720          DELAY(FLR_WAIT_INTERVAL);
17721      }
17722  
17723      return (val);
17724  }
17725  
17726  static int
bxe_flr_clnup_poll_hw_counter(struct bxe_softc * sc,uint32_t reg,char * msg,uint32_t poll_cnt)17727  bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17728                                uint32_t         reg,
17729                                char             *msg,
17730                                uint32_t         poll_cnt)
17731  {
17732      uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17733  
17734      if (val != 0) {
17735          BLOGE(sc, "%s usage count=%d\n", msg, val);
17736          return (1);
17737      }
17738  
17739      return (0);
17740  }
17741  
17742  /* Common routines with VF FLR cleanup */
17743  static uint32_t
bxe_flr_clnup_poll_count(struct bxe_softc * sc)17744  bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17745  {
17746      /* adjust polling timeout */
17747      if (CHIP_REV_IS_EMUL(sc)) {
17748          return (FLR_POLL_CNT * 2000);
17749      }
17750  
17751      if (CHIP_REV_IS_FPGA(sc)) {
17752          return (FLR_POLL_CNT * 120);
17753      }
17754  
17755      return (FLR_POLL_CNT);
17756  }
17757  
17758  static int
bxe_poll_hw_usage_counters(struct bxe_softc * sc,uint32_t poll_cnt)17759  bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17760                             uint32_t         poll_cnt)
17761  {
17762      /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17763      if (bxe_flr_clnup_poll_hw_counter(sc,
17764                                        CFC_REG_NUM_LCIDS_INSIDE_PF,
17765                                        "CFC PF usage counter timed out",
17766                                        poll_cnt)) {
17767          return (1);
17768      }
17769  
17770      /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17771      if (bxe_flr_clnup_poll_hw_counter(sc,
17772                                        DORQ_REG_PF_USAGE_CNT,
17773                                        "DQ PF usage counter timed out",
17774                                        poll_cnt)) {
17775          return (1);
17776      }
17777  
17778      /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17779      if (bxe_flr_clnup_poll_hw_counter(sc,
17780                                        QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17781                                        "QM PF usage counter timed out",
17782                                        poll_cnt)) {
17783          return (1);
17784      }
17785  
17786      /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17787      if (bxe_flr_clnup_poll_hw_counter(sc,
17788                                        TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17789                                        "Timers VNIC usage counter timed out",
17790                                        poll_cnt)) {
17791          return (1);
17792      }
17793  
17794      if (bxe_flr_clnup_poll_hw_counter(sc,
17795                                        TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17796                                        "Timers NUM_SCANS usage counter timed out",
17797                                        poll_cnt)) {
17798          return (1);
17799      }
17800  
17801      /* Wait DMAE PF usage counter to zero */
17802      if (bxe_flr_clnup_poll_hw_counter(sc,
17803                                        dmae_reg_go_c[INIT_DMAE_C(sc)],
17804                                        "DMAE dommand register timed out",
17805                                        poll_cnt)) {
17806          return (1);
17807      }
17808  
17809      return (0);
17810  }
17811  
17812  #define OP_GEN_PARAM(param)                                            \
17813      (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17814  #define OP_GEN_TYPE(type)                                           \
17815      (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17816  #define OP_GEN_AGG_VECT(index)                                             \
17817      (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17818  
17819  static int
bxe_send_final_clnup(struct bxe_softc * sc,uint8_t clnup_func,uint32_t poll_cnt)17820  bxe_send_final_clnup(struct bxe_softc *sc,
17821                       uint8_t          clnup_func,
17822                       uint32_t         poll_cnt)
17823  {
17824      uint32_t op_gen_command = 0;
17825      uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17826                            CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17827      int ret = 0;
17828  
17829      if (REG_RD(sc, comp_addr)) {
17830          BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17831          return (1);
17832      }
17833  
17834      op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17835      op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17836      op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17837      op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17838  
17839      BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17840      REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17841  
17842      if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17843          BLOGE(sc, "FW final cleanup did not succeed\n");
17844          BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17845                (REG_RD(sc, comp_addr)));
17846          bxe_panic(sc, ("FLR cleanup failed\n"));
17847          return (1);
17848      }
17849  
17850      /* Zero completion for nxt FLR */
17851      REG_WR(sc, comp_addr, 0);
17852  
17853      return (ret);
17854  }
17855  
17856  static void
bxe_pbf_pN_buf_flushed(struct bxe_softc * sc,struct pbf_pN_buf_regs * regs,uint32_t poll_count)17857  bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17858                         struct pbf_pN_buf_regs *regs,
17859                         uint32_t               poll_count)
17860  {
17861      uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17862      uint32_t cur_cnt = poll_count;
17863  
17864      crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17865      crd = crd_start = REG_RD(sc, regs->crd);
17866      init_crd = REG_RD(sc, regs->init_crd);
17867  
17868      BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17869      BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17870      BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17871  
17872      while ((crd != init_crd) &&
17873             ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17874              (init_crd - crd_start))) {
17875          if (cur_cnt--) {
17876              DELAY(FLR_WAIT_INTERVAL);
17877              crd = REG_RD(sc, regs->crd);
17878              crd_freed = REG_RD(sc, regs->crd_freed);
17879          } else {
17880              BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17881              BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17882              BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17883              break;
17884          }
17885      }
17886  
17887      BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17888            poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17889  }
17890  
17891  static void
bxe_pbf_pN_cmd_flushed(struct bxe_softc * sc,struct pbf_pN_cmd_regs * regs,uint32_t poll_count)17892  bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17893                         struct pbf_pN_cmd_regs *regs,
17894                         uint32_t               poll_count)
17895  {
17896      uint32_t occup, to_free, freed, freed_start;
17897      uint32_t cur_cnt = poll_count;
17898  
17899      occup = to_free = REG_RD(sc, regs->lines_occup);
17900      freed = freed_start = REG_RD(sc, regs->lines_freed);
17901  
17902      BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17903      BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17904  
17905      while (occup &&
17906             ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17907          if (cur_cnt--) {
17908              DELAY(FLR_WAIT_INTERVAL);
17909              occup = REG_RD(sc, regs->lines_occup);
17910              freed = REG_RD(sc, regs->lines_freed);
17911          } else {
17912              BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17913              BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17914              BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17915              break;
17916          }
17917      }
17918  
17919      BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17920            poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17921  }
17922  
17923  static void
bxe_tx_hw_flushed(struct bxe_softc * sc,uint32_t poll_count)17924  bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17925  {
17926      struct pbf_pN_cmd_regs cmd_regs[] = {
17927          {0, (CHIP_IS_E3B0(sc)) ?
17928              PBF_REG_TQ_OCCUPANCY_Q0 :
17929              PBF_REG_P0_TQ_OCCUPANCY,
17930              (CHIP_IS_E3B0(sc)) ?
17931              PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17932              PBF_REG_P0_TQ_LINES_FREED_CNT},
17933          {1, (CHIP_IS_E3B0(sc)) ?
17934              PBF_REG_TQ_OCCUPANCY_Q1 :
17935              PBF_REG_P1_TQ_OCCUPANCY,
17936              (CHIP_IS_E3B0(sc)) ?
17937              PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17938              PBF_REG_P1_TQ_LINES_FREED_CNT},
17939          {4, (CHIP_IS_E3B0(sc)) ?
17940              PBF_REG_TQ_OCCUPANCY_LB_Q :
17941              PBF_REG_P4_TQ_OCCUPANCY,
17942              (CHIP_IS_E3B0(sc)) ?
17943              PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17944              PBF_REG_P4_TQ_LINES_FREED_CNT}
17945      };
17946  
17947      struct pbf_pN_buf_regs buf_regs[] = {
17948          {0, (CHIP_IS_E3B0(sc)) ?
17949              PBF_REG_INIT_CRD_Q0 :
17950              PBF_REG_P0_INIT_CRD ,
17951              (CHIP_IS_E3B0(sc)) ?
17952              PBF_REG_CREDIT_Q0 :
17953              PBF_REG_P0_CREDIT,
17954              (CHIP_IS_E3B0(sc)) ?
17955              PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17956              PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17957          {1, (CHIP_IS_E3B0(sc)) ?
17958              PBF_REG_INIT_CRD_Q1 :
17959              PBF_REG_P1_INIT_CRD,
17960              (CHIP_IS_E3B0(sc)) ?
17961              PBF_REG_CREDIT_Q1 :
17962              PBF_REG_P1_CREDIT,
17963              (CHIP_IS_E3B0(sc)) ?
17964              PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17965              PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17966          {4, (CHIP_IS_E3B0(sc)) ?
17967              PBF_REG_INIT_CRD_LB_Q :
17968              PBF_REG_P4_INIT_CRD,
17969              (CHIP_IS_E3B0(sc)) ?
17970              PBF_REG_CREDIT_LB_Q :
17971              PBF_REG_P4_CREDIT,
17972              (CHIP_IS_E3B0(sc)) ?
17973              PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17974              PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17975      };
17976  
17977      int i;
17978  
17979      /* Verify the command queues are flushed P0, P1, P4 */
17980      for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
17981          bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
17982      }
17983  
17984      /* Verify the transmission buffers are flushed P0, P1, P4 */
17985      for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
17986          bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
17987      }
17988  }
17989  
17990  static void
bxe_hw_enable_status(struct bxe_softc * sc)17991  bxe_hw_enable_status(struct bxe_softc *sc)
17992  {
17993      uint32_t val;
17994  
17995      val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
17996      BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
17997  
17998      val = REG_RD(sc, PBF_REG_DISABLE_PF);
17999      BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
18000  
18001      val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
18002      BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
18003  
18004      val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
18005      BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
18006  
18007      val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
18008      BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
18009  
18010      val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
18011      BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
18012  
18013      val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
18014      BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
18015  
18016      val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
18017      BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
18018  }
18019  
18020  static int
bxe_pf_flr_clnup(struct bxe_softc * sc)18021  bxe_pf_flr_clnup(struct bxe_softc *sc)
18022  {
18023      uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
18024  
18025      BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
18026  
18027      /* Re-enable PF target read access */
18028      REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
18029  
18030      /* Poll HW usage counters */
18031      BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
18032      if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
18033          return (-1);
18034      }
18035  
18036      /* Zero the igu 'trailing edge' and 'leading edge' */
18037  
18038      /* Send the FW cleanup command */
18039      if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
18040          return (-1);
18041      }
18042  
18043      /* ATC cleanup */
18044  
18045      /* Verify TX hw is flushed */
18046      bxe_tx_hw_flushed(sc, poll_cnt);
18047  
18048      /* Wait 100ms (not adjusted according to platform) */
18049      DELAY(100000);
18050  
18051      /* Verify no pending pci transactions */
18052      if (bxe_is_pcie_pending(sc)) {
18053          BLOGE(sc, "PCIE Transactions still pending\n");
18054      }
18055  
18056      /* Debug */
18057      bxe_hw_enable_status(sc);
18058  
18059      /*
18060       * Master enable - Due to WB DMAE writes performed before this
18061       * register is re-initialized as part of the regular function init
18062       */
18063      REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18064  
18065      return (0);
18066  }
18067  
18068  static int
bxe_init_hw_func(struct bxe_softc * sc)18069  bxe_init_hw_func(struct bxe_softc *sc)
18070  {
18071      int port = SC_PORT(sc);
18072      int func = SC_FUNC(sc);
18073      int init_phase = PHASE_PF0 + func;
18074      struct ecore_ilt *ilt = sc->ilt;
18075      uint16_t cdu_ilt_start;
18076      uint32_t addr, val;
18077      uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
18078      int i, main_mem_width, rc;
18079  
18080      BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
18081  
18082      /* FLR cleanup */
18083      if (!CHIP_IS_E1x(sc)) {
18084          rc = bxe_pf_flr_clnup(sc);
18085          if (rc) {
18086              BLOGE(sc, "FLR cleanup failed!\n");
18087              // XXX bxe_fw_dump(sc);
18088              // XXX bxe_idle_chk(sc);
18089              return (rc);
18090          }
18091      }
18092  
18093      /* set MSI reconfigure capability */
18094      if (sc->devinfo.int_block == INT_BLOCK_HC) {
18095          addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
18096          val = REG_RD(sc, addr);
18097          val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
18098          REG_WR(sc, addr, val);
18099      }
18100  
18101      ecore_init_block(sc, BLOCK_PXP, init_phase);
18102      ecore_init_block(sc, BLOCK_PXP2, init_phase);
18103  
18104      ilt = sc->ilt;
18105      cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
18106  
18107      for (i = 0; i < L2_ILT_LINES(sc); i++) {
18108          ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
18109          ilt->lines[cdu_ilt_start + i].page_mapping =
18110              sc->context[i].vcxt_dma.paddr;
18111          ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
18112      }
18113      ecore_ilt_init_op(sc, INITOP_SET);
18114  
18115      /* Set NIC mode */
18116      REG_WR(sc, PRS_REG_NIC_MODE, 1);
18117      BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
18118  
18119      if (!CHIP_IS_E1x(sc)) {
18120          uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
18121  
18122          /* Turn on a single ISR mode in IGU if driver is going to use
18123           * INT#x or MSI
18124           */
18125          if (sc->interrupt_mode != INTR_MODE_MSIX) {
18126              pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
18127          }
18128  
18129          /*
18130           * Timers workaround bug: function init part.
18131           * Need to wait 20msec after initializing ILT,
18132           * needed to make sure there are no requests in
18133           * one of the PXP internal queues with "old" ILT addresses
18134           */
18135          DELAY(20000);
18136  
18137          /*
18138           * Master enable - Due to WB DMAE writes performed before this
18139           * register is re-initialized as part of the regular function
18140           * init
18141           */
18142          REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18143          /* Enable the function in IGU */
18144          REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
18145      }
18146  
18147      sc->dmae_ready = 1;
18148  
18149      ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
18150  
18151      if (!CHIP_IS_E1x(sc))
18152          REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
18153  
18154      ecore_init_block(sc, BLOCK_ATC, init_phase);
18155      ecore_init_block(sc, BLOCK_DMAE, init_phase);
18156      ecore_init_block(sc, BLOCK_NIG, init_phase);
18157      ecore_init_block(sc, BLOCK_SRC, init_phase);
18158      ecore_init_block(sc, BLOCK_MISC, init_phase);
18159      ecore_init_block(sc, BLOCK_TCM, init_phase);
18160      ecore_init_block(sc, BLOCK_UCM, init_phase);
18161      ecore_init_block(sc, BLOCK_CCM, init_phase);
18162      ecore_init_block(sc, BLOCK_XCM, init_phase);
18163      ecore_init_block(sc, BLOCK_TSEM, init_phase);
18164      ecore_init_block(sc, BLOCK_USEM, init_phase);
18165      ecore_init_block(sc, BLOCK_CSEM, init_phase);
18166      ecore_init_block(sc, BLOCK_XSEM, init_phase);
18167  
18168      if (!CHIP_IS_E1x(sc))
18169          REG_WR(sc, QM_REG_PF_EN, 1);
18170  
18171      if (!CHIP_IS_E1x(sc)) {
18172          REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18173          REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18174          REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18175          REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18176      }
18177      ecore_init_block(sc, BLOCK_QM, init_phase);
18178  
18179      ecore_init_block(sc, BLOCK_TM, init_phase);
18180      ecore_init_block(sc, BLOCK_DORQ, init_phase);
18181  
18182      bxe_iov_init_dq(sc);
18183  
18184      ecore_init_block(sc, BLOCK_BRB1, init_phase);
18185      ecore_init_block(sc, BLOCK_PRS, init_phase);
18186      ecore_init_block(sc, BLOCK_TSDM, init_phase);
18187      ecore_init_block(sc, BLOCK_CSDM, init_phase);
18188      ecore_init_block(sc, BLOCK_USDM, init_phase);
18189      ecore_init_block(sc, BLOCK_XSDM, init_phase);
18190      ecore_init_block(sc, BLOCK_UPB, init_phase);
18191      ecore_init_block(sc, BLOCK_XPB, init_phase);
18192      ecore_init_block(sc, BLOCK_PBF, init_phase);
18193      if (!CHIP_IS_E1x(sc))
18194          REG_WR(sc, PBF_REG_DISABLE_PF, 0);
18195  
18196      ecore_init_block(sc, BLOCK_CDU, init_phase);
18197  
18198      ecore_init_block(sc, BLOCK_CFC, init_phase);
18199  
18200      if (!CHIP_IS_E1x(sc))
18201          REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
18202  
18203      if (IS_MF(sc)) {
18204          REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
18205          REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
18206      }
18207  
18208      ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
18209  
18210      /* HC init per function */
18211      if (sc->devinfo.int_block == INT_BLOCK_HC) {
18212          if (CHIP_IS_E1H(sc)) {
18213              REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18214  
18215              REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18216              REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18217          }
18218          ecore_init_block(sc, BLOCK_HC, init_phase);
18219  
18220      } else {
18221          int num_segs, sb_idx, prod_offset;
18222  
18223          REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18224  
18225          if (!CHIP_IS_E1x(sc)) {
18226              REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18227              REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18228          }
18229  
18230          ecore_init_block(sc, BLOCK_IGU, init_phase);
18231  
18232          if (!CHIP_IS_E1x(sc)) {
18233              int dsb_idx = 0;
18234              /**
18235               * Producer memory:
18236               * E2 mode: address 0-135 match to the mapping memory;
18237               * 136 - PF0 default prod; 137 - PF1 default prod;
18238               * 138 - PF2 default prod; 139 - PF3 default prod;
18239               * 140 - PF0 attn prod;    141 - PF1 attn prod;
18240               * 142 - PF2 attn prod;    143 - PF3 attn prod;
18241               * 144-147 reserved.
18242               *
18243               * E1.5 mode - In backward compatible mode;
18244               * for non default SB; each even line in the memory
18245               * holds the U producer and each odd line hold
18246               * the C producer. The first 128 producers are for
18247               * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
18248               * producers are for the DSB for each PF.
18249               * Each PF has five segments: (the order inside each
18250               * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
18251               * 132-135 C prods; 136-139 X prods; 140-143 T prods;
18252               * 144-147 attn prods;
18253               */
18254              /* non-default-status-blocks */
18255              num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18256                  IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
18257              for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18258                  prod_offset = (sc->igu_base_sb + sb_idx) *
18259                      num_segs;
18260  
18261                  for (i = 0; i < num_segs; i++) {
18262                      addr = IGU_REG_PROD_CONS_MEMORY +
18263                              (prod_offset + i) * 4;
18264                      REG_WR(sc, addr, 0);
18265                  }
18266                  /* send consumer update with value 0 */
18267                  bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18268                             USTORM_ID, 0, IGU_INT_NOP, 1);
18269                  bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18270              }
18271  
18272              /* default-status-blocks */
18273              num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18274                  IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18275  
18276              if (CHIP_IS_MODE_4_PORT(sc))
18277                  dsb_idx = SC_FUNC(sc);
18278              else
18279                  dsb_idx = SC_VN(sc);
18280  
18281              prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18282                         IGU_BC_BASE_DSB_PROD + dsb_idx :
18283                         IGU_NORM_BASE_DSB_PROD + dsb_idx);
18284  
18285              /*
18286               * igu prods come in chunks of E1HVN_MAX (4) -
18287               * does not matters what is the current chip mode
18288               */
18289              for (i = 0; i < (num_segs * E1HVN_MAX);
18290                   i += E1HVN_MAX) {
18291                  addr = IGU_REG_PROD_CONS_MEMORY +
18292                              (prod_offset + i)*4;
18293                  REG_WR(sc, addr, 0);
18294              }
18295              /* send consumer update with 0 */
18296              if (CHIP_INT_MODE_IS_BC(sc)) {
18297                  bxe_ack_sb(sc, sc->igu_dsb_id,
18298                             USTORM_ID, 0, IGU_INT_NOP, 1);
18299                  bxe_ack_sb(sc, sc->igu_dsb_id,
18300                             CSTORM_ID, 0, IGU_INT_NOP, 1);
18301                  bxe_ack_sb(sc, sc->igu_dsb_id,
18302                             XSTORM_ID, 0, IGU_INT_NOP, 1);
18303                  bxe_ack_sb(sc, sc->igu_dsb_id,
18304                             TSTORM_ID, 0, IGU_INT_NOP, 1);
18305                  bxe_ack_sb(sc, sc->igu_dsb_id,
18306                             ATTENTION_ID, 0, IGU_INT_NOP, 1);
18307              } else {
18308                  bxe_ack_sb(sc, sc->igu_dsb_id,
18309                             USTORM_ID, 0, IGU_INT_NOP, 1);
18310                  bxe_ack_sb(sc, sc->igu_dsb_id,
18311                             ATTENTION_ID, 0, IGU_INT_NOP, 1);
18312              }
18313              bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18314  
18315              /* !!! these should become driver const once
18316                 rf-tool supports split-68 const */
18317              REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18318              REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18319              REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18320              REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18321              REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18322              REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18323          }
18324      }
18325  
18326      /* Reset PCIE errors for debug */
18327      REG_WR(sc, 0x2114, 0xffffffff);
18328      REG_WR(sc, 0x2120, 0xffffffff);
18329  
18330      if (CHIP_IS_E1x(sc)) {
18331          main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18332          main_mem_base = HC_REG_MAIN_MEMORY +
18333                  SC_PORT(sc) * (main_mem_size * 4);
18334          main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18335          main_mem_width = 8;
18336  
18337          val = REG_RD(sc, main_mem_prty_clr);
18338          if (val) {
18339              BLOGD(sc, DBG_LOAD,
18340                    "Parity errors in HC block during function init (0x%x)!\n",
18341                    val);
18342          }
18343  
18344          /* Clear "false" parity errors in MSI-X table */
18345          for (i = main_mem_base;
18346               i < main_mem_base + main_mem_size * 4;
18347               i += main_mem_width) {
18348              bxe_read_dmae(sc, i, main_mem_width / 4);
18349              bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18350                             i, main_mem_width / 4);
18351          }
18352          /* Clear HC parity attention */
18353          REG_RD(sc, main_mem_prty_clr);
18354      }
18355  
18356  #if 1
18357      /* Enable STORMs SP logging */
18358      REG_WR8(sc, BAR_USTRORM_INTMEM +
18359             USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18360      REG_WR8(sc, BAR_TSTRORM_INTMEM +
18361             TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18362      REG_WR8(sc, BAR_CSTRORM_INTMEM +
18363             CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18364      REG_WR8(sc, BAR_XSTRORM_INTMEM +
18365             XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18366  #endif
18367  
18368      elink_phy_probe(&sc->link_params);
18369  
18370      return (0);
18371  }
18372  
18373  static void
bxe_link_reset(struct bxe_softc * sc)18374  bxe_link_reset(struct bxe_softc *sc)
18375  {
18376      if (!BXE_NOMCP(sc)) {
18377  	bxe_acquire_phy_lock(sc);
18378          elink_lfa_reset(&sc->link_params, &sc->link_vars);
18379  	bxe_release_phy_lock(sc);
18380      } else {
18381          if (!CHIP_REV_IS_SLOW(sc)) {
18382              BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18383          }
18384      }
18385  }
18386  
18387  static void
bxe_reset_port(struct bxe_softc * sc)18388  bxe_reset_port(struct bxe_softc *sc)
18389  {
18390      int port = SC_PORT(sc);
18391      uint32_t val;
18392  
18393  	ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");
18394      /* reset physical Link */
18395      bxe_link_reset(sc);
18396  
18397      REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18398  
18399      /* Do not rcv packets to BRB */
18400      REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18401      /* Do not direct rcv packets that are not for MCP to the BRB */
18402      REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18403                 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18404  
18405      /* Configure AEU */
18406      REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18407  
18408      DELAY(100000);
18409  
18410      /* Check for BRB port occupancy */
18411      val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18412      if (val) {
18413          BLOGD(sc, DBG_LOAD,
18414                "BRB1 is not empty, %d blocks are occupied\n", val);
18415      }
18416  
18417      /* TODO: Close Doorbell port? */
18418  }
18419  
18420  static void
bxe_ilt_wr(struct bxe_softc * sc,uint32_t index,bus_addr_t addr)18421  bxe_ilt_wr(struct bxe_softc *sc,
18422             uint32_t         index,
18423             bus_addr_t       addr)
18424  {
18425      int reg;
18426      uint32_t wb_write[2];
18427  
18428      if (CHIP_IS_E1(sc)) {
18429          reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18430      } else {
18431          reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18432      }
18433  
18434      wb_write[0] = ONCHIP_ADDR1(addr);
18435      wb_write[1] = ONCHIP_ADDR2(addr);
18436      REG_WR_DMAE(sc, reg, wb_write, 2);
18437  }
18438  
18439  static void
bxe_clear_func_ilt(struct bxe_softc * sc,uint32_t func)18440  bxe_clear_func_ilt(struct bxe_softc *sc,
18441                     uint32_t         func)
18442  {
18443      uint32_t i, base = FUNC_ILT_BASE(func);
18444      for (i = base; i < base + ILT_PER_FUNC; i++) {
18445          bxe_ilt_wr(sc, i, 0);
18446      }
18447  }
18448  
18449  static void
bxe_reset_func(struct bxe_softc * sc)18450  bxe_reset_func(struct bxe_softc *sc)
18451  {
18452      struct bxe_fastpath *fp;
18453      int port = SC_PORT(sc);
18454      int func = SC_FUNC(sc);
18455      int i;
18456  
18457      /* Disable the function in the FW */
18458      REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18459      REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18460      REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18461      REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18462  
18463      /* FP SBs */
18464      FOR_EACH_ETH_QUEUE(sc, i) {
18465          fp = &sc->fp[i];
18466          REG_WR8(sc, BAR_CSTRORM_INTMEM +
18467                  CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18468                  SB_DISABLED);
18469      }
18470  
18471      /* SP SB */
18472      REG_WR8(sc, BAR_CSTRORM_INTMEM +
18473              CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18474              SB_DISABLED);
18475  
18476      for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18477          REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18478      }
18479  
18480      /* Configure IGU */
18481      if (sc->devinfo.int_block == INT_BLOCK_HC) {
18482          REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18483          REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18484      } else {
18485          REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18486          REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18487      }
18488  
18489      if (CNIC_LOADED(sc)) {
18490          /* Disable Timer scan */
18491          REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18492          /*
18493           * Wait for at least 10ms and up to 2 second for the timers
18494           * scan to complete
18495           */
18496          for (i = 0; i < 200; i++) {
18497              DELAY(10000);
18498              if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18499                  break;
18500          }
18501      }
18502  
18503      /* Clear ILT */
18504      bxe_clear_func_ilt(sc, func);
18505  
18506      /*
18507       * Timers workaround bug for E2: if this is vnic-3,
18508       * we need to set the entire ilt range for this timers.
18509       */
18510      if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18511          struct ilt_client_info ilt_cli;
18512          /* use dummy TM client */
18513          memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18514          ilt_cli.start = 0;
18515          ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18516          ilt_cli.client_num = ILT_CLIENT_TM;
18517  
18518          ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18519      }
18520  
18521      /* this assumes that reset_port() called before reset_func()*/
18522      if (!CHIP_IS_E1x(sc)) {
18523          bxe_pf_disable(sc);
18524      }
18525  
18526      sc->dmae_ready = 0;
18527  }
18528  
18529  static int
bxe_gunzip_init(struct bxe_softc * sc)18530  bxe_gunzip_init(struct bxe_softc *sc)
18531  {
18532      return (0);
18533  }
18534  
18535  static void
bxe_gunzip_end(struct bxe_softc * sc)18536  bxe_gunzip_end(struct bxe_softc *sc)
18537  {
18538      return;
18539  }
18540  
18541  static int
bxe_init_firmware(struct bxe_softc * sc)18542  bxe_init_firmware(struct bxe_softc *sc)
18543  {
18544      if (CHIP_IS_E1(sc)) {
18545          ecore_init_e1_firmware(sc);
18546          sc->iro_array = e1_iro_arr;
18547      } else if (CHIP_IS_E1H(sc)) {
18548          ecore_init_e1h_firmware(sc);
18549          sc->iro_array = e1h_iro_arr;
18550      } else if (!CHIP_IS_E1x(sc)) {
18551          ecore_init_e2_firmware(sc);
18552          sc->iro_array = e2_iro_arr;
18553      } else {
18554          BLOGE(sc, "Unsupported chip revision\n");
18555          return (-1);
18556      }
18557  
18558      return (0);
18559  }
18560  
18561  static void
bxe_release_firmware(struct bxe_softc * sc)18562  bxe_release_firmware(struct bxe_softc *sc)
18563  {
18564      /* Do nothing */
18565      return;
18566  }
18567  
18568  static int
ecore_gunzip(struct bxe_softc * sc,const uint8_t * zbuf,int len)18569  ecore_gunzip(struct bxe_softc *sc,
18570               const uint8_t    *zbuf,
18571               int              len)
18572  {
18573      /* XXX : Implement... */
18574      BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18575      return (FALSE);
18576  }
18577  
18578  static void
ecore_reg_wr_ind(struct bxe_softc * sc,uint32_t addr,uint32_t val)18579  ecore_reg_wr_ind(struct bxe_softc *sc,
18580                   uint32_t         addr,
18581                   uint32_t         val)
18582  {
18583      bxe_reg_wr_ind(sc, addr, val);
18584  }
18585  
18586  static void
ecore_write_dmae_phys_len(struct bxe_softc * sc,bus_addr_t phys_addr,uint32_t addr,uint32_t len)18587  ecore_write_dmae_phys_len(struct bxe_softc *sc,
18588                            bus_addr_t       phys_addr,
18589                            uint32_t         addr,
18590                            uint32_t         len)
18591  {
18592      bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18593  }
18594  
18595  void
ecore_storm_memset_struct(struct bxe_softc * sc,uint32_t addr,size_t size,uint32_t * data)18596  ecore_storm_memset_struct(struct bxe_softc *sc,
18597                            uint32_t         addr,
18598                            size_t           size,
18599                            uint32_t         *data)
18600  {
18601      uint8_t i;
18602      for (i = 0; i < size/4; i++) {
18603          REG_WR(sc, addr + (i * 4), data[i]);
18604      }
18605  }
18606  
18607  
18608  /*
18609   * character device - ioctl interface definitions
18610   */
18611  
18612  
18613  #include "bxe_dump.h"
18614  #include "bxe_ioctl.h"
18615  #include <sys/conf.h>
18616  
18617  static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18618                  struct thread *td);
18619  
18620  static struct cdevsw bxe_cdevsw = {
18621      .d_version = D_VERSION,
18622      .d_ioctl = bxe_eioctl,
18623      .d_name = "bxecnic",
18624  };
18625  
18626  #define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18627  
18628  
18629  #define DUMP_ALL_PRESETS        0x1FFF
18630  #define DUMP_MAX_PRESETS        13
18631  #define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18632  #define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18633  #define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18634  #define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18635  #define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18636  
18637  #define IS_REG_IN_PRESET(presets, idx)  \
18638                  ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18639  
18640  
18641  static int
bxe_get_preset_regs_len(struct bxe_softc * sc,uint32_t preset)18642  bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18643  {
18644      if (CHIP_IS_E1(sc))
18645          return dump_num_registers[0][preset-1];
18646      else if (CHIP_IS_E1H(sc))
18647          return dump_num_registers[1][preset-1];
18648      else if (CHIP_IS_E2(sc))
18649          return dump_num_registers[2][preset-1];
18650      else if (CHIP_IS_E3A0(sc))
18651          return dump_num_registers[3][preset-1];
18652      else if (CHIP_IS_E3B0(sc))
18653          return dump_num_registers[4][preset-1];
18654      else
18655          return 0;
18656  }
18657  
18658  static int
bxe_get_total_regs_len32(struct bxe_softc * sc)18659  bxe_get_total_regs_len32(struct bxe_softc *sc)
18660  {
18661      uint32_t preset_idx;
18662      int regdump_len32 = 0;
18663  
18664  
18665      /* Calculate the total preset regs length */
18666      for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18667          regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18668      }
18669  
18670      return regdump_len32;
18671  }
18672  
18673  static const uint32_t *
__bxe_get_page_addr_ar(struct bxe_softc * sc)18674  __bxe_get_page_addr_ar(struct bxe_softc *sc)
18675  {
18676      if (CHIP_IS_E2(sc))
18677          return page_vals_e2;
18678      else if (CHIP_IS_E3(sc))
18679          return page_vals_e3;
18680      else
18681          return NULL;
18682  }
18683  
18684  static uint32_t
__bxe_get_page_reg_num(struct bxe_softc * sc)18685  __bxe_get_page_reg_num(struct bxe_softc *sc)
18686  {
18687      if (CHIP_IS_E2(sc))
18688          return PAGE_MODE_VALUES_E2;
18689      else if (CHIP_IS_E3(sc))
18690          return PAGE_MODE_VALUES_E3;
18691      else
18692          return 0;
18693  }
18694  
18695  static const uint32_t *
__bxe_get_page_write_ar(struct bxe_softc * sc)18696  __bxe_get_page_write_ar(struct bxe_softc *sc)
18697  {
18698      if (CHIP_IS_E2(sc))
18699          return page_write_regs_e2;
18700      else if (CHIP_IS_E3(sc))
18701          return page_write_regs_e3;
18702      else
18703          return NULL;
18704  }
18705  
18706  static uint32_t
__bxe_get_page_write_num(struct bxe_softc * sc)18707  __bxe_get_page_write_num(struct bxe_softc *sc)
18708  {
18709      if (CHIP_IS_E2(sc))
18710          return PAGE_WRITE_REGS_E2;
18711      else if (CHIP_IS_E3(sc))
18712          return PAGE_WRITE_REGS_E3;
18713      else
18714          return 0;
18715  }
18716  
18717  static const struct reg_addr *
__bxe_get_page_read_ar(struct bxe_softc * sc)18718  __bxe_get_page_read_ar(struct bxe_softc *sc)
18719  {
18720      if (CHIP_IS_E2(sc))
18721          return page_read_regs_e2;
18722      else if (CHIP_IS_E3(sc))
18723          return page_read_regs_e3;
18724      else
18725          return NULL;
18726  }
18727  
18728  static uint32_t
__bxe_get_page_read_num(struct bxe_softc * sc)18729  __bxe_get_page_read_num(struct bxe_softc *sc)
18730  {
18731      if (CHIP_IS_E2(sc))
18732          return PAGE_READ_REGS_E2;
18733      else if (CHIP_IS_E3(sc))
18734          return PAGE_READ_REGS_E3;
18735      else
18736          return 0;
18737  }
18738  
18739  static bool
bxe_is_reg_in_chip(struct bxe_softc * sc,const struct reg_addr * reg_info)18740  bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18741  {
18742      if (CHIP_IS_E1(sc))
18743          return IS_E1_REG(reg_info->chips);
18744      else if (CHIP_IS_E1H(sc))
18745          return IS_E1H_REG(reg_info->chips);
18746      else if (CHIP_IS_E2(sc))
18747          return IS_E2_REG(reg_info->chips);
18748      else if (CHIP_IS_E3A0(sc))
18749          return IS_E3A0_REG(reg_info->chips);
18750      else if (CHIP_IS_E3B0(sc))
18751          return IS_E3B0_REG(reg_info->chips);
18752      else
18753          return 0;
18754  }
18755  
18756  static bool
bxe_is_wreg_in_chip(struct bxe_softc * sc,const struct wreg_addr * wreg_info)18757  bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18758  {
18759      if (CHIP_IS_E1(sc))
18760          return IS_E1_REG(wreg_info->chips);
18761      else if (CHIP_IS_E1H(sc))
18762          return IS_E1H_REG(wreg_info->chips);
18763      else if (CHIP_IS_E2(sc))
18764          return IS_E2_REG(wreg_info->chips);
18765      else if (CHIP_IS_E3A0(sc))
18766          return IS_E3A0_REG(wreg_info->chips);
18767      else if (CHIP_IS_E3B0(sc))
18768          return IS_E3B0_REG(wreg_info->chips);
18769      else
18770          return 0;
18771  }
18772  
18773  /**
18774   * bxe_read_pages_regs - read "paged" registers
18775   *
18776   * @bp          device handle
18777   * @p           output buffer
18778   *
18779   * Reads "paged" memories: memories that may only be read by first writing to a
18780   * specific address ("write address") and then reading from a specific address
18781   * ("read address"). There may be more than one write address per "page" and
18782   * more than one read address per write address.
18783   */
18784  static void
bxe_read_pages_regs(struct bxe_softc * sc,uint32_t * p,uint32_t preset)18785  bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18786  {
18787      uint32_t i, j, k, n;
18788  
18789      /* addresses of the paged registers */
18790      const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18791      /* number of paged registers */
18792      int num_pages = __bxe_get_page_reg_num(sc);
18793      /* write addresses */
18794      const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18795      /* number of write addresses */
18796      int write_num = __bxe_get_page_write_num(sc);
18797      /* read addresses info */
18798      const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18799      /* number of read addresses */
18800      int read_num = __bxe_get_page_read_num(sc);
18801      uint32_t addr, size;
18802  
18803      for (i = 0; i < num_pages; i++) {
18804          for (j = 0; j < write_num; j++) {
18805              REG_WR(sc, write_addr[j], page_addr[i]);
18806  
18807              for (k = 0; k < read_num; k++) {
18808                  if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18809                      size = read_addr[k].size;
18810                      for (n = 0; n < size; n++) {
18811                          addr = read_addr[k].addr + n*4;
18812                          *p++ = REG_RD(sc, addr);
18813                      }
18814                  }
18815              }
18816          }
18817      }
18818      return;
18819  }
18820  
18821  
18822  static int
bxe_get_preset_regs(struct bxe_softc * sc,uint32_t * p,uint32_t preset)18823  bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18824  {
18825      uint32_t i, j, addr;
18826      const struct wreg_addr *wreg_addr_p = NULL;
18827  
18828      if (CHIP_IS_E1(sc))
18829          wreg_addr_p = &wreg_addr_e1;
18830      else if (CHIP_IS_E1H(sc))
18831          wreg_addr_p = &wreg_addr_e1h;
18832      else if (CHIP_IS_E2(sc))
18833          wreg_addr_p = &wreg_addr_e2;
18834      else if (CHIP_IS_E3A0(sc))
18835          wreg_addr_p = &wreg_addr_e3;
18836      else if (CHIP_IS_E3B0(sc))
18837          wreg_addr_p = &wreg_addr_e3b0;
18838      else
18839          return (-1);
18840  
18841      /* Read the idle_chk registers */
18842      for (i = 0; i < IDLE_REGS_COUNT; i++) {
18843          if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18844              IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18845              for (j = 0; j < idle_reg_addrs[i].size; j++)
18846                  *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18847          }
18848      }
18849  
18850      /* Read the regular registers */
18851      for (i = 0; i < REGS_COUNT; i++) {
18852          if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18853              IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18854              for (j = 0; j < reg_addrs[i].size; j++)
18855                  *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18856          }
18857      }
18858  
18859      /* Read the CAM registers */
18860      if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18861          IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18862          for (i = 0; i < wreg_addr_p->size; i++) {
18863              *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18864  
18865              /* In case of wreg_addr register, read additional
18866                 registers from read_regs array
18867               */
18868              for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18869                  addr = *(wreg_addr_p->read_regs);
18870                  *p++ = REG_RD(sc, addr + j*4);
18871              }
18872          }
18873      }
18874  
18875      /* Paged registers are supported in E2 & E3 only */
18876      if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18877          /* Read "paged" registers */
18878          bxe_read_pages_regs(sc, p, preset);
18879      }
18880  
18881      return 0;
18882  }
18883  
18884  int
bxe_grc_dump(struct bxe_softc * sc)18885  bxe_grc_dump(struct bxe_softc *sc)
18886  {
18887      int rval = 0;
18888      uint32_t preset_idx;
18889      uint8_t *buf;
18890      uint32_t size;
18891      struct  dump_header *d_hdr;
18892      uint32_t i;
18893      uint32_t reg_val;
18894      uint32_t reg_addr;
18895      uint32_t cmd_offset;
18896      struct ecore_ilt *ilt = SC_ILT(sc);
18897      struct bxe_fastpath *fp;
18898      struct ilt_client_info *ilt_cli;
18899      int grc_dump_size;
18900  
18901  
18902      if (sc->grcdump_done || sc->grcdump_started)
18903  	return (rval);
18904  
18905      sc->grcdump_started = 1;
18906      BLOGI(sc, "Started collecting grcdump\n");
18907  
18908      grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18909                  sizeof(struct  dump_header);
18910  
18911      sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18912  
18913      if (sc->grc_dump == NULL) {
18914          BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
18915          return(ENOMEM);
18916      }
18917  
18918  
18919  
18920      /* Disable parity attentions as long as following dump may
18921       * cause false alarms by reading never written registers. We
18922       * will re-enable parity attentions right after the dump.
18923       */
18924  
18925      /* Disable parity on path 0 */
18926      bxe_pretend_func(sc, 0);
18927  
18928      ecore_disable_blocks_parity(sc);
18929  
18930      /* Disable parity on path 1 */
18931      bxe_pretend_func(sc, 1);
18932      ecore_disable_blocks_parity(sc);
18933  
18934      /* Return to current function */
18935      bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18936  
18937      buf = sc->grc_dump;
18938      d_hdr = sc->grc_dump;
18939  
18940      d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18941      d_hdr->version = BNX2X_DUMP_VERSION;
18942      d_hdr->preset = DUMP_ALL_PRESETS;
18943  
18944      if (CHIP_IS_E1(sc)) {
18945          d_hdr->dump_meta_data = DUMP_CHIP_E1;
18946      } else if (CHIP_IS_E1H(sc)) {
18947          d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18948      } else if (CHIP_IS_E2(sc)) {
18949          d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18950                  (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18951      } else if (CHIP_IS_E3A0(sc)) {
18952          d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18953                  (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18954      } else if (CHIP_IS_E3B0(sc)) {
18955          d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18956                  (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18957      }
18958  
18959      buf += sizeof(struct  dump_header);
18960  
18961      for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18962  
18963          /* Skip presets with IOR */
18964          if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18965              (preset_idx == 11))
18966              continue;
18967  
18968          rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18969  
18970  	if (rval)
18971              break;
18972  
18973          size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
18974  
18975          buf += size;
18976      }
18977  
18978      bxe_pretend_func(sc, 0);
18979      ecore_clear_blocks_parity(sc);
18980      ecore_enable_blocks_parity(sc);
18981  
18982      bxe_pretend_func(sc, 1);
18983      ecore_clear_blocks_parity(sc);
18984      ecore_enable_blocks_parity(sc);
18985  
18986      /* Return to current function */
18987      bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18988  
18989  
18990  
18991      if(sc->state == BXE_STATE_OPEN) {
18992          if(sc->fw_stats_req  != NULL) {
18993      		BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
18994          			(uintmax_t)sc->fw_stats_req_mapping,
18995          			(uintmax_t)sc->fw_stats_data_mapping,
18996          			sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
18997  		}
18998  		if(sc->def_sb != NULL) {
18999  			BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
19000          			(void *)sc->def_sb_dma.paddr, sc->def_sb,
19001          			sizeof(struct host_sp_status_block));
19002  		}
19003  		if(sc->eq_dma.vaddr != NULL) {
19004      		BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
19005          			(uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
19006  		}
19007  		if(sc->sp_dma.vaddr != NULL) {
19008      		BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
19009          			(uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
19010          			sizeof(struct bxe_slowpath));
19011  		}
19012  		if(sc->spq_dma.vaddr != NULL) {
19013      		BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
19014          			(uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
19015  		}
19016  		if(sc->gz_buf_dma.vaddr != NULL) {
19017      		BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
19018          			(uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
19019          			FW_BUF_SIZE);
19020  		}
19021      	for (i = 0; i < sc->num_queues; i++) {
19022          	fp = &sc->fp[i];
19023  			if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
19024                          fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
19025                          fp->rx_sge_dma.vaddr != NULL) {
19026  
19027  				BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
19028              			(uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
19029              			sizeof(union bxe_host_hc_status_block));
19030  				BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19031              			(uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
19032              			(BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
19033          		BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19034              			(uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
19035              			(BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
19036          		BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
19037              			(uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
19038              			(BCM_PAGE_SIZE * RCQ_NUM_PAGES));
19039          		BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19040              			(uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
19041              			(BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
19042      		}
19043  		}
19044  		if(ilt != NULL ) {
19045      		ilt_cli = &ilt->clients[1];
19046  			if(ilt->lines != NULL) {
19047      		for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
19048          		BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
19049              			(uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
19050              			((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
19051      		}
19052  			}
19053  		}
19054  
19055  
19056      	cmd_offset = DMAE_REG_CMD_MEM;
19057      	for (i = 0; i < 224; i++) {
19058          	reg_addr = (cmd_offset +(i * 4));
19059          	reg_val = REG_RD(sc, reg_addr);
19060          	BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i,
19061              			reg_addr, reg_val);
19062      	}
19063  	}
19064  
19065      BLOGI(sc, "Collection of grcdump done\n");
19066      sc->grcdump_done = 1;
19067      return(rval);
19068  }
19069  
19070  static int
bxe_add_cdev(struct bxe_softc * sc)19071  bxe_add_cdev(struct bxe_softc *sc)
19072  {
19073      sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
19074  
19075      if (sc->eeprom == NULL) {
19076          BLOGW(sc, "Unable to alloc for eeprom size buffer\n");
19077          return (-1);
19078      }
19079  
19080      sc->ioctl_dev = make_dev(&bxe_cdevsw,
19081                              if_getdunit(sc->ifp),
19082                              UID_ROOT,
19083                              GID_WHEEL,
19084                              0600,
19085                              "%s",
19086                              if_name(sc->ifp));
19087  
19088      if (sc->ioctl_dev == NULL) {
19089          free(sc->eeprom, M_DEVBUF);
19090          sc->eeprom = NULL;
19091          return (-1);
19092      }
19093  
19094      sc->ioctl_dev->si_drv1 = sc;
19095  
19096      return (0);
19097  }
19098  
19099  static void
bxe_del_cdev(struct bxe_softc * sc)19100  bxe_del_cdev(struct bxe_softc *sc)
19101  {
19102      if (sc->ioctl_dev != NULL)
19103          destroy_dev(sc->ioctl_dev);
19104  
19105      if (sc->eeprom != NULL) {
19106          free(sc->eeprom, M_DEVBUF);
19107          sc->eeprom = NULL;
19108      }
19109      sc->ioctl_dev = NULL;
19110  
19111      return;
19112  }
19113  
bxe_is_nvram_accessible(struct bxe_softc * sc)19114  static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
19115  {
19116  
19117      if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
19118          return FALSE;
19119  
19120      return TRUE;
19121  }
19122  
19123  
19124  static int
bxe_wr_eeprom(struct bxe_softc * sc,void * data,uint32_t offset,uint32_t len)19125  bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19126  {
19127      int rval = 0;
19128  
19129      if(!bxe_is_nvram_accessible(sc)) {
19130          BLOGW(sc, "Cannot access eeprom when interface is down\n");
19131          return (-EAGAIN);
19132      }
19133      rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);
19134  
19135  
19136     return (rval);
19137  }
19138  
19139  static int
bxe_rd_eeprom(struct bxe_softc * sc,void * data,uint32_t offset,uint32_t len)19140  bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19141  {
19142      int rval = 0;
19143  
19144      if(!bxe_is_nvram_accessible(sc)) {
19145          BLOGW(sc, "Cannot access eeprom when interface is down\n");
19146          return (-EAGAIN);
19147      }
19148      rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);
19149  
19150     return (rval);
19151  }
19152  
19153  static int
bxe_eeprom_rd_wr(struct bxe_softc * sc,bxe_eeprom_t * eeprom)19154  bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
19155  {
19156      int rval = 0;
19157  
19158      switch (eeprom->eeprom_cmd) {
19159  
19160      case BXE_EEPROM_CMD_SET_EEPROM:
19161  
19162          rval = copyin(eeprom->eeprom_data, sc->eeprom,
19163                         eeprom->eeprom_data_len);
19164  
19165          if (rval)
19166              break;
19167  
19168          rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19169                         eeprom->eeprom_data_len);
19170          break;
19171  
19172      case BXE_EEPROM_CMD_GET_EEPROM:
19173  
19174          rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19175                         eeprom->eeprom_data_len);
19176  
19177          if (rval) {
19178              break;
19179          }
19180  
19181          rval = copyout(sc->eeprom, eeprom->eeprom_data,
19182                         eeprom->eeprom_data_len);
19183          break;
19184  
19185      default:
19186              rval = EINVAL;
19187              break;
19188      }
19189  
19190      if (rval) {
19191          BLOGW(sc, "ioctl cmd %d  failed rval %d\n", eeprom->eeprom_cmd, rval);
19192      }
19193  
19194      return (rval);
19195  }
19196  
19197  static int
bxe_get_settings(struct bxe_softc * sc,bxe_dev_setting_t * dev_p)19198  bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
19199  {
19200      uint32_t ext_phy_config;
19201      int port = SC_PORT(sc);
19202      int cfg_idx = bxe_get_link_cfg_idx(sc);
19203  
19204      dev_p->supported = sc->port.supported[cfg_idx] |
19205              (sc->port.supported[cfg_idx ^ 1] &
19206              (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
19207      dev_p->advertising = sc->port.advertising[cfg_idx];
19208      if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
19209          ELINK_ETH_PHY_SFP_1G_FIBER) {
19210          dev_p->supported = ~(ELINK_SUPPORTED_10000baseT_Full);
19211          dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
19212      }
19213      if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
19214          !(sc->flags & BXE_MF_FUNC_DIS)) {
19215          dev_p->duplex = sc->link_vars.duplex;
19216          if (IS_MF(sc) && !BXE_NOMCP(sc))
19217              dev_p->speed = bxe_get_mf_speed(sc);
19218          else
19219              dev_p->speed = sc->link_vars.line_speed;
19220      } else {
19221          dev_p->duplex = DUPLEX_UNKNOWN;
19222          dev_p->speed = SPEED_UNKNOWN;
19223      }
19224  
19225      dev_p->port = bxe_media_detect(sc);
19226  
19227      ext_phy_config = SHMEM_RD(sc,
19228                           dev_info.port_hw_config[port].external_phy_config);
19229      if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
19230          PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
19231          dev_p->phy_address =  sc->port.phy_addr;
19232      else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19233              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
19234          ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19235              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
19236          dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
19237      else
19238          dev_p->phy_address = 0;
19239  
19240      if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
19241          dev_p->autoneg = AUTONEG_ENABLE;
19242      else
19243         dev_p->autoneg = AUTONEG_DISABLE;
19244  
19245  
19246      return 0;
19247  }
19248  
19249  static int
bxe_eioctl(struct cdev * dev,u_long cmd,caddr_t data,int fflag,struct thread * td)19250  bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19251          struct thread *td)
19252  {
19253      struct bxe_softc    *sc;
19254      int                 rval = 0;
19255      bxe_grcdump_t       *dump = NULL;
19256      int grc_dump_size;
19257      bxe_drvinfo_t   *drv_infop = NULL;
19258      bxe_dev_setting_t  *dev_p;
19259      bxe_dev_setting_t  dev_set;
19260      bxe_get_regs_t  *reg_p;
19261      bxe_reg_rdw_t *reg_rdw_p;
19262      bxe_pcicfg_rdw_t *cfg_rdw_p;
19263      bxe_perm_mac_addr_t *mac_addr_p;
19264  
19265  
19266      if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19267          return ENXIO;
19268  
19269      dump = (bxe_grcdump_t *)data;
19270  
19271      switch(cmd) {
19272  
19273          case BXE_GRC_DUMP_SIZE:
19274              dump->pci_func = sc->pcie_func;
19275              dump->grcdump_size =
19276                  (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19277                       sizeof(struct  dump_header);
19278              break;
19279  
19280          case BXE_GRC_DUMP:
19281  
19282              grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19283                                  sizeof(struct  dump_header);
19284              if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
19285                  (dump->grcdump_size < grc_dump_size)) {
19286                  rval = EINVAL;
19287                  break;
19288              }
19289  
19290              if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
19291                  (!sc->grcdump_started)) {
19292                  rval =  bxe_grc_dump(sc);
19293              }
19294  
19295              if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
19296                  (sc->grc_dump != NULL))  {
19297                  dump->grcdump_dwords = grc_dump_size >> 2;
19298                  rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19299                  free(sc->grc_dump, M_DEVBUF);
19300                  sc->grc_dump = NULL;
19301                  sc->grcdump_started = 0;
19302                  sc->grcdump_done = 0;
19303              }
19304  
19305              break;
19306  
19307          case BXE_DRV_INFO:
19308              drv_infop = (bxe_drvinfo_t *)data;
19309              snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
19310              snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
19311                  BXE_DRIVER_VERSION);
19312              snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
19313                  sc->devinfo.bc_ver_str);
19314              snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
19315                  "%s", sc->fw_ver_str);
19316              drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
19317              drv_infop->reg_dump_len =
19318                  (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
19319                      + sizeof(struct  dump_header);
19320              snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
19321                  sc->pcie_bus, sc->pcie_device, sc->pcie_func);
19322              break;
19323  
19324          case BXE_DEV_SETTING:
19325              dev_p = (bxe_dev_setting_t *)data;
19326              bxe_get_settings(sc, &dev_set);
19327              dev_p->supported = dev_set.supported;
19328              dev_p->advertising = dev_set.advertising;
19329              dev_p->speed = dev_set.speed;
19330              dev_p->duplex = dev_set.duplex;
19331              dev_p->port = dev_set.port;
19332              dev_p->phy_address = dev_set.phy_address;
19333              dev_p->autoneg = dev_set.autoneg;
19334  
19335              break;
19336  
19337          case BXE_GET_REGS:
19338  
19339              reg_p = (bxe_get_regs_t *)data;
19340              grc_dump_size = reg_p->reg_buf_len;
19341  
19342              if((!sc->grcdump_done) && (!sc->grcdump_started)) {
19343                  bxe_grc_dump(sc);
19344              }
19345              if((sc->grcdump_done) && (sc->grcdump_started) &&
19346                  (sc->grc_dump != NULL))  {
19347                  rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
19348                  free(sc->grc_dump, M_DEVBUF);
19349                  sc->grc_dump = NULL;
19350                  sc->grcdump_started = 0;
19351                  sc->grcdump_done = 0;
19352              }
19353  
19354              break;
19355  
19356          case BXE_RDW_REG:
19357              reg_rdw_p = (bxe_reg_rdw_t *)data;
19358              if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
19359                  (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19360                  reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
19361  
19362              if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
19363                  (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19364                  REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
19365  
19366              break;
19367  
19368          case BXE_RDW_PCICFG:
19369              cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
19370              if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
19371  
19372                  cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
19373                                           cfg_rdw_p->cfg_width);
19374  
19375              } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
19376                  pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
19377                              cfg_rdw_p->cfg_width);
19378              } else {
19379                  BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
19380              }
19381              break;
19382  
19383          case BXE_MAC_ADDR:
19384              mac_addr_p = (bxe_perm_mac_addr_t *)data;
19385              snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
19386                  sc->mac_addr_str);
19387              break;
19388  
19389          case BXE_EEPROM:
19390              rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
19391              break;
19392  
19393  
19394          default:
19395              break;
19396      }
19397  
19398      return (rval);
19399  }
19400  
19401  #ifdef DEBUGNET
19402  static void
bxe_debugnet_init(if_t ifp,int * nrxr,int * ncl,int * clsize)19403  bxe_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
19404  {
19405  	struct bxe_softc *sc;
19406  
19407  	sc = if_getsoftc(ifp);
19408  	BXE_CORE_LOCK(sc);
19409  	*nrxr = sc->num_queues;
19410  	*ncl = DEBUGNET_MAX_IN_FLIGHT;
19411  	*clsize = sc->fp[0].mbuf_alloc_size;
19412  	BXE_CORE_UNLOCK(sc);
19413  }
19414  
19415  static void
bxe_debugnet_event(if_t ifp __unused,enum debugnet_ev event __unused)19416  bxe_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused)
19417  {
19418  }
19419  
19420  static int
bxe_debugnet_transmit(if_t ifp,struct mbuf * m)19421  bxe_debugnet_transmit(if_t ifp, struct mbuf *m)
19422  {
19423  	struct bxe_softc *sc;
19424  	int error;
19425  
19426  	sc = if_getsoftc(ifp);
19427  	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
19428  	    IFF_DRV_RUNNING || !sc->link_vars.link_up)
19429  		return (ENOENT);
19430  
19431  	error = bxe_tx_encap(&sc->fp[0], &m);
19432  	if (error != 0 && m != NULL)
19433  		m_freem(m);
19434  	return (error);
19435  }
19436  
19437  static int
bxe_debugnet_poll(if_t ifp,int count)19438  bxe_debugnet_poll(if_t ifp, int count)
19439  {
19440  	struct bxe_softc *sc;
19441  	int i;
19442  
19443  	sc = if_getsoftc(ifp);
19444  	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
19445  	    !sc->link_vars.link_up)
19446  		return (ENOENT);
19447  
19448  	for (i = 0; i < sc->num_queues; i++)
19449  		(void)bxe_rxeof(sc, &sc->fp[i]);
19450  	(void)bxe_txeof(sc, &sc->fp[0]);
19451  	return (0);
19452  }
19453  #endif /* DEBUGNET */
19454