xref: /freebsd/sys/dev/bxe/bxe_stats.c (revision 38f0b757fd84d17d0fc24739a7cda160c4516d81)
1 /*-
2  * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
3  *
4  * Eric Davis        <edavis@broadcom.com>
5  * David Christensen <davidch@broadcom.com>
6  * Gary Zambrano     <zambrano@broadcom.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written consent.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "bxe.h"
38 #include "bxe_stats.h"
39 
/*
 * Number of value bits in a 'long'.  Keyed off the compiler data-model
 * macro __LP64__ rather than a CPU-specific macro so that every ILP32
 * target (not only i386) gets the correct 32-bit value; amd64 and other
 * LP64 targets get 64 exactly as before.
 */
#ifdef __LP64__
#define BITS_PER_LONG 64
#else
#define BITS_PER_LONG 32
#endif
45 
/*
 * Collapse a {hi, lo} pair of consecutive 32-bit statistic words into a
 * single 'long' counter value.  'hiref' points at the high word; the low
 * word immediately follows it.  On 64-bit builds both halves are merged
 * via HILO_U64; on 32-bit builds only the low word fits in a 'long' and
 * is returned unchanged.
 */
static inline long
bxe_hilo(uint32_t *hiref)
{
#if (BITS_PER_LONG == 64)
    return (HILO_U64(hiref[0], hiref[1]));
#else
    return (hiref[1]);
#endif
}
57 
58 static inline uint16_t
59 bxe_get_port_stats_dma_len(struct bxe_softc *sc)
60 {
61     uint16_t res = 0;
62     uint32_t size;
63 
64     /* 'newest' convention - shmem2 contains the size of the port stats */
65     if (SHMEM2_HAS(sc, sizeof_port_stats)) {
66         size = SHMEM2_RD(sc, sizeof_port_stats);
67         if (size) {
68             res = size;
69         }
70 
71         /* prevent newer BC from causing buffer overflow */
72         if (res > sizeof(struct host_port_stats)) {
73             res = sizeof(struct host_port_stats);
74         }
75     }
76 
77     /*
78      * Older convention - all BCs support the port stats fields up until
79      * the 'not_used' field
80      */
81     if (!res) {
82         res = (offsetof(struct host_port_stats, not_used) + 4);
83 
84         /* if PFC stats are supported by the MFW, DMA them as well */
85         if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
86             res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
87                     offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
88         }
89     }
90 
91     res >>= 2;
92 
93     DBASSERT(sc, !(res > 2 * DMAE_LEN32_RD_MAX), ("big stats dmae length\n"));
94     return (res);
95 }
96 
97 /*
98  * Init service functions
99  */
100 
/*
 * Debug-dump the firmware statistics request that is about to be posted:
 * first the request header, then each query entry.  Output is emitted
 * through BLOGD and therefore only appears when the DBG_STATS debug
 * level is enabled.
 */
static void
bxe_dp_stats(struct bxe_softc *sc)
{
    int i;

    /* request header: sequence counter and DMA address of the counters */
    BLOGD(sc, DBG_STATS,
          "dumping stats:\n"
          "  fw_stats_req\n"
          "    hdr\n"
          "      cmd_num %d\n"
          "      reserved0 %d\n"
          "      drv_stats_counter %d\n"
          "      reserved1 %d\n"
          "      stats_counters_addrs %x %x\n",
          sc->fw_stats_req->hdr.cmd_num,
          sc->fw_stats_req->hdr.reserved0,
          sc->fw_stats_req->hdr.drv_stats_counter,
          sc->fw_stats_req->hdr.reserved1,
          sc->fw_stats_req->hdr.stats_counters_addrs.hi,
          sc->fw_stats_req->hdr.stats_counters_addrs.lo);

    /* hdr.cmd_num query entries follow the header */
    for (i = 0; i < sc->fw_stats_req->hdr.cmd_num; i++) {
        BLOGD(sc, DBG_STATS,
              "query[%d]\n"
              "  kind %d\n"
              "  index %d\n"
              "  funcID %d\n"
              "  reserved %d\n"
              "  address %x %x\n",
              i,
              sc->fw_stats_req->query[i].kind,
              sc->fw_stats_req->query[i].index,
              sc->fw_stats_req->query[i].funcID,
              sc->fw_stats_req->query[i].reserved,
              sc->fw_stats_req->query[i].address.hi,
              sc->fw_stats_req->query[i].address.lo);
    }
}
139 
140 /*
141  * Post the next statistics ramrod. Protect it with the lock in
142  * order to ensure the strict order between statistics ramrods
143  * (each ramrod has a sequence number passed in a
144  * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be
145  * sent in order).
146  */
static void
bxe_storm_stats_post(struct bxe_softc *sc)
{
    int rc;

    /* cheap unlocked check first; re-validated under the stats lock */
    if (!sc->stats_pending) {
        BXE_STATS_LOCK(sc);

        /* lost the race: another thread already has a ramrod in flight */
        if (sc->stats_pending) {
            BXE_STATS_UNLOCK(sc);
            return;
        }

        /* stamp the request with the next sequence number (FW expects LE) */
        sc->fw_stats_req->hdr.drv_stats_counter =
            htole16(sc->stats_counter++);

        BLOGD(sc, DBG_STATS,
              "sending statistics ramrod %d\n",
              le16toh(sc->fw_stats_req->hdr.drv_stats_counter));

        /* adjust the ramrod to include VF queues statistics */
        // XXX bxe_iov_adjust_stats_req(sc);

        bxe_dp_stats(sc);

        /* send FW stats ramrod */
        rc = bxe_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
                         U64_HI(sc->fw_stats_req_mapping),
                         U64_LO(sc->fw_stats_req_mapping),
                         NONE_CONNECTION_TYPE);
        if (rc == 0) {
            /* mark in-flight; cleared when the completion arrives */
            sc->stats_pending = 1;
        }

        BXE_STATS_UNLOCK(sc);
    }
}
184 
/*
 * Fire the hardware (DMAE) statistics transfers.  If commands were
 * staged in the BXE_SP dmae[] "executer" array, build a loader command
 * that copies staged command 0 into the DMAE command memory and start
 * it; completion of each command triggers the next via the GRC "go"
 * registers, and the final command writes DMAE_COMP_VAL into the
 * stats_comp word polled by bxe_stats_comp().  With no staged commands,
 * only the single pre-built function-stats command is posted.
 */
static void
bxe_hw_stats_post(struct bxe_softc *sc)
{
    struct dmae_command *dmae = &sc->stats_dmae;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);
    int loader_idx;
    uint32_t opcode;

    /* pre-mark completion; emulation/FPGA (slow) chips do no real DMAE */
    *stats_comp = DMAE_COMP_VAL;
    if (CHIP_REV_IS_SLOW(sc)) {
        return;
    }

    /* Update MCP's statistics if possible */
    if (sc->func_stx) {
        memcpy(BXE_SP(sc, func_stats), &sc->func_stats,
               sizeof(sc->func_stats));
    }

    /* loader */
    if (sc->executer_idx) {
        loader_idx = PMF_DMAE_C(sc);
        opcode =  bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                                  TRUE, DMAE_COMP_GRC);
        opcode = bxe_dmae_opcode_clr_src_reset(opcode);

        /* copy staged command 0 into the next DMAE command-memory slot */
        memset(dmae, 0, sizeof(struct dmae_command));
        dmae->opcode = opcode;
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, dmae[0]));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, dmae[0]));
        dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
                              sizeof(struct dmae_command) *
                              (loader_idx + 1)) >> 2);
        dmae->dst_addr_hi = 0;
        dmae->len = sizeof(struct dmae_command) >> 2;
        if (CHIP_IS_E1(sc)) {
            /* on E1, copy one dword less of the command image */
            dmae->len--;
        }
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        *stats_comp = 0;
        bxe_post_dmae(sc, dmae, loader_idx);
    } else if (sc->func_stx) {
        *stats_comp = 0;
        bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
    }
}
234 
/*
 * Poll for DMAE statistics completion: wait for the hardware to write
 * DMAE_COMP_VAL into the stats_comp scratchpad word armed by
 * bxe_hw_stats_post(), giving up after ~10ms (10 polls x 1ms DELAY).
 * Always returns 1, even on timeout -- the timeout is only logged.
 */
static int
bxe_stats_comp(struct bxe_softc *sc)
{
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);
    int cnt = 10;

    /*
     * NOTE(review): stats_comp is not volatile-qualified; this relies on
     * the DELAY() call acting as a compiler barrier so the word is
     * re-read on each iteration -- confirm before enabling LTO here.
     */
    while (*stats_comp != DMAE_COMP_VAL) {
        if (!cnt) {
            BLOGE(sc, "Timeout waiting for stats finished\n");
            break;
        }

        cnt--;
        DELAY(1000);
    }

    return (1);
}
253 
254 /*
255  * Statistics service functions
256  */
257 
/*
 * DMA the port statistics accumulated in the MCP scratchpad (port_stx)
 * into this function's host buffer, so a function taking over as PMF
 * continues from the counters left by the previous PMF.  The copy is
 * split into two chained DMAE commands because the port-stats block is
 * longer than DMAE_LEN32_RD_MAX dwords; the transfer is then posted and
 * waited on synchronously.
 */
static void
bxe_stats_pmf_update(struct bxe_softc *sc)
{
    struct dmae_command *dmae;
    uint32_t opcode;
    int loader_idx = PMF_DMAE_C(sc);
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    if (sc->devinfo.bc_ver <= 0x06001400) {
        /*
         * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing
         * BRB registers while the BRB block is in reset. The DMA transfer
         * below triggers this issue resulting in the DMAE to stop
         * functioning. Skip this initial stats transfer for old bootcode
         * versions <= 6.0.20.
         */
        return;
    }

    /* sanity: only a PMF with a valid port-stats address may be here */
    if (!sc->port.pmf || !sc->port.port_stx) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    sc->executer_idx = 0;

    /* GRC -> PCI read; completion mode is added per command below */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0);

    /* first chunk: DMAE_LEN32_RD_MAX dwords, chains to the next via GRC */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
    dmae->src_addr_lo = (sc->port.port_stx >> 2);
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
    dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
    dmae->len = DMAE_LEN32_RD_MAX;
    dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
    dmae->comp_addr_hi = 0;
    dmae->comp_val = 1;

    /* second chunk: the remainder, completing to PCI so we can poll it */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
    dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX);
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats) +
                               DMAE_LEN32_RD_MAX * 4);
    dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats) +
                               DMAE_LEN32_RD_MAX * 4);
    dmae->len = (bxe_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX);

    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    /* arm the completion word, fire the chain, and wait for it */
    *stats_comp = 0;
    bxe_hw_stats_post(sc);
    bxe_stats_comp(sc);
}
316 
/*
 * Stage the full set of DMAE commands for one round of port statistics
 * collection (PMF only, link must be up):
 *   - write host port/function stats out to the MCP scratchpad,
 *   - read the MAC counters (EMAC, BMAC or MSTAT, per link mac_type),
 *   - read the NIG per-port counters.
 * All but the last command complete via the GRC "go" chain; the final
 * command completes to PCI by writing DMAE_COMP_VAL to stats_comp.
 * The staged commands are fired later by bxe_hw_stats_post().
 */
static void
bxe_port_stats_init(struct bxe_softc *sc)
{
    struct dmae_command *dmae;
    int port = SC_PORT(sc);
    uint32_t opcode;
    int loader_idx = PMF_DMAE_C(sc);
    uint32_t mac_addr;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    /* sanity */
    if (!sc->link_vars.link_up || !sc->port.pmf) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    sc->executer_idx = 0;

    /* MCP */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                             TRUE, DMAE_COMP_GRC);

    /* push host port stats to the MCP scratchpad, if it gave us one */
    if (sc->port.port_stx) {
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
        dmae->dst_addr_lo = sc->port.port_stx >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = bxe_get_port_stats_dma_len(sc);
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* push host function stats to the MCP scratchpad, if available */
    if (sc->func_stx) {
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
        dmae->dst_addr_lo = (sc->func_stx >> 2);
        dmae->dst_addr_hi = 0;
        dmae->len = (sizeof(struct host_func_stats) >> 2);
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* MAC */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
                             TRUE, DMAE_COMP_GRC);

    /* EMAC is special */
    if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) {
        mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

        /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
        dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* EMAC_REG_EMAC_RX_STAT_AC_28 */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2);
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            rx_stat_falsecarriererrors));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            rx_stat_falsecarriererrors));
        dmae->len = 1;
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2);
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            tx_stat_ifhcoutoctets));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            tx_stat_ifhcoutoctets));
        dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    } else {
        uint32_t tx_src_addr_lo, rx_src_addr_lo;
        uint16_t rx_len, tx_len;

        /* configure the params according to MAC type */
        switch (sc->link_vars.mac_type) {
        case ELINK_MAC_TYPE_BMAC:
            mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM :
                                NIG_REG_INGRESS_BMAC0_MEM;

            /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
               BIGMAC_REGISTER_TX_STAT_GTBYT */
            if (CHIP_IS_E1x(sc)) {
                tx_src_addr_lo =
                    ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
                tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
                           BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
                rx_src_addr_lo =
                    ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
                rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
                           BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
            } else {
                /* E2+ BigMAC has a different register layout (BMAC2) */
                tx_src_addr_lo =
                    ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
                tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
                           BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
                rx_src_addr_lo =
                    ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
                rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
                           BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
            }

            break;

        case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */
        case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */
        default:
            mac_addr = (port) ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
            tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2);
            rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2);
            tx_len =
                (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2);
            rx_len =
                (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2);
            break;
        }

        /* TX stats */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = tx_src_addr_lo;
        dmae->src_addr_hi = 0;
        dmae->len = tx_len;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* RX stats: land directly after the TX block in mac_stats */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_hi = 0;
        dmae->src_addr_lo = rx_src_addr_lo;
        dmae->dst_addr_lo =
            U64_LO(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
        dmae->dst_addr_hi =
            U64_HI(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
        dmae->len = rx_len;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* NIG */
    if (!CHIP_IS_E3(sc)) {
        /* two egress packet counters (2 dwords each: lo + hi) */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo =
            (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
                    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt0_lo));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt0_lo));
        dmae->len = ((2 * sizeof(uint32_t)) >> 2);
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo =
            (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
                    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt1_lo));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt1_lo));
        dmae->len = ((2 * sizeof(uint32_t)) >> 2);
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* final command: BRB discard counters, completing to PCI */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
                                   TRUE, DMAE_COMP_PCI);
    dmae->src_addr_lo =
        (port ? NIG_REG_STAT1_BRB_DISCARD :
                NIG_REG_STAT0_BRB_DISCARD) >> 2;
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats));
    dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats));
    dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2;

    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    /* arm the completion word; bxe_hw_stats_post() fires the chain */
    *stats_comp = 0;
}
544 
/*
 * Build the single DMAE command that copies this function's statistics
 * from the host buffer to the MCP scratchpad (func_stx).  Used on the
 * non-PMF path of bxe_stats_start(); the command is staged in
 * sc->stats_dmae and fired later by bxe_hw_stats_post(), completing to
 * PCI by writing DMAE_COMP_VAL into stats_comp.
 */
static void
bxe_func_stats_init(struct bxe_softc *sc)
{
    struct dmae_command *dmae = &sc->stats_dmae;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    /* sanity: the MCP must have provided a function-stats address */
    if (!sc->func_stx) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    /* no executer chain: only the single command below is used */
    sc->executer_idx = 0;
    memset(dmae, 0, sizeof(struct dmae_command));

    dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                                   TRUE, DMAE_COMP_PCI);
    dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
    dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
    dmae->dst_addr_lo = (sc->func_stx >> 2);
    dmae->dst_addr_hi = 0;
    dmae->len = (sizeof(struct host_func_stats) >> 2);
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    *stats_comp = 0;
}
573 
574 static void
575 bxe_stats_start(struct bxe_softc *sc)
576 {
577     /*
578      * VFs travel through here as part of the statistics FSM, but no action
579      * is required
580      */
581     if (IS_VF(sc)) {
582         return;
583     }
584 
585     if (sc->port.pmf) {
586         bxe_port_stats_init(sc);
587     }
588 
589     else if (sc->func_stx) {
590         bxe_func_stats_init(sc);
591     }
592 
593     bxe_hw_stats_post(sc);
594     bxe_storm_stats_post(sc);
595 }
596 
/*
 * Bring statistics up on a function that is taking the PMF role:
 * drain any outstanding DMAE completion, seed the host buffers with
 * the port counters accumulated in the MCP (preserving history from
 * the previous PMF), then start normal collection.
 */
static void
bxe_stats_pmf_start(struct bxe_softc *sc)
{
    bxe_stats_comp(sc);
    bxe_stats_pmf_update(sc);
    bxe_stats_start(sc);
}
604 
/*
 * Restart statistics collection: wait for any in-flight DMAE transfer
 * to complete, then kick off a fresh collection cycle.  VFs pass
 * through as a no-op.
 */
static void
bxe_stats_restart(struct bxe_softc *sc)
{
    /*
     * VFs travel through here as part of the statistics FSM, but no action
     * is required
     */
    if (IS_VF(sc)) {
        return;
    }

    bxe_stats_comp(sc);
    bxe_stats_start(sc);
}
619 
/*
 * Fold the freshly DMA'd BigMAC counters into the cumulative port and
 * ethernet statistics.  The UPDATE_STAT64/ADD_64 macros reference the
 * locals 'new' (raw hardware values), 'pstats' and 'diff' by name --
 * which is why those identifiers exist here despite appearing unused.
 * BMAC1 (E1x) and BMAC2 (E2+) have different stats layouts, so two
 * branches carry the same counter list against different 'new' types.
 */
static void
bxe_bmac_stats_update(struct bxe_softc *sc)
{
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;
    /* scratch area consumed by the UPDATE_STAT64 macros */
    struct {
        uint32_t lo;
        uint32_t hi;
    } diff;

    if (CHIP_IS_E1x(sc)) {
        struct bmac1_stats *new = BXE_SP(sc, mac_stats.bmac1_stats);

        /* the macros below will use "bmac1_stats" type */
        UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
        UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
        UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
        UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
        UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
        UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
        UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
        UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
        UPDATE_STAT64(tx_stat_gt127,
                      tx_stat_etherstatspkts65octetsto127octets);
        UPDATE_STAT64(tx_stat_gt255,
                      tx_stat_etherstatspkts128octetsto255octets);
        UPDATE_STAT64(tx_stat_gt511,
                      tx_stat_etherstatspkts256octetsto511octets);
        UPDATE_STAT64(tx_stat_gt1023,
                      tx_stat_etherstatspkts512octetsto1023octets);
        UPDATE_STAT64(tx_stat_gt1518,
                      tx_stat_etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
        UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
        UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
        UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
        UPDATE_STAT64(tx_stat_gterr,
                      tx_stat_dot3statsinternalmactransmiterrors);
        UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
    } else {
        struct bmac2_stats *new = BXE_SP(sc, mac_stats.bmac2_stats);
        struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;

        /* the macros below will use "bmac2_stats" type */
        UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
        UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
        UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
        UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
        UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
        UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
        UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
        UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
        UPDATE_STAT64(tx_stat_gt127,
                      tx_stat_etherstatspkts65octetsto127octets);
        UPDATE_STAT64(tx_stat_gt255,
                      tx_stat_etherstatspkts128octetsto255octets);
        UPDATE_STAT64(tx_stat_gt511,
                      tx_stat_etherstatspkts256octetsto511octets);
        UPDATE_STAT64(tx_stat_gt1023,
                      tx_stat_etherstatspkts512octetsto1023octets);
        UPDATE_STAT64(tx_stat_gt1518,
                      tx_stat_etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
        UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
        UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
        UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
        UPDATE_STAT64(tx_stat_gterr,
                      tx_stat_dot3statsinternalmactransmiterrors);
        UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

        /* collect PFC stats */
        pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
        pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
        ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi,
               pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo);

        pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
        pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
        ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi,
               pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo);
    }

    /* export pause/PFC totals into the ethernet stats block */
    estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
    estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;

    estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
    estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;

    estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
    estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
    estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
    estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
}
722 
/*
 * Fold the freshly DMA'd MSTAT counters (E3 UMAC/XMAC path) into the
 * cumulative port and ethernet statistics.  The ADD_STAT64/ADD_64
 * macros reference the locals 'new' and 'pstats' by name.  MSTAT has no
 * dedicated over-1522 counter, so etherstatspktsover1522octets is
 * synthesized by summing the 2047/4095/9216/16383 size buckets.
 */
static void
bxe_mstat_stats_update(struct bxe_softc *sc)
{
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;
    struct mstat_stats *new = BXE_SP(sc, mac_stats.mstat_stats);

    ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
    ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
    ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
    ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
    ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
    ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
    ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
    ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
    ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
    ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

    /* collect pfc stats */
    ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
           pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
    ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
           pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

    ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
    ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets);
    ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets);
    ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets);
    ADD_STAT64(stats_tx.tx_gt1023,
               tx_stat_etherstatspkts512octetsto1023octets);
    ADD_STAT64(stats_tx.tx_gt1518,
               tx_stat_etherstatspkts1024octetsto1522octets);
    ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

    ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
    ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
    ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

    ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors);
    ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

    estats->etherstatspkts1024octetsto1522octets_hi =
        pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
    estats->etherstatspkts1024octetsto1522octets_lo =
        pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

    /* over-1522 = sum of the 2047, 4095, 9216 and 16383 buckets */
    estats->etherstatspktsover1522octets_hi =
        pstats->mac_stx[1].tx_stat_mac_2047_hi;
    estats->etherstatspktsover1522octets_lo =
        pstats->mac_stx[1].tx_stat_mac_2047_lo;

    ADD_64(estats->etherstatspktsover1522octets_hi,
           pstats->mac_stx[1].tx_stat_mac_4095_hi,
           estats->etherstatspktsover1522octets_lo,
           pstats->mac_stx[1].tx_stat_mac_4095_lo);

    ADD_64(estats->etherstatspktsover1522octets_hi,
           pstats->mac_stx[1].tx_stat_mac_9216_hi,
           estats->etherstatspktsover1522octets_lo,
           pstats->mac_stx[1].tx_stat_mac_9216_lo);

    ADD_64(estats->etherstatspktsover1522octets_hi,
           pstats->mac_stx[1].tx_stat_mac_16383_hi,
           estats->etherstatspktsover1522octets_lo,
           pstats->mac_stx[1].tx_stat_mac_16383_lo);

    /* export pause/PFC totals into the ethernet stats block */
    estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
    estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;

    estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
    estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;

    estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
    estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
    estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
    estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
}
800 
/*
 * Fold the freshly DMAE'd EMAC hardware counters into the 64-bit
 * host port statistics and derive the aggregate pause-frame totals.
 *
 * NOTE: UPDATE_EXTEND_STAT and the pause-frame math below implicitly
 * reference the locals 'new', 'pstats' and 'estats' declared here;
 * renaming them would break the macro expansions.
 */
static void
bxe_emac_stats_update(struct bxe_softc *sc)
{
    struct emac_stats *new = BXE_SP(sc, mac_stats.emac_stats);
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;

    /* extend each 32-bit EMAC counter into its 64-bit mirror */
    UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
    UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
    UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
    UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
    UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
    UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
    UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
    UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
    UPDATE_EXTEND_STAT(tx_stat_outxonsent);
    UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
    UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
    UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

    /* pause frames received = xon received + xoff received */
    estats->pause_frames_received_hi =
        pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
    estats->pause_frames_received_lo =
        pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
    ADD_64(estats->pause_frames_received_hi,
           pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
           estats->pause_frames_received_lo,
           pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

    /* pause frames sent = xon sent + xoff sent */
    estats->pause_frames_sent_hi =
        pstats->mac_stx[1].tx_stat_outxonsent_hi;
    estats->pause_frames_sent_lo =
        pstats->mac_stx[1].tx_stat_outxonsent_lo;
    ADD_64(estats->pause_frames_sent_hi,
           pstats->mac_stx[1].tx_stat_outxoffsent_hi,
           estats->pause_frames_sent_lo,
           pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
858 
/*
 * Refresh all hardware statistics after a DMAE completion: dispatch to
 * the MAC-specific updater for the active MAC, then fold in the NIG/BRB
 * counters and copy the consolidated port stats into the ethernet stats.
 *
 * Returns 0 on success, -1 if no MAC is active (no stats to update).
 */
static int
bxe_hw_stats_update(struct bxe_softc *sc)
{
    struct nig_stats *new = BXE_SP(sc, nig_stats);
    struct nig_stats *old = &(sc->port.old_nig_stats);
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;
    uint32_t lpi_reg, nig_timer_max;
    /* scratch pair — presumably consumed inside UPDATE_STAT64_NIG; verify macro */
    struct {
        uint32_t lo;
        uint32_t hi;
    } diff;

    /* update the 64-bit port stats from the active MAC's HW counters */
    switch (sc->link_vars.mac_type) {
    case ELINK_MAC_TYPE_BMAC:
        bxe_bmac_stats_update(sc);
        break;

    case ELINK_MAC_TYPE_EMAC:
        bxe_emac_stats_update(sc);
        break;

    case ELINK_MAC_TYPE_UMAC:
    case ELINK_MAC_TYPE_XMAC:
        bxe_mstat_stats_update(sc);
        break;

    case ELINK_MAC_TYPE_NONE: /* unreached */
        BLOGD(sc, DBG_STATS,
              "stats updated by DMAE but no MAC active\n");
        return (-1);

    default: /* unreached */
        BLOGE(sc, "stats update failed, unknown MAC type\n");
        /* NOTE(review): falls through and still updates NIG stats — confirm intended */
    }

    /* accumulate BRB discard/truncate deltas since the last snapshot */
    ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
                  new->brb_discard - old->brb_discard);
    ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
                  new->brb_truncate - old->brb_truncate);

    /* E3 has no NIG egress packet counters for these buckets */
    if (!CHIP_IS_E3(sc)) {
        UPDATE_STAT64_NIG(egress_mac_pkt0,
                          etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64_NIG(egress_mac_pkt1,
                          etherstatspktsover1522octets);
    }

    /* remember this NIG snapshot for the next delta computation */
    memcpy(old, new, sizeof(struct nig_stats));

    /* bulk-copy the MAC stx block into the matching estats region */
    memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
           sizeof(struct mac_stx));
    estats->brb_drop_hi = pstats->brb_drop_hi;
    estats->brb_drop_lo = pstats->brb_drop_lo;

    pstats->host_port_stats_counter++;

    /* E3: accumulate the EEE LPI entry count from the per-port CPMU reg */
    if (CHIP_IS_E3(sc)) {
        lpi_reg = (SC_PORT(sc)) ?
                      MISC_REG_CPMU_LP_SM_ENT_CNT_P1 :
                      MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
        estats->eee_tx_lpi += REG_RD(sc, lpi_reg);
    }

    /* track (and report) changes in the MCP's NIG timer maximum */
    if (!BXE_NOMCP(sc)) {
        nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
        if (nig_timer_max != estats->nig_timer_max) {
            estats->nig_timer_max = nig_timer_max;
            BLOGE(sc, "invalid NIG timer max (%u)\n",
                  estats->nig_timer_max);
        }
    }

    return (0);
}
934 
935 static int
936 bxe_storm_stats_validate_counters(struct bxe_softc *sc)
937 {
938     struct stats_counter *counters = &sc->fw_stats_data->storm_counters;
939     uint16_t cur_stats_counter;
940 
941     /*
942      * Make sure we use the value of the counter
943      * used for sending the last stats ramrod.
944      */
945     BXE_STATS_LOCK(sc);
946     cur_stats_counter = (sc->stats_counter - 1);
947     BXE_STATS_UNLOCK(sc);
948 
949     /* are storm stats valid? */
950     if (le16toh(counters->xstats_counter) != cur_stats_counter) {
951         BLOGD(sc, DBG_STATS,
952               "stats not updated by xstorm, "
953               "counter 0x%x != stats_counter 0x%x\n",
954               le16toh(counters->xstats_counter), sc->stats_counter);
955         return (-EAGAIN);
956     }
957 
958     if (le16toh(counters->ustats_counter) != cur_stats_counter) {
959         BLOGD(sc, DBG_STATS,
960               "stats not updated by ustorm, "
961               "counter 0x%x != stats_counter 0x%x\n",
962               le16toh(counters->ustats_counter), sc->stats_counter);
963         return (-EAGAIN);
964     }
965 
966     if (le16toh(counters->cstats_counter) != cur_stats_counter) {
967         BLOGD(sc, DBG_STATS,
968               "stats not updated by cstorm, "
969               "counter 0x%x != stats_counter 0x%x\n",
970               le16toh(counters->cstats_counter), sc->stats_counter);
971         return (-EAGAIN);
972     }
973 
974     if (le16toh(counters->tstats_counter) != cur_stats_counter) {
975         BLOGD(sc, DBG_STATS,
976               "stats not updated by tstorm, "
977               "counter 0x%x != stats_counter 0x%x\n",
978               le16toh(counters->tstats_counter), sc->stats_counter);
979         return (-EAGAIN);
980     }
981 
982     return (0);
983 }
984 
/*
 * Pull the per-queue firmware statistics (t/u/x storm) into the driver's
 * queue stats, aggregate them into the per-function and ethernet stats,
 * and fold in the PF/port-level firmware counters.
 *
 * NOTE: the UPDATE_*/SUB_*/ADD_* macros implicitly reference the locals
 * declared in the loop ('tclient', 'uclient', 'xclient', their 'old_*'
 * counterparts, 'qstats', 'qstats_old') and the function-scope 'estats',
 * 'estats_old' and 'fstats'; renaming any of them would break the macros.
 *
 * Returns 0 on success, -EAGAIN when the storms have not yet completed
 * the last statistics ramrod.
 */
static int
bxe_storm_stats_update(struct bxe_softc *sc)
{
    struct tstorm_per_port_stats *tport =
        &sc->fw_stats_data->port.tstorm_port_statistics;
    struct tstorm_per_pf_stats *tfunc =
        &sc->fw_stats_data->pf.tstorm_pf_statistics;
    struct host_func_stats *fstats = &sc->func_stats;
    struct bxe_eth_stats *estats = &sc->eth_stats;
    struct bxe_eth_stats_old *estats_old = &sc->eth_stats_old;
    int i;

    /* vfs stat counter is managed by pf */
    if (IS_PF(sc) && bxe_storm_stats_validate_counters(sc)) {
        return (-EAGAIN);
    }

    /* recomputed below from the per-queue data and tfunc counters */
    estats->error_bytes_received_hi = 0;
    estats->error_bytes_received_lo = 0;

    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_fastpath *fp = &sc->fp[i];
        struct tstorm_per_queue_stats *tclient =
            &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics;
        struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
        struct ustorm_per_queue_stats *uclient =
            &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics;
        struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
        struct xstorm_per_queue_stats *xclient =
            &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics;
        struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
        struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
        struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

        uint32_t diff;

        BLOGD(sc, DBG_STATS,
              "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x\n",
              i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
              xclient->mcast_pkts_sent);

        BLOGD(sc, DBG_STATS, "---------------\n");

        /* rx byte counters per traffic class */
        UPDATE_QSTAT(tclient->rcv_bcast_bytes,
                     total_broadcast_bytes_received);
        UPDATE_QSTAT(tclient->rcv_mcast_bytes,
                     total_multicast_bytes_received);
        UPDATE_QSTAT(tclient->rcv_ucast_bytes,
                     total_unicast_bytes_received);

        /*
         * sum to total_bytes_received all
         * unicast/multicast/broadcast
         */
        qstats->total_bytes_received_hi =
            qstats->total_broadcast_bytes_received_hi;
        qstats->total_bytes_received_lo =
            qstats->total_broadcast_bytes_received_lo;

        ADD_64(qstats->total_bytes_received_hi,
               qstats->total_multicast_bytes_received_hi,
               qstats->total_bytes_received_lo,
               qstats->total_multicast_bytes_received_lo);

        ADD_64(qstats->total_bytes_received_hi,
               qstats->total_unicast_bytes_received_hi,
               qstats->total_bytes_received_lo,
               qstats->total_unicast_bytes_received_lo);

        qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi;
        qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo;

        /* rx packet counters and discard counters */
        UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received);
        UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received);
        UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received);
        UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
                              etherstatsoverrsizepkts, 32);
        UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);

        /* subtract no-buffer drops so rx packet counts reflect delivery */
        SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received);
        SUB_EXTEND_USTAT(mcast_no_buff_pkts,
                         total_multicast_packets_received);
        SUB_EXTEND_USTAT(bcast_no_buff_pkts,
                         total_broadcast_packets_received);
        UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
        UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
        UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

        /* tx byte counters per traffic class */
        UPDATE_QSTAT(xclient->bcast_bytes_sent,
                     total_broadcast_bytes_transmitted);
        UPDATE_QSTAT(xclient->mcast_bytes_sent,
                     total_multicast_bytes_transmitted);
        UPDATE_QSTAT(xclient->ucast_bytes_sent,
                     total_unicast_bytes_transmitted);

        /*
         * sum to total_bytes_transmitted all
         * unicast/multicast/broadcast
         */
        qstats->total_bytes_transmitted_hi =
            qstats->total_unicast_bytes_transmitted_hi;
        qstats->total_bytes_transmitted_lo =
            qstats->total_unicast_bytes_transmitted_lo;

        ADD_64(qstats->total_bytes_transmitted_hi,
               qstats->total_broadcast_bytes_transmitted_hi,
               qstats->total_bytes_transmitted_lo,
               qstats->total_broadcast_bytes_transmitted_lo);

        ADD_64(qstats->total_bytes_transmitted_hi,
               qstats->total_multicast_bytes_transmitted_hi,
               qstats->total_bytes_transmitted_lo,
               qstats->total_multicast_bytes_transmitted_lo);

        /* tx packet counters per traffic class */
        UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
                            total_unicast_packets_transmitted);
        UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
                            total_multicast_packets_transmitted);
        UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
                            total_broadcast_packets_transmitted);

        UPDATE_EXTEND_TSTAT(checksum_discard,
                            total_packets_received_checksum_discarded);
        UPDATE_EXTEND_TSTAT(ttl0_discard,
                            total_packets_received_ttl0_discarded);

        UPDATE_EXTEND_XSTAT(error_drop_pkts,
                            total_transmitted_dropped_packets_error);

        /* TPA aggregations completed */
        UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
        /* Number of network frames aggregated by TPA */
        UPDATE_EXTEND_E_USTAT(coalesced_pkts, total_tpa_aggregated_frames);
        /* Total number of bytes in completed TPA aggregations */
        UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

        UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

        /* roll the per-queue totals up into the function stats */
        UPDATE_FSTAT_QSTAT(total_bytes_received);
        UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
        UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
        UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
        UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
        UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
        UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
        UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
        UPDATE_FSTAT_QSTAT(valid_bytes_received);
    }

    /* include MAC-level bad octets in the rx byte total */
    ADD_64(estats->total_bytes_received_hi,
           estats->rx_stat_ifhcinbadoctets_hi,
           estats->total_bytes_received_lo,
           estats->rx_stat_ifhcinbadoctets_lo);

    /* fold in the PF-level firmware error byte counters (little-endian) */
    ADD_64_LE(estats->total_bytes_received_hi,
              tfunc->rcv_error_bytes.hi,
              estats->total_bytes_received_lo,
              tfunc->rcv_error_bytes.lo);

    ADD_64_LE(estats->error_bytes_received_hi,
              tfunc->rcv_error_bytes.hi,
              estats->error_bytes_received_lo,
              tfunc->rcv_error_bytes.lo);

    UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

    ADD_64(estats->error_bytes_received_hi,
           estats->rx_stat_ifhcinbadoctets_hi,
           estats->error_bytes_received_lo,
           estats->rx_stat_ifhcinbadoctets_lo);

    /* port-level firmware discard counters are only valid on the PMF */
    if (sc->port.pmf) {
        struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
        UPDATE_FW_STAT(mac_filter_discard);
        UPDATE_FW_STAT(mf_tag_discard);
        UPDATE_FW_STAT(brb_truncate_discard);
        UPDATE_FW_STAT(mac_discard);
    }

    /* mark the function stats snapshot as consistent */
    fstats->host_func_stats_start = ++fstats->host_func_stats_end;

    sc->stats_pending = 0;

    return (0);
}
1170 
1171 static void
1172 bxe_net_stats_update(struct bxe_softc *sc)
1173 {
1174     struct bxe_eth_stats *estats = &sc->eth_stats;
1175     struct ifnet *ifnet = sc->ifnet;
1176     unsigned long tmp;
1177     int i;
1178 
1179     ifnet->if_data.ifi_ipackets =
1180         bxe_hilo(&estats->total_unicast_packets_received_hi) +
1181         bxe_hilo(&estats->total_multicast_packets_received_hi) +
1182         bxe_hilo(&estats->total_broadcast_packets_received_hi);
1183 
1184     ifnet->if_data.ifi_opackets =
1185         bxe_hilo(&estats->total_unicast_packets_transmitted_hi) +
1186         bxe_hilo(&estats->total_multicast_packets_transmitted_hi) +
1187         bxe_hilo(&estats->total_broadcast_packets_transmitted_hi);
1188 
1189     ifnet->if_data.ifi_ibytes = bxe_hilo(&estats->total_bytes_received_hi);
1190 
1191     ifnet->if_data.ifi_obytes = bxe_hilo(&estats->total_bytes_transmitted_hi);
1192 
1193     tmp = 0;
1194     for (i = 0; i < sc->num_queues; i++) {
1195         struct tstorm_per_queue_stats *old_tclient =
1196             &sc->fp[i].old_tclient;
1197         tmp += le32toh(old_tclient->checksum_discard);
1198     }
1199 
1200     ifnet->if_data.ifi_iqdrops = tmp;
1201 
1202     ifnet->if_data.ifi_ierrors =
1203         bxe_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
1204         bxe_hilo(&estats->etherstatsoverrsizepkts_hi) +
1205         bxe_hilo(&estats->brb_drop_hi) +
1206         bxe_hilo(&estats->brb_truncate_hi) +
1207         bxe_hilo(&estats->rx_stat_dot3statsfcserrors_hi) +
1208         bxe_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi) +
1209         bxe_hilo(&estats->no_buff_discard_hi);
1210 
1211     ifnet->if_data.ifi_oerrors =
1212         bxe_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi) +
1213         bxe_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
1214 
1215     ifnet->if_data.ifi_imcasts =
1216         bxe_hilo(&estats->total_multicast_packets_received_hi);
1217 
1218     ifnet->if_data.ifi_collisions =
1219         bxe_hilo(&estats->tx_stat_etherstatscollisions_hi) +
1220         bxe_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
1221         bxe_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
1222 }
1223 
/*
 * Aggregate the software (driver-maintained) per-queue counters into
 * the device-wide ethernet statistics.
 *
 * NOTE: UPDATE_ESTAT_QSTAT implicitly references the locals 'estats',
 * 'qstats' and 'qstats_old' declared here; renaming them would break
 * the macro expansion.
 */
static void
bxe_drv_stats_update(struct bxe_softc *sc)
{
    struct bxe_eth_stats *estats = &sc->eth_stats;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
        struct bxe_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old;

        /* rx path software counters */
        UPDATE_ESTAT_QSTAT(rx_calls);
        UPDATE_ESTAT_QSTAT(rx_pkts);
        UPDATE_ESTAT_QSTAT(rx_tpa_pkts);
        UPDATE_ESTAT_QSTAT(rx_soft_errors);
        UPDATE_ESTAT_QSTAT(rx_hw_csum_errors);
        UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip);
        UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp);
        UPDATE_ESTAT_QSTAT(rx_budget_reached);
        /* tx path software counters */
        UPDATE_ESTAT_QSTAT(tx_pkts);
        UPDATE_ESTAT_QSTAT(tx_soft_errors);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso_hdr_splits);
        UPDATE_ESTAT_QSTAT(tx_encap_failures);
        UPDATE_ESTAT_QSTAT(tx_hw_queue_full);
        UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth);
        UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure);
        UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth);
        UPDATE_ESTAT_QSTAT(tx_window_violation_std);
        UPDATE_ESTAT_QSTAT(tx_window_violation_tso);
        //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_ipv6);
        //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_not_tcp);
        UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf);
        UPDATE_ESTAT_QSTAT(tx_frames_deferred);
        UPDATE_ESTAT_QSTAT(tx_queue_xoff);

        /* mbuf driver statistics */
        UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts);
        UPDATE_ESTAT_QSTAT(mbuf_defrag_failures);
        UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_alloc_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_mapping_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_sge_alloc_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_sge_mapping_failed);

        /* track the number of allocated mbufs */
        UPDATE_ESTAT_QSTAT(mbuf_alloc_tx);
        UPDATE_ESTAT_QSTAT(mbuf_alloc_rx);
        UPDATE_ESTAT_QSTAT(mbuf_alloc_sge);
        UPDATE_ESTAT_QSTAT(mbuf_alloc_tpa);
    }
}
1279 
1280 static uint8_t
1281 bxe_edebug_stats_stopped(struct bxe_softc *sc)
1282 {
1283     uint32_t val;
1284 
1285     if (SHMEM2_HAS(sc, edebug_driver_if[1])) {
1286         val = SHMEM2_RD(sc, edebug_driver_if[1]);
1287 
1288         if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) {
1289             return (TRUE);
1290         }
1291     }
1292 
1293     return (FALSE);
1294 }
1295 
1296 static void
1297 bxe_stats_update(struct bxe_softc *sc)
1298 {
1299     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
1300 
1301     if (bxe_edebug_stats_stopped(sc)) {
1302         return;
1303     }
1304 
1305     if (IS_PF(sc)) {
1306         if (*stats_comp != DMAE_COMP_VAL) {
1307             return;
1308         }
1309 
1310         if (sc->port.pmf) {
1311             bxe_hw_stats_update(sc);
1312         }
1313 
1314         if (bxe_storm_stats_update(sc)) {
1315             if (sc->stats_pending++ == 3) {
1316                 bxe_panic(sc, ("storm stats not updated for 3 times\n"));
1317             }
1318             return;
1319         }
1320     } else {
1321         /*
1322          * VF doesn't collect HW statistics, and doesn't get completions,
1323          * performs only update.
1324          */
1325         bxe_storm_stats_update(sc);
1326     }
1327 
1328     bxe_net_stats_update(sc);
1329     bxe_drv_stats_update(sc);
1330 
1331     /* vf is done */
1332     if (IS_VF(sc)) {
1333         return;
1334     }
1335 
1336     bxe_hw_stats_post(sc);
1337     bxe_storm_stats_post(sc);
1338 }
1339 
/*
 * Build (but do not execute) the DMAE commands that write the final
 * port and function statistics back to device memory before the
 * statistics machinery is shut down.
 *
 * When both port and function stats are present, the port-stats DMAE
 * chains into the function-stats DMAE via a GRC completion (loader);
 * the last command in the chain completes to host memory (stats_comp).
 */
static void
bxe_port_stats_stop(struct bxe_softc *sc)
{
    struct dmae_command *dmae;
    uint32_t opcode;
    int loader_idx = PMF_DMAE_C(sc);
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    /* start filling the shared DMAE command array from slot 0 */
    sc->executer_idx = 0;

    /* base opcode: copy from host (PCI) to device (GRC) */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0);

    if (sc->port.port_stx) {
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);

        /* chain to the function-stats command if one will follow */
        if (sc->func_stx) {
            dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
        } else {
            dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
        }

        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
        dmae->dst_addr_lo = sc->port.port_stx >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = bxe_get_port_stats_dma_len(sc);
        if (sc->func_stx) {
            /* completion kicks the loader to run the next DMAE command */
            dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
            dmae->comp_addr_hi = 0;
            dmae->comp_val = 1;
        } else {
            /* final command: complete into host memory */
            dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
            dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
            dmae->comp_val = DMAE_COMP_VAL;

            *stats_comp = 0;
        }
    }

    if (sc->func_stx) {
        /* function stats writeback; always the last command in the chain */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
        dmae->dst_addr_lo = (sc->func_stx >> 2);
        dmae->dst_addr_hi = 0;
        dmae->len = (sizeof(struct host_func_stats) >> 2);
        dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
        dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        *stats_comp = 0;
    }
}
1394 
1395 static void
1396 bxe_stats_stop(struct bxe_softc *sc)
1397 {
1398     uint8_t update = FALSE;
1399 
1400     bxe_stats_comp(sc);
1401 
1402     if (sc->port.pmf) {
1403         update = bxe_hw_stats_update(sc) == 0;
1404     }
1405 
1406     update |= bxe_storm_stats_update(sc) == 0;
1407 
1408     if (update) {
1409         bxe_net_stats_update(sc);
1410 
1411         if (sc->port.pmf) {
1412             bxe_port_stats_stop(sc);
1413         }
1414 
1415         bxe_hw_stats_post(sc);
1416         bxe_stats_comp(sc);
1417     }
1418 }
1419 
/* No-op action for state-machine transitions that require no work. */
static void
bxe_stats_do_nothing(struct bxe_softc *sc)
{
    /* intentionally empty */
}
1425 
/*
 * Statistics state machine: indexed by [current state][event], each
 * entry names the action to run and the state to transition to.
 * Driven by bxe_stats_handle().
 */
static const struct {
    void (*action)(struct bxe_softc *sc);
    enum bxe_stats_state next_state;
} bxe_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
    {
    /* DISABLED PMF */ { bxe_stats_pmf_update, STATS_STATE_DISABLED },
    /*      LINK_UP */ { bxe_stats_start,      STATS_STATE_ENABLED },
    /*      UPDATE  */ { bxe_stats_do_nothing, STATS_STATE_DISABLED },
    /*      STOP    */ { bxe_stats_do_nothing, STATS_STATE_DISABLED }
    },
    {
    /* ENABLED  PMF */ { bxe_stats_pmf_start,  STATS_STATE_ENABLED },
    /*      LINK_UP */ { bxe_stats_restart,    STATS_STATE_ENABLED },
    /*      UPDATE  */ { bxe_stats_update,     STATS_STATE_ENABLED },
    /*      STOP    */ { bxe_stats_stop,       STATS_STATE_DISABLED }
    }
};
1443 
/*
 * Drive the statistics state machine: atomically advance the state for
 * the given event, then run the associated action.
 *
 * NOTE(review): the state transition happens under the stats lock but
 * the action runs after the lock is dropped — presumably events are
 * serialized by the callers; verify before relying on it.
 */
void bxe_stats_handle(struct bxe_softc     *sc,
                      enum bxe_stats_event event)
{
    enum bxe_stats_state state;

    /* don't touch the hardware once the driver has panicked */
    if (__predict_false(sc->panic)) {
        return;
    }

    BXE_STATS_LOCK(sc);
    state = sc->stats_state;
    sc->stats_state = bxe_stats_stm[state][event].next_state;
    BXE_STATS_UNLOCK(sc);

    bxe_stats_stm[state][event].action(sc);

    /* log state transitions, but not the high-frequency UPDATE events */
    if (event != STATS_EVENT_UPDATE) {
        BLOGD(sc, DBG_STATS,
              "state %d -> event %d -> state %d\n",
              state, event, sc->stats_state);
    }
}
1466 
/*
 * Write the initial (baseline) port statistics to device memory via a
 * single synchronous DMAE transaction. Only valid on the PMF with a
 * configured port stats address.
 */
static void
bxe_port_stats_base_init(struct bxe_softc *sc)
{
    struct dmae_command *dmae;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    /* sanity */
    if (!sc->port.pmf || !sc->port.port_stx) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    /* start filling the shared DMAE command array from slot 0 */
    sc->executer_idx = 0;

    /* host (PCI) -> device (GRC) copy, completing into host memory */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                                   TRUE, DMAE_COMP_PCI);
    dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
    dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
    dmae->dst_addr_lo = (sc->port.port_stx >> 2);
    dmae->dst_addr_hi = 0;
    dmae->len = bxe_get_port_stats_dma_len(sc);
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    /* post the command and busy-wait for its completion */
    *stats_comp = 0;
    bxe_hw_stats_post(sc);
    bxe_stats_comp(sc);
}
1497 
1498 /*
1499  * This function will prepare the statistics ramrod data the way
1500  * we will only have to increment the statistics counter and
1501  * send the ramrod each time we have to.
1502  */
1503 static void
1504 bxe_prep_fw_stats_req(struct bxe_softc *sc)
1505 {
1506     int i;
1507     int first_queue_query_index;
1508     struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
1509     bus_addr_t cur_data_offset;
1510     struct stats_query_entry *cur_query_entry;
1511 
1512     stats_hdr->cmd_num = sc->fw_stats_num;
1513     stats_hdr->drv_stats_counter = 0;
1514 
1515     /*
1516      * The storm_counters struct contains the counters of completed
1517      * statistics requests per storm which are incremented by FW
1518      * each time it completes hadning a statistics ramrod. We will
1519      * check these counters in the timer handler and discard a
1520      * (statistics) ramrod completion.
1521      */
1522     cur_data_offset = (sc->fw_stats_data_mapping +
1523                        offsetof(struct bxe_fw_stats_data, storm_counters));
1524 
1525     stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
1526     stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));
1527 
1528     /*
1529      * Prepare the first stats ramrod (will be completed with
1530      * the counters equal to zero) - init counters to somethig different.
1531      */
1532     memset(&sc->fw_stats_data->storm_counters, 0xff,
1533            sizeof(struct stats_counter));
1534 
1535     /**** Port FW statistics data ****/
1536     cur_data_offset = (sc->fw_stats_data_mapping +
1537                        offsetof(struct bxe_fw_stats_data, port));
1538 
1539     cur_query_entry = &sc->fw_stats_req->query[BXE_PORT_QUERY_IDX];
1540 
1541     cur_query_entry->kind = STATS_TYPE_PORT;
1542     /* For port query index is a DONT CARE */
1543     cur_query_entry->index = SC_PORT(sc);
1544     /* For port query funcID is a DONT CARE */
1545     cur_query_entry->funcID = htole16(SC_FUNC(sc));
1546     cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1547     cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1548 
1549     /**** PF FW statistics data ****/
1550     cur_data_offset = (sc->fw_stats_data_mapping +
1551                        offsetof(struct bxe_fw_stats_data, pf));
1552 
1553     cur_query_entry = &sc->fw_stats_req->query[BXE_PF_QUERY_IDX];
1554 
1555     cur_query_entry->kind = STATS_TYPE_PF;
1556     /* For PF query index is a DONT CARE */
1557     cur_query_entry->index = SC_PORT(sc);
1558     cur_query_entry->funcID = htole16(SC_FUNC(sc));
1559     cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1560     cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1561 
1562 #if 0
1563     /**** FCoE FW statistics data ****/
1564     if (!NO_FCOE(sc)) {
1565         cur_data_offset = (sc->fw_stats_data_mapping +
1566                            offsetof(struct bxe_fw_stats_data, fcoe));
1567 
1568         cur_query_entry = &sc->fw_stats_req->query[BXE_FCOE_QUERY_IDX];
1569 
1570         cur_query_entry->kind = STATS_TYPE_FCOE;
1571         /* For FCoE query index is a DONT CARE */
1572         cur_query_entry->index = SC_PORT(sc);
1573         cur_query_entry->funcID = cpu_to_le16(SC_FUNC(sc));
1574         cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1575         cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1576     }
1577 #endif
1578 
1579     /**** Clients' queries ****/
1580     cur_data_offset = (sc->fw_stats_data_mapping +
1581                        offsetof(struct bxe_fw_stats_data, queue_stats));
1582 
1583     /*
1584      * First queue query index depends whether FCoE offloaded request will
1585      * be included in the ramrod
1586      */
1587 #if 0
1588     if (!NO_FCOE(sc))
1589         first_queue_query_index = BXE_FIRST_QUEUE_QUERY_IDX;
1590     else
1591 #endif
1592         first_queue_query_index = (BXE_FIRST_QUEUE_QUERY_IDX - 1);
1593 
1594     for (i = 0; i < sc->num_queues; i++) {
1595         cur_query_entry =
1596             &sc->fw_stats_req->query[first_queue_query_index + i];
1597 
1598         cur_query_entry->kind = STATS_TYPE_QUEUE;
1599         cur_query_entry->index = bxe_stats_id(&sc->fp[i]);
1600         cur_query_entry->funcID = htole16(SC_FUNC(sc));
1601         cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1602         cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1603 
1604         cur_data_offset += sizeof(struct per_queue_stats);
1605     }
1606 
1607 #if 0
1608     /* add FCoE queue query if needed */
1609     if (!NO_FCOE(sc)) {
1610         cur_query_entry =
1611             &sc->fw_stats_req->query[first_queue_query_index + i];
1612 
1613         cur_query_entry->kind = STATS_TYPE_QUEUE;
1614         cur_query_entry->index = bxe_stats_id(&sc->fp[FCOE_IDX(sc)]);
1615         cur_query_entry->funcID = htole16(SC_FUNC(sc));
1616         cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1617         cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1618     }
1619 #endif
1620 }
1621 
1622 void
1623 bxe_stats_init(struct bxe_softc *sc)
1624 {
1625     int /*abs*/port = SC_PORT(sc);
1626     int mb_idx = SC_FW_MB_IDX(sc);
1627     int i;
1628 
1629     sc->stats_pending = 0;
1630     sc->executer_idx = 0;
1631     sc->stats_counter = 0;
1632 
1633     /* port and func stats for management */
1634     if (!BXE_NOMCP(sc)) {
1635         sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
1636         sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param);
1637     } else {
1638         sc->port.port_stx = 0;
1639         sc->func_stx = 0;
1640     }
1641 
1642     BLOGD(sc, DBG_STATS, "port_stx 0x%x func_stx 0x%x\n",
1643           sc->port.port_stx, sc->func_stx);
1644 
1645     /* pmf should retrieve port statistics from SP on a non-init*/
1646     if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) {
1647         bxe_stats_handle(sc, STATS_EVENT_PMF);
1648     }
1649 
1650     port = SC_PORT(sc);
1651     /* port stats */
1652     memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
1653     sc->port.old_nig_stats.brb_discard =
1654         REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
1655     sc->port.old_nig_stats.brb_truncate =
1656         REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
1657     if (!CHIP_IS_E3(sc)) {
1658         REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
1659                     &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
1660         REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
1661                     &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1662     }
1663 
1664     /* function stats */
1665     for (i = 0; i < sc->num_queues; i++) {
1666         memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient));
1667         memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient));
1668         memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient));
1669         if (sc->stats_init) {
1670             memset(&sc->fp[i].eth_q_stats, 0,
1671                    sizeof(sc->fp[i].eth_q_stats));
1672             memset(&sc->fp[i].eth_q_stats_old, 0,
1673                    sizeof(sc->fp[i].eth_q_stats_old));
1674         }
1675     }
1676 
1677     /* prepare statistics ramrod data */
1678     bxe_prep_fw_stats_req(sc);
1679 
1680     sc->ifnet->if_data.ifi_ipackets   = 0;
1681     sc->ifnet->if_data.ifi_opackets   = 0;
1682     sc->ifnet->if_data.ifi_ibytes     = 0;
1683     sc->ifnet->if_data.ifi_obytes     = 0;
1684     sc->ifnet->if_data.ifi_ierrors    = 0;
1685     sc->ifnet->if_data.ifi_oerrors    = 0;
1686     sc->ifnet->if_data.ifi_imcasts    = 0;
1687     sc->ifnet->if_data.ifi_collisions = 0;
1688 
1689     if (sc->stats_init) {
1690         memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
1691         memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
1692         memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
1693         memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
1694         memset(&sc->func_stats, 0, sizeof(sc->func_stats));
1695 
1696         /* Clean SP from previous statistics */
1697         if (sc->func_stx) {
1698             memset(BXE_SP(sc, func_stats), 0, sizeof(struct host_func_stats));
1699             bxe_func_stats_init(sc);
1700             bxe_hw_stats_post(sc);
1701             bxe_stats_comp(sc);
1702         }
1703     }
1704 
1705     sc->stats_state = STATS_STATE_DISABLED;
1706 
1707     if (sc->port.pmf && sc->port.port_stx) {
1708         bxe_port_stats_base_init(sc);
1709     }
1710 
1711     /* mark the end of statistics initializiation */
1712     sc->stats_init = FALSE;
1713 }
1714 
/*
 * Snapshot the current statistics into their "*_old" holders so they can
 * be carried forward (e.g. across a firmware statistics reset/reload).
 * Saves the per-queue byte counters into each queue's eth_q_stats_old,
 * the ifnet input-queue drop count into net_stats_old, and - on the PMF
 * only - the port firmware PFC/discard counters into fw_stats_old.
 */
void
bxe_save_statistics(struct bxe_softc *sc)
{
    int i;

    /* save queue statistics */
    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_fastpath *fp = &sc->fp[i];
        /*
         * NOTE(review): 'qstats' and 'qstats_old' look unused here but are
         * presumably referenced by name inside the UPDATE_QSTAT_OLD()
         * macro (defined elsewhere) - verify before renaming or removing.
         */
        struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
        struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

        /* carry the hi/lo byte counters into the saved ("old") copies */
        UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
        UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
        UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
        UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
        UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
        UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
        UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
        UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
        UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
        UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
    }

    /* save net_device_stats statistics */
    sc->net_stats_old.rx_dropped = sc->ifnet->if_data.ifi_iqdrops;

    /* store port firmware statistics */
    if (sc->port.pmf) {
        /*
         * NOTE(review): 'estats' and 'pstats' are presumably consumed by
         * name inside the UPDATE_FW_STAT_OLD() macro below - verify
         * before renaming.
         */
        struct bxe_eth_stats *estats = &sc->eth_stats;
        struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
        struct host_port_stats *pstats = BXE_SP(sc, port_stats);

        /* PFC frame counters come straight from the SP port stats block */
        fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi;
        fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo;
        fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi;
        fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo;

        if (IS_MF(sc)) {
            /* multi-function mode: also preserve the discard counters */
            UPDATE_FW_STAT_OLD(mac_filter_discard);
            UPDATE_FW_STAT_OLD(mf_tag_discard);
            UPDATE_FW_STAT_OLD(brb_truncate_discard);
            UPDATE_FW_STAT_OLD(mac_discard);
        }
    }
}
1764 
/*
 * Aggregate driver statistics into an AFEX stats block.
 *
 * 'void_afex_stats' is cast to (struct afex_stats *), zeroed, and then the
 * per-queue storm counters are summed into it with ADD_64().  When this
 * function is the PMF and 'stats_type' is VICSTATST_UIF_INDEX, the
 * port-level discard counters from eth_stats are added in as well, since
 * the MCP accumulates those before sending them to the switch.  The large
 * '#if 0' regions are disabled FCoE accounting kept from the original
 * Broadcom sources.
 */
void
bxe_afex_collect_stats(struct bxe_softc *sc,
                       void             *void_afex_stats,
                       uint32_t         stats_type)
{
    int i;
    struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
    struct bxe_eth_stats *estats = &sc->eth_stats;
#if 0
    struct per_queue_stats *fcoe_q_stats =
        &sc->fw_stats_data->queue_stats[FCOE_IDX(sc)];

    struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
        &fcoe_q_stats->tstorm_queue_statistics;

    struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
        &fcoe_q_stats->ustorm_queue_statistics;

    struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
        &fcoe_q_stats->xstorm_queue_statistics;

    struct fcoe_statistics_params *fw_fcoe_stat =
        &sc->fw_stats_data->fcoe;
#endif

    /* start from a clean slate; everything below accumulates */
    memset(afex_stats, 0, sizeof(struct afex_stats));

    /* sum each queue's rx/tx byte and packet counters into the totals */
    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;

        ADD_64(afex_stats->rx_unicast_bytes_hi,
               qstats->total_unicast_bytes_received_hi,
               afex_stats->rx_unicast_bytes_lo,
               qstats->total_unicast_bytes_received_lo);

        ADD_64(afex_stats->rx_broadcast_bytes_hi,
               qstats->total_broadcast_bytes_received_hi,
               afex_stats->rx_broadcast_bytes_lo,
               qstats->total_broadcast_bytes_received_lo);

        ADD_64(afex_stats->rx_multicast_bytes_hi,
               qstats->total_multicast_bytes_received_hi,
               afex_stats->rx_multicast_bytes_lo,
               qstats->total_multicast_bytes_received_lo);

        ADD_64(afex_stats->rx_unicast_frames_hi,
               qstats->total_unicast_packets_received_hi,
               afex_stats->rx_unicast_frames_lo,
               qstats->total_unicast_packets_received_lo);

        ADD_64(afex_stats->rx_broadcast_frames_hi,
               qstats->total_broadcast_packets_received_hi,
               afex_stats->rx_broadcast_frames_lo,
               qstats->total_broadcast_packets_received_lo);

        ADD_64(afex_stats->rx_multicast_frames_hi,
               qstats->total_multicast_packets_received_hi,
               afex_stats->rx_multicast_frames_lo,
               qstats->total_multicast_packets_received_lo);

        /*
         * sum to rx_frames_discarded all discarded
         * packets due to size, ttl0 and checksum
         */
        ADD_64(afex_stats->rx_frames_discarded_hi,
               qstats->total_packets_received_checksum_discarded_hi,
               afex_stats->rx_frames_discarded_lo,
               qstats->total_packets_received_checksum_discarded_lo);

        ADD_64(afex_stats->rx_frames_discarded_hi,
               qstats->total_packets_received_ttl0_discarded_hi,
               afex_stats->rx_frames_discarded_lo,
               qstats->total_packets_received_ttl0_discarded_lo);

        ADD_64(afex_stats->rx_frames_discarded_hi,
               qstats->etherstatsoverrsizepkts_hi,
               afex_stats->rx_frames_discarded_lo,
               qstats->etherstatsoverrsizepkts_lo);

        ADD_64(afex_stats->rx_frames_dropped_hi,
               qstats->no_buff_discard_hi,
               afex_stats->rx_frames_dropped_lo,
               qstats->no_buff_discard_lo);

        ADD_64(afex_stats->tx_unicast_bytes_hi,
               qstats->total_unicast_bytes_transmitted_hi,
               afex_stats->tx_unicast_bytes_lo,
               qstats->total_unicast_bytes_transmitted_lo);

        ADD_64(afex_stats->tx_broadcast_bytes_hi,
               qstats->total_broadcast_bytes_transmitted_hi,
               afex_stats->tx_broadcast_bytes_lo,
               qstats->total_broadcast_bytes_transmitted_lo);

        ADD_64(afex_stats->tx_multicast_bytes_hi,
               qstats->total_multicast_bytes_transmitted_hi,
               afex_stats->tx_multicast_bytes_lo,
               qstats->total_multicast_bytes_transmitted_lo);

        ADD_64(afex_stats->tx_unicast_frames_hi,
               qstats->total_unicast_packets_transmitted_hi,
               afex_stats->tx_unicast_frames_lo,
               qstats->total_unicast_packets_transmitted_lo);

        ADD_64(afex_stats->tx_broadcast_frames_hi,
               qstats->total_broadcast_packets_transmitted_hi,
               afex_stats->tx_broadcast_frames_lo,
               qstats->total_broadcast_packets_transmitted_lo);

        ADD_64(afex_stats->tx_multicast_frames_hi,
               qstats->total_multicast_packets_transmitted_hi,
               afex_stats->tx_multicast_frames_lo,
               qstats->total_multicast_packets_transmitted_lo);

        ADD_64(afex_stats->tx_frames_dropped_hi,
               qstats->total_transmitted_dropped_packets_error_hi,
               afex_stats->tx_frames_dropped_lo,
               qstats->total_transmitted_dropped_packets_error_lo);
    }

#if 0
    /*
     * Now add FCoE statistics which are collected separately
     * (both offloaded and non offloaded)
     */
    if (!NO_FCOE(sc)) {
        ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
                  LE32_0,
                  afex_stats->rx_unicast_bytes_lo,
                  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

        ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
                  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
                  afex_stats->rx_unicast_bytes_lo,
                  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

        ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
                  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
                  afex_stats->rx_broadcast_bytes_lo,
                  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

        ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
                  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
                  afex_stats->rx_multicast_bytes_lo,
                  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

        ADD_64_LE(afex_stats->rx_unicast_frames_hi,
                  LE32_0,
                  afex_stats->rx_unicast_frames_lo,
                  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

        ADD_64_LE(afex_stats->rx_unicast_frames_hi,
                  LE32_0,
                  afex_stats->rx_unicast_frames_lo,
                  fcoe_q_tstorm_stats->rcv_ucast_pkts);

        ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
                  LE32_0,
                  afex_stats->rx_broadcast_frames_lo,
                  fcoe_q_tstorm_stats->rcv_bcast_pkts);

        ADD_64_LE(afex_stats->rx_multicast_frames_hi,
                  LE32_0,
                  afex_stats->rx_multicast_frames_lo,
                  fcoe_q_tstorm_stats->rcv_ucast_pkts);

        ADD_64_LE(afex_stats->rx_frames_discarded_hi,
                  LE32_0,
                  afex_stats->rx_frames_discarded_lo,
                  fcoe_q_tstorm_stats->checksum_discard);

        ADD_64_LE(afex_stats->rx_frames_discarded_hi,
                  LE32_0,
                  afex_stats->rx_frames_discarded_lo,
                  fcoe_q_tstorm_stats->pkts_too_big_discard);

        ADD_64_LE(afex_stats->rx_frames_discarded_hi,
                  LE32_0,
                  afex_stats->rx_frames_discarded_lo,
                  fcoe_q_tstorm_stats->ttl0_discard);

        ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
                    LE16_0,
                    afex_stats->rx_frames_dropped_lo,
                    fcoe_q_tstorm_stats->no_buff_discard);

        ADD_64_LE(afex_stats->rx_frames_dropped_hi,
                  LE32_0,
                  afex_stats->rx_frames_dropped_lo,
                  fcoe_q_ustorm_stats->ucast_no_buff_pkts);

        ADD_64_LE(afex_stats->rx_frames_dropped_hi,
                  LE32_0,
                  afex_stats->rx_frames_dropped_lo,
                  fcoe_q_ustorm_stats->mcast_no_buff_pkts);

        ADD_64_LE(afex_stats->rx_frames_dropped_hi,
                  LE32_0,
                  afex_stats->rx_frames_dropped_lo,
                  fcoe_q_ustorm_stats->bcast_no_buff_pkts);

        ADD_64_LE(afex_stats->rx_frames_dropped_hi,
                  LE32_0,
                  afex_stats->rx_frames_dropped_lo,
                  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);

        ADD_64_LE(afex_stats->rx_frames_dropped_hi,
                  LE32_0,
                  afex_stats->rx_frames_dropped_lo,
                  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);

        ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
                  LE32_0,
                  afex_stats->tx_unicast_bytes_lo,
                  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

        ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
                  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
                  afex_stats->tx_unicast_bytes_lo,
                  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

        ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
                  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
                  afex_stats->tx_broadcast_bytes_lo,
                  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

        ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
                  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
                  afex_stats->tx_multicast_bytes_lo,
                  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

        ADD_64_LE(afex_stats->tx_unicast_frames_hi,
                  LE32_0,
                  afex_stats->tx_unicast_frames_lo,
                  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

        ADD_64_LE(afex_stats->tx_unicast_frames_hi,
                  LE32_0,
                  afex_stats->tx_unicast_frames_lo,
                  fcoe_q_xstorm_stats->ucast_pkts_sent);

        ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
                  LE32_0,
                  afex_stats->tx_broadcast_frames_lo,
                  fcoe_q_xstorm_stats->bcast_pkts_sent);

        ADD_64_LE(afex_stats->tx_multicast_frames_hi,
                  LE32_0,
                  afex_stats->tx_multicast_frames_lo,
                  fcoe_q_xstorm_stats->mcast_pkts_sent);

        ADD_64_LE(afex_stats->tx_frames_dropped_hi,
                  LE32_0,
                  afex_stats->tx_frames_dropped_lo,
                  fcoe_q_xstorm_stats->error_drop_pkts);
    }
#endif

    /*
     * If port stats are requested, add them to the PMF
     * stats, as anyway they will be accumulated by the
     * MCP before sent to the switch
     */
    if ((sc->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
        ADD_64(afex_stats->rx_frames_dropped_hi,
               0,
               afex_stats->rx_frames_dropped_lo,
               estats->mac_filter_discard);
        ADD_64(afex_stats->rx_frames_dropped_hi,
               0,
               afex_stats->rx_frames_dropped_lo,
               estats->brb_truncate_discard);
        ADD_64(afex_stats->rx_frames_discarded_hi,
               0,
               afex_stats->rx_frames_discarded_lo,
               estats->mac_discard);
    }
}
2043 
2044