xref: /freebsd/sys/dev/bxe/bxe_stats.c (revision 39ee7a7a6bdd1557b1c3532abf60d139798ac88b)
1 /*-
2  * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24  * THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "bxe.h"
31 #include "bxe_stats.h"
32 
/*
 * Width of 'long' in bits, consumed by bxe_hilo() below.
 * NOTE(review): only __i386__ is treated as 32-bit here; any other
 * 32-bit platform would wrongly get 64 -- confirm the set of
 * architectures this driver builds on before reusing this macro.
 */
#ifdef __i386__
#define BITS_PER_LONG 32
#else
#define BITS_PER_LONG 64
#endif
38 
/*
 * Combine a hi/lo pair of 32-bit statistics words into a single value.
 * 'hiref' points at the high word; the low word immediately follows it
 * in memory.  On 32-bit builds only the low word is returned.
 */
static inline long
bxe_hilo(uint32_t *hiref)
{
#if (BITS_PER_LONG == 64)
    return (HILO_U64(*hiref, *(hiref + 1)));
#else
    return (*(hiref + 1));
#endif
}
50 
51 static inline uint16_t
52 bxe_get_port_stats_dma_len(struct bxe_softc *sc)
53 {
54     uint16_t res = 0;
55     uint32_t size;
56 
57     /* 'newest' convention - shmem2 contains the size of the port stats */
58     if (SHMEM2_HAS(sc, sizeof_port_stats)) {
59         size = SHMEM2_RD(sc, sizeof_port_stats);
60         if (size) {
61             res = size;
62         }
63 
64         /* prevent newer BC from causing buffer overflow */
65         if (res > sizeof(struct host_port_stats)) {
66             res = sizeof(struct host_port_stats);
67         }
68     }
69 
70     /*
71      * Older convention - all BCs support the port stats fields up until
72      * the 'not_used' field
73      */
74     if (!res) {
75         res = (offsetof(struct host_port_stats, not_used) + 4);
76 
77         /* if PFC stats are supported by the MFW, DMA them as well */
78         if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
79             res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
80                     offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
81         }
82     }
83 
84     res >>= 2;
85 
86     DBASSERT(sc, !(res > 2 * DMAE_LEN32_RD_MAX), ("big stats dmae length\n"));
87     return (res);
88 }
89 
90 /*
91  * Init service functions
92  */
93 
/*
 * Dump the firmware statistics request (header plus every staged query
 * entry) to the debug log.  Debug aid only; modifies no state.
 */
static void
bxe_dp_stats(struct bxe_softc *sc)
{
    int i;

    /* header: command count, sequence counter and DMA address of results */
    BLOGD(sc, DBG_STATS,
          "dumping stats:\n"
          "  fw_stats_req\n"
          "    hdr\n"
          "      cmd_num %d\n"
          "      reserved0 %d\n"
          "      drv_stats_counter %d\n"
          "      reserved1 %d\n"
          "      stats_counters_addrs %x %x\n",
          sc->fw_stats_req->hdr.cmd_num,
          sc->fw_stats_req->hdr.reserved0,
          sc->fw_stats_req->hdr.drv_stats_counter,
          sc->fw_stats_req->hdr.reserved1,
          sc->fw_stats_req->hdr.stats_counters_addrs.hi,
          sc->fw_stats_req->hdr.stats_counters_addrs.lo);

    /* one log entry per staged query */
    for (i = 0; i < sc->fw_stats_req->hdr.cmd_num; i++) {
        BLOGD(sc, DBG_STATS,
              "query[%d]\n"
              "  kind %d\n"
              "  index %d\n"
              "  funcID %d\n"
              "  reserved %d\n"
              "  address %x %x\n",
              i,
              sc->fw_stats_req->query[i].kind,
              sc->fw_stats_req->query[i].index,
              sc->fw_stats_req->query[i].funcID,
              sc->fw_stats_req->query[i].reserved,
              sc->fw_stats_req->query[i].address.hi,
              sc->fw_stats_req->query[i].address.lo);
    }
}
132 
/*
 * Post the next statistics ramrod, if none is currently pending.
 * Protected by the stats lock in order to ensure the strict order
 * between statistics ramrods (each ramrod carries a sequence number in
 * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be sent in
 * order).  Uses a check / lock / re-check pattern on sc->stats_pending
 * so at most one ramrod is outstanding at a time.
 */
static void
bxe_storm_stats_post(struct bxe_softc *sc)
{
    int rc;

    if (!sc->stats_pending) {
        BXE_STATS_LOCK(sc);

        /* re-check under the lock; another context may have raced us */
        if (sc->stats_pending) {
            BXE_STATS_UNLOCK(sc);
            return;
        }

        /* stamp the request with the next sequence number (little-endian) */
        sc->fw_stats_req->hdr.drv_stats_counter =
            htole16(sc->stats_counter++);

        BLOGD(sc, DBG_STATS,
              "sending statistics ramrod %d\n",
              le16toh(sc->fw_stats_req->hdr.drv_stats_counter));

        /* adjust the ramrod to include VF queues statistics */
        // XXX bxe_iov_adjust_stats_req(sc);

        bxe_dp_stats(sc);

        /* send FW stats ramrod */
        rc = bxe_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
                         U64_HI(sc->fw_stats_req_mapping),
                         U64_LO(sc->fw_stats_req_mapping),
                         NONE_CONNECTION_TYPE);
        if (rc == 0) {
            /* posted successfully: block further posts until completion */
            sc->stats_pending = 1;
        }

        BXE_STATS_UNLOCK(sc);
    }
}
177 
/*
 * Kick off the hardware (DMAE) statistics transfers.  If multiple DMAE
 * commands were staged in the slowpath area (sc->executer_idx != 0), a
 * "loader" command is built in sc->stats_dmae that copies the staged
 * command array into the DMAE command memory at slot loader_idx + 1 and
 * triggers it; otherwise, if only the MCP function-stats copy is
 * needed, sc->stats_dmae is posted directly.
 */
static void
bxe_hw_stats_post(struct bxe_softc *sc)
{
    struct dmae_command *dmae = &sc->stats_dmae;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);
    int loader_idx;
    uint32_t opcode;

    /* pre-mark completion; cleared again right before an actual post */
    *stats_comp = DMAE_COMP_VAL;
    if (CHIP_REV_IS_SLOW(sc)) {
        /* no hardware stats DMA on slow chip revisions (presumably emulation) */
        return;
    }

    /* Update MCP's statistics if possible */
    if (sc->func_stx) {
        memcpy(BXE_SP(sc, func_stats), &sc->func_stats,
               sizeof(sc->func_stats));
    }

    /* loader */
    if (sc->executer_idx) {
        loader_idx = PMF_DMAE_C(sc);
        opcode =  bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                                  TRUE, DMAE_COMP_GRC);
        opcode = bxe_dmae_opcode_clr_src_reset(opcode);

        memset(dmae, 0, sizeof(struct dmae_command));
        dmae->opcode = opcode;
        /* source: the staged DMAE command array in host memory */
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, dmae[0]));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, dmae[0]));
        /* destination: DMAE command memory slot loader_idx + 1 (dword addr) */
        dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
                              sizeof(struct dmae_command) *
                              (loader_idx + 1)) >> 2);
        dmae->dst_addr_hi = 0;
        dmae->len = sizeof(struct dmae_command) >> 2;
        if (CHIP_IS_E1(sc)) {
            /* E1 commands are one dword shorter */
            dmae->len--;
        }
        /* completion: write 1 to the GO register of slot loader_idx + 1 */
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        *stats_comp = 0;
        bxe_post_dmae(sc, dmae, loader_idx);
    } else if (sc->func_stx) {
        *stats_comp = 0;
        bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
    }
}
227 
228 static int
229 bxe_stats_comp(struct bxe_softc *sc)
230 {
231     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
232     int cnt = 10;
233 
234     while (*stats_comp != DMAE_COMP_VAL) {
235         if (!cnt) {
236             BLOGE(sc, "Timeout waiting for stats finished\n");
237             break;
238         }
239 
240         cnt--;
241         DELAY(1000);
242     }
243 
244     return (1);
245 }
246 
247 /*
248  * Statistics service functions
249  */
250 
/*
 * DMA the current port statistics block from the device scratchpad
 * (sc->port.port_stx) into the host port_stats buffer -- presumably run
 * when this instance takes over as PMF so it inherits the counters
 * accumulated so far (see bxe_stats_pmf_start()).  The copy is staged
 * as two DMAE commands: the first reads DMAE_LEN32_RD_MAX dwords and
 * completes to GRC (chaining to the second), the second reads the
 * remainder and completes to PCI by writing *stats_comp.
 */
static void
bxe_stats_pmf_update(struct bxe_softc *sc)
{
    struct dmae_command *dmae;
    uint32_t opcode;
    int loader_idx = PMF_DMAE_C(sc);
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    if (sc->devinfo.bc_ver <= 0x06001400) {
        /*
         * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing
         * BRB registers while the BRB block is in reset. The DMA transfer
         * below triggers this issue resulting in the DMAE to stop
         * functioning. Skip this initial stats transfer for old bootcode
         * versions <= 6.0.20.
         */
        return;
    }

    /* sanity: only the PMF with a valid port stats address may run this */
    if (!sc->port.pmf || !sc->port.port_stx) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    sc->executer_idx = 0;

    opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0);

    /* first chunk: DMAE_LEN32_RD_MAX dwords, completion chains via GRC */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
    dmae->src_addr_lo = (sc->port.port_stx >> 2);
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
    dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
    dmae->len = DMAE_LEN32_RD_MAX;
    dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
    dmae->comp_addr_hi = 0;
    dmae->comp_val = 1;

    /* second chunk: the remainder, completion writes *stats_comp via PCI */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
    dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX);
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats) +
                               DMAE_LEN32_RD_MAX * 4);
    dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats) +
                               DMAE_LEN32_RD_MAX * 4);
    dmae->len = (bxe_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX);

    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    /* post the staged commands and wait for the PCI completion */
    *stats_comp = 0;
    bxe_hw_stats_post(sc);
    bxe_stats_comp(sc);
}
309 
/*
 * Stage the full set of port-statistics DMAE commands in the slowpath
 * area:
 *   - host port/function stats -> MCP scratchpad (when the shmem
 *     addresses port_stx/func_stx are valid),
 *   - MAC hardware counters -> host mac_stats buffer (EMAC, BMAC1/BMAC2
 *     or MSTAT depending on the active MAC type),
 *   - NIG per-port counters -> host nig_stats buffer.
 * All commands complete to GRC (chaining) except the last, which
 * completes to PCI by writing *stats_comp.  The staged commands are
 * executed later by bxe_hw_stats_post().
 */
static void
bxe_port_stats_init(struct bxe_softc *sc)
{
    struct dmae_command *dmae;
    int port = SC_PORT(sc);
    uint32_t opcode;
    int loader_idx = PMF_DMAE_C(sc);
    uint32_t mac_addr;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    /* sanity: only the PMF runs port stats, and only with link up */
    if (!sc->link_vars.link_up || !sc->port.pmf) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    sc->executer_idx = 0;

    /* MCP */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                             TRUE, DMAE_COMP_GRC);

    if (sc->port.port_stx) {
        /* copy host port stats out to the MCP scratchpad */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
        dmae->dst_addr_lo = sc->port.port_stx >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = bxe_get_port_stats_dma_len(sc);
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    if (sc->func_stx) {
        /* copy host function stats out to the MCP scratchpad */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
        dmae->dst_addr_lo = (sc->func_stx >> 2);
        dmae->dst_addr_hi = 0;
        dmae->len = (sizeof(struct host_func_stats) >> 2);
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* MAC */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
                             TRUE, DMAE_COMP_GRC);

    /* EMAC is special */
    if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) {
        mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

        /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
        dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* EMAC_REG_EMAC_RX_STAT_AC_28 (a single extra RX counter) */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2);
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            rx_stat_falsecarriererrors));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            rx_stat_falsecarriererrors));
        dmae->len = 1;
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2);
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            tx_stat_ifhcoutoctets));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            tx_stat_ifhcoutoctets));
        dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    } else {
        uint32_t tx_src_addr_lo, rx_src_addr_lo;
        uint16_t rx_len, tx_len;

        /* configure the params according to MAC type */
        switch (sc->link_vars.mac_type) {
        case ELINK_MAC_TYPE_BMAC:
            mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM :
                                NIG_REG_INGRESS_BMAC0_MEM;

            /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
               BIGMAC_REGISTER_TX_STAT_GTBYT */
            if (CHIP_IS_E1x(sc)) {
                /* BMAC1 register layout (+8 covers the last 64-bit counter) */
                tx_src_addr_lo =
                    ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
                tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
                           BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
                rx_src_addr_lo =
                    ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
                rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
                           BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
            } else {
                /* BMAC2 register layout */
                tx_src_addr_lo =
                    ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
                tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
                           BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
                rx_src_addr_lo =
                    ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
                rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
                           BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
            }

            break;

        case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */
        case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */
        default:
            mac_addr = (port) ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
            tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2);
            rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2);
            tx_len =
                (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2);
            rx_len =
                (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2);
            break;
        }

        /* TX stats */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = tx_src_addr_lo;
        dmae->src_addr_hi = 0;
        dmae->len = tx_len;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* RX stats land immediately after the TX stats in mac_stats */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_hi = 0;
        dmae->src_addr_lo = rx_src_addr_lo;
        dmae->dst_addr_lo =
            U64_LO(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
        dmae->dst_addr_hi =
            U64_HI(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
        dmae->len = rx_len;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* NIG (not present on E3) */
    if (!CHIP_IS_E3(sc)) {
        /* per-port egress packet counter 0 (hi/lo pair, 2 dwords) */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo =
            (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
                    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt0_lo));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt0_lo));
        dmae->len = ((2 * sizeof(uint32_t)) >> 2);
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* per-port egress packet counter 1 (hi/lo pair, 2 dwords) */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo =
            (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
                    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt1_lo));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt1_lo));
        dmae->len = ((2 * sizeof(uint32_t)) >> 2);
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* final command: BRB discard counters, completes to PCI (*stats_comp) */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
                                   TRUE, DMAE_COMP_PCI);
    dmae->src_addr_lo =
        (port ? NIG_REG_STAT1_BRB_DISCARD :
                NIG_REG_STAT0_BRB_DISCARD) >> 2;
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats));
    dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats));
    /* skip the last 4 dwords (the egress counters copied above) */
    dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2;

    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    *stats_comp = 0;
}
537 
538 static void
539 bxe_func_stats_init(struct bxe_softc *sc)
540 {
541     struct dmae_command *dmae = &sc->stats_dmae;
542     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
543 
544     /* sanity */
545     if (!sc->func_stx) {
546         BLOGE(sc, "BUG!\n");
547         return;
548     }
549 
550     sc->executer_idx = 0;
551     memset(dmae, 0, sizeof(struct dmae_command));
552 
553     dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
554                                    TRUE, DMAE_COMP_PCI);
555     dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
556     dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
557     dmae->dst_addr_lo = (sc->func_stx >> 2);
558     dmae->dst_addr_hi = 0;
559     dmae->len = (sizeof(struct host_func_stats) >> 2);
560     dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
561     dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
562     dmae->comp_val = DMAE_COMP_VAL;
563 
564     *stats_comp = 0;
565 }
566 
567 static void
568 bxe_stats_start(struct bxe_softc *sc)
569 {
570     /*
571      * VFs travel through here as part of the statistics FSM, but no action
572      * is required
573      */
574     if (IS_VF(sc)) {
575         return;
576     }
577 
578     if (sc->port.pmf) {
579         bxe_port_stats_init(sc);
580     }
581 
582     else if (sc->func_stx) {
583         bxe_func_stats_init(sc);
584     }
585 
586     bxe_hw_stats_post(sc);
587     bxe_storm_stats_post(sc);
588 }
589 
/*
 * Become the PMF for statistics purposes: wait for any outstanding DMAE
 * completion, DMA the current port stats in from the shmem scratchpad,
 * then (re)start the normal statistics flow.
 */
static void
bxe_stats_pmf_start(struct bxe_softc *sc)
{
    bxe_stats_comp(sc);
    bxe_stats_pmf_update(sc);
    bxe_stats_start(sc);
}
597 
/*
 * Restart the statistics flow: wait for any outstanding DMAE
 * completion, then stage and launch a fresh round of transfers.
 */
static void
bxe_stats_restart(struct bxe_softc *sc)
{
    /*
     * VFs travel through here as part of the statistics FSM, but no action
     * is required
     */
    if (IS_VF(sc)) {
        return;
    }

    bxe_stats_comp(sc);
    bxe_stats_start(sc);
}
612 
/*
 * Fold freshly DMA'd BigMAC hardware counters (BMAC1 on E1x, BMAC2
 * otherwise) into the accumulated host port stats (pstats) and the
 * driver ethernet stats (estats).  On BMAC2, PFC frame counters are
 * also collected and combined with the saved firmware stats.
 *
 * NOTE: the UPDATE_STAT64()/ADD_64() macros expand against the local
 * names 'new' and 'pstats' (and presumably the 'diff' scratch pair) --
 * do not rename these locals.
 */
static void
bxe_bmac_stats_update(struct bxe_softc *sc)
{
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;
    /* hi/lo scratch pair used by the stats macros */
    struct {
        uint32_t lo;
        uint32_t hi;
    } diff;

    if (CHIP_IS_E1x(sc)) {
        struct bmac1_stats *new = BXE_SP(sc, mac_stats.bmac1_stats);

        /* the macros below will use "bmac1_stats" type */
        UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
        UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
        UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
        UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
        UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
        UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
        UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
        UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
        UPDATE_STAT64(tx_stat_gt127,
                      tx_stat_etherstatspkts65octetsto127octets);
        UPDATE_STAT64(tx_stat_gt255,
                      tx_stat_etherstatspkts128octetsto255octets);
        UPDATE_STAT64(tx_stat_gt511,
                      tx_stat_etherstatspkts256octetsto511octets);
        UPDATE_STAT64(tx_stat_gt1023,
                      tx_stat_etherstatspkts512octetsto1023octets);
        UPDATE_STAT64(tx_stat_gt1518,
                      tx_stat_etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
        UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
        UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
        UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
        UPDATE_STAT64(tx_stat_gterr,
                      tx_stat_dot3statsinternalmactransmiterrors);
        UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
    } else {
        struct bmac2_stats *new = BXE_SP(sc, mac_stats.bmac2_stats);
        struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;

        /* the macros below will use "bmac2_stats" type */
        UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
        UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
        UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
        UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
        UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
        UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
        UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
        UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
        UPDATE_STAT64(tx_stat_gt127,
                      tx_stat_etherstatspkts65octetsto127octets);
        UPDATE_STAT64(tx_stat_gt255,
                      tx_stat_etherstatspkts128octetsto255octets);
        UPDATE_STAT64(tx_stat_gt511,
                      tx_stat_etherstatspkts256octetsto511octets);
        UPDATE_STAT64(tx_stat_gt1023,
                      tx_stat_etherstatspkts512octetsto1023octets);
        UPDATE_STAT64(tx_stat_gt1518,
                      tx_stat_etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
        UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
        UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
        UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
        UPDATE_STAT64(tx_stat_gterr,
                      tx_stat_dot3statsinternalmactransmiterrors);
        UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

        /* collect PFC stats */
        pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
        pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
        ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi,
               pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo);

        pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
        pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
        ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi,
               pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo);
    }

    /* mirror selected accumulated port counters into the ethernet stats */
    estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
    estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;

    estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
    estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;

    estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
    estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
    estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
    estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
}
715 
/*
 * Fold freshly DMA'd MSTAT hardware counters (used by UMAC/XMAC MACs)
 * into the accumulated host port stats (pstats) and the driver
 * ethernet stats (estats), including PFC frame counters.  MSTAT has no
 * "over 1522 octets" bucket, so it is synthesized from the 2047/4095/
 * 9216/16383 buckets.
 *
 * NOTE: the ADD_STAT64()/ADD_64() macros expand against the local
 * names 'new', 'pstats' and 'estats' -- do not rename these locals.
 */
static void
bxe_mstat_stats_update(struct bxe_softc *sc)
{
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;
    struct mstat_stats *new = BXE_SP(sc, mac_stats.mstat_stats);

    ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
    ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
    ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
    ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
    ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
    ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
    ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
    ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
    ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
    ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

    /* collect pfc stats */
    ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
           pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
    ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
           pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

    ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
    ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets);
    ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets);
    ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets);
    ADD_STAT64(stats_tx.tx_gt1023,
               tx_stat_etherstatspkts512octetsto1023octets);
    ADD_STAT64(stats_tx.tx_gt1518,
               tx_stat_etherstatspkts1024octetsto1522octets);
    ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

    ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
    ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
    ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

    ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors);
    ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

    estats->etherstatspkts1024octetsto1522octets_hi =
        pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
    estats->etherstatspkts1024octetsto1522octets_lo =
        pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

    /* synthesize "over 1522" from the 2047 + 4095 + 9216 + 16383 buckets */
    estats->etherstatspktsover1522octets_hi =
        pstats->mac_stx[1].tx_stat_mac_2047_hi;
    estats->etherstatspktsover1522octets_lo =
        pstats->mac_stx[1].tx_stat_mac_2047_lo;

    ADD_64(estats->etherstatspktsover1522octets_hi,
           pstats->mac_stx[1].tx_stat_mac_4095_hi,
           estats->etherstatspktsover1522octets_lo,
           pstats->mac_stx[1].tx_stat_mac_4095_lo);

    ADD_64(estats->etherstatspktsover1522octets_hi,
           pstats->mac_stx[1].tx_stat_mac_9216_hi,
           estats->etherstatspktsover1522octets_lo,
           pstats->mac_stx[1].tx_stat_mac_9216_lo);

    ADD_64(estats->etherstatspktsover1522octets_hi,
           pstats->mac_stx[1].tx_stat_mac_16383_hi,
           estats->etherstatspktsover1522octets_lo,
           pstats->mac_stx[1].tx_stat_mac_16383_lo);

    /* mirror selected accumulated port counters into the ethernet stats */
    estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
    estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;

    estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
    estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;

    estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
    estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
    estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
    estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
}
793 
/*
 * Fold the latest EMAC register snapshot (DMAE'd into mac_stats.emac_stats)
 * into the accumulated 64-bit host port statistics, then derive the
 * aggregate RX/TX pause-frame counters in the driver ethernet stats.
 */
static void
bxe_emac_stats_update(struct bxe_softc *sc)
{
    /* 'new' = fresh hardware snapshot; referenced implicitly by the macros */
    struct emac_stats *new = BXE_SP(sc, mac_stats.emac_stats);
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;

    /* extend each 32-bit hardware counter into its 64-bit accumulator */
    UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
    UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
    UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
    UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
    UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
    UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
    UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
    UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
    UPDATE_EXTEND_STAT(tx_stat_outxonsent);
    UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
    UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
    UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

    /* pause frames received = XON received + XOFF received (64-bit add) */
    estats->pause_frames_received_hi =
        pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
    estats->pause_frames_received_lo =
        pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
    ADD_64(estats->pause_frames_received_hi,
           pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
           estats->pause_frames_received_lo,
           pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

    /* pause frames sent = XON sent + XOFF sent (64-bit add) */
    estats->pause_frames_sent_hi =
        pstats->mac_stx[1].tx_stat_outxonsent_hi;
    estats->pause_frames_sent_lo =
        pstats->mac_stx[1].tx_stat_outxonsent_lo;
    ADD_64(estats->pause_frames_sent_hi,
           pstats->mac_stx[1].tx_stat_outxoffsent_hi,
           estats->pause_frames_sent_lo,
           pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
851 
/*
 * Refresh all hardware-maintained statistics after a DMAE completion:
 * dispatch to the MAC-specific updater for the active MAC, then fold in
 * the NIG/BRB counters and snapshot the port stats for the next round.
 * Returns 0 on success, -1 when no MAC is active.
 */
static int
bxe_hw_stats_update(struct bxe_softc *sc)
{
    struct nig_stats *new = BXE_SP(sc, nig_stats);
    struct nig_stats *old = &(sc->port.old_nig_stats);
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;
    uint32_t lpi_reg, nig_timer_max;
    struct {
        uint32_t lo;
        uint32_t hi;
    } diff; /* consumed by UPDATE_STAT64_NIG below */

    switch (sc->link_vars.mac_type) {
    case ELINK_MAC_TYPE_BMAC:
        bxe_bmac_stats_update(sc);
        break;

    case ELINK_MAC_TYPE_EMAC:
        bxe_emac_stats_update(sc);
        break;

    case ELINK_MAC_TYPE_UMAC:
    case ELINK_MAC_TYPE_XMAC:
        bxe_mstat_stats_update(sc);
        break;

    case ELINK_MAC_TYPE_NONE: /* unreached */
        BLOGD(sc, DBG_STATS,
              "stats updated by DMAE but no MAC active\n");
        return (-1);

    default: /* unreached */
        /*
         * NOTE(review): this case only logs and then falls through to the
         * NIG update below (no return) — presumably acceptable because it
         * is marked unreachable; confirm that is intentional.
         */
        BLOGE(sc, "stats update failed, unknown MAC type\n");
    }

    /* BRB discards/truncates are deltas vs. the previous NIG snapshot */
    ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
                  new->brb_discard - old->brb_discard);
    ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
                  new->brb_truncate - old->brb_truncate);

    /* E3 has no NIG egress packet counters */
    if (!CHIP_IS_E3(sc)) {
        UPDATE_STAT64_NIG(egress_mac_pkt0,
                          etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64_NIG(egress_mac_pkt1,
                          etherstatspktsover1522octets);
    }

    /* remember this NIG snapshot for the next delta computation */
    memcpy(old, new, sizeof(struct nig_stats));

    /* bulk-copy the accumulated MAC stx block into the ethernet stats */
    memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
           sizeof(struct mac_stx));
    estats->brb_drop_hi = pstats->brb_drop_hi;
    estats->brb_drop_lo = pstats->brb_drop_lo;

    pstats->host_port_stats_counter++;

    if (CHIP_IS_E3(sc)) {
        /* accumulate EEE TX LPI entry count from the per-port CPMU reg */
        lpi_reg = (SC_PORT(sc)) ?
                      MISC_REG_CPMU_LP_SM_ENT_CNT_P1 :
                      MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
        estats->eee_tx_lpi += REG_RD(sc, lpi_reg);
    }

    if (!BXE_NOMCP(sc)) {
        /* report (once per change) the NIG timer max seen by management FW */
        nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
        if (nig_timer_max != estats->nig_timer_max) {
            estats->nig_timer_max = nig_timer_max;
            BLOGE(sc, "invalid NIG timer max (%u)\n",
                  estats->nig_timer_max);
        }
    }

    return (0);
}
927 
928 static int
929 bxe_storm_stats_validate_counters(struct bxe_softc *sc)
930 {
931     struct stats_counter *counters = &sc->fw_stats_data->storm_counters;
932     uint16_t cur_stats_counter;
933 
934     /*
935      * Make sure we use the value of the counter
936      * used for sending the last stats ramrod.
937      */
938     BXE_STATS_LOCK(sc);
939     cur_stats_counter = (sc->stats_counter - 1);
940     BXE_STATS_UNLOCK(sc);
941 
942     /* are storm stats valid? */
943     if (le16toh(counters->xstats_counter) != cur_stats_counter) {
944         BLOGD(sc, DBG_STATS,
945               "stats not updated by xstorm, "
946               "counter 0x%x != stats_counter 0x%x\n",
947               le16toh(counters->xstats_counter), sc->stats_counter);
948         return (-EAGAIN);
949     }
950 
951     if (le16toh(counters->ustats_counter) != cur_stats_counter) {
952         BLOGD(sc, DBG_STATS,
953               "stats not updated by ustorm, "
954               "counter 0x%x != stats_counter 0x%x\n",
955               le16toh(counters->ustats_counter), sc->stats_counter);
956         return (-EAGAIN);
957     }
958 
959     if (le16toh(counters->cstats_counter) != cur_stats_counter) {
960         BLOGD(sc, DBG_STATS,
961               "stats not updated by cstorm, "
962               "counter 0x%x != stats_counter 0x%x\n",
963               le16toh(counters->cstats_counter), sc->stats_counter);
964         return (-EAGAIN);
965     }
966 
967     if (le16toh(counters->tstats_counter) != cur_stats_counter) {
968         BLOGD(sc, DBG_STATS,
969               "stats not updated by tstorm, "
970               "counter 0x%x != stats_counter 0x%x\n",
971               le16toh(counters->tstats_counter), sc->stats_counter);
972         return (-EAGAIN);
973     }
974 
975     return (0);
976 }
977 
/*
 * Aggregate the firmware (storm) statistics into the per-queue, per-function
 * and global ethernet statistics.  Per-queue t/u/x-storm counters are
 * extended into 64-bit accumulators; totals are then derived by 64-bit
 * addition.  Returns 0 on success, -EAGAIN when the storm counters have not
 * yet caught up with the last stats ramrod (PF only).
 */
static int
bxe_storm_stats_update(struct bxe_softc *sc)
{
    struct tstorm_per_port_stats *tport =
        &sc->fw_stats_data->port.tstorm_port_statistics;
    struct tstorm_per_pf_stats *tfunc =
        &sc->fw_stats_data->pf.tstorm_pf_statistics;
    struct host_func_stats *fstats = &sc->func_stats;
    struct bxe_eth_stats *estats = &sc->eth_stats;
    struct bxe_eth_stats_old *estats_old = &sc->eth_stats_old;
    int i;

    /* vfs stat counter is managed by pf */
    if (IS_PF(sc) && bxe_storm_stats_validate_counters(sc)) {
        return (-EAGAIN);
    }

    /* recomputed from scratch below */
    estats->error_bytes_received_hi = 0;
    estats->error_bytes_received_lo = 0;

    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_fastpath *fp = &sc->fp[i];
        struct tstorm_per_queue_stats *tclient =
            &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics;
        struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
        struct ustorm_per_queue_stats *uclient =
            &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics;
        struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
        struct xstorm_per_queue_stats *xclient =
            &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics;
        struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
        struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
        struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

        uint32_t diff; /* scratch for the UPDATE_* / SUB_* macros */

        BLOGD(sc, DBG_STATS,
              "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x\n",
              i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
              xclient->mcast_pkts_sent);

        BLOGD(sc, DBG_STATS, "---------------\n");

        /* RX byte counters per cast type */
        UPDATE_QSTAT(tclient->rcv_bcast_bytes,
                     total_broadcast_bytes_received);
        UPDATE_QSTAT(tclient->rcv_mcast_bytes,
                     total_multicast_bytes_received);
        UPDATE_QSTAT(tclient->rcv_ucast_bytes,
                     total_unicast_bytes_received);

        /*
         * sum to total_bytes_received all
         * unicast/multicast/broadcast
         */
        qstats->total_bytes_received_hi =
            qstats->total_broadcast_bytes_received_hi;
        qstats->total_bytes_received_lo =
            qstats->total_broadcast_bytes_received_lo;

        ADD_64(qstats->total_bytes_received_hi,
               qstats->total_multicast_bytes_received_hi,
               qstats->total_bytes_received_lo,
               qstats->total_multicast_bytes_received_lo);

        ADD_64(qstats->total_bytes_received_hi,
               qstats->total_unicast_bytes_received_hi,
               qstats->total_bytes_received_lo,
               qstats->total_unicast_bytes_received_lo);

        /* start valid bytes at the total; no-buff drops are subtracted later */
        qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi;
        qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo;

        /* RX packet counters and discard counters */
        UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received);
        UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received);
        UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received);
        UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
                              etherstatsoverrsizepkts, 32);
        UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);

        /* packets dropped for lack of buffers don't count as received */
        SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received);
        SUB_EXTEND_USTAT(mcast_no_buff_pkts,
                         total_multicast_packets_received);
        SUB_EXTEND_USTAT(bcast_no_buff_pkts,
                         total_broadcast_packets_received);
        UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
        UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
        UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

        /* TX byte counters per cast type */
        UPDATE_QSTAT(xclient->bcast_bytes_sent,
                     total_broadcast_bytes_transmitted);
        UPDATE_QSTAT(xclient->mcast_bytes_sent,
                     total_multicast_bytes_transmitted);
        UPDATE_QSTAT(xclient->ucast_bytes_sent,
                     total_unicast_bytes_transmitted);

        /*
         * sum to total_bytes_transmitted all
         * unicast/multicast/broadcast
         */
        qstats->total_bytes_transmitted_hi =
            qstats->total_unicast_bytes_transmitted_hi;
        qstats->total_bytes_transmitted_lo =
            qstats->total_unicast_bytes_transmitted_lo;

        ADD_64(qstats->total_bytes_transmitted_hi,
               qstats->total_broadcast_bytes_transmitted_hi,
               qstats->total_bytes_transmitted_lo,
               qstats->total_broadcast_bytes_transmitted_lo);

        ADD_64(qstats->total_bytes_transmitted_hi,
               qstats->total_multicast_bytes_transmitted_hi,
               qstats->total_bytes_transmitted_lo,
               qstats->total_multicast_bytes_transmitted_lo);

        /* TX packet counters per cast type */
        UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
                            total_unicast_packets_transmitted);
        UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
                            total_multicast_packets_transmitted);
        UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
                            total_broadcast_packets_transmitted);

        UPDATE_EXTEND_TSTAT(checksum_discard,
                            total_packets_received_checksum_discarded);
        UPDATE_EXTEND_TSTAT(ttl0_discard,
                            total_packets_received_ttl0_discarded);

        UPDATE_EXTEND_XSTAT(error_drop_pkts,
                            total_transmitted_dropped_packets_error);

        /* TPA aggregations completed */
        UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
        /* Number of network frames aggregated by TPA */
        UPDATE_EXTEND_E_USTAT(coalesced_pkts, total_tpa_aggregated_frames);
        /* Total number of bytes in completed TPA aggregations */
        UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

        UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

        /* mirror the per-queue totals into the per-function stats */
        UPDATE_FSTAT_QSTAT(total_bytes_received);
        UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
        UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
        UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
        UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
        UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
        UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
        UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
        UPDATE_FSTAT_QSTAT(valid_bytes_received);
    }

    /* error bytes are included in the global RX byte total */
    ADD_64(estats->total_bytes_received_hi,
           estats->rx_stat_ifhcinbadoctets_hi,
           estats->total_bytes_received_lo,
           estats->rx_stat_ifhcinbadoctets_lo);

    ADD_64_LE(estats->total_bytes_received_hi,
              tfunc->rcv_error_bytes.hi,
              estats->total_bytes_received_lo,
              tfunc->rcv_error_bytes.lo);

    ADD_64_LE(estats->error_bytes_received_hi,
              tfunc->rcv_error_bytes.hi,
              estats->error_bytes_received_lo,
              tfunc->rcv_error_bytes.lo);

    UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

    ADD_64(estats->error_bytes_received_hi,
           estats->rx_stat_ifhcinbadoctets_hi,
           estats->error_bytes_received_lo,
           estats->rx_stat_ifhcinbadoctets_lo);

    /* only the PMF maintains the port-wide firmware discard counters */
    if (sc->port.pmf) {
        struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
        UPDATE_FW_STAT(mac_filter_discard);
        UPDATE_FW_STAT(mf_tag_discard);
        UPDATE_FW_STAT(brb_truncate_discard);
        UPDATE_FW_STAT(mac_discard);
    }

    /* mark the function stats block as consistent */
    fstats->host_func_stats_start = ++fstats->host_func_stats_end;

    sc->stats_pending = 0;

    return (0);
}
1163 
1164 static void
1165 bxe_net_stats_update(struct bxe_softc *sc)
1166 {
1167 
1168     for (int i = 0; i < sc->num_queues; i++)
1169         if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS,
1170 	    le32toh(sc->fp[i].old_tclient.checksum_discard));
1171 }
1172 
/*
 * if_get_counter handler: report interface counters from the accumulated
 * ethernet statistics.  Each value is read via bxe_hilo(), which combines
 * the hi/lo 32-bit halves (hi half only on 64-bit builds).
 */
uint64_t
bxe_get_counter(if_t ifp, ift_counter cnt)
{
	struct bxe_softc *sc;
	struct bxe_eth_stats *estats;

	sc = if_getsoftc(ifp);
	estats = &sc->eth_stats;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (bxe_hilo(&estats->total_unicast_packets_received_hi) +
		    bxe_hilo(&estats->total_multicast_packets_received_hi) +
		    bxe_hilo(&estats->total_broadcast_packets_received_hi));
	case IFCOUNTER_OPACKETS:
		return (bxe_hilo(&estats->total_unicast_packets_transmitted_hi) +
		    bxe_hilo(&estats->total_multicast_packets_transmitted_hi) +
		    bxe_hilo(&estats->total_broadcast_packets_transmitted_hi));
	case IFCOUNTER_IBYTES:
		return (bxe_hilo(&estats->total_bytes_received_hi));
	case IFCOUNTER_OBYTES:
		return (bxe_hilo(&estats->total_bytes_transmitted_hi));
	case IFCOUNTER_IERRORS:
		/* undersize/oversize + BRB drops/truncates + FCS/align + no-buff */
		return (bxe_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		    bxe_hilo(&estats->etherstatsoverrsizepkts_hi) +
		    bxe_hilo(&estats->brb_drop_hi) +
		    bxe_hilo(&estats->brb_truncate_hi) +
		    bxe_hilo(&estats->rx_stat_dot3statsfcserrors_hi) +
		    bxe_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi) +
		    bxe_hilo(&estats->no_buff_discard_hi));
	case IFCOUNTER_OERRORS:
		/*
		 * NOTE(review): the first term is an rx_stat_* counter
		 * (carrier sense errors) folded into output errors —
		 * presumably deliberate; confirm against the Linux bnx2x
		 * equivalent.
		 */
		return (bxe_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi) +
		    bxe_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi));
	case IFCOUNTER_IMCASTS:
		return (bxe_hilo(&estats->total_multicast_packets_received_hi));
	case IFCOUNTER_COLLISIONS:
		return (bxe_hilo(&estats->tx_stat_etherstatscollisions_hi) +
		    bxe_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		    bxe_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi));
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
1216 
/*
 * Roll the per-queue software (driver-maintained) counters up into the
 * global ethernet statistics, preserving the pre-reset values captured
 * in eth_q_stats_old.
 */
static void
bxe_drv_stats_update(struct bxe_softc *sc)
{
    struct bxe_eth_stats *estats = &sc->eth_stats;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
        struct bxe_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old;

        /* RX path software counters */
        UPDATE_ESTAT_QSTAT(rx_calls);
        UPDATE_ESTAT_QSTAT(rx_pkts);
        UPDATE_ESTAT_QSTAT(rx_tpa_pkts);
        UPDATE_ESTAT_QSTAT(rx_jumbo_sge_pkts);
        UPDATE_ESTAT_QSTAT(rx_soft_errors);
        UPDATE_ESTAT_QSTAT(rx_hw_csum_errors);
        UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip);
        UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp);
        UPDATE_ESTAT_QSTAT(rx_budget_reached);
        /* TX path software counters */
        UPDATE_ESTAT_QSTAT(tx_pkts);
        UPDATE_ESTAT_QSTAT(tx_soft_errors);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso_hdr_splits);
        UPDATE_ESTAT_QSTAT(tx_encap_failures);
        UPDATE_ESTAT_QSTAT(tx_hw_queue_full);
        UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth);
        UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure);
        UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth);
        UPDATE_ESTAT_QSTAT(tx_window_violation_std);
        UPDATE_ESTAT_QSTAT(tx_window_violation_tso);
        //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_ipv6);
        //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_not_tcp);
        UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf);
        UPDATE_ESTAT_QSTAT(tx_frames_deferred);
        UPDATE_ESTAT_QSTAT(tx_queue_xoff);

        /* mbuf driver statistics */
        UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts);
        UPDATE_ESTAT_QSTAT(mbuf_defrag_failures);
        UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_alloc_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_mapping_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_sge_alloc_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_sge_mapping_failed);

        /* track the number of allocated mbufs */
        UPDATE_ESTAT_QSTAT(mbuf_alloc_tx);
        UPDATE_ESTAT_QSTAT(mbuf_alloc_rx);
        UPDATE_ESTAT_QSTAT(mbuf_alloc_sge);
        UPDATE_ESTAT_QSTAT(mbuf_alloc_tpa);
    }
}
1273 
1274 static uint8_t
1275 bxe_edebug_stats_stopped(struct bxe_softc *sc)
1276 {
1277     uint32_t val;
1278 
1279     if (SHMEM2_HAS(sc, edebug_driver_if[1])) {
1280         val = SHMEM2_RD(sc, edebug_driver_if[1]);
1281 
1282         if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) {
1283             return (TRUE);
1284         }
1285     }
1286 
1287     return (FALSE);
1288 }
1289 
/*
 * Periodic statistics update (STATS_EVENT_UPDATE action in the ENABLED
 * state).  For a PF: wait for DMAE completion, refresh HW stats (PMF only),
 * then pull the storm stats; repeated storm failures schedule a chip
 * re-initialization.  A VF only consumes storm stats.  Finally the net/drv
 * counters are refreshed and (PF only) the next HW/storm requests posted.
 */
static void
bxe_stats_update(struct bxe_softc *sc)
{
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    if (bxe_edebug_stats_stopped(sc)) {
        return;
    }

    if (IS_PF(sc)) {
        /* the previous DMAE transfer must have completed */
        if (*stats_comp != DMAE_COMP_VAL) {
            return;
        }

        if (sc->port.pmf) {
            bxe_hw_stats_update(sc);
        }

        if (bxe_storm_stats_update(sc)) {
            /* on the 4th consecutive failure, request a chip reinit */
            if (sc->stats_pending++ == 3) {
		if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
			atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
			taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
		}
            }
            return;
        }
    } else {
        /*
         * VF doesn't collect HW statistics, and doesn't get completions,
         * performs only update.
         */
        bxe_storm_stats_update(sc);
    }

    bxe_net_stats_update(sc);
    bxe_drv_stats_update(sc);

    /* vf is done */
    if (IS_VF(sc)) {
        return;
    }

    /* kick off the next HW DMAE transfer and storm stats ramrod */
    bxe_hw_stats_post(sc);
    bxe_storm_stats_post(sc);
}
1336 
/*
 * Build the final DMAE commands that push the host copies of the port and
 * function statistics back to device memory (port_stx/func_stx) when
 * statistics collection is being stopped.  Commands are queued in the
 * slowpath dmae[] array for bxe_hw_stats_post() to execute; only the last
 * command in the chain completes to PCI (stats_comp).
 */
static void
bxe_port_stats_stop(struct bxe_softc *sc)
{
    struct dmae_command *dmae;
    uint32_t opcode;
    int loader_idx = PMF_DMAE_C(sc);
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    sc->executer_idx = 0;

    /* host (PCI) -> device (GRC), no completion yet */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0);

    if (sc->port.port_stx) {
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);

        /* if a func stats command follows, chain via GRC completion */
        if (sc->func_stx) {
            dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
        } else {
            dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
        }

        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
        dmae->dst_addr_lo = sc->port.port_stx >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = bxe_get_port_stats_dma_len(sc);
        if (sc->func_stx) {
            /* GRC completion: trigger the loader channel */
            dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
            dmae->comp_addr_hi = 0;
            dmae->comp_val = 1;
        } else {
            /* PCI completion: signal stats_comp in host memory */
            dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
            dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
            dmae->comp_val = DMAE_COMP_VAL;

            *stats_comp = 0;
        }
    }

    if (sc->func_stx) {
        /* function stats: always the last command, completes to PCI */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
        dmae->dst_addr_lo = (sc->func_stx >> 2);
        dmae->dst_addr_hi = 0;
        dmae->len = (sizeof(struct host_func_stats) >> 2);
        dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
        dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        *stats_comp = 0;
    }
}
1391 
1392 static void
1393 bxe_stats_stop(struct bxe_softc *sc)
1394 {
1395     uint8_t update = FALSE;
1396 
1397     bxe_stats_comp(sc);
1398 
1399     if (sc->port.pmf) {
1400         update = bxe_hw_stats_update(sc) == 0;
1401     }
1402 
1403     update |= bxe_storm_stats_update(sc) == 0;
1404 
1405     if (update) {
1406         bxe_net_stats_update(sc);
1407 
1408         if (sc->port.pmf) {
1409             bxe_port_stats_stop(sc);
1410         }
1411 
1412         bxe_hw_stats_post(sc);
1413         bxe_stats_comp(sc);
1414     }
1415 }
1416 
/* No-op action for state-machine transitions that require no work. */
static void
bxe_stats_do_nothing(struct bxe_softc *sc)
{
}
1422 
/*
 * Statistics state machine, indexed by [current state][event]: each entry
 * names the action to run and the state to transition to.  Rows are
 * STATS_STATE_DISABLED and STATS_STATE_ENABLED; columns are the
 * PMF / LINK_UP / UPDATE / STOP events (see bxe_stats_handle()).
 */
static const struct {
    void (*action)(struct bxe_softc *sc);
    enum bxe_stats_state next_state;
} bxe_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
    {
    /* DISABLED PMF */ { bxe_stats_pmf_update, STATS_STATE_DISABLED },
    /*      LINK_UP */ { bxe_stats_start,      STATS_STATE_ENABLED },
    /*      UPDATE  */ { bxe_stats_do_nothing, STATS_STATE_DISABLED },
    /*      STOP    */ { bxe_stats_do_nothing, STATS_STATE_DISABLED }
    },
    {
    /* ENABLED  PMF */ { bxe_stats_pmf_start,  STATS_STATE_ENABLED },
    /*      LINK_UP */ { bxe_stats_restart,    STATS_STATE_ENABLED },
    /*      UPDATE  */ { bxe_stats_update,     STATS_STATE_ENABLED },
    /*      STOP    */ { bxe_stats_stop,       STATS_STATE_DISABLED }
    }
};
1440 
/*
 * Drive the statistics state machine: atomically advance the state for the
 * given event, then run the corresponding action.
 *
 * NOTE(review): the state transition happens under the stats lock but the
 * action runs after the lock is released, so two concurrent events could
 * interleave their actions — presumably benign given the callers; confirm.
 */
void bxe_stats_handle(struct bxe_softc     *sc,
                      enum bxe_stats_event event)
{
    enum bxe_stats_state state;

    if (__predict_false(sc->panic)) {
        return;
    }

    BXE_STATS_LOCK(sc);
    state = sc->stats_state;
    sc->stats_state = bxe_stats_stm[state][event].next_state;
    BXE_STATS_UNLOCK(sc);

    bxe_stats_stm[state][event].action(sc);

    /* UPDATE fires periodically; logging it would flood the debug output */
    if (event != STATS_EVENT_UPDATE) {
        BLOGD(sc, DBG_STATS,
              "state %d -> event %d -> state %d\n",
              state, event, sc->stats_state);
    }
}
1463 
/*
 * Write the initial (baseline) host port statistics out to the device's
 * port_stx area with a single synchronous DMAE transfer.  Only valid for
 * the PMF with a configured port stats address.
 */
static void
bxe_port_stats_base_init(struct bxe_softc *sc)
{
    struct dmae_command *dmae;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    /* sanity */
    if (!sc->port.pmf || !sc->port.port_stx) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    sc->executer_idx = 0;

    /* single PCI->GRC copy of the port stats, completing to host memory */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                                   TRUE, DMAE_COMP_PCI);
    dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
    dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
    dmae->dst_addr_lo = (sc->port.port_stx >> 2);
    dmae->dst_addr_hi = 0;
    dmae->len = bxe_get_port_stats_dma_len(sc);
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    *stats_comp = 0;
    bxe_hw_stats_post(sc);
    bxe_stats_comp(sc); /* wait for the transfer to finish */
}
1494 
1495 /*
1496  * This function will prepare the statistics ramrod data the way
1497  * we will only have to increment the statistics counter and
1498  * send the ramrod each time we have to.
1499  */
static void
bxe_prep_fw_stats_req(struct bxe_softc *sc)
{
    int i;
    int first_queue_query_index;
    struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
    bus_addr_t cur_data_offset; /* DMA address of the current response area */
    struct stats_query_entry *cur_query_entry;

    stats_hdr->cmd_num = sc->fw_stats_num;
    stats_hdr->drv_stats_counter = 0;

    /*
     * The storm_counters struct contains the counters of completed
     * statistics requests per storm which are incremented by FW
     * each time it completes handling a statistics ramrod. We will
     * check these counters in the timer handler and discard a
     * (statistics) ramrod completion.
     */
    cur_data_offset = (sc->fw_stats_data_mapping +
                       offsetof(struct bxe_fw_stats_data, storm_counters));

    stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
    stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));

    /*
     * Prepare the first stats ramrod (will be completed with
     * the counters equal to zero) - init counters to something different.
     */
    memset(&sc->fw_stats_data->storm_counters, 0xff,
           sizeof(struct stats_counter));

    /**** Port FW statistics data ****/
    cur_data_offset = (sc->fw_stats_data_mapping +
                       offsetof(struct bxe_fw_stats_data, port));

    cur_query_entry = &sc->fw_stats_req->query[BXE_PORT_QUERY_IDX];

    cur_query_entry->kind = STATS_TYPE_PORT;
    /* For port query index is a DONT CARE */
    cur_query_entry->index = SC_PORT(sc);
    /* For port query funcID is a DONT CARE */
    cur_query_entry->funcID = htole16(SC_FUNC(sc));
    cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
    cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));

    /**** PF FW statistics data ****/
    cur_data_offset = (sc->fw_stats_data_mapping +
                       offsetof(struct bxe_fw_stats_data, pf));

    cur_query_entry = &sc->fw_stats_req->query[BXE_PF_QUERY_IDX];

    cur_query_entry->kind = STATS_TYPE_PF;
    /* For PF query index is a DONT CARE */
    cur_query_entry->index = SC_PORT(sc);
    cur_query_entry->funcID = htole16(SC_FUNC(sc));
    cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
    cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));

#if 0
    /**** FCoE FW statistics data ****/
    if (!NO_FCOE(sc)) {
        cur_data_offset = (sc->fw_stats_data_mapping +
                           offsetof(struct bxe_fw_stats_data, fcoe));

        cur_query_entry = &sc->fw_stats_req->query[BXE_FCOE_QUERY_IDX];

        cur_query_entry->kind = STATS_TYPE_FCOE;
        /* For FCoE query index is a DONT CARE */
        cur_query_entry->index = SC_PORT(sc);
        cur_query_entry->funcID = cpu_to_le16(SC_FUNC(sc));
        cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
        cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
    }
#endif

    /**** Clients' queries ****/
    cur_data_offset = (sc->fw_stats_data_mapping +
                       offsetof(struct bxe_fw_stats_data, queue_stats));

    /*
     * First queue query index depends whether FCoE offloaded request will
     * be included in the ramrod
     */
#if 0
    if (!NO_FCOE(sc))
        first_queue_query_index = BXE_FIRST_QUEUE_QUERY_IDX;
    else
#endif
        first_queue_query_index = (BXE_FIRST_QUEUE_QUERY_IDX - 1);

    /* one query entry per RSS queue, each with its own response area */
    for (i = 0; i < sc->num_queues; i++) {
        cur_query_entry =
            &sc->fw_stats_req->query[first_queue_query_index + i];

        cur_query_entry->kind = STATS_TYPE_QUEUE;
        cur_query_entry->index = bxe_stats_id(&sc->fp[i]);
        cur_query_entry->funcID = htole16(SC_FUNC(sc));
        cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
        cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));

        cur_data_offset += sizeof(struct per_queue_stats);
    }

#if 0
    /* add FCoE queue query if needed */
    if (!NO_FCOE(sc)) {
        cur_query_entry =
            &sc->fw_stats_req->query[first_queue_query_index + i];

        cur_query_entry->kind = STATS_TYPE_QUEUE;
        cur_query_entry->index = bxe_stats_id(&sc->fp[FCOE_IDX(sc)]);
        cur_query_entry->funcID = htole16(SC_FUNC(sc));
        cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
        cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
    }
#endif
}
1618 
/*
 * Initialize the driver statistics state for this adapter.
 *
 * Resets the pending/counter bookkeeping, latches the shared-memory
 * mailbox addresses used to report port/function statistics to the
 * management firmware (MCP), snapshots the NIG hardware counters as
 * the "old" baseline for later delta computation, clears the per-queue
 * and per-function software statistics (first-time init only), and
 * prepares the firmware statistics ramrod request.
 *
 * Leaves the statistics state machine in STATS_STATE_DISABLED and
 * clears sc->stats_init to mark that initialization has completed.
 */
void
bxe_stats_init(struct bxe_softc *sc)
{
    int /*abs*/port = SC_PORT(sc);
    int mb_idx = SC_FW_MB_IDX(sc);
    int i;

    sc->stats_pending = 0;
    sc->executer_idx = 0;
    sc->stats_counter = 0;

    /* port and func stats for management */
    if (!BXE_NOMCP(sc)) {
        sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
        sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param);
    } else {
        /* no MCP: no shared-memory statistics mailboxes available */
        sc->port.port_stx = 0;
        sc->func_stx = 0;
    }

    BLOGD(sc, DBG_STATS, "port_stx 0x%x func_stx 0x%x\n",
          sc->port.port_stx, sc->func_stx);

    /* on a re-init (non-first call) the PMF retrieves port statistics from the SP */
    if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) {
        bxe_stats_handle(sc, STATS_EVENT_PMF);
    }

    /* NOTE(review): redundant; port already holds SC_PORT(sc) from above */
    port = SC_PORT(sc);
    /* port stats: snapshot current NIG counters as the baseline */
    memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
    sc->port.old_nig_stats.brb_discard =
        REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
    sc->port.old_nig_stats.brb_truncate =
        REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
    /* the egress MAC packet counters are only read on pre-E3 chips */
    if (!CHIP_IS_E3(sc)) {
        REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
                    &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
        REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
                    &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
    }

    /* function stats */
    for (i = 0; i < sc->num_queues; i++) {
        memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient));
        memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient));
        memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient));
        if (sc->stats_init) {
            /* first-time init: clear the accumulated per-queue stats too */
            memset(&sc->fp[i].eth_q_stats, 0,
                   sizeof(sc->fp[i].eth_q_stats));
            memset(&sc->fp[i].eth_q_stats_old, 0,
                   sizeof(sc->fp[i].eth_q_stats_old));
        }
    }

    /* prepare statistics ramrod data */
    bxe_prep_fw_stats_req(sc);

    if (sc->stats_init) {
        memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
        memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
        memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
        memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
        memset(&sc->func_stats, 0, sizeof(sc->func_stats));

        /* Clean SP from previous statistics */
        if (sc->func_stx) {
            memset(BXE_SP(sc, func_stats), 0, sizeof(struct host_func_stats));
            bxe_func_stats_init(sc);
            bxe_hw_stats_post(sc);
            bxe_stats_comp(sc);
        }
    }

    sc->stats_state = STATS_STATE_DISABLED;

    if (sc->port.pmf && sc->port.port_stx) {
        bxe_port_stats_base_init(sc);
    }

    /* mark the end of statistics initialization */
    sc->stats_init = FALSE;
}
1702 
/*
 * Save the current statistics into their "old" shadow copies.
 *
 * Folds each queue's 64-bit byte counters into eth_q_stats_old so the
 * totals accumulated so far survive a statistics re-initialization.
 * On the PMF only, the port PFC frame counters and (in multi-function
 * mode) the discard counters are additionally saved into fw_stats_old.
 *
 * NOTE(review): the UPDATE_QSTAT_OLD()/UPDATE_FW_STAT_OLD() macros
 * expand against the local variable names declared below (qstats,
 * qstats_old, estats, fwstats) -- do not rename these locals without
 * checking the macro definitions in bxe_stats.h.
 */
void
bxe_save_statistics(struct bxe_softc *sc)
{
    int i;

    /* save queue statistics */
    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_fastpath *fp = &sc->fp[i];
        struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
        struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

        UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
        UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
        UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
        UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
        UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
        UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
        UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
        UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
        UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
        UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
    }

    /* store port firmware statistics (PMF only owns the port stats) */
    if (sc->port.pmf) {
        struct bxe_eth_stats *estats = &sc->eth_stats;
        struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
        struct host_port_stats *pstats = BXE_SP(sc, port_stats);

        fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi;
        fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo;
        fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi;
        fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo;

        /* discard counters only exist in multi-function mode */
        if (IS_MF(sc)) {
            UPDATE_FW_STAT_OLD(mac_filter_discard);
            UPDATE_FW_STAT_OLD(mf_tag_discard);
            UPDATE_FW_STAT_OLD(brb_truncate_discard);
            UPDATE_FW_STAT_OLD(mac_discard);
        }
    }
}
1749 
/*
 * Collect AFEX (NIC partitioning) statistics.
 *
 * Zeroes the caller-supplied afex_stats structure and then accumulates
 * into it, via 64-bit hi/lo additions, the per-queue ethernet counters
 * of every active queue: rx/tx bytes and frames per cast type, plus
 * discarded and dropped frame counts. When running on the PMF and the
 * caller requested the UIF statistics set (stats_type ==
 * VICSTATST_UIF_INDEX), port-level discard counters are folded in as
 * well, since the MCP accumulates them before sending to the switch.
 *
 * void_afex_stats must point to a struct afex_stats; it is passed as
 * void* presumably to decouple the caller from the stats headers --
 * TODO(review) confirm against the call site.
 */
void
bxe_afex_collect_stats(struct bxe_softc *sc,
                       void             *void_afex_stats,
                       uint32_t         stats_type)
{
    int i;
    struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
    struct bxe_eth_stats *estats = &sc->eth_stats;
#if 0
    struct per_queue_stats *fcoe_q_stats =
        &sc->fw_stats_data->queue_stats[FCOE_IDX(sc)];

    struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
        &fcoe_q_stats->tstorm_queue_statistics;

    struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
        &fcoe_q_stats->ustorm_queue_statistics;

    struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
        &fcoe_q_stats->xstorm_queue_statistics;

    struct fcoe_statistics_params *fw_fcoe_stat =
        &sc->fw_stats_data->fcoe;
#endif

    memset(afex_stats, 0, sizeof(struct afex_stats));

    /* accumulate every queue's counters into the single afex_stats */
    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;

        /* rx bytes per cast type */
        ADD_64(afex_stats->rx_unicast_bytes_hi,
               qstats->total_unicast_bytes_received_hi,
               afex_stats->rx_unicast_bytes_lo,
               qstats->total_unicast_bytes_received_lo);

        ADD_64(afex_stats->rx_broadcast_bytes_hi,
               qstats->total_broadcast_bytes_received_hi,
               afex_stats->rx_broadcast_bytes_lo,
               qstats->total_broadcast_bytes_received_lo);

        ADD_64(afex_stats->rx_multicast_bytes_hi,
               qstats->total_multicast_bytes_received_hi,
               afex_stats->rx_multicast_bytes_lo,
               qstats->total_multicast_bytes_received_lo);

        /* rx frames per cast type */
        ADD_64(afex_stats->rx_unicast_frames_hi,
               qstats->total_unicast_packets_received_hi,
               afex_stats->rx_unicast_frames_lo,
               qstats->total_unicast_packets_received_lo);

        ADD_64(afex_stats->rx_broadcast_frames_hi,
               qstats->total_broadcast_packets_received_hi,
               afex_stats->rx_broadcast_frames_lo,
               qstats->total_broadcast_packets_received_lo);

        ADD_64(afex_stats->rx_multicast_frames_hi,
               qstats->total_multicast_packets_received_hi,
               afex_stats->rx_multicast_frames_lo,
               qstats->total_multicast_packets_received_lo);

        /*
         * sum to rx_frames_discarded all discarded
         * packets due to size, ttl0 and checksum
         */
        ADD_64(afex_stats->rx_frames_discarded_hi,
               qstats->total_packets_received_checksum_discarded_hi,
               afex_stats->rx_frames_discarded_lo,
               qstats->total_packets_received_checksum_discarded_lo);

        ADD_64(afex_stats->rx_frames_discarded_hi,
               qstats->total_packets_received_ttl0_discarded_hi,
               afex_stats->rx_frames_discarded_lo,
               qstats->total_packets_received_ttl0_discarded_lo);

        ADD_64(afex_stats->rx_frames_discarded_hi,
               qstats->etherstatsoverrsizepkts_hi,
               afex_stats->rx_frames_discarded_lo,
               qstats->etherstatsoverrsizepkts_lo);

        /* drops due to no rx buffer available */
        ADD_64(afex_stats->rx_frames_dropped_hi,
               qstats->no_buff_discard_hi,
               afex_stats->rx_frames_dropped_lo,
               qstats->no_buff_discard_lo);

        /* tx bytes per cast type */
        ADD_64(afex_stats->tx_unicast_bytes_hi,
               qstats->total_unicast_bytes_transmitted_hi,
               afex_stats->tx_unicast_bytes_lo,
               qstats->total_unicast_bytes_transmitted_lo);

        ADD_64(afex_stats->tx_broadcast_bytes_hi,
               qstats->total_broadcast_bytes_transmitted_hi,
               afex_stats->tx_broadcast_bytes_lo,
               qstats->total_broadcast_bytes_transmitted_lo);

        ADD_64(afex_stats->tx_multicast_bytes_hi,
               qstats->total_multicast_bytes_transmitted_hi,
               afex_stats->tx_multicast_bytes_lo,
               qstats->total_multicast_bytes_transmitted_lo);

        /* tx frames per cast type */
        ADD_64(afex_stats->tx_unicast_frames_hi,
               qstats->total_unicast_packets_transmitted_hi,
               afex_stats->tx_unicast_frames_lo,
               qstats->total_unicast_packets_transmitted_lo);

        ADD_64(afex_stats->tx_broadcast_frames_hi,
               qstats->total_broadcast_packets_transmitted_hi,
               afex_stats->tx_broadcast_frames_lo,
               qstats->total_broadcast_packets_transmitted_lo);

        ADD_64(afex_stats->tx_multicast_frames_hi,
               qstats->total_multicast_packets_transmitted_hi,
               afex_stats->tx_multicast_frames_lo,
               qstats->total_multicast_packets_transmitted_lo);

        ADD_64(afex_stats->tx_frames_dropped_hi,
               qstats->total_transmitted_dropped_packets_error_hi,
               afex_stats->tx_frames_dropped_lo,
               qstats->total_transmitted_dropped_packets_error_lo);
    }

#if 0
    /*
     * Now add FCoE statistics which are collected separately
     * (both offloaded and non offloaded)
     */
    if (!NO_FCOE(sc)) {
        ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
                  LE32_0,
                  afex_stats->rx_unicast_bytes_lo,
                  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

        ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
                  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
                  afex_stats->rx_unicast_bytes_lo,
                  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

        ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
                  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
                  afex_stats->rx_broadcast_bytes_lo,
                  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

        ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
                  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
                  afex_stats->rx_multicast_bytes_lo,
                  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

        ADD_64_LE(afex_stats->rx_unicast_frames_hi,
                  LE32_0,
                  afex_stats->rx_unicast_frames_lo,
                  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

        ADD_64_LE(afex_stats->rx_unicast_frames_hi,
                  LE32_0,
                  afex_stats->rx_unicast_frames_lo,
                  fcoe_q_tstorm_stats->rcv_ucast_pkts);

        ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
                  LE32_0,
                  afex_stats->rx_broadcast_frames_lo,
                  fcoe_q_tstorm_stats->rcv_bcast_pkts);

        /*
         * NOTE(review): this adds rcv_ucast_pkts to the MULTICAST frame
         * counter -- looks like a copy/paste error for rcv_mcast_pkts.
         * Dead code under #if 0, but worth fixing if ever enabled.
         */
        ADD_64_LE(afex_stats->rx_multicast_frames_hi,
                  LE32_0,
                  afex_stats->rx_multicast_frames_lo,
                  fcoe_q_tstorm_stats->rcv_ucast_pkts);

        ADD_64_LE(afex_stats->rx_frames_discarded_hi,
                  LE32_0,
                  afex_stats->rx_frames_discarded_lo,
                  fcoe_q_tstorm_stats->checksum_discard);

        ADD_64_LE(afex_stats->rx_frames_discarded_hi,
                  LE32_0,
                  afex_stats->rx_frames_discarded_lo,
                  fcoe_q_tstorm_stats->pkts_too_big_discard);

        ADD_64_LE(afex_stats->rx_frames_discarded_hi,
                  LE32_0,
                  afex_stats->rx_frames_discarded_lo,
                  fcoe_q_tstorm_stats->ttl0_discard);

        ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
                    LE16_0,
                    afex_stats->rx_frames_dropped_lo,
                    fcoe_q_tstorm_stats->no_buff_discard);

        ADD_64_LE(afex_stats->rx_frames_dropped_hi,
                  LE32_0,
                  afex_stats->rx_frames_dropped_lo,
                  fcoe_q_ustorm_stats->ucast_no_buff_pkts);

        ADD_64_LE(afex_stats->rx_frames_dropped_hi,
                  LE32_0,
                  afex_stats->rx_frames_dropped_lo,
                  fcoe_q_ustorm_stats->mcast_no_buff_pkts);

        ADD_64_LE(afex_stats->rx_frames_dropped_hi,
                  LE32_0,
                  afex_stats->rx_frames_dropped_lo,
                  fcoe_q_ustorm_stats->bcast_no_buff_pkts);

        ADD_64_LE(afex_stats->rx_frames_dropped_hi,
                  LE32_0,
                  afex_stats->rx_frames_dropped_lo,
                  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);

        ADD_64_LE(afex_stats->rx_frames_dropped_hi,
                  LE32_0,
                  afex_stats->rx_frames_dropped_lo,
                  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);

        ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
                  LE32_0,
                  afex_stats->tx_unicast_bytes_lo,
                  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

        ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
                  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
                  afex_stats->tx_unicast_bytes_lo,
                  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

        ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
                  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
                  afex_stats->tx_broadcast_bytes_lo,
                  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

        ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
                  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
                  afex_stats->tx_multicast_bytes_lo,
                  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

        ADD_64_LE(afex_stats->tx_unicast_frames_hi,
                  LE32_0,
                  afex_stats->tx_unicast_frames_lo,
                  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

        ADD_64_LE(afex_stats->tx_unicast_frames_hi,
                  LE32_0,
                  afex_stats->tx_unicast_frames_lo,
                  fcoe_q_xstorm_stats->ucast_pkts_sent);

        ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
                  LE32_0,
                  afex_stats->tx_broadcast_frames_lo,
                  fcoe_q_xstorm_stats->bcast_pkts_sent);

        ADD_64_LE(afex_stats->tx_multicast_frames_hi,
                  LE32_0,
                  afex_stats->tx_multicast_frames_lo,
                  fcoe_q_xstorm_stats->mcast_pkts_sent);

        ADD_64_LE(afex_stats->tx_frames_dropped_hi,
                  LE32_0,
                  afex_stats->tx_frames_dropped_lo,
                  fcoe_q_xstorm_stats->error_drop_pkts);
    }
#endif

    /*
     * If port stats are requested, add them to the PMF
     * stats, as anyway they will be accumulated by the
     * MCP before sent to the switch
     */
    if ((sc->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
        ADD_64(afex_stats->rx_frames_dropped_hi,
               0,
               afex_stats->rx_frames_dropped_lo,
               estats->mac_filter_discard);
        ADD_64(afex_stats->rx_frames_dropped_hi,
               0,
               afex_stats->rx_frames_dropped_lo,
               estats->brb_truncate_discard);
        ADD_64(afex_stats->rx_frames_discarded_hi,
               0,
               afex_stats->rx_frames_discarded_lo,
               estats->mac_discard);
    }
}
2028 
2029