xref: /freebsd/sys/dev/bxe/bxe_stats.c (revision 193d9e768ba63fcfb187cfd17f461f7d41345048)
1 /*-
2  * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24  * THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "bxe.h"
31 #include "bxe_stats.h"
32 
33 #ifdef __i386__
34 #define BITS_PER_LONG 32
35 #else
36 #define BITS_PER_LONG 64
37 #endif
38 
39 extern int bxe_grc_dump(struct bxe_softc *sc);
40 
/*
 * Collapse a {hi,lo} pair of 32-bit statistics counters into a long.
 * 'hiref' points at the high dword; the low dword immediately follows.
 * On 32-bit builds a long cannot hold both halves, so only the low
 * dword is returned.
 */
static inline long
bxe_hilo(uint32_t *hiref)
{
    uint32_t lo = hiref[1];
#if (BITS_PER_LONG == 64)
    uint32_t hi = hiref[0];
    return (HILO_U64(hi, lo));
#else
    return (lo);
#endif
}
52 
53 static inline uint16_t
54 bxe_get_port_stats_dma_len(struct bxe_softc *sc)
55 {
56     uint16_t res = 0;
57     uint32_t size;
58 
59     /* 'newest' convention - shmem2 contains the size of the port stats */
60     if (SHMEM2_HAS(sc, sizeof_port_stats)) {
61         size = SHMEM2_RD(sc, sizeof_port_stats);
62         if (size) {
63             res = size;
64         }
65 
66         /* prevent newer BC from causing buffer overflow */
67         if (res > sizeof(struct host_port_stats)) {
68             res = sizeof(struct host_port_stats);
69         }
70     }
71 
72     /*
73      * Older convention - all BCs support the port stats fields up until
74      * the 'not_used' field
75      */
76     if (!res) {
77         res = (offsetof(struct host_port_stats, not_used) + 4);
78 
79         /* if PFC stats are supported by the MFW, DMA them as well */
80         if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
81             res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
82                     offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
83         }
84     }
85 
86     res >>= 2;
87 
88     DBASSERT(sc, !(res > 2 * DMAE_LEN32_RD_MAX), ("big stats dmae length\n"));
89     return (res);
90 }
91 
92 /*
93  * Init service functions
94  */
95 
/*
 * Dump the firmware statistics request (the header followed by each
 * query entry) to the debug log.  Purely a debugging aid; reads
 * sc->fw_stats_req and has no side effects.
 */
static void
bxe_dp_stats(struct bxe_softc *sc)
{
    int i;

    /* request header: command count, driver sequence counter and the
     * DMA address the FW writes the statistics counters back to */
    BLOGD(sc, DBG_STATS,
          "dumping stats:\n"
          "  fw_stats_req\n"
          "    hdr\n"
          "      cmd_num %d\n"
          "      reserved0 %d\n"
          "      drv_stats_counter %d\n"
          "      reserved1 %d\n"
          "      stats_counters_addrs %x %x\n",
          sc->fw_stats_req->hdr.cmd_num,
          sc->fw_stats_req->hdr.reserved0,
          sc->fw_stats_req->hdr.drv_stats_counter,
          sc->fw_stats_req->hdr.reserved1,
          sc->fw_stats_req->hdr.stats_counters_addrs.hi,
          sc->fw_stats_req->hdr.stats_counters_addrs.lo);

    /* one query entry per command advertised in the header */
    for (i = 0; i < sc->fw_stats_req->hdr.cmd_num; i++) {
        BLOGD(sc, DBG_STATS,
              "query[%d]\n"
              "  kind %d\n"
              "  index %d\n"
              "  funcID %d\n"
              "  reserved %d\n"
              "  address %x %x\n",
              i,
              sc->fw_stats_req->query[i].kind,
              sc->fw_stats_req->query[i].index,
              sc->fw_stats_req->query[i].funcID,
              sc->fw_stats_req->query[i].reserved,
              sc->fw_stats_req->query[i].address.hi,
              sc->fw_stats_req->query[i].address.lo);
    }
}
134 
135 /*
136  * Post the next statistics ramrod. Protect it with the lock in
137  * order to ensure the strict order between statistics ramrods
138  * (each ramrod has a sequence number passed in a
139  * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be
140  * sent in order).
141  */
static void
bxe_storm_stats_post(struct bxe_softc *sc)
{
    int rc;

    /* cheap unlocked check first; re-checked under the lock below */
    if (!sc->stats_pending) {
        BXE_STATS_LOCK(sc);

        /* another thread may have posted while we were acquiring the lock */
        if (sc->stats_pending) {
            BXE_STATS_UNLOCK(sc);
            return;
        }

        /* stamp the request with the next sequence number (little endian
         * as the FW expects); must happen under the lock so ramrods go
         * out in sequence order */
        sc->fw_stats_req->hdr.drv_stats_counter =
            htole16(sc->stats_counter++);

        BLOGD(sc, DBG_STATS,
              "sending statistics ramrod %d\n",
              le16toh(sc->fw_stats_req->hdr.drv_stats_counter));

        /* adjust the ramrod to include VF queues statistics */
        // XXX bxe_iov_adjust_stats_req(sc);

        bxe_dp_stats(sc);

        /* send FW stats ramrod */
        rc = bxe_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
                         U64_HI(sc->fw_stats_req_mapping),
                         U64_LO(sc->fw_stats_req_mapping),
                         NONE_CONNECTION_TYPE);
        /* mark pending only on successful post; cleared when the
         * completion arrives */
        if (rc == 0) {
            sc->stats_pending = 1;
        }

        BXE_STATS_UNLOCK(sc);
    }
}
179 
/*
 * Post the queued hardware (DMAE) statistics commands.  If multiple
 * commands were staged in the BXE_SP dmae[] area (executer_idx != 0),
 * a "loader" DMAE is built that copies the staged command into the
 * DMAE command memory and kicks the chain via GRC completions.
 * Otherwise, if only the function stats need updating, the prebuilt
 * command in sc->stats_dmae is posted directly.
 */
static void
bxe_hw_stats_post(struct bxe_softc *sc)
{
    struct dmae_cmd *dmae = &sc->stats_dmae;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);
    int loader_idx;
    uint32_t opcode;

    /* pre-mark as complete in case we bail out without posting */
    *stats_comp = DMAE_COMP_VAL;
    if (CHIP_REV_IS_SLOW(sc)) {
        /* emulation/FPGA: no stats DMAE */
        return;
    }

    /* Update MCP's statistics if possible */
    if (sc->func_stx) {
        memcpy(BXE_SP(sc, func_stats), &sc->func_stats,
               sizeof(sc->func_stats));
    }

    /* loader */
    if (sc->executer_idx) {
        loader_idx = PMF_DMAE_C(sc);
        /* PCI -> GRC copy, completion signalled through GRC */
        opcode =  bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                                  TRUE, DMAE_COMP_GRC);
        opcode = bxe_dmae_opcode_clr_src_reset(opcode);

        memset(dmae, 0, sizeof(struct dmae_cmd));
        dmae->opcode = opcode;
        /* source: the first staged command in the slowpath dmae[] area */
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, dmae[0]));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, dmae[0]));
        /* destination: the next DMAE command memory slot (dword address) */
        dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
                              sizeof(struct dmae_cmd) *
                              (loader_idx + 1)) >> 2);
        dmae->dst_addr_hi = 0;
        dmae->len = sizeof(struct dmae_cmd) >> 2;
        if (CHIP_IS_E1(sc)) {
            /* E1 DMAE commands are one dword shorter — TODO confirm */
            dmae->len--;
        }
        /* completion "go" register fires the copied command */
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        *stats_comp = 0;
        bxe_post_dmae(sc, dmae, loader_idx);
    } else if (sc->func_stx) {
        /* single prebuilt command: post it as-is */
        *stats_comp = 0;
        bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
    }
}
229 
230 static int
231 bxe_stats_comp(struct bxe_softc *sc)
232 {
233     uint32_t *stats_comp = BXE_SP(sc, stats_comp);
234     int cnt = 10;
235 
236     while (*stats_comp != DMAE_COMP_VAL) {
237         if (!cnt) {
238             BLOGE(sc, "Timeout waiting for stats finished\n");
239             if(sc->trigger_grcdump) {
240                 /* taking grcdump */
241                 bxe_grc_dump(sc);
242             }
243             break;
244         }
245 
246         cnt--;
247         DELAY(1000);
248     }
249 
250     return (1);
251 }
252 
253 /*
254  * Statistics service functions
255  */
256 
/*
 * Read the accumulated port statistics back from the MCP scratchpad
 * (port_stx) into host memory when this driver instance becomes the
 * PMF.  The area is larger than a single read DMAE allows, so it is
 * fetched in two chained transfers: the first (GRC completion) reads
 * DMAE_LEN32_RD_MAX dwords, the second (PCI completion) reads the
 * remainder and signals stats_comp.
 */
static void
bxe_stats_pmf_update(struct bxe_softc *sc)
{
    struct dmae_cmd *dmae;
    uint32_t opcode;
    int loader_idx = PMF_DMAE_C(sc);
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    if (sc->devinfo.bc_ver <= 0x06001400) {
        /*
         * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing
         * BRB registers while the BRB block is in reset. The DMA transfer
         * below triggers this issue resulting in the DMAE to stop
         * functioning. Skip this initial stats transfer for old bootcode
         * versions <= 6.0.20.
         */
        return;
    }

    /* sanity: only meaningful for the PMF with a valid stats address */
    if (!sc->port.pmf || !sc->port.port_stx) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    sc->executer_idx = 0;

    /* GRC -> PCI read, completion mode filled in per command below */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0);

    /* first chunk: DMAE_LEN32_RD_MAX dwords, chains via GRC completion */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
    dmae->src_addr_lo = (sc->port.port_stx >> 2);
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
    dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
    dmae->len = DMAE_LEN32_RD_MAX;
    dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
    dmae->comp_addr_hi = 0;
    dmae->comp_val = 1;

    /* second chunk: the rest, completes to host memory (stats_comp) */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
    dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX);
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats) +
                               DMAE_LEN32_RD_MAX * 4);
    dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats) +
                               DMAE_LEN32_RD_MAX * 4);
    dmae->len = (bxe_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX);

    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    /* post the chain and synchronously wait for it to finish */
    *stats_comp = 0;
    bxe_hw_stats_post(sc);
    bxe_stats_comp(sc);
}
315 
/*
 * Build the DMAE command chain used on every statistics cycle by the
 * PMF: write the host copies of the port/function stats out to the MCP
 * (PCI -> GRC), then read back the MAC and NIG hardware counters
 * (GRC -> PCI).  All commands chain via GRC completions except the
 * last, which completes to host memory (stats_comp).  This only stages
 * the commands in the BXE_SP dmae[] area; bxe_hw_stats_post() posts
 * them.
 */
static void
bxe_port_stats_init(struct bxe_softc *sc)
{
    struct dmae_cmd *dmae;
    int port = SC_PORT(sc);
    uint32_t opcode;
    int loader_idx = PMF_DMAE_C(sc);
    uint32_t mac_addr;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    /* sanity: must be the PMF with link up */
    if (!sc->link_vars.link_up || !sc->port.pmf) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    sc->executer_idx = 0;

    /* MCP */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                             TRUE, DMAE_COMP_GRC);

    /* push host port stats to the MCP scratchpad (if it gave us one) */
    if (sc->port.port_stx) {
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
        dmae->dst_addr_lo = sc->port.port_stx >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = bxe_get_port_stats_dma_len(sc);
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* push host function stats to the MCP scratchpad */
    if (sc->func_stx) {
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
        dmae->dst_addr_lo = (sc->func_stx >> 2);
        dmae->dst_addr_hi = 0;
        dmae->len = (sizeof(struct host_func_stats) >> 2);
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* MAC */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
                             TRUE, DMAE_COMP_GRC);

    /* EMAC is special */
    if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) {
        mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

        /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
        dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* EMAC_REG_EMAC_RX_STAT_AC_28 */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2);
        dmae->src_addr_hi = 0;
        /* single register landing at the falsecarriererrors slot */
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            rx_stat_falsecarriererrors));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            rx_stat_falsecarriererrors));
        dmae->len = 1;
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2);
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            tx_stat_ifhcoutoctets));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            tx_stat_ifhcoutoctets));
        dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    } else {
        uint32_t tx_src_addr_lo, rx_src_addr_lo;
        uint16_t rx_len, tx_len;

        /* configure the params according to MAC type */
        switch (sc->link_vars.mac_type) {
        case ELINK_MAC_TYPE_BMAC:
            mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM :
                                NIG_REG_INGRESS_BMAC0_MEM;

            /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
               BIGMAC_REGISTER_TX_STAT_GTBYT */
            if (CHIP_IS_E1x(sc)) {
                tx_src_addr_lo =
                    ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
                tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
                           BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
                rx_src_addr_lo =
                    ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
                rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
                           BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
            } else {
                /* E2+: BMAC2 register layout */
                tx_src_addr_lo =
                    ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
                tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
                           BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
                rx_src_addr_lo =
                    ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
                rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
                           BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
            }

            break;

        case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */
        case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */
        default:
            mac_addr = (port) ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
            tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2);
            rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2);
            tx_len =
                (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2);
            rx_len =
                (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2);
            break;
        }

        /* TX stats */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = tx_src_addr_lo;
        dmae->src_addr_hi = 0;
        dmae->len = tx_len;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* RX stats land right after the TX block in mac_stats */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_hi = 0;
        dmae->src_addr_lo = rx_src_addr_lo;
        dmae->dst_addr_lo =
            U64_LO(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
        dmae->dst_addr_hi =
            U64_HI(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
        dmae->len = rx_len;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* NIG */
    if (!CHIP_IS_E3(sc)) {
        /* egress MAC packet counters (two dwords each) */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo =
            (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
                    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt0_lo));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt0_lo));
        dmae->len = ((2 * sizeof(uint32_t)) >> 2);
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo =
            (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
                    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt1_lo));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt1_lo));
        dmae->len = ((2 * sizeof(uint32_t)) >> 2);
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* final command: BRB discard counters, completes to host memory so
     * bxe_stats_comp() can observe chain completion */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
                                   TRUE, DMAE_COMP_PCI);
    dmae->src_addr_lo =
        (port ? NIG_REG_STAT1_BRB_DISCARD :
                NIG_REG_STAT0_BRB_DISCARD) >> 2;
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats));
    dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats));
    dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2;

    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    *stats_comp = 0;
}
543 
/*
 * Build the single DMAE command that writes the host function stats
 * out to the MCP scratchpad (func_stx).  Used by non-PMF functions;
 * the command is stored in sc->stats_dmae and posted later by
 * bxe_hw_stats_post().  Completes to host memory (stats_comp).
 */
static void
bxe_func_stats_init(struct bxe_softc *sc)
{
    struct dmae_cmd *dmae = &sc->stats_dmae;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    /* sanity: the MCP must have provided a function stats address */
    if (!sc->func_stx) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    /* no staged commands; bxe_hw_stats_post() will use stats_dmae */
    sc->executer_idx = 0;
    memset(dmae, 0, sizeof(struct dmae_cmd));

    dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                                   TRUE, DMAE_COMP_PCI);
    dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
    dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
    dmae->dst_addr_lo = (sc->func_stx >> 2);
    dmae->dst_addr_hi = 0;
    dmae->len = (sizeof(struct host_func_stats) >> 2);
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    *stats_comp = 0;
}
572 
573 static void
574 bxe_stats_start(struct bxe_softc *sc)
575 {
576     /*
577      * VFs travel through here as part of the statistics FSM, but no action
578      * is required
579      */
580     if (IS_VF(sc)) {
581         return;
582     }
583 
584     if (sc->port.pmf) {
585         bxe_port_stats_init(sc);
586     }
587 
588     else if (sc->func_stx) {
589         bxe_func_stats_init(sc);
590     }
591 
592     bxe_hw_stats_post(sc);
593     bxe_storm_stats_post(sc);
594 }
595 
/*
 * Transition into PMF mode within the statistics FSM: wait out any
 * in-flight stats DMAE, pull the current port stats from the MCP, then
 * start a fresh statistics cycle.
 */
static void
bxe_stats_pmf_start(struct bxe_softc *sc)
{
    bxe_stats_comp(sc);
    bxe_stats_pmf_update(sc);
    bxe_stats_start(sc);
}
603 
/*
 * Restart the statistics cycle: wait for any in-flight stats DMAE to
 * complete and then start a new cycle.  No-op for VFs.
 */
static void
bxe_stats_restart(struct bxe_softc *sc)
{
    /*
     * VFs travel through here as part of the statistics FSM, but no action
     * is required
     */
    if (IS_VF(sc)) {
        return;
    }

    bxe_stats_comp(sc);
    bxe_stats_start(sc);
}
618 
/*
 * Fold the freshly DMAed BMAC hardware counters into the host port
 * stats and the driver's eth_stats.  E1x chips use the bmac1_stats
 * layout, later chips bmac2_stats (which additionally carries PFC
 * frame counters).  NOTE: the UPDATE_STAT64/ADD_64 macros implicitly
 * reference the locals 'new', 'pstats', 'estats' and 'diff' declared
 * below — do not rename them.
 */
static void
bxe_bmac_stats_update(struct bxe_softc *sc)
{
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;
    /* scratch 64-bit delta used by the UPDATE_STAT64 macro */
    struct {
        uint32_t lo;
        uint32_t hi;
    } diff;

    if (CHIP_IS_E1x(sc)) {
        struct bmac1_stats *new = BXE_SP(sc, mac_stats.bmac1_stats);

        /* the macros below will use "bmac1_stats" type */
        UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
        UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
        UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
        UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
        UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
        UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
        UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
        UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
        UPDATE_STAT64(tx_stat_gt127,
                      tx_stat_etherstatspkts65octetsto127octets);
        UPDATE_STAT64(tx_stat_gt255,
                      tx_stat_etherstatspkts128octetsto255octets);
        UPDATE_STAT64(tx_stat_gt511,
                      tx_stat_etherstatspkts256octetsto511octets);
        UPDATE_STAT64(tx_stat_gt1023,
                      tx_stat_etherstatspkts512octetsto1023octets);
        UPDATE_STAT64(tx_stat_gt1518,
                      tx_stat_etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
        UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
        UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
        UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
        UPDATE_STAT64(tx_stat_gterr,
                      tx_stat_dot3statsinternalmactransmiterrors);
        UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
    } else {
        struct bmac2_stats *new = BXE_SP(sc, mac_stats.bmac2_stats);
        struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;

        /* the macros below will use "bmac2_stats" type */
        UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
        UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
        UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
        UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
        UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
        UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
        UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
        UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
        UPDATE_STAT64(tx_stat_gt127,
                      tx_stat_etherstatspkts65octetsto127octets);
        UPDATE_STAT64(tx_stat_gt255,
                      tx_stat_etherstatspkts128octetsto255octets);
        UPDATE_STAT64(tx_stat_gt511,
                      tx_stat_etherstatspkts256octetsto511octets);
        UPDATE_STAT64(tx_stat_gt1023,
                      tx_stat_etherstatspkts512octetsto1023octets);
        UPDATE_STAT64(tx_stat_gt1518,
                      tx_stat_etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
        UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
        UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
        UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
        UPDATE_STAT64(tx_stat_gterr,
                      tx_stat_dot3statsinternalmactransmiterrors);
        UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

        /* collect PFC stats */
        pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
        pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
        ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi,
               pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo);

        pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
        pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
        ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi,
               pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo);
    }

    /* mirror selected accumulated counters into the driver eth stats */
    estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
    estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;

    estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
    estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;

    estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
    estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
    estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
    estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
}
721 
/*
 * Fold the freshly DMAed MSTAT hardware counters (UMAC/XMAC on newer
 * chips) into the host port stats and the driver's eth_stats.  MSTAT
 * counters are full 64-bit values, hence ADD_STAT64 rather than the
 * delta-based UPDATE_STAT64 used for BMAC.  NOTE: the ADD_STAT64 /
 * ADD_64 macros implicitly reference the locals 'new', 'pstats' and
 * 'estats' declared below — do not rename them.
 */
static void
bxe_mstat_stats_update(struct bxe_softc *sc)
{
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;
    struct mstat_stats *new = BXE_SP(sc, mac_stats.mstat_stats);

    ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
    ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
    ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
    ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
    ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
    ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
    ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
    ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
    ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
    ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

    /* collect pfc stats */
    ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
           pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
    ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
           pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

    ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
    ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets);
    ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets);
    ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets);
    ADD_STAT64(stats_tx.tx_gt1023,
               tx_stat_etherstatspkts512octetsto1023octets);
    ADD_STAT64(stats_tx.tx_gt1518,
               tx_stat_etherstatspkts1024octetsto1522octets);
    ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

    ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
    ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
    ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

    ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors);
    ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

    estats->etherstatspkts1024octetsto1522octets_hi =
        pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
    estats->etherstatspkts1024octetsto1522octets_lo =
        pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

    /* "over 1522" is synthesized by summing the 2047/4095/9216/16383
     * size buckets */
    estats->etherstatspktsover1522octets_hi =
        pstats->mac_stx[1].tx_stat_mac_2047_hi;
    estats->etherstatspktsover1522octets_lo =
        pstats->mac_stx[1].tx_stat_mac_2047_lo;

    ADD_64(estats->etherstatspktsover1522octets_hi,
           pstats->mac_stx[1].tx_stat_mac_4095_hi,
           estats->etherstatspktsover1522octets_lo,
           pstats->mac_stx[1].tx_stat_mac_4095_lo);

    ADD_64(estats->etherstatspktsover1522octets_hi,
           pstats->mac_stx[1].tx_stat_mac_9216_hi,
           estats->etherstatspktsover1522octets_lo,
           pstats->mac_stx[1].tx_stat_mac_9216_lo);

    ADD_64(estats->etherstatspktsover1522octets_hi,
           pstats->mac_stx[1].tx_stat_mac_16383_hi,
           estats->etherstatspktsover1522octets_lo,
           pstats->mac_stx[1].tx_stat_mac_16383_lo);

    /* mirror selected accumulated counters into the driver eth stats */
    estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
    estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;

    estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
    estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;

    estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
    estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
    estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
    estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
}
799 
/*
 * Fold the freshly DMAed EMAC hardware counters into the cumulative
 * host port statistics and the driver's ethernet stats.
 *
 * The UPDATE_EXTEND_STAT() macro implicitly uses the local variables
 * 'new' (raw EMAC counters), 'pstats' (host_port_stats accumulator)
 * and 'estats' -- do not rename them.  mac_stx[0] holds the previous
 * snapshot and mac_stx[1] the running totals (per the macro naming
 * convention used throughout this file -- confirm against bxe_stats.h).
 */
static void
bxe_emac_stats_update(struct bxe_softc *sc)
{
    struct emac_stats *new = BXE_SP(sc, mac_stats.emac_stats);
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;

    /* extend each 32-bit hardware counter into its 64-bit accumulator */
    UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
    UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
    UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
    UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
    UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
    UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
    UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
    UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
    UPDATE_EXTEND_STAT(tx_stat_outxonsent);
    UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
    UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
    UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

    /* pause frames received = XON received + XOFF received */
    estats->pause_frames_received_hi =
        pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
    estats->pause_frames_received_lo =
        pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
    ADD_64(estats->pause_frames_received_hi,
           pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
           estats->pause_frames_received_lo,
           pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

    /* pause frames sent = XON sent + XOFF sent */
    estats->pause_frames_sent_hi =
        pstats->mac_stx[1].tx_stat_outxonsent_hi;
    estats->pause_frames_sent_lo =
        pstats->mac_stx[1].tx_stat_outxonsent_lo;
    ADD_64(estats->pause_frames_sent_hi,
           pstats->mac_stx[1].tx_stat_outxoffsent_hi,
           estats->pause_frames_sent_lo,
           pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
857 
/*
 * Update hardware (MAC + NIG + BRB) statistics after a DMAE completion.
 *
 * Dispatches to the MAC-specific updater for the currently active MAC,
 * then folds in the NIG discard/truncate deltas and copies the port
 * totals into the driver's ethernet stats.
 *
 * Returns 0 on success, -1 if no MAC is active (stats cannot be
 * attributed).  Note that an unknown MAC type only logs an error and
 * deliberately falls through to the common update path below.
 */
static int
bxe_hw_stats_update(struct bxe_softc *sc)
{
    struct nig_stats *new = BXE_SP(sc, nig_stats);
    struct nig_stats *old = &(sc->port.old_nig_stats);
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;
    uint32_t lpi_reg, nig_timer_max;
    /* scratch pair -- presumably consumed by UPDATE_STAT64_NIG below;
     * confirm against the macro definition in bxe_stats.h */
    struct {
        uint32_t lo;
        uint32_t hi;
    } diff;

    switch (sc->link_vars.mac_type) {
    case ELINK_MAC_TYPE_BMAC:
        bxe_bmac_stats_update(sc);
        break;

    case ELINK_MAC_TYPE_EMAC:
        bxe_emac_stats_update(sc);
        break;

    case ELINK_MAC_TYPE_UMAC:
    case ELINK_MAC_TYPE_XMAC:
        bxe_mstat_stats_update(sc);
        break;

    case ELINK_MAC_TYPE_NONE: /* unreached */
        BLOGD(sc, DBG_STATS,
              "stats updated by DMAE but no MAC active\n");
        return (-1);

    default: /* unreached */
        BLOGE(sc, "stats update failed, unknown MAC type\n");
    }

    /* accumulate the NIG counter deltas since the last snapshot */
    ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
                  new->brb_discard - old->brb_discard);
    ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
                  new->brb_truncate - old->brb_truncate);

    if (!CHIP_IS_E3(sc)) {
        UPDATE_STAT64_NIG(egress_mac_pkt0,
                          etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64_NIG(egress_mac_pkt1,
                          etherstatspktsover1522octets);
    }

    /* the current NIG readings become the new baseline */
    memcpy(old, new, sizeof(struct nig_stats));

    /* copy the accumulated MAC totals (mac_stx[1]) into eth_stats;
     * relies on the eth_stats field layout matching struct mac_stx */
    memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
           sizeof(struct mac_stx));
    estats->brb_drop_hi = pstats->brb_drop_hi;
    estats->brb_drop_lo = pstats->brb_drop_lo;

    pstats->host_port_stats_counter++;

    if (CHIP_IS_E3(sc)) {
        /* E3 exposes an EEE TX LPI entry counter per port */
        lpi_reg = (SC_PORT(sc)) ?
                      MISC_REG_CPMU_LP_SM_ENT_CNT_P1 :
                      MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
        estats->eee_tx_lpi += REG_RD(sc, lpi_reg);
    }

    if (!BXE_NOMCP(sc)) {
        /* report (once per change) a NIG timer max value from the MCP */
        nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
        if (nig_timer_max != estats->nig_timer_max) {
            estats->nig_timer_max = nig_timer_max;
            BLOGE(sc, "invalid NIG timer max (%u)\n",
                  estats->nig_timer_max);
        }
    }

    return (0);
}
933 
/*
 * Verify that all four storms (x/u/c/t) have completed the most
 * recently posted statistics ramrod.
 *
 * Each storm increments its completion counter in the DMA-visible
 * storm_counters block when it finishes a stats ramrod; all of them
 * must match the counter value used for the last request before the
 * storm data may be consumed.
 *
 * Returns 0 when all storms are up to date, -EAGAIN otherwise (the
 * caller simply retries on the next stats tick).
 */
static int
bxe_storm_stats_validate_counters(struct bxe_softc *sc)
{
    struct stats_counter *counters = &sc->fw_stats_data->storm_counters;
    uint16_t cur_stats_counter;

    /*
     * Make sure we use the value of the counter
     * used for sending the last stats ramrod.
     */
    BXE_STATS_LOCK(sc);
    cur_stats_counter = (sc->stats_counter - 1);
    BXE_STATS_UNLOCK(sc);

    /* are storm stats valid? */
    if (le16toh(counters->xstats_counter) != cur_stats_counter) {
        BLOGD(sc, DBG_STATS,
              "stats not updated by xstorm, "
              "counter 0x%x != stats_counter 0x%x\n",
              le16toh(counters->xstats_counter), sc->stats_counter);
        return (-EAGAIN);
    }

    if (le16toh(counters->ustats_counter) != cur_stats_counter) {
        BLOGD(sc, DBG_STATS,
              "stats not updated by ustorm, "
              "counter 0x%x != stats_counter 0x%x\n",
              le16toh(counters->ustats_counter), sc->stats_counter);
        return (-EAGAIN);
    }

    if (le16toh(counters->cstats_counter) != cur_stats_counter) {
        BLOGD(sc, DBG_STATS,
              "stats not updated by cstorm, "
              "counter 0x%x != stats_counter 0x%x\n",
              le16toh(counters->cstats_counter), sc->stats_counter);
        return (-EAGAIN);
    }

    if (le16toh(counters->tstats_counter) != cur_stats_counter) {
        BLOGD(sc, DBG_STATS,
              "stats not updated by tstorm, "
              "counter 0x%x != stats_counter 0x%x\n",
              le16toh(counters->tstats_counter), sc->stats_counter);
        return (-EAGAIN);
    }

    return (0);
}
983 
/*
 * Pull per-queue firmware ("storm") statistics into the driver's
 * per-queue, per-function and global ethernet statistics.
 *
 * For a PF, the storm completion counters are validated first; if the
 * firmware has not finished the last stats ramrod, -EAGAIN is returned
 * and nothing is touched.  VFs skip validation (the counter is managed
 * by the PF) and always update.
 *
 * The UPDATE_QSTAT / UPDATE_EXTEND_* / SUB_EXTEND_* macros implicitly
 * reference the loop-local variables tclient/uclient/xclient, their
 * old_* snapshots, qstats/qstats_old and 'diff' -- the local names must
 * not change.
 *
 * Returns 0 on success, -EAGAIN when the storm data is stale.
 */
static int
bxe_storm_stats_update(struct bxe_softc *sc)
{
    struct tstorm_per_port_stats *tport =
        &sc->fw_stats_data->port.tstorm_port_statistics;
    struct tstorm_per_pf_stats *tfunc =
        &sc->fw_stats_data->pf.tstorm_pf_statistics;
    struct host_func_stats *fstats = &sc->func_stats;
    struct bxe_eth_stats *estats = &sc->eth_stats;
    struct bxe_eth_stats_old *estats_old = &sc->eth_stats_old;
    int i;

    /* vfs stat counter is managed by pf */
    if (IS_PF(sc) && bxe_storm_stats_validate_counters(sc)) {
        return (-EAGAIN);
    }

    /* rebuilt from scratch below */
    estats->error_bytes_received_hi = 0;
    estats->error_bytes_received_lo = 0;

    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_fastpath *fp = &sc->fp[i];
        struct tstorm_per_queue_stats *tclient =
            &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics;
        struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
        struct ustorm_per_queue_stats *uclient =
            &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics;
        struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
        struct xstorm_per_queue_stats *xclient =
            &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics;
        struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
        struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
        struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

        /* scratch used by the UPDATE_EXTEND_E_* macros */
        uint32_t diff;

        BLOGD(sc, DBG_STATS,
              "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x\n",
              i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
              xclient->mcast_pkts_sent);

        BLOGD(sc, DBG_STATS, "---------------\n");

        /* RX byte counters per cast type */
        UPDATE_QSTAT(tclient->rcv_bcast_bytes,
                     total_broadcast_bytes_received);
        UPDATE_QSTAT(tclient->rcv_mcast_bytes,
                     total_multicast_bytes_received);
        UPDATE_QSTAT(tclient->rcv_ucast_bytes,
                     total_unicast_bytes_received);

        /*
         * sum to total_bytes_received all
         * unicast/multicast/broadcast
         */
        qstats->total_bytes_received_hi =
            qstats->total_broadcast_bytes_received_hi;
        qstats->total_bytes_received_lo =
            qstats->total_broadcast_bytes_received_lo;

        ADD_64(qstats->total_bytes_received_hi,
               qstats->total_multicast_bytes_received_hi,
               qstats->total_bytes_received_lo,
               qstats->total_multicast_bytes_received_lo);

        ADD_64(qstats->total_bytes_received_hi,
               qstats->total_unicast_bytes_received_hi,
               qstats->total_bytes_received_lo,
               qstats->total_unicast_bytes_received_lo);

        qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi;
        qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo;

        /* RX packet counters and discards */
        UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received);
        UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received);
        UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received);
        UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
                              etherstatsoverrsizepkts, 32);
        UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);

        /* packets dropped for lack of buffers were counted as received
         * above, so subtract them back out and record them as discards */
        SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received);
        SUB_EXTEND_USTAT(mcast_no_buff_pkts,
                         total_multicast_packets_received);
        SUB_EXTEND_USTAT(bcast_no_buff_pkts,
                         total_broadcast_packets_received);
        UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
        UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
        UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

        /* TX byte counters per cast type */
        UPDATE_QSTAT(xclient->bcast_bytes_sent,
                     total_broadcast_bytes_transmitted);
        UPDATE_QSTAT(xclient->mcast_bytes_sent,
                     total_multicast_bytes_transmitted);
        UPDATE_QSTAT(xclient->ucast_bytes_sent,
                     total_unicast_bytes_transmitted);

        /*
         * sum to total_bytes_transmitted all
         * unicast/multicast/broadcast
         */
        qstats->total_bytes_transmitted_hi =
            qstats->total_unicast_bytes_transmitted_hi;
        qstats->total_bytes_transmitted_lo =
            qstats->total_unicast_bytes_transmitted_lo;

        ADD_64(qstats->total_bytes_transmitted_hi,
               qstats->total_broadcast_bytes_transmitted_hi,
               qstats->total_bytes_transmitted_lo,
               qstats->total_broadcast_bytes_transmitted_lo);

        ADD_64(qstats->total_bytes_transmitted_hi,
               qstats->total_multicast_bytes_transmitted_hi,
               qstats->total_bytes_transmitted_lo,
               qstats->total_multicast_bytes_transmitted_lo);

        /* TX packet counters per cast type */
        UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
                            total_unicast_packets_transmitted);
        UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
                            total_multicast_packets_transmitted);
        UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
                            total_broadcast_packets_transmitted);

        UPDATE_EXTEND_TSTAT(checksum_discard,
                            total_packets_received_checksum_discarded);
        UPDATE_EXTEND_TSTAT(ttl0_discard,
                            total_packets_received_ttl0_discarded);

        UPDATE_EXTEND_XSTAT(error_drop_pkts,
                            total_transmitted_dropped_packets_error);

        /* TPA aggregations completed */
        UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
        /* Number of network frames aggregated by TPA */
        UPDATE_EXTEND_E_USTAT(coalesced_pkts, total_tpa_aggregated_frames);
        /* Total number of bytes in completed TPA aggregations */
        UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

        UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

        /* roll the per-queue totals into the per-function stats */
        UPDATE_FSTAT_QSTAT(total_bytes_received);
        UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
        UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
        UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
        UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
        UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
        UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
        UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
        UPDATE_FSTAT_QSTAT(valid_bytes_received);
    }

    /* bad octets count as received bytes, and as error bytes */
    ADD_64(estats->total_bytes_received_hi,
           estats->rx_stat_ifhcinbadoctets_hi,
           estats->total_bytes_received_lo,
           estats->rx_stat_ifhcinbadoctets_lo);

    ADD_64_LE(estats->total_bytes_received_hi,
              tfunc->rcv_error_bytes.hi,
              estats->total_bytes_received_lo,
              tfunc->rcv_error_bytes.lo);

    ADD_64_LE(estats->error_bytes_received_hi,
              tfunc->rcv_error_bytes.hi,
              estats->error_bytes_received_lo,
              tfunc->rcv_error_bytes.lo);

    UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

    ADD_64(estats->error_bytes_received_hi,
           estats->rx_stat_ifhcinbadoctets_hi,
           estats->error_bytes_received_lo,
           estats->rx_stat_ifhcinbadoctets_lo);

    if (sc->port.pmf) {
        /* port-wide firmware discard counters (PMF only) */
        struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
        UPDATE_FW_STAT(mac_filter_discard);
        UPDATE_FW_STAT(mf_tag_discard);
        UPDATE_FW_STAT(brb_truncate_discard);
        UPDATE_FW_STAT(mac_discard);
    }

    /* mark the function stats block as consistent */
    fstats->host_func_stats_start = ++fstats->host_func_stats_end;

    sc->stats_pending = 0;

    return (0);
}
1169 
1170 static void
1171 bxe_net_stats_update(struct bxe_softc *sc)
1172 {
1173 
1174     for (int i = 0; i < sc->num_queues; i++)
1175         if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS,
1176 	    le32toh(sc->fp[i].old_tclient.checksum_discard));
1177 }
1178 
1179 uint64_t
1180 bxe_get_counter(if_t ifp, ift_counter cnt)
1181 {
1182 	struct bxe_softc *sc;
1183 	struct bxe_eth_stats *estats;
1184 
1185 	sc = if_getsoftc(ifp);
1186 	estats = &sc->eth_stats;
1187 
1188 	switch (cnt) {
1189 	case IFCOUNTER_IPACKETS:
1190 		return (bxe_hilo(&estats->total_unicast_packets_received_hi) +
1191 		    bxe_hilo(&estats->total_multicast_packets_received_hi) +
1192 		    bxe_hilo(&estats->total_broadcast_packets_received_hi));
1193 	case IFCOUNTER_OPACKETS:
1194 		return (bxe_hilo(&estats->total_unicast_packets_transmitted_hi) +
1195 		    bxe_hilo(&estats->total_multicast_packets_transmitted_hi) +
1196 		    bxe_hilo(&estats->total_broadcast_packets_transmitted_hi));
1197 	case IFCOUNTER_IBYTES:
1198 		return (bxe_hilo(&estats->total_bytes_received_hi));
1199 	case IFCOUNTER_OBYTES:
1200 		return (bxe_hilo(&estats->total_bytes_transmitted_hi));
1201 	case IFCOUNTER_IERRORS:
1202 		return (bxe_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
1203 		    bxe_hilo(&estats->etherstatsoverrsizepkts_hi) +
1204 		    bxe_hilo(&estats->brb_drop_hi) +
1205 		    bxe_hilo(&estats->brb_truncate_hi) +
1206 		    bxe_hilo(&estats->rx_stat_dot3statsfcserrors_hi) +
1207 		    bxe_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi) +
1208 		    bxe_hilo(&estats->no_buff_discard_hi));
1209 	case IFCOUNTER_OERRORS:
1210 		return (bxe_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi) +
1211 		    bxe_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi));
1212 	case IFCOUNTER_IMCASTS:
1213 		return (bxe_hilo(&estats->total_multicast_packets_received_hi));
1214 	case IFCOUNTER_COLLISIONS:
1215 		return (bxe_hilo(&estats->tx_stat_etherstatscollisions_hi) +
1216 		    bxe_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
1217 		    bxe_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi));
1218 	default:
1219 		return (if_get_counter_default(ifp, cnt));
1220 	}
1221 }
1222 
/*
 * Aggregate the software (driver-maintained) per-queue counters into
 * the global ethernet stats.
 *
 * UPDATE_ESTAT_QSTAT implicitly uses the loop-local names 'qstats',
 * 'qstats_old' and 'estats' -- do not rename them.
 */
static void
bxe_drv_stats_update(struct bxe_softc *sc)
{
    struct bxe_eth_stats *estats = &sc->eth_stats;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
        struct bxe_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old;

        /* RX path software counters */
        UPDATE_ESTAT_QSTAT(rx_calls);
        UPDATE_ESTAT_QSTAT(rx_pkts);
        UPDATE_ESTAT_QSTAT(rx_tpa_pkts);
        UPDATE_ESTAT_QSTAT(rx_erroneous_jumbo_sge_pkts);
        UPDATE_ESTAT_QSTAT(rx_bxe_service_rxsgl);
        UPDATE_ESTAT_QSTAT(rx_jumbo_sge_pkts);
        UPDATE_ESTAT_QSTAT(rx_soft_errors);
        UPDATE_ESTAT_QSTAT(rx_hw_csum_errors);
        UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip);
        UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp);
        UPDATE_ESTAT_QSTAT(rx_budget_reached);
        /* TX path software counters */
        UPDATE_ESTAT_QSTAT(tx_pkts);
        UPDATE_ESTAT_QSTAT(tx_soft_errors);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso_hdr_splits);
        UPDATE_ESTAT_QSTAT(tx_encap_failures);
        UPDATE_ESTAT_QSTAT(tx_hw_queue_full);
        UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth);
        UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure);
        UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth);
        UPDATE_ESTAT_QSTAT(tx_window_violation_std);
        UPDATE_ESTAT_QSTAT(tx_window_violation_tso);
        //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_ipv6);
        //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_not_tcp);
        UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf);
        UPDATE_ESTAT_QSTAT(tx_frames_deferred);
        UPDATE_ESTAT_QSTAT(tx_queue_xoff);

        /* mbuf driver statistics */
        UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts);
        UPDATE_ESTAT_QSTAT(mbuf_defrag_failures);
        UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_alloc_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_mapping_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_sge_alloc_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_sge_mapping_failed);

        /* track the number of allocated mbufs */
        UPDATE_ESTAT_QSTAT(mbuf_alloc_tx);
        UPDATE_ESTAT_QSTAT(mbuf_alloc_rx);
        UPDATE_ESTAT_QSTAT(mbuf_alloc_sge);
        UPDATE_ESTAT_QSTAT(mbuf_alloc_tpa);
    }
}
1281 
1282 static uint8_t
1283 bxe_edebug_stats_stopped(struct bxe_softc *sc)
1284 {
1285     uint32_t val;
1286 
1287     if (SHMEM2_HAS(sc, edebug_driver_if[1])) {
1288         val = SHMEM2_RD(sc, edebug_driver_if[1]);
1289 
1290         if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) {
1291             return (TRUE);
1292         }
1293     }
1294 
1295     return (FALSE);
1296 }
1297 
/*
 * Periodic statistics refresh (STATS_EVENT_UPDATE action while ENABLED).
 *
 * PF: requires the previous DMAE transfer to have completed; updates HW
 * stats (PMF only) and storm stats.  If the firmware fails to service
 * stats ramrods four times in a row, the chip is assumed wedged: an
 * optional GRC dump is taken and a chip re-init task is queued.
 * VF: only the storm stats are updated (no HW stats, no completions).
 */
static void
bxe_stats_update(struct bxe_softc *sc)
{
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    if (bxe_edebug_stats_stopped(sc)) {
        return;
    }

    if (IS_PF(sc)) {
        /* previous DMAE not yet complete -- try again next tick */
        if (*stats_comp != DMAE_COMP_VAL) {
            return;
        }

        if (sc->port.pmf) {
            bxe_hw_stats_update(sc);
        }

        if (bxe_storm_stats_update(sc)) {
            /* storm stats stale; give up after 4 consecutive misses */
            if (sc->stats_pending++ == 3) {
		if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
                    if(sc->trigger_grcdump) {
                        /* taking grcdump */
                        bxe_grc_dump(sc);
                    }
                    /* schedule a full chip re-initialization */
                    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
                    taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
		}
            }
            return;
        }
    } else {
        /*
         * VF doesn't collect HW statistics, and doesn't get completions,
         * performs only update.
         */
        bxe_storm_stats_update(sc);
    }

    bxe_net_stats_update(sc);
    bxe_drv_stats_update(sc);

    /* vf is done */
    if (IS_VF(sc)) {
        return;
    }

    /* kick off the next round of HW DMAE and storm ramrod requests */
    bxe_hw_stats_post(sc);
    bxe_storm_stats_post(sc);
}
1348 
/*
 * Build the DMAE commands that write the final port (and, if present,
 * function) statistics back to the management firmware areas before
 * statistics collection stops.
 *
 * When both port_stx and func_stx exist, the port-stats command
 * completes to GRC (chaining to the next loader slot) and only the
 * final function-stats command completes to PCI; otherwise the single
 * command completes to PCI directly.  Commands are only prepared here;
 * the caller posts them via bxe_hw_stats_post().
 */
static void
bxe_port_stats_stop(struct bxe_softc *sc)
{
    struct dmae_cmd *dmae;
    uint32_t opcode;
    int loader_idx = PMF_DMAE_C(sc);
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    sc->executer_idx = 0;

    /* base opcode: PCI -> GRC, no completion type yet */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0);

    if (sc->port.port_stx) {
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);

        /* chain to the function-stats command if one follows */
        if (sc->func_stx) {
            dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
        } else {
            dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
        }

        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
        dmae->dst_addr_lo = sc->port.port_stx >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = bxe_get_port_stats_dma_len(sc);
        if (sc->func_stx) {
            /* GRC completion: poke the next DMAE loader register */
            dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
            dmae->comp_addr_hi = 0;
            dmae->comp_val = 1;
        } else {
            /* PCI completion: signal the host stats_comp word */
            dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
            dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
            dmae->comp_val = DMAE_COMP_VAL;

            *stats_comp = 0;
        }
    }

    if (sc->func_stx) {
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
        dmae->dst_addr_lo = (sc->func_stx >> 2);
        dmae->dst_addr_hi = 0;
        dmae->len = (sizeof(struct host_func_stats) >> 2);
        dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
        dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        *stats_comp = 0;
    }
}
1403 
1404 static void
1405 bxe_stats_stop(struct bxe_softc *sc)
1406 {
1407     uint8_t update = FALSE;
1408 
1409     bxe_stats_comp(sc);
1410 
1411     if (sc->port.pmf) {
1412         update = bxe_hw_stats_update(sc) == 0;
1413     }
1414 
1415     update |= bxe_storm_stats_update(sc) == 0;
1416 
1417     if (update) {
1418         bxe_net_stats_update(sc);
1419 
1420         if (sc->port.pmf) {
1421             bxe_port_stats_stop(sc);
1422         }
1423 
1424         bxe_hw_stats_post(sc);
1425         bxe_stats_comp(sc);
1426     }
1427 }
1428 
/*
 * No-op action for state-machine transitions that require no work.
 */
static void
bxe_stats_do_nothing(struct bxe_softc *sc)
{
    /* intentionally empty */
}
1434 
/*
 * Statistics state machine, indexed as [current state][event].
 * Each entry gives the action to run and the state to enter.
 * Row order must match enum bxe_stats_state and column order must
 * match enum bxe_stats_event (see bxe_stats_handle()).
 */
static const struct {
    void (*action)(struct bxe_softc *sc);
    enum bxe_stats_state next_state;
} bxe_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
    {
    /* DISABLED PMF */ { bxe_stats_pmf_update, STATS_STATE_DISABLED },
    /*      LINK_UP */ { bxe_stats_start,      STATS_STATE_ENABLED },
    /*      UPDATE  */ { bxe_stats_do_nothing, STATS_STATE_DISABLED },
    /*      STOP    */ { bxe_stats_do_nothing, STATS_STATE_DISABLED }
    },
    {
    /* ENABLED  PMF */ { bxe_stats_pmf_start,  STATS_STATE_ENABLED },
    /*      LINK_UP */ { bxe_stats_restart,    STATS_STATE_ENABLED },
    /*      UPDATE  */ { bxe_stats_update,     STATS_STATE_ENABLED },
    /*      STOP    */ { bxe_stats_stop,       STATS_STATE_DISABLED }
    }
};
1452 
1453 void bxe_stats_handle(struct bxe_softc     *sc,
1454                       enum bxe_stats_event event)
1455 {
1456     enum bxe_stats_state state;
1457 
1458     if (__predict_false(sc->panic)) {
1459         return;
1460     }
1461 
1462     BXE_STATS_LOCK(sc);
1463     state = sc->stats_state;
1464     sc->stats_state = bxe_stats_stm[state][event].next_state;
1465     BXE_STATS_UNLOCK(sc);
1466 
1467     bxe_stats_stm[state][event].action(sc);
1468 
1469     if (event != STATS_EVENT_UPDATE) {
1470         BLOGD(sc, DBG_STATS,
1471               "state %d -> event %d -> state %d\n",
1472               state, event, sc->stats_state);
1473     }
1474 }
1475 
/*
 * Write the (zeroed/baseline) port statistics block out to the
 * management firmware's port_stx area via a single synchronous DMAE
 * transfer.  Only valid for the PMF with a port_stx address assigned.
 */
static void
bxe_port_stats_base_init(struct bxe_softc *sc)
{
    struct dmae_cmd *dmae;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    /* sanity */
    if (!sc->port.pmf || !sc->port.port_stx) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    sc->executer_idx = 0;

    /* single PCI -> GRC transfer completing to the host stats_comp word */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                                   TRUE, DMAE_COMP_PCI);
    dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
    dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
    dmae->dst_addr_lo = (sc->port.port_stx >> 2);
    dmae->dst_addr_hi = 0;
    dmae->len = bxe_get_port_stats_dma_len(sc);
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    /* post the command and busy-wait for its completion */
    *stats_comp = 0;
    bxe_hw_stats_post(sc);
    bxe_stats_comp(sc);
}
1506 
1507 /*
1508  * This function will prepare the statistics ramrod data the way
1509  * we will only have to increment the statistics counter and
1510  * send the ramrod each time we have to.
1511  */
1512 static void
1513 bxe_prep_fw_stats_req(struct bxe_softc *sc)
1514 {
1515     int i;
1516     int first_queue_query_index;
1517     struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
1518     bus_addr_t cur_data_offset;
1519     struct stats_query_entry *cur_query_entry;
1520 
1521     stats_hdr->cmd_num = sc->fw_stats_num;
1522     stats_hdr->drv_stats_counter = 0;
1523 
1524     /*
1525      * The storm_counters struct contains the counters of completed
1526      * statistics requests per storm which are incremented by FW
1527      * each time it completes hadning a statistics ramrod. We will
1528      * check these counters in the timer handler and discard a
1529      * (statistics) ramrod completion.
1530      */
1531     cur_data_offset = (sc->fw_stats_data_mapping +
1532                        offsetof(struct bxe_fw_stats_data, storm_counters));
1533 
1534     stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
1535     stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));
1536 
1537     /*
1538      * Prepare the first stats ramrod (will be completed with
1539      * the counters equal to zero) - init counters to somethig different.
1540      */
1541     memset(&sc->fw_stats_data->storm_counters, 0xff,
1542            sizeof(struct stats_counter));
1543 
1544     /**** Port FW statistics data ****/
1545     cur_data_offset = (sc->fw_stats_data_mapping +
1546                        offsetof(struct bxe_fw_stats_data, port));
1547 
1548     cur_query_entry = &sc->fw_stats_req->query[BXE_PORT_QUERY_IDX];
1549 
1550     cur_query_entry->kind = STATS_TYPE_PORT;
1551     /* For port query index is a DONT CARE */
1552     cur_query_entry->index = SC_PORT(sc);
1553     /* For port query funcID is a DONT CARE */
1554     cur_query_entry->funcID = htole16(SC_FUNC(sc));
1555     cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1556     cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1557 
1558     /**** PF FW statistics data ****/
1559     cur_data_offset = (sc->fw_stats_data_mapping +
1560                        offsetof(struct bxe_fw_stats_data, pf));
1561 
1562     cur_query_entry = &sc->fw_stats_req->query[BXE_PF_QUERY_IDX];
1563 
1564     cur_query_entry->kind = STATS_TYPE_PF;
1565     /* For PF query index is a DONT CARE */
1566     cur_query_entry->index = SC_PORT(sc);
1567     cur_query_entry->funcID = htole16(SC_FUNC(sc));
1568     cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1569     cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1570 
1571     /**** Clients' queries ****/
1572     cur_data_offset = (sc->fw_stats_data_mapping +
1573                        offsetof(struct bxe_fw_stats_data, queue_stats));
1574 
1575     /*
1576      * First queue query index depends whether FCoE offloaded request will
1577      * be included in the ramrod
1578      */
1579     first_queue_query_index = (BXE_FIRST_QUEUE_QUERY_IDX - 1);
1580 
1581     for (i = 0; i < sc->num_queues; i++) {
1582         cur_query_entry =
1583             &sc->fw_stats_req->query[first_queue_query_index + i];
1584 
1585         cur_query_entry->kind = STATS_TYPE_QUEUE;
1586         cur_query_entry->index = bxe_stats_id(&sc->fp[i]);
1587         cur_query_entry->funcID = htole16(SC_FUNC(sc));
1588         cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1589         cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1590 
1591         cur_data_offset += sizeof(struct per_queue_stats);
1592     }
1593 }
1594 
1595 void
1596 bxe_stats_init(struct bxe_softc *sc)
1597 {
1598     int /*abs*/port = SC_PORT(sc);
1599     int mb_idx = SC_FW_MB_IDX(sc);
1600     int i;
1601 
1602     sc->stats_pending = 0;
1603     sc->executer_idx = 0;
1604     sc->stats_counter = 0;
1605 
1606     /* port and func stats for management */
1607     if (!BXE_NOMCP(sc)) {
1608         sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
1609         sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param);
1610     } else {
1611         sc->port.port_stx = 0;
1612         sc->func_stx = 0;
1613     }
1614 
1615     BLOGD(sc, DBG_STATS, "port_stx 0x%x func_stx 0x%x\n",
1616           sc->port.port_stx, sc->func_stx);
1617 
1618     /* pmf should retrieve port statistics from SP on a non-init*/
1619     if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) {
1620         bxe_stats_handle(sc, STATS_EVENT_PMF);
1621     }
1622 
1623     port = SC_PORT(sc);
1624     /* port stats */
1625     memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
1626     sc->port.old_nig_stats.brb_discard =
1627         REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
1628     sc->port.old_nig_stats.brb_truncate =
1629         REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
1630     if (!CHIP_IS_E3(sc)) {
1631         REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
1632                     &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
1633         REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
1634                     &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1635     }
1636 
1637     /* function stats */
1638     for (i = 0; i < sc->num_queues; i++) {
1639         memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient));
1640         memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient));
1641         memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient));
1642         if (sc->stats_init) {
1643             memset(&sc->fp[i].eth_q_stats, 0,
1644                    sizeof(sc->fp[i].eth_q_stats));
1645             memset(&sc->fp[i].eth_q_stats_old, 0,
1646                    sizeof(sc->fp[i].eth_q_stats_old));
1647         }
1648     }
1649 
1650     /* prepare statistics ramrod data */
1651     bxe_prep_fw_stats_req(sc);
1652 
1653     if (sc->stats_init) {
1654         memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
1655         memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
1656         memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
1657         memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
1658         memset(&sc->func_stats, 0, sizeof(sc->func_stats));
1659 
1660         /* Clean SP from previous statistics */
1661         if (sc->func_stx) {
1662             memset(BXE_SP(sc, func_stats), 0, sizeof(struct host_func_stats));
1663             bxe_func_stats_init(sc);
1664             bxe_hw_stats_post(sc);
1665             bxe_stats_comp(sc);
1666         }
1667     }
1668 
1669     sc->stats_state = STATS_STATE_DISABLED;
1670 
1671     if (sc->port.pmf && sc->port.port_stx) {
1672         bxe_port_stats_base_init(sc);
1673     }
1674 
1675     /* mark the end of statistics initialization */
1676     sc->stats_init = FALSE;
1677 }
1678 
/*
 * Copy the current per-queue and (when PMF) port firmware statistics into
 * their *_old counterparts so they can serve as a baseline later.
 */
void
bxe_save_statistics(struct bxe_softc *sc)
{
    int i;

    /* save queue statistics */
    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_fastpath *fp = &sc->fp[i];
        struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
        struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

        /*
         * NOTE(review): UPDATE_QSTAT_OLD presumably expands using the
         * local 'qstats'/'qstats_old' pointers by name (they are never
         * referenced explicitly below) — do not rename these locals.
         */
        UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
        UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
        UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
        UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
        UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
        UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
        UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
        UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
        UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
        UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
    }

    /* store port firmware statistics */
    if (sc->port.pmf) {
        struct bxe_eth_stats *estats = &sc->eth_stats;
        struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
        struct host_port_stats *pstats = BXE_SP(sc, port_stats);

        /* PFC frame counters are copied verbatim (hi/lo 32-bit halves) */
        fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi;
        fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo;
        fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi;
        fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo;

        /* multi-function mode: save the MF-specific discard counters */
        if (IS_MF(sc)) {
            /*
             * NOTE(review): UPDATE_FW_STAT_OLD likewise appears to use
             * the 'fwstats'/'estats' locals by name — do not rename.
             */
            UPDATE_FW_STAT_OLD(mac_filter_discard);
            UPDATE_FW_STAT_OLD(mf_tag_discard);
            UPDATE_FW_STAT_OLD(brb_truncate_discard);
            UPDATE_FW_STAT_OLD(mac_discard);
        }
    }
}
1725 
1726 void
1727 bxe_afex_collect_stats(struct bxe_softc *sc,
1728                        void             *void_afex_stats,
1729                        uint32_t         stats_type)
1730 {
1731     int i;
1732     struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1733     struct bxe_eth_stats *estats = &sc->eth_stats;
1734 
1735     memset(afex_stats, 0, sizeof(struct afex_stats));
1736 
1737     for (i = 0; i < sc->num_queues; i++) {
1738         struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
1739 
1740         ADD_64(afex_stats->rx_unicast_bytes_hi,
1741                qstats->total_unicast_bytes_received_hi,
1742                afex_stats->rx_unicast_bytes_lo,
1743                qstats->total_unicast_bytes_received_lo);
1744 
1745         ADD_64(afex_stats->rx_broadcast_bytes_hi,
1746                qstats->total_broadcast_bytes_received_hi,
1747                afex_stats->rx_broadcast_bytes_lo,
1748                qstats->total_broadcast_bytes_received_lo);
1749 
1750         ADD_64(afex_stats->rx_multicast_bytes_hi,
1751                qstats->total_multicast_bytes_received_hi,
1752                afex_stats->rx_multicast_bytes_lo,
1753                qstats->total_multicast_bytes_received_lo);
1754 
1755         ADD_64(afex_stats->rx_unicast_frames_hi,
1756                qstats->total_unicast_packets_received_hi,
1757                afex_stats->rx_unicast_frames_lo,
1758                qstats->total_unicast_packets_received_lo);
1759 
1760         ADD_64(afex_stats->rx_broadcast_frames_hi,
1761                qstats->total_broadcast_packets_received_hi,
1762                afex_stats->rx_broadcast_frames_lo,
1763                qstats->total_broadcast_packets_received_lo);
1764 
1765         ADD_64(afex_stats->rx_multicast_frames_hi,
1766                qstats->total_multicast_packets_received_hi,
1767                afex_stats->rx_multicast_frames_lo,
1768                qstats->total_multicast_packets_received_lo);
1769 
1770         /*
1771          * sum to rx_frames_discarded all discarded
1772          * packets due to size, ttl0 and checksum
1773          */
1774         ADD_64(afex_stats->rx_frames_discarded_hi,
1775                qstats->total_packets_received_checksum_discarded_hi,
1776                afex_stats->rx_frames_discarded_lo,
1777                qstats->total_packets_received_checksum_discarded_lo);
1778 
1779         ADD_64(afex_stats->rx_frames_discarded_hi,
1780                qstats->total_packets_received_ttl0_discarded_hi,
1781                afex_stats->rx_frames_discarded_lo,
1782                qstats->total_packets_received_ttl0_discarded_lo);
1783 
1784         ADD_64(afex_stats->rx_frames_discarded_hi,
1785                qstats->etherstatsoverrsizepkts_hi,
1786                afex_stats->rx_frames_discarded_lo,
1787                qstats->etherstatsoverrsizepkts_lo);
1788 
1789         ADD_64(afex_stats->rx_frames_dropped_hi,
1790                qstats->no_buff_discard_hi,
1791                afex_stats->rx_frames_dropped_lo,
1792                qstats->no_buff_discard_lo);
1793 
1794         ADD_64(afex_stats->tx_unicast_bytes_hi,
1795                qstats->total_unicast_bytes_transmitted_hi,
1796                afex_stats->tx_unicast_bytes_lo,
1797                qstats->total_unicast_bytes_transmitted_lo);
1798 
1799         ADD_64(afex_stats->tx_broadcast_bytes_hi,
1800                qstats->total_broadcast_bytes_transmitted_hi,
1801                afex_stats->tx_broadcast_bytes_lo,
1802                qstats->total_broadcast_bytes_transmitted_lo);
1803 
1804         ADD_64(afex_stats->tx_multicast_bytes_hi,
1805                qstats->total_multicast_bytes_transmitted_hi,
1806                afex_stats->tx_multicast_bytes_lo,
1807                qstats->total_multicast_bytes_transmitted_lo);
1808 
1809         ADD_64(afex_stats->tx_unicast_frames_hi,
1810                qstats->total_unicast_packets_transmitted_hi,
1811                afex_stats->tx_unicast_frames_lo,
1812                qstats->total_unicast_packets_transmitted_lo);
1813 
1814         ADD_64(afex_stats->tx_broadcast_frames_hi,
1815                qstats->total_broadcast_packets_transmitted_hi,
1816                afex_stats->tx_broadcast_frames_lo,
1817                qstats->total_broadcast_packets_transmitted_lo);
1818 
1819         ADD_64(afex_stats->tx_multicast_frames_hi,
1820                qstats->total_multicast_packets_transmitted_hi,
1821                afex_stats->tx_multicast_frames_lo,
1822                qstats->total_multicast_packets_transmitted_lo);
1823 
1824         ADD_64(afex_stats->tx_frames_dropped_hi,
1825                qstats->total_transmitted_dropped_packets_error_hi,
1826                afex_stats->tx_frames_dropped_lo,
1827                qstats->total_transmitted_dropped_packets_error_lo);
1828     }
1829 
1830     /*
1831      * If port stats are requested, add them to the PMF
1832      * stats, as anyway they will be accumulated by the
1833      * MCP before sent to the switch
1834      */
1835     if ((sc->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
1836         ADD_64(afex_stats->rx_frames_dropped_hi,
1837                0,
1838                afex_stats->rx_frames_dropped_lo,
1839                estats->mac_filter_discard);
1840         ADD_64(afex_stats->rx_frames_dropped_hi,
1841                0,
1842                afex_stats->rx_frames_dropped_lo,
1843                estats->brb_truncate_discard);
1844         ADD_64(afex_stats->rx_frames_discarded_hi,
1845                0,
1846                afex_stats->rx_frames_discarded_lo,
1847                estats->mac_discard);
1848     }
1849 }
1850 
1851