/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"


/* Statistics */

/*
 * General service functions
 */

/* Fold a 64-bit statistic stored as a {hi, lo} pair of consecutive u32s
 * (hi word first, at *hiref) into a long: the full 64-bit value on
 * 64-bit builds, the low 32 bits only on 32-bit builds.
 */
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/* Length, in u32 words, of the host_port_stats area to DMA to the MFW.
 * The trailing four u32 PFC counters are excluded when the management
 * firmware does not support PFC statistics.
 */
static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = sizeof(struct host_port_stats) >> 2;

	/* if PFC stats are not supported by the MFW, don't DMA them */
	if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
		res -= (sizeof(u32)*4) >> 2;

	return res;
}

/*
 * Init service functions
 */

/* Post the next statistics ramrod. Protect it with the spin in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
62 */ 63 static void bnx2x_storm_stats_post(struct bnx2x *bp) 64 { 65 if (!bp->stats_pending) { 66 int rc; 67 68 spin_lock_bh(&bp->stats_lock); 69 70 if (bp->stats_pending) { 71 spin_unlock_bh(&bp->stats_lock); 72 return; 73 } 74 75 bp->fw_stats_req->hdr.drv_stats_counter = 76 cpu_to_le16(bp->stats_counter++); 77 78 DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", 79 bp->fw_stats_req->hdr.drv_stats_counter); 80 81 82 83 /* send FW stats ramrod */ 84 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, 85 U64_HI(bp->fw_stats_req_mapping), 86 U64_LO(bp->fw_stats_req_mapping), 87 NONE_CONNECTION_TYPE); 88 if (rc == 0) 89 bp->stats_pending = 1; 90 91 spin_unlock_bh(&bp->stats_lock); 92 } 93 } 94 95 static void bnx2x_hw_stats_post(struct bnx2x *bp) 96 { 97 struct dmae_command *dmae = &bp->stats_dmae; 98 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 99 100 *stats_comp = DMAE_COMP_VAL; 101 if (CHIP_REV_IS_SLOW(bp)) 102 return; 103 104 /* Update MCP's statistics if possible */ 105 if (bp->func_stx) 106 memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, 107 sizeof(bp->func_stats)); 108 109 /* loader */ 110 if (bp->executer_idx) { 111 int loader_idx = PMF_DMAE_C(bp); 112 u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, 113 true, DMAE_COMP_GRC); 114 opcode = bnx2x_dmae_opcode_clr_src_reset(opcode); 115 116 memset(dmae, 0, sizeof(struct dmae_command)); 117 dmae->opcode = opcode; 118 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); 119 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); 120 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + 121 sizeof(struct dmae_command) * 122 (loader_idx + 1)) >> 2; 123 dmae->dst_addr_hi = 0; 124 dmae->len = sizeof(struct dmae_command) >> 2; 125 if (CHIP_IS_E1(bp)) 126 dmae->len--; 127 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; 128 dmae->comp_addr_hi = 0; 129 dmae->comp_val = 1; 130 131 *stats_comp = 0; 132 bnx2x_post_dmae(bp, dmae, loader_idx); 133 134 } else if (bp->func_stx) { 135 *stats_comp = 0; 
136 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); 137 } 138 } 139 140 static int bnx2x_stats_comp(struct bnx2x *bp) 141 { 142 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 143 int cnt = 10; 144 145 might_sleep(); 146 while (*stats_comp != DMAE_COMP_VAL) { 147 if (!cnt) { 148 BNX2X_ERR("timeout waiting for stats finished\n"); 149 break; 150 } 151 cnt--; 152 usleep_range(1000, 1000); 153 } 154 return 1; 155 } 156 157 /* 158 * Statistics service functions 159 */ 160 161 static void bnx2x_stats_pmf_update(struct bnx2x *bp) 162 { 163 struct dmae_command *dmae; 164 u32 opcode; 165 int loader_idx = PMF_DMAE_C(bp); 166 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 167 168 /* sanity */ 169 if (!bp->port.pmf || !bp->port.port_stx) { 170 BNX2X_ERR("BUG!\n"); 171 return; 172 } 173 174 bp->executer_idx = 0; 175 176 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0); 177 178 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 179 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); 180 dmae->src_addr_lo = bp->port.port_stx >> 2; 181 dmae->src_addr_hi = 0; 182 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 183 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); 184 dmae->len = DMAE_LEN32_RD_MAX; 185 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 186 dmae->comp_addr_hi = 0; 187 dmae->comp_val = 1; 188 189 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 190 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); 191 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; 192 dmae->src_addr_hi = 0; 193 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + 194 DMAE_LEN32_RD_MAX * 4); 195 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + 196 DMAE_LEN32_RD_MAX * 4); 197 dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX; 198 199 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); 200 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); 201 dmae->comp_val = 
DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* Build the DMAE command chain (in bp->slowpath, indexed by
 * bp->executer_idx) that a later bnx2x_hw_stats_post() will run:
 * copy port/function stats up to the MCP, then pull MAC and NIG
 * hardware counters into host memory.  PMF-with-link-up only.
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	/* port stats -> MCP scratchpad (port_stx) */
	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* function stats -> MCP scratchpad (func_stx) */
	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special: its counters live in three disjoint
	 * register ranges, so three separate transfers are queued.
	 */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats land right after the TX block in mac_stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* last command in the chain: completes to PCI (stats_comp) */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

/* Non-PMF variant: a single DMAE command (bp->stats_dmae) that copies
 * this function's stats to the MCP's func_stx scratchpad location.
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

/* (Re)build the DMAE program appropriate for this function's role,
 * then launch both the hardware (DMAE) and firmware (ramrod) queries.
 */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

/* Called when this function becomes PMF: wait for any in-flight DMAE,
 * import the port stats accumulated so far, then start collection.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

/* Wait for any in-flight DMAE, then restart statistics collection. */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

/* Fold the freshly DMAed BMAC (BigMAC1 on E1x, BigMAC2 otherwise)
 * counters into the port stats and derive the ethtool pause/PFC
 * counters from the accumulated mac_stx[1] block.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct
host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;	/* scratch - presumably consumed by UPDATE_STAT64; not
		 * referenced directly here */

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats (BMAC2 only - BMAC1 has none) */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	/* export pause/PFC totals from the accumulated port stats */
	estats->pause_frames_received_hi =
		pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
		pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
		pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
		pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
		pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
		pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
		pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
		pstats->pfc_frames_tx_lo;
}

/* Fold the freshly DMAed MSTAT (UMAC/XMAC on newer chips) counters
 * into the port stats; MSTAT counters are already 64-bit, hence the
 * ADD_STAT64 accumulation instead of UPDATE_STAT64 deltas.
 */
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	/* "over 1522" = sum of the 2047/4095/9216/16383 buckets */
	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
		pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
		pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
		pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
		pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
		pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
		pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
		pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
		pstats->pfc_frames_tx_lo;
}

/* Fold the freshly DMAed EMAC counters into the port stats and derive
 * the pause-frame totals (xon + xoff) for ethtool.
 */
static void
bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause frames received/sent = xon + xoff counts */
	estats->pause_frames_received_hi =
		pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
		pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
		pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
		pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

/* Process the results of the hardware (DMAE) statistics query:
 * dispatch to the per-MAC-type update routine, then fold in the NIG
 * BRB discard/truncate and egress packet counters.  Returns 0, or -1
 * when no MAC is active (stats cannot be attributed).
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;	/* scratch - presumably consumed by the UPDATE_* macros */

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached; falls through to the NIG update below */
		BNX2X_ERR("Unknown MAC type\n");
	}

	/* NIG counters are deltas against the previous snapshot */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	/* export the accumulated MAC block into the ethtool stats */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (CHIP_IS_E3(bp)) {
		u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
					  : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
		estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
	}

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

/* Process the results of the firmware (ramrod) statistics query:
 * validate each storm's reply counter against the last posted ramrod,
 * then fold the per-queue tstorm/ustorm/xstorm counters into the
 * driver's queue and function stats.  Returns -EAGAIN when any storm
 * has not yet answered the latest query.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
		&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
		&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* are storm stats valid?
*/ 831 if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { 832 DP(BNX2X_MSG_STATS, 833 "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n", 834 le16_to_cpu(counters->xstats_counter), bp->stats_counter); 835 return -EAGAIN; 836 } 837 838 if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { 839 DP(BNX2X_MSG_STATS, 840 "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n", 841 le16_to_cpu(counters->ustats_counter), bp->stats_counter); 842 return -EAGAIN; 843 } 844 845 if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { 846 DP(BNX2X_MSG_STATS, 847 "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n", 848 le16_to_cpu(counters->cstats_counter), bp->stats_counter); 849 return -EAGAIN; 850 } 851 852 if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { 853 DP(BNX2X_MSG_STATS, 854 "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n", 855 le16_to_cpu(counters->tstats_counter), bp->stats_counter); 856 return -EAGAIN; 857 } 858 859 estats->error_bytes_received_hi = 0; 860 estats->error_bytes_received_lo = 0; 861 862 for_each_eth_queue(bp, i) { 863 struct bnx2x_fastpath *fp = &bp->fp[i]; 864 struct tstorm_per_queue_stats *tclient = 865 &bp->fw_stats_data->queue_stats[i]. 866 tstorm_queue_statistics; 867 struct tstorm_per_queue_stats *old_tclient = 868 &bnx2x_fp_stats(bp, fp)->old_tclient; 869 struct ustorm_per_queue_stats *uclient = 870 &bp->fw_stats_data->queue_stats[i]. 871 ustorm_queue_statistics; 872 struct ustorm_per_queue_stats *old_uclient = 873 &bnx2x_fp_stats(bp, fp)->old_uclient; 874 struct xstorm_per_queue_stats *xclient = 875 &bp->fw_stats_data->queue_stats[i]. 
876 xstorm_queue_statistics; 877 struct xstorm_per_queue_stats *old_xclient = 878 &bnx2x_fp_stats(bp, fp)->old_xclient; 879 struct bnx2x_eth_q_stats *qstats = 880 &bnx2x_fp_stats(bp, fp)->eth_q_stats; 881 struct bnx2x_eth_q_stats_old *qstats_old = 882 &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; 883 884 u32 diff; 885 886 DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n", 887 i, xclient->ucast_pkts_sent, 888 xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); 889 890 DP(BNX2X_MSG_STATS, "---------------\n"); 891 892 UPDATE_QSTAT(tclient->rcv_bcast_bytes, 893 total_broadcast_bytes_received); 894 UPDATE_QSTAT(tclient->rcv_mcast_bytes, 895 total_multicast_bytes_received); 896 UPDATE_QSTAT(tclient->rcv_ucast_bytes, 897 total_unicast_bytes_received); 898 899 /* 900 * sum to total_bytes_received all 901 * unicast/multicast/broadcast 902 */ 903 qstats->total_bytes_received_hi = 904 qstats->total_broadcast_bytes_received_hi; 905 qstats->total_bytes_received_lo = 906 qstats->total_broadcast_bytes_received_lo; 907 908 ADD_64(qstats->total_bytes_received_hi, 909 qstats->total_multicast_bytes_received_hi, 910 qstats->total_bytes_received_lo, 911 qstats->total_multicast_bytes_received_lo); 912 913 ADD_64(qstats->total_bytes_received_hi, 914 qstats->total_unicast_bytes_received_hi, 915 qstats->total_bytes_received_lo, 916 qstats->total_unicast_bytes_received_lo); 917 918 qstats->valid_bytes_received_hi = 919 qstats->total_bytes_received_hi; 920 qstats->valid_bytes_received_lo = 921 qstats->total_bytes_received_lo; 922 923 924 UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, 925 total_unicast_packets_received); 926 UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, 927 total_multicast_packets_received); 928 UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, 929 total_broadcast_packets_received); 930 UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard, 931 etherstatsoverrsizepkts); 932 UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard); 933 934 SUB_EXTEND_USTAT(ucast_no_buff_pkts, 935 
total_unicast_packets_received); 936 SUB_EXTEND_USTAT(mcast_no_buff_pkts, 937 total_multicast_packets_received); 938 SUB_EXTEND_USTAT(bcast_no_buff_pkts, 939 total_broadcast_packets_received); 940 UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard); 941 UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard); 942 UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard); 943 944 UPDATE_QSTAT(xclient->bcast_bytes_sent, 945 total_broadcast_bytes_transmitted); 946 UPDATE_QSTAT(xclient->mcast_bytes_sent, 947 total_multicast_bytes_transmitted); 948 UPDATE_QSTAT(xclient->ucast_bytes_sent, 949 total_unicast_bytes_transmitted); 950 951 /* 952 * sum to total_bytes_transmitted all 953 * unicast/multicast/broadcast 954 */ 955 qstats->total_bytes_transmitted_hi = 956 qstats->total_unicast_bytes_transmitted_hi; 957 qstats->total_bytes_transmitted_lo = 958 qstats->total_unicast_bytes_transmitted_lo; 959 960 ADD_64(qstats->total_bytes_transmitted_hi, 961 qstats->total_broadcast_bytes_transmitted_hi, 962 qstats->total_bytes_transmitted_lo, 963 qstats->total_broadcast_bytes_transmitted_lo); 964 965 ADD_64(qstats->total_bytes_transmitted_hi, 966 qstats->total_multicast_bytes_transmitted_hi, 967 qstats->total_bytes_transmitted_lo, 968 qstats->total_multicast_bytes_transmitted_lo); 969 970 UPDATE_EXTEND_XSTAT(ucast_pkts_sent, 971 total_unicast_packets_transmitted); 972 UPDATE_EXTEND_XSTAT(mcast_pkts_sent, 973 total_multicast_packets_transmitted); 974 UPDATE_EXTEND_XSTAT(bcast_pkts_sent, 975 total_broadcast_packets_transmitted); 976 977 UPDATE_EXTEND_TSTAT(checksum_discard, 978 total_packets_received_checksum_discarded); 979 UPDATE_EXTEND_TSTAT(ttl0_discard, 980 total_packets_received_ttl0_discarded); 981 982 UPDATE_EXTEND_XSTAT(error_drop_pkts, 983 total_transmitted_dropped_packets_error); 984 985 /* TPA aggregations completed */ 986 UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations); 987 /* Number of network frames aggregated by TPA */ 988 
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		/* fold this queue's totals into the function-wide stats */
		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	/* account MAC-level bad octets and tstorm error bytes in both the
	 * received-bytes and error-bytes totals
	 */
	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(estats->total_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->total_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	ADD_64(estats->error_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->error_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	/* storm counters matched the expected sequence - clear pending flag */
	bp->stats_pending = 0;

	return 0;
}

/* Fold the driver's 64-bit eth_stats accumulators into the netdev's
 * struct net_device_stats counters (the values reported to the stack).
 */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* rx_dropped = MAC discards + per-queue checksum discards +
	 * whatever was dropped before the last stats reset
	 */
	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i) {
		struct tstorm_per_queue_stats *old_tclient =
			&bp->fp_stats[i].old_tclient;
		tmp += le32_to_cpu(old_tclient->checksum_discard);
	}
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

/* Accumulate the per-queue driver-maintained counters (xoff events, RX
 * discards, skb allocation failures, HW checksum errors) into eth_stats.
 */
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		/* qstats/qstats_old are referenced by the UPDATE_ESTAT_QSTAT
		 * macros below
		 */
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
	}
}

/* Return true if the management FW's debug interface has requested that
 * statistics collection be disabled.
 */
static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

/* Periodic statistics update: pull HW and storm (firmware) statistics,
 * then refresh netdev and driver counters and re-arm both collections.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	/* previous DMAE round has not completed yet - try again later */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp)) {
		/* FW has not answered several consecutive ramrods - fatal */
		if (bp->stats_pending++ == 3) {
			BNX2X_ERR("storm stats were not updated for 3 times\n");
			bnx2x_panic();
		}
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats =
			&bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	/* kick off the next HW DMAE round and the next FW stats ramrod */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

/* Build the DMAE commands that copy the final port and function statistics
 * to the MCP (management firmware) areas before statistics are stopped.
 * The commands are queued in bp->slowpath dmae[] and executed by
 * bnx2x_hw_stats_post().
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a func_stx command follows, complete this one to GRC so
		 * the loader chains it; otherwise complete to PCI memory
		 */
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp,
							     stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

/* Final statistics collection before the stats machinery is disabled:
 * grab a last snapshot and, on the PMF, push it out to the MCP.
 */
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

/* Statistics state machine: indexed by [current state][event], each entry
 * gives the action to run and the next state.
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start, STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart, STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update, STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop, STATS_STATE_DISABLED}
}
};

/* Drive the statistics state machine: the state transition is done under
 * stats_lock; the action itself runs outside the lock.
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;
	if (unlikely(bp->panic))
		return;

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

/* On the PMF, write the base port statistics block out to the MCP area
 * pointed to by port_stx using a single DMAE transaction.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity - only the PMF with a valid MCP port-stats address may run */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* This function will prepare the statistics ramrod data the way
 * we will only have to increment the statistics counter and
 * send the ramrod each time we have to.
 */
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare to the first stats ramrod (will be completed with
	 * the counters equal to zero) - init counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* first queue query index depends whether FCoE offloaded request will
	 * be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		/* i holds the number of ETH queues after the loop above */
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}

/* (Re)initialize the statistics machinery. On a first init (bp->stats_init)
 * all accumulators are zeroed; on a re-init the "old" snapshots are kept so
 * counters survive across resets.
 */
void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* pmf should retrieve port statistics from SP on a non-init */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	/* NOTE(review): port was already initialized to BP_PORT(bp) at the
	 * top of the function; this reassignment is redundant but harmless.
	 */
	port = BP_PORT(bp);
	/* port stats - seed the "old" NIG snapshot with the current HW
	 * values so the first delta computed later is correct
	 */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];

		memset(&fp_stats->old_tclient, 0,
		       sizeof(fp_stats->old_tclient));
		memset(&fp_stats->old_uclient, 0,
		       sizeof(fp_stats->old_uclient));
		memset(&fp_stats->old_xclient, 0,
		       sizeof(fp_stats->old_xclient));
		/* per-queue accumulators are cleared only on a first init */
		if (bp->stats_init) {
			memset(&fp_stats->eth_q_stats, 0,
			       sizeof(fp_stats->eth_q_stats));
			memset(&fp_stats->eth_q_stats_old, 0,
			       sizeof(fp_stats->eth_q_stats_old));
		}
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0,
		       sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));

		/* Clean SP from previous statistics */
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}

/* Snapshot statistics that must survive a device reset: per-queue byte
 * totals, netdev rx_dropped, and (for a multi-function PMF) the firmware
 * port discard counters.
 */
void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}

	/* save net_device_stats statistics */
bp->net_stats_old.rx_dropped = nstats->rx_dropped; 1575 1576 /* store port firmware statistics */ 1577 if (bp->port.pmf && IS_MF(bp)) { 1578 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1579 struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old; 1580 UPDATE_FW_STAT_OLD(mac_filter_discard); 1581 UPDATE_FW_STAT_OLD(mf_tag_discard); 1582 UPDATE_FW_STAT_OLD(brb_truncate_discard); 1583 UPDATE_FW_STAT_OLD(mac_discard); 1584 } 1585 } 1586 1587 void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, 1588 u32 stats_type) 1589 { 1590 int i; 1591 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; 1592 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1593 struct per_queue_stats *fcoe_q_stats = 1594 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]; 1595 1596 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 1597 &fcoe_q_stats->tstorm_queue_statistics; 1598 1599 struct ustorm_per_queue_stats *fcoe_q_ustorm_stats = 1600 &fcoe_q_stats->ustorm_queue_statistics; 1601 1602 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = 1603 &fcoe_q_stats->xstorm_queue_statistics; 1604 1605 struct fcoe_statistics_params *fw_fcoe_stat = 1606 &bp->fw_stats_data->fcoe; 1607 1608 memset(afex_stats, 0, sizeof(struct afex_stats)); 1609 1610 for_each_eth_queue(bp, i) { 1611 struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; 1612 1613 ADD_64(afex_stats->rx_unicast_bytes_hi, 1614 qstats->total_unicast_bytes_received_hi, 1615 afex_stats->rx_unicast_bytes_lo, 1616 qstats->total_unicast_bytes_received_lo); 1617 1618 ADD_64(afex_stats->rx_broadcast_bytes_hi, 1619 qstats->total_broadcast_bytes_received_hi, 1620 afex_stats->rx_broadcast_bytes_lo, 1621 qstats->total_broadcast_bytes_received_lo); 1622 1623 ADD_64(afex_stats->rx_multicast_bytes_hi, 1624 qstats->total_multicast_bytes_received_hi, 1625 afex_stats->rx_multicast_bytes_lo, 1626 qstats->total_multicast_bytes_received_lo); 1627 1628 ADD_64(afex_stats->rx_unicast_frames_hi, 1629 
qstats->total_unicast_packets_received_hi, 1630 afex_stats->rx_unicast_frames_lo, 1631 qstats->total_unicast_packets_received_lo); 1632 1633 ADD_64(afex_stats->rx_broadcast_frames_hi, 1634 qstats->total_broadcast_packets_received_hi, 1635 afex_stats->rx_broadcast_frames_lo, 1636 qstats->total_broadcast_packets_received_lo); 1637 1638 ADD_64(afex_stats->rx_multicast_frames_hi, 1639 qstats->total_multicast_packets_received_hi, 1640 afex_stats->rx_multicast_frames_lo, 1641 qstats->total_multicast_packets_received_lo); 1642 1643 /* sum to rx_frames_discarded all discraded 1644 * packets due to size, ttl0 and checksum 1645 */ 1646 ADD_64(afex_stats->rx_frames_discarded_hi, 1647 qstats->total_packets_received_checksum_discarded_hi, 1648 afex_stats->rx_frames_discarded_lo, 1649 qstats->total_packets_received_checksum_discarded_lo); 1650 1651 ADD_64(afex_stats->rx_frames_discarded_hi, 1652 qstats->total_packets_received_ttl0_discarded_hi, 1653 afex_stats->rx_frames_discarded_lo, 1654 qstats->total_packets_received_ttl0_discarded_lo); 1655 1656 ADD_64(afex_stats->rx_frames_discarded_hi, 1657 qstats->etherstatsoverrsizepkts_hi, 1658 afex_stats->rx_frames_discarded_lo, 1659 qstats->etherstatsoverrsizepkts_lo); 1660 1661 ADD_64(afex_stats->rx_frames_dropped_hi, 1662 qstats->no_buff_discard_hi, 1663 afex_stats->rx_frames_dropped_lo, 1664 qstats->no_buff_discard_lo); 1665 1666 ADD_64(afex_stats->tx_unicast_bytes_hi, 1667 qstats->total_unicast_bytes_transmitted_hi, 1668 afex_stats->tx_unicast_bytes_lo, 1669 qstats->total_unicast_bytes_transmitted_lo); 1670 1671 ADD_64(afex_stats->tx_broadcast_bytes_hi, 1672 qstats->total_broadcast_bytes_transmitted_hi, 1673 afex_stats->tx_broadcast_bytes_lo, 1674 qstats->total_broadcast_bytes_transmitted_lo); 1675 1676 ADD_64(afex_stats->tx_multicast_bytes_hi, 1677 qstats->total_multicast_bytes_transmitted_hi, 1678 afex_stats->tx_multicast_bytes_lo, 1679 qstats->total_multicast_bytes_transmitted_lo); 1680 1681 
ADD_64(afex_stats->tx_unicast_frames_hi, 1682 qstats->total_unicast_packets_transmitted_hi, 1683 afex_stats->tx_unicast_frames_lo, 1684 qstats->total_unicast_packets_transmitted_lo); 1685 1686 ADD_64(afex_stats->tx_broadcast_frames_hi, 1687 qstats->total_broadcast_packets_transmitted_hi, 1688 afex_stats->tx_broadcast_frames_lo, 1689 qstats->total_broadcast_packets_transmitted_lo); 1690 1691 ADD_64(afex_stats->tx_multicast_frames_hi, 1692 qstats->total_multicast_packets_transmitted_hi, 1693 afex_stats->tx_multicast_frames_lo, 1694 qstats->total_multicast_packets_transmitted_lo); 1695 1696 ADD_64(afex_stats->tx_frames_dropped_hi, 1697 qstats->total_transmitted_dropped_packets_error_hi, 1698 afex_stats->tx_frames_dropped_lo, 1699 qstats->total_transmitted_dropped_packets_error_lo); 1700 } 1701 1702 /* now add FCoE statistics which are collected separately 1703 * (both offloaded and non offloaded) 1704 */ 1705 if (!NO_FCOE(bp)) { 1706 ADD_64_LE(afex_stats->rx_unicast_bytes_hi, 1707 LE32_0, 1708 afex_stats->rx_unicast_bytes_lo, 1709 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); 1710 1711 ADD_64_LE(afex_stats->rx_unicast_bytes_hi, 1712 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, 1713 afex_stats->rx_unicast_bytes_lo, 1714 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); 1715 1716 ADD_64_LE(afex_stats->rx_broadcast_bytes_hi, 1717 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, 1718 afex_stats->rx_broadcast_bytes_lo, 1719 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); 1720 1721 ADD_64_LE(afex_stats->rx_multicast_bytes_hi, 1722 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, 1723 afex_stats->rx_multicast_bytes_lo, 1724 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); 1725 1726 ADD_64_LE(afex_stats->rx_unicast_frames_hi, 1727 LE32_0, 1728 afex_stats->rx_unicast_frames_lo, 1729 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); 1730 1731 ADD_64_LE(afex_stats->rx_unicast_frames_hi, 1732 LE32_0, 1733 afex_stats->rx_unicast_frames_lo, 1734 fcoe_q_tstorm_stats->rcv_ucast_pkts); 1735 1736 
ADD_64_LE(afex_stats->rx_broadcast_frames_hi, 1737 LE32_0, 1738 afex_stats->rx_broadcast_frames_lo, 1739 fcoe_q_tstorm_stats->rcv_bcast_pkts); 1740 1741 ADD_64_LE(afex_stats->rx_multicast_frames_hi, 1742 LE32_0, 1743 afex_stats->rx_multicast_frames_lo, 1744 fcoe_q_tstorm_stats->rcv_ucast_pkts); 1745 1746 ADD_64_LE(afex_stats->rx_frames_discarded_hi, 1747 LE32_0, 1748 afex_stats->rx_frames_discarded_lo, 1749 fcoe_q_tstorm_stats->checksum_discard); 1750 1751 ADD_64_LE(afex_stats->rx_frames_discarded_hi, 1752 LE32_0, 1753 afex_stats->rx_frames_discarded_lo, 1754 fcoe_q_tstorm_stats->pkts_too_big_discard); 1755 1756 ADD_64_LE(afex_stats->rx_frames_discarded_hi, 1757 LE32_0, 1758 afex_stats->rx_frames_discarded_lo, 1759 fcoe_q_tstorm_stats->ttl0_discard); 1760 1761 ADD_64_LE16(afex_stats->rx_frames_dropped_hi, 1762 LE16_0, 1763 afex_stats->rx_frames_dropped_lo, 1764 fcoe_q_tstorm_stats->no_buff_discard); 1765 1766 ADD_64_LE(afex_stats->rx_frames_dropped_hi, 1767 LE32_0, 1768 afex_stats->rx_frames_dropped_lo, 1769 fcoe_q_ustorm_stats->ucast_no_buff_pkts); 1770 1771 ADD_64_LE(afex_stats->rx_frames_dropped_hi, 1772 LE32_0, 1773 afex_stats->rx_frames_dropped_lo, 1774 fcoe_q_ustorm_stats->mcast_no_buff_pkts); 1775 1776 ADD_64_LE(afex_stats->rx_frames_dropped_hi, 1777 LE32_0, 1778 afex_stats->rx_frames_dropped_lo, 1779 fcoe_q_ustorm_stats->bcast_no_buff_pkts); 1780 1781 ADD_64_LE(afex_stats->rx_frames_dropped_hi, 1782 LE32_0, 1783 afex_stats->rx_frames_dropped_lo, 1784 fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt); 1785 1786 ADD_64_LE(afex_stats->rx_frames_dropped_hi, 1787 LE32_0, 1788 afex_stats->rx_frames_dropped_lo, 1789 fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt); 1790 1791 ADD_64_LE(afex_stats->tx_unicast_bytes_hi, 1792 LE32_0, 1793 afex_stats->tx_unicast_bytes_lo, 1794 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); 1795 1796 ADD_64_LE(afex_stats->tx_unicast_bytes_hi, 1797 fcoe_q_xstorm_stats->ucast_bytes_sent.hi, 1798 afex_stats->tx_unicast_bytes_lo, 1799 
fcoe_q_xstorm_stats->ucast_bytes_sent.lo); 1800 1801 ADD_64_LE(afex_stats->tx_broadcast_bytes_hi, 1802 fcoe_q_xstorm_stats->bcast_bytes_sent.hi, 1803 afex_stats->tx_broadcast_bytes_lo, 1804 fcoe_q_xstorm_stats->bcast_bytes_sent.lo); 1805 1806 ADD_64_LE(afex_stats->tx_multicast_bytes_hi, 1807 fcoe_q_xstorm_stats->mcast_bytes_sent.hi, 1808 afex_stats->tx_multicast_bytes_lo, 1809 fcoe_q_xstorm_stats->mcast_bytes_sent.lo); 1810 1811 ADD_64_LE(afex_stats->tx_unicast_frames_hi, 1812 LE32_0, 1813 afex_stats->tx_unicast_frames_lo, 1814 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); 1815 1816 ADD_64_LE(afex_stats->tx_unicast_frames_hi, 1817 LE32_0, 1818 afex_stats->tx_unicast_frames_lo, 1819 fcoe_q_xstorm_stats->ucast_pkts_sent); 1820 1821 ADD_64_LE(afex_stats->tx_broadcast_frames_hi, 1822 LE32_0, 1823 afex_stats->tx_broadcast_frames_lo, 1824 fcoe_q_xstorm_stats->bcast_pkts_sent); 1825 1826 ADD_64_LE(afex_stats->tx_multicast_frames_hi, 1827 LE32_0, 1828 afex_stats->tx_multicast_frames_lo, 1829 fcoe_q_xstorm_stats->mcast_pkts_sent); 1830 1831 ADD_64_LE(afex_stats->tx_frames_dropped_hi, 1832 LE32_0, 1833 afex_stats->tx_frames_dropped_lo, 1834 fcoe_q_xstorm_stats->error_drop_pkts); 1835 } 1836 1837 /* if port stats are requested, add them to the PMF 1838 * stats, as anyway they will be accumulated by the 1839 * MCP before sent to the switch 1840 */ 1841 if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) { 1842 ADD_64(afex_stats->rx_frames_dropped_hi, 1843 0, 1844 afex_stats->rx_frames_dropped_lo, 1845 estats->mac_filter_discard); 1846 ADD_64(afex_stats->rx_frames_dropped_hi, 1847 0, 1848 afex_stats->rx_frames_dropped_lo, 1849 estats->brb_truncate_discard); 1850 ADD_64(afex_stats->rx_frames_discarded_hi, 1851 0, 1852 afex_stats->rx_frames_discarded_lo, 1853 estats->mac_discard); 1854 } 1855 } 1856