/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"


/* Statistics */

/*
 * General service functions
 */

/* Fold a {hi, lo} pair of consecutive u32 words (hi word first) into a
 * long.  On 64-bit builds the full 64-bit value is returned; on 32-bit
 * builds a long can only hold the low word, so the high word is dropped.
 */
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/* Return the number of u32 words of struct host_port_stats to DMA.
 * When the management FW does not understand PFC statistics, the four
 * trailing PFC counter words are excluded from the transfer.
 */
static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = sizeof(struct host_port_stats) >> 2;

	/* if PFC stats are not supported by the MFW, don't DMA them */
	if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
		res -= (sizeof(u32)*4) >> 2;

	return res;
}

/*
 * Init service functions
 */

/* Post the next statistics ramrod. Protect it with the spin in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
62 */ 63 static void bnx2x_storm_stats_post(struct bnx2x *bp) 64 { 65 if (!bp->stats_pending) { 66 int rc; 67 68 spin_lock_bh(&bp->stats_lock); 69 70 if (bp->stats_pending) { 71 spin_unlock_bh(&bp->stats_lock); 72 return; 73 } 74 75 bp->fw_stats_req->hdr.drv_stats_counter = 76 cpu_to_le16(bp->stats_counter++); 77 78 DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n", 79 bp->fw_stats_req->hdr.drv_stats_counter); 80 81 82 83 /* send FW stats ramrod */ 84 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, 85 U64_HI(bp->fw_stats_req_mapping), 86 U64_LO(bp->fw_stats_req_mapping), 87 NONE_CONNECTION_TYPE); 88 if (rc == 0) 89 bp->stats_pending = 1; 90 91 spin_unlock_bh(&bp->stats_lock); 92 } 93 } 94 95 static void bnx2x_hw_stats_post(struct bnx2x *bp) 96 { 97 struct dmae_command *dmae = &bp->stats_dmae; 98 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 99 100 *stats_comp = DMAE_COMP_VAL; 101 if (CHIP_REV_IS_SLOW(bp)) 102 return; 103 104 /* loader */ 105 if (bp->executer_idx) { 106 int loader_idx = PMF_DMAE_C(bp); 107 u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, 108 true, DMAE_COMP_GRC); 109 opcode = bnx2x_dmae_opcode_clr_src_reset(opcode); 110 111 memset(dmae, 0, sizeof(struct dmae_command)); 112 dmae->opcode = opcode; 113 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); 114 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); 115 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + 116 sizeof(struct dmae_command) * 117 (loader_idx + 1)) >> 2; 118 dmae->dst_addr_hi = 0; 119 dmae->len = sizeof(struct dmae_command) >> 2; 120 if (CHIP_IS_E1(bp)) 121 dmae->len--; 122 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; 123 dmae->comp_addr_hi = 0; 124 dmae->comp_val = 1; 125 126 *stats_comp = 0; 127 bnx2x_post_dmae(bp, dmae, loader_idx); 128 129 } else if (bp->func_stx) { 130 *stats_comp = 0; 131 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); 132 } 133 } 134 135 static int bnx2x_stats_comp(struct bnx2x *bp) 136 { 137 u32 *stats_comp = bnx2x_sp(bp, 
stats_comp); 138 int cnt = 10; 139 140 might_sleep(); 141 while (*stats_comp != DMAE_COMP_VAL) { 142 if (!cnt) { 143 BNX2X_ERR("timeout waiting for stats finished\n"); 144 break; 145 } 146 cnt--; 147 usleep_range(1000, 1000); 148 } 149 return 1; 150 } 151 152 /* 153 * Statistics service functions 154 */ 155 156 static void bnx2x_stats_pmf_update(struct bnx2x *bp) 157 { 158 struct dmae_command *dmae; 159 u32 opcode; 160 int loader_idx = PMF_DMAE_C(bp); 161 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 162 163 /* sanity */ 164 if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) { 165 BNX2X_ERR("BUG!\n"); 166 return; 167 } 168 169 bp->executer_idx = 0; 170 171 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0); 172 173 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 174 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); 175 dmae->src_addr_lo = bp->port.port_stx >> 2; 176 dmae->src_addr_hi = 0; 177 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 178 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); 179 dmae->len = DMAE_LEN32_RD_MAX; 180 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 181 dmae->comp_addr_hi = 0; 182 dmae->comp_val = 1; 183 184 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 185 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); 186 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; 187 dmae->src_addr_hi = 0; 188 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + 189 DMAE_LEN32_RD_MAX * 4); 190 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + 191 DMAE_LEN32_RD_MAX * 4); 192 dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX; 193 194 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); 195 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); 196 dmae->comp_val = DMAE_COMP_VAL; 197 198 *stats_comp = 0; 199 bnx2x_hw_stats_post(bp); 200 bnx2x_stats_comp(bp); 201 } 202 203 static void 
bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP: write the host copies of the port/function stats out to
	 * the shared-memory locations the management FW reads
	 */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC: read the MAC hardware counters into host memory */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special: its RX/TX counter ranges are not contiguous,
	 * so three separate reads are needed
	 */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats: placed right after the TX stats in mac_stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG: per-port egress packet counters (not present on E3) */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* final command: BRB discard counters, completing to PCI memory
	 * so bnx2x_stats_comp() can poll for the end of the whole chain
	 */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

/* Prepare a single DMAE command that copies the host function stats to
 * the chip location read by the management FW.
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

/* Queue the HW stats DMAE commands (PMF or function-only flavor), then
 * kick both the hardware DMA and the firmware statistics ramrod.
 */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

/* Called when this function becomes the PMF: wait for any transfer in
 * flight, pull the current port stats from the chip, then restart.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

/* Wait for any transfer in flight and start a new statistics cycle. */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

/* Accumulate the freshly DMAed BMAC (BMAC1 on E1x, BMAC2 otherwise)
 * hardware counters into the host port stats and driver eth stats.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct
	host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;	/* scratch used by the UPDATE_STAT64() macros below */

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats (BMAC2 only) */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	/* mac_stx[1] is the accumulated copy the macros updated above */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
			pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
			pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
			pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
			pstats->pfc_frames_tx_lo;
}

/* Accumulate the freshly DMAed MSTAT (E3: UMAC/XMAC) hardware counters
 * into the host port stats and driver eth stats.
 */
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	/* MSTAT has no single "over 1522" bucket: fold every size bucket
	 * above 1518 into etherstatspktsover1522octets
	 */
	ADD_64(estats->etherstatspkts1024octetsto1522octets_hi,
	       new->stats_tx.tx_gt1518_hi,
	       estats->etherstatspkts1024octetsto1522octets_lo,
	       new->stats_tx.tx_gt1518_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt2047_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt2047_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt9216_lo);


	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt16383_lo);

	/* mac_stx[1] is the accumulated copy the macros updated above */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
			pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
			pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
			pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
			pstats->pfc_frames_tx_lo;
}

/* Accumulate the freshly DMAed EMAC hardware counters into the host
 * port stats and driver eth stats.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* EMAC counts XON and XOFF pause frames separately: sum them */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

/* Process the hardware statistics just DMAed from the chip: dispatch
 * to the active MAC's update routine, fold in the NIG counters, and
 * publish the results into bp->eth_stats.  Returns 0 on success, -1
 * when no MAC is active.
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;	/* scratch used by the UPDATE_STAT64_NIG() macros below */

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	/* NIG counters are cumulative in HW: add only the delta since
	 * the previous snapshot
	 */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	/* copy the accumulated MAC stats block into eth_stats */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
		&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
		&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* are storm stats valid?
*/ 820 if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { 821 DP(BNX2X_MSG_STATS, "stats not updated by xstorm" 822 " xstorm counter (0x%x) != stats_counter (0x%x)\n", 823 le16_to_cpu(counters->xstats_counter), bp->stats_counter); 824 return -EAGAIN; 825 } 826 827 if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { 828 DP(BNX2X_MSG_STATS, "stats not updated by ustorm" 829 " ustorm counter (0x%x) != stats_counter (0x%x)\n", 830 le16_to_cpu(counters->ustats_counter), bp->stats_counter); 831 return -EAGAIN; 832 } 833 834 if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { 835 DP(BNX2X_MSG_STATS, "stats not updated by cstorm" 836 " cstorm counter (0x%x) != stats_counter (0x%x)\n", 837 le16_to_cpu(counters->cstats_counter), bp->stats_counter); 838 return -EAGAIN; 839 } 840 841 if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { 842 DP(BNX2X_MSG_STATS, "stats not updated by tstorm" 843 " tstorm counter (0x%x) != stats_counter (0x%x)\n", 844 le16_to_cpu(counters->tstats_counter), bp->stats_counter); 845 return -EAGAIN; 846 } 847 848 memcpy(&(fstats->total_bytes_received_hi), 849 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), 850 sizeof(struct host_func_stats) - 2*sizeof(u32)); 851 estats->error_bytes_received_hi = 0; 852 estats->error_bytes_received_lo = 0; 853 estats->etherstatsoverrsizepkts_hi = 0; 854 estats->etherstatsoverrsizepkts_lo = 0; 855 estats->no_buff_discard_hi = 0; 856 estats->no_buff_discard_lo = 0; 857 estats->total_tpa_aggregations_hi = 0; 858 estats->total_tpa_aggregations_lo = 0; 859 estats->total_tpa_aggregated_frames_hi = 0; 860 estats->total_tpa_aggregated_frames_lo = 0; 861 estats->total_tpa_bytes_hi = 0; 862 estats->total_tpa_bytes_lo = 0; 863 864 for_each_eth_queue(bp, i) { 865 struct bnx2x_fastpath *fp = &bp->fp[i]; 866 struct tstorm_per_queue_stats *tclient = 867 &bp->fw_stats_data->queue_stats[i]. 
				tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		/* scratch variable — presumably consumed by the
		 * UPDATE_EXTEND_* / SUB_EXTEND_* macros below (their
		 * definitions are not visible here) — TODO confirm
		 */
		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, "
				    "bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		/* Rx byte counters: latch the per-queue TSTORM values
		 * (firmware-written, little-endian) into the host-order
		 * queue stats.
		 */
		qstats->total_broadcast_bytes_received_hi =
			le32_to_cpu(tclient->rcv_bcast_bytes.hi);
		qstats->total_broadcast_bytes_received_lo =
			le32_to_cpu(tclient->rcv_bcast_bytes.lo);

		qstats->total_multicast_bytes_received_hi =
			le32_to_cpu(tclient->rcv_mcast_bytes.hi);
		qstats->total_multicast_bytes_received_lo =
			le32_to_cpu(tclient->rcv_mcast_bytes.lo);

		qstats->total_unicast_bytes_received_hi =
			le32_to_cpu(tclient->rcv_ucast_bytes.hi);
		qstats->total_unicast_bytes_received_lo =
			le32_to_cpu(tclient->rcv_ucast_bytes.lo);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
			qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
			qstats->total_bytes_received_lo;

		/* Rx packet counters: extend the 32-bit firmware counters
		 * into the 64-bit hi/lo pairs kept in qstats.
		 */
		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
				    total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
				    total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
				    total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(pkts_too_big_discard,
				    etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* Packets dropped for lack of buffers were counted as
		 * received above — subtract them back out, and account
		 * them under no_buff_discard instead.
		 */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
				 total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
				 total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
				 total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* Tx byte counters come from the XSTORM per-queue stats */
		qstats->total_broadcast_bytes_transmitted_hi =
			le32_to_cpu(xclient->bcast_bytes_sent.hi);
		qstats->total_broadcast_bytes_transmitted_lo =
			le32_to_cpu(xclient->bcast_bytes_sent.lo);

		qstats->total_multicast_bytes_transmitted_hi =
			le32_to_cpu(xclient->mcast_bytes_sent.hi);
		qstats->total_multicast_bytes_transmitted_lo =
			le32_to_cpu(xclient->mcast_bytes_sent.lo);

		qstats->total_unicast_bytes_transmitted_hi =
			le32_to_cpu(xclient->ucast_bytes_sent.hi);
		qstats->total_unicast_bytes_transmitted_lo =
			le32_to_cpu(xclient->ucast_bytes_sent.lo);
		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
			qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
			qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
				    total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
				    total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
				    total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_USTAT(coalesced_pkts,
				    total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		qstats->total_tpa_bytes_lo =
			le32_to_cpu(uclient->coalesced_bytes.lo);
		qstats->total_tpa_bytes_hi =
			le32_to_cpu(uclient->coalesced_bytes.hi);

		/* TPA stats per-function */
		ADD_64(estats->total_tpa_aggregations_hi,
		       qstats->total_tpa_aggregations_hi,
		       estats->total_tpa_aggregations_lo,
		       qstats->total_tpa_aggregations_lo);
		ADD_64(estats->total_tpa_aggregated_frames_hi,
		       qstats->total_tpa_aggregated_frames_hi,
		       estats->total_tpa_aggregated_frames_lo,
		       qstats->total_tpa_aggregated_frames_lo);
		ADD_64(estats->total_tpa_bytes_hi,
		       qstats->total_tpa_bytes_hi,
		       estats->total_tpa_bytes_lo,
		       qstats->total_tpa_bytes_lo);

		/* Fold this queue's totals into the per-function stats */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* Include MAC-level bad-octet and firmware error-byte counts in the
	 * function-wide byte total.
	 */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       fstats->total_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	/* Bulk-copy the per-function counters into the driver-wide eth
	 * stats; the size excludes two trailing u32s — presumably the
	 * host_func_stats_start/end consistency markers — TODO confirm
	 * against struct host_func_stats.
	 */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->error_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->error_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* Port-level discard counters are only collected on the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
			le32_to_cpu(tport->mac_filter_discard);
		estats->mf_tag_discard =
			le32_to_cpu(tport->mf_tag_discard);
		estats->brb_truncate_discard =
			le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* Bump the start/end markers together so readers can detect a
	 * torn snapshot.
	 */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

/* Translate the accumulated driver statistics into the generic
 * struct net_device_stats exposed to the network stack.
 * (Continues on the following chunk.)
 */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	/* rx/tx packets are the sum of the unicast, multicast and
	 * broadcast 64-bit (hi/lo) counters.
	 */
	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* rx_dropped = MAC-level discards plus the per-queue checksum
	 * discards recorded in each queue's old TSTORM snapshot.
	 */
	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i)
		tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
	nstats->rx_dropped = tmp;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	/* Map the detailed MAC/BRB counters onto the generic error
	 * categories of struct net_device_stats.
	 */
	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

/* Re-derive the driver-maintained (software) counters by summing the
 * per-queue values from scratch on every update.
 * (Continues on the following chunk.)
 */
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

/* Check whether the management firmware has requested (via the SHMEM2
 * edebug interface) that statistics collection be frozen.
 */
static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

/* Periodic statistics update: harvest completed HW (DMAE) and storm (FW
 * ramrod) statistics, refresh the netdev and driver counters, optionally
 * dump per-queue debug info, and kick off the next collection cycle.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	/* Bail out if the previous DMAE batch has not completed yet */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* A non-zero return means the FW has not answered the last stats
	 * ramrod; panic after 3 consecutive misses.
	 * NOTE(review): stats_pending is incremented here without taking
	 * bp->stats_lock, while bnx2x_storm_stats_post() modifies it under
	 * that lock — looks racy; verify against the locking scheme.
	 */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* Verbose per-queue Rx/Tx debug dump, only when timer-level
	 * messages are enabled.
	 */
	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		int i, cos;

		netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);

		for_each_eth_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			pr_debug("%s: rx usage(%4u) *rx_cons_sb(%u) rx pkt(%lu) rx calls(%lu %lu)\n",
				 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
					    fp->rx_comp_cons),
				 le16_to_cpu(*fp->rx_cons_sb),
				 bnx2x_hilo(&qstats->
					    total_unicast_packets_received_hi),
				 fp->rx_calls, fp->rx_pkt);
		}

		for_each_eth_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_fp_txdata *txdata;
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
			struct netdev_queue *txq;

			pr_debug("%s: tx pkt(%lu) (Xoff events %u)",
				 fp->name,
				 bnx2x_hilo(
				     &qstats->total_unicast_packets_transmitted_hi),
				 qstats->driver_xoff);

			/* one line per class-of-service Tx ring */
			for_each_cos_in_tx_queue(fp, cos) {
				txdata = &fp->txdata[cos];
				txq = netdev_get_tx_queue(bp->dev,
							  FP_COS_TO_TXQ(fp, cos));

				pr_debug("%d: tx avail(%4u) *tx_cons_sb(%u) tx calls (%lu) %s\n",
					 cos,
					 bnx2x_tx_avail(bp, txdata),
					 le16_to_cpu(*txdata->tx_cons_sb),
					 txdata->tx_pkt,
					 (netif_tx_queue_stopped(txq) ?
					  "Xoff" : "Xon")
					);
			}
		}
	}

	/* schedule the next HW DMAE batch and FW stats ramrod */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

/* Flush the final port (and optionally function) statistics snapshot to
 * the management FW areas via DMAE before statistics are stopped.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);

		/* If a function-stats write follows, complete to GRC so the
		 * commands chain; otherwise complete to PCI (host memory).
		 */
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

/* Stop statistics collection: wait for outstanding DMAE, perform a last
 * HW/storm update, and (on the PMF) flush the final snapshot to the FW.
 */
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

/* No-op action for state/event pairs that require nothing */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

/* Statistics state machine: indexed by [current state][event], each
 * entry names the action to run and the state to transition to.
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

/* Drive the statistics state machine: the state transition is done under
 * stats_lock, then the chosen action runs outside the lock.
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;
	if (unlikely(bp->panic))
		return;

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

/* Write the initial (zero) port statistics to the FW port-stats area and
 * wait for the DMAE to complete. PMF-only.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* Initialize the function-stats area of every vn (virtual function on
 * this port) — the PMF does this on behalf of all of them. bp->func_stx
 * is temporarily redirected to each vn's area and restored at the end.
 */
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		int mb_idx = BP_FW_MB_IDX_VN(bp, vn);

		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

/* Read the current function statistics back from the FW area into
 * func_stats_base via DMAE (non-PMF path) and wait for completion.
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/**
 * This function will prepare the statistics ramrod data the way
 * we will only have to increment the statistics counter and
 * send the ramrod each time we have to.
 *
 * @param bp	driver handle
 */
static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare to the first stats ramrod (will be completed with
	 * the counters equal to zero) - init counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* first queue query index depends whether FCoE offloaded request will
	 * be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	/* one query entry per ethernet queue, each pointing at its own
	 * slot in the per-queue stats array
	 */
	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}

/* One-time statistics initialization: read the FW stats area addresses,
 * snapshot the NIG baseline counters, zero all per-queue and netdev
 * counters, prepare the FW stats ramrod, and write the initial base
 * statistics. (Continues on the following chunk.)
 */
void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* NOTE(review): port already holds BP_PORT(bp) from its
	 * initializer above — this re-assignment looks redundant.
	 */
	port = BP_PORT(bp);
	/* port stats */
	/* Snapshot the NIG discard/truncate counters so later updates can
	 * compute deltas against this baseline.
	 */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	/* the egress MAC packet counters are not present on E3 chips */
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* clear the old storm snapshots and the queue counters */
		memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
		memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
		memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
		memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		/* the PMF initializes the shared base statistics areas */
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		/* non-PMF functions only read back their own base stats */
		bnx2x_func_stats_base_update(bp);
}