1 /* bnx2x_stats.c: Broadcom Everest network driver. 2 * 3 * Copyright (c) 2007-2012 Broadcom Corporation 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation. 8 * 9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 10 * Written by: Eliezer Tamir 11 * Based on code from Michael Chan's bnx2 driver 12 * UDP CSUM errata workaround by Arik Gendelman 13 * Slowpath and fastpath rework by Vladislav Zolotarov 14 * Statistics and Link management by Yitchak Gertner 15 * 16 */ 17 18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19 20 #include "bnx2x_stats.h" 21 #include "bnx2x_cmn.h" 22 23 24 /* Statistics */ 25 26 /* 27 * General service functions 28 */ 29 30 static inline long bnx2x_hilo(u32 *hiref) 31 { 32 u32 lo = *(hiref + 1); 33 #if (BITS_PER_LONG == 64) 34 u32 hi = *hiref; 35 36 return HILO_U64(hi, lo); 37 #else 38 return lo; 39 #endif 40 } 41 42 static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) 43 { 44 u16 res = 0; 45 46 /* 'newest' convention - shmem2 cotains the size of the port stats */ 47 if (SHMEM2_HAS(bp, sizeof_port_stats)) { 48 u32 size = SHMEM2_RD(bp, sizeof_port_stats); 49 if (size) 50 res = size; 51 52 /* prevent newer BC from causing buffer overflow */ 53 if (res > sizeof(struct host_port_stats)) 54 res = sizeof(struct host_port_stats); 55 } 56 57 /* Older convention - all BCs support the port stats' fields up until 58 * the 'not_used' field 59 */ 60 if (!res) { 61 res = offsetof(struct host_port_stats, not_used) + 4; 62 63 /* if PFC stats are supported by the MFW, DMA them as well */ 64 if (bp->flags & BC_SUPPORTS_PFC_STATS) { 65 res += offsetof(struct host_port_stats, 66 pfc_frames_rx_lo) - 67 offsetof(struct host_port_stats, 68 pfc_frames_tx_hi) + 4 ; 69 } 70 } 71 72 res >>= 2; 73 74 WARN_ON(res > 2 * DMAE_LEN32_RD_MAX); 75 return res; 76 } 77 78 /* 79 * Init service functions 80 */ 81 82 /* Post the next 
statistics ramrod. Protect it with the spin in 83 * order to ensure the strict order between statistics ramrods 84 * (each ramrod has a sequence number passed in a 85 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be 86 * sent in order). 87 */ 88 static void bnx2x_storm_stats_post(struct bnx2x *bp) 89 { 90 if (!bp->stats_pending) { 91 int rc; 92 93 spin_lock_bh(&bp->stats_lock); 94 95 if (bp->stats_pending) { 96 spin_unlock_bh(&bp->stats_lock); 97 return; 98 } 99 100 bp->fw_stats_req->hdr.drv_stats_counter = 101 cpu_to_le16(bp->stats_counter++); 102 103 DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", 104 bp->fw_stats_req->hdr.drv_stats_counter); 105 106 107 108 /* send FW stats ramrod */ 109 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, 110 U64_HI(bp->fw_stats_req_mapping), 111 U64_LO(bp->fw_stats_req_mapping), 112 NONE_CONNECTION_TYPE); 113 if (rc == 0) 114 bp->stats_pending = 1; 115 116 spin_unlock_bh(&bp->stats_lock); 117 } 118 } 119 120 static void bnx2x_hw_stats_post(struct bnx2x *bp) 121 { 122 struct dmae_command *dmae = &bp->stats_dmae; 123 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 124 125 *stats_comp = DMAE_COMP_VAL; 126 if (CHIP_REV_IS_SLOW(bp)) 127 return; 128 129 /* Update MCP's statistics if possible */ 130 if (bp->func_stx) 131 memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, 132 sizeof(bp->func_stats)); 133 134 /* loader */ 135 if (bp->executer_idx) { 136 int loader_idx = PMF_DMAE_C(bp); 137 u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, 138 true, DMAE_COMP_GRC); 139 opcode = bnx2x_dmae_opcode_clr_src_reset(opcode); 140 141 memset(dmae, 0, sizeof(struct dmae_command)); 142 dmae->opcode = opcode; 143 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); 144 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); 145 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + 146 sizeof(struct dmae_command) * 147 (loader_idx + 1)) >> 2; 148 dmae->dst_addr_hi = 0; 149 dmae->len = sizeof(struct dmae_command) >> 2; 150 
if (CHIP_IS_E1(bp)) 151 dmae->len--; 152 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; 153 dmae->comp_addr_hi = 0; 154 dmae->comp_val = 1; 155 156 *stats_comp = 0; 157 bnx2x_post_dmae(bp, dmae, loader_idx); 158 159 } else if (bp->func_stx) { 160 *stats_comp = 0; 161 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); 162 } 163 } 164 165 static int bnx2x_stats_comp(struct bnx2x *bp) 166 { 167 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 168 int cnt = 10; 169 170 might_sleep(); 171 while (*stats_comp != DMAE_COMP_VAL) { 172 if (!cnt) { 173 BNX2X_ERR("timeout waiting for stats finished\n"); 174 break; 175 } 176 cnt--; 177 usleep_range(1000, 1000); 178 } 179 return 1; 180 } 181 182 /* 183 * Statistics service functions 184 */ 185 186 static void bnx2x_stats_pmf_update(struct bnx2x *bp) 187 { 188 struct dmae_command *dmae; 189 u32 opcode; 190 int loader_idx = PMF_DMAE_C(bp); 191 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 192 193 /* sanity */ 194 if (!bp->port.pmf || !bp->port.port_stx) { 195 BNX2X_ERR("BUG!\n"); 196 return; 197 } 198 199 bp->executer_idx = 0; 200 201 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0); 202 203 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 204 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); 205 dmae->src_addr_lo = bp->port.port_stx >> 2; 206 dmae->src_addr_hi = 0; 207 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 208 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); 209 dmae->len = DMAE_LEN32_RD_MAX; 210 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 211 dmae->comp_addr_hi = 0; 212 dmae->comp_val = 1; 213 214 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 215 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); 216 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; 217 dmae->src_addr_hi = 0; 218 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + 219 DMAE_LEN32_RD_MAX * 4); 220 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, 
port_stats) + 221 DMAE_LEN32_RD_MAX * 4); 222 dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX; 223 224 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); 225 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); 226 dmae->comp_val = DMAE_COMP_VAL; 227 228 *stats_comp = 0; 229 bnx2x_hw_stats_post(bp); 230 bnx2x_stats_comp(bp); 231 } 232 233 static void bnx2x_port_stats_init(struct bnx2x *bp) 234 { 235 struct dmae_command *dmae; 236 int port = BP_PORT(bp); 237 u32 opcode; 238 int loader_idx = PMF_DMAE_C(bp); 239 u32 mac_addr; 240 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 241 242 /* sanity */ 243 if (!bp->link_vars.link_up || !bp->port.pmf) { 244 BNX2X_ERR("BUG!\n"); 245 return; 246 } 247 248 bp->executer_idx = 0; 249 250 /* MCP */ 251 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, 252 true, DMAE_COMP_GRC); 253 254 if (bp->port.port_stx) { 255 256 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 257 dmae->opcode = opcode; 258 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 259 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); 260 dmae->dst_addr_lo = bp->port.port_stx >> 2; 261 dmae->dst_addr_hi = 0; 262 dmae->len = bnx2x_get_port_stats_dma_len(bp); 263 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 264 dmae->comp_addr_hi = 0; 265 dmae->comp_val = 1; 266 } 267 268 if (bp->func_stx) { 269 270 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 271 dmae->opcode = opcode; 272 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); 273 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); 274 dmae->dst_addr_lo = bp->func_stx >> 2; 275 dmae->dst_addr_hi = 0; 276 dmae->len = sizeof(struct host_func_stats) >> 2; 277 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 278 dmae->comp_addr_hi = 0; 279 dmae->comp_val = 1; 280 } 281 282 /* MAC */ 283 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, 284 true, DMAE_COMP_GRC); 285 286 /* EMAC is special */ 287 if 
(bp->link_vars.mac_type == MAC_TYPE_EMAC) { 288 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0); 289 290 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ 291 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 292 dmae->opcode = opcode; 293 dmae->src_addr_lo = (mac_addr + 294 EMAC_REG_EMAC_RX_STAT_AC) >> 2; 295 dmae->src_addr_hi = 0; 296 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); 297 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); 298 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; 299 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 300 dmae->comp_addr_hi = 0; 301 dmae->comp_val = 1; 302 303 /* EMAC_REG_EMAC_RX_STAT_AC_28 */ 304 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 305 dmae->opcode = opcode; 306 dmae->src_addr_lo = (mac_addr + 307 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2; 308 dmae->src_addr_hi = 0; 309 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + 310 offsetof(struct emac_stats, rx_stat_falsecarriererrors)); 311 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + 312 offsetof(struct emac_stats, rx_stat_falsecarriererrors)); 313 dmae->len = 1; 314 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 315 dmae->comp_addr_hi = 0; 316 dmae->comp_val = 1; 317 318 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ 319 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 320 dmae->opcode = opcode; 321 dmae->src_addr_lo = (mac_addr + 322 EMAC_REG_EMAC_TX_STAT_AC) >> 2; 323 dmae->src_addr_hi = 0; 324 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + 325 offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); 326 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + 327 offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); 328 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; 329 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 330 dmae->comp_addr_hi = 0; 331 dmae->comp_val = 1; 332 } else { 333 u32 tx_src_addr_lo, rx_src_addr_lo; 334 u16 rx_len, tx_len; 335 336 /* configure the params 
according to MAC type */ 337 switch (bp->link_vars.mac_type) { 338 case MAC_TYPE_BMAC: 339 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : 340 NIG_REG_INGRESS_BMAC0_MEM); 341 342 /* BIGMAC_REGISTER_TX_STAT_GTPKT .. 343 BIGMAC_REGISTER_TX_STAT_GTBYT */ 344 if (CHIP_IS_E1x(bp)) { 345 tx_src_addr_lo = (mac_addr + 346 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; 347 tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - 348 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; 349 rx_src_addr_lo = (mac_addr + 350 BIGMAC_REGISTER_RX_STAT_GR64) >> 2; 351 rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - 352 BIGMAC_REGISTER_RX_STAT_GR64) >> 2; 353 } else { 354 tx_src_addr_lo = (mac_addr + 355 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; 356 tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - 357 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; 358 rx_src_addr_lo = (mac_addr + 359 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; 360 rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - 361 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; 362 } 363 break; 364 365 case MAC_TYPE_UMAC: /* handled by MSTAT */ 366 case MAC_TYPE_XMAC: /* handled by MSTAT */ 367 default: 368 mac_addr = port ? 
GRCBASE_MSTAT1 : GRCBASE_MSTAT0; 369 tx_src_addr_lo = (mac_addr + 370 MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2; 371 rx_src_addr_lo = (mac_addr + 372 MSTAT_REG_RX_STAT_GR64_LO) >> 2; 373 tx_len = sizeof(bp->slowpath-> 374 mac_stats.mstat_stats.stats_tx) >> 2; 375 rx_len = sizeof(bp->slowpath-> 376 mac_stats.mstat_stats.stats_rx) >> 2; 377 break; 378 } 379 380 /* TX stats */ 381 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 382 dmae->opcode = opcode; 383 dmae->src_addr_lo = tx_src_addr_lo; 384 dmae->src_addr_hi = 0; 385 dmae->len = tx_len; 386 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); 387 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); 388 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 389 dmae->comp_addr_hi = 0; 390 dmae->comp_val = 1; 391 392 /* RX stats */ 393 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 394 dmae->opcode = opcode; 395 dmae->src_addr_hi = 0; 396 dmae->src_addr_lo = rx_src_addr_lo; 397 dmae->dst_addr_lo = 398 U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); 399 dmae->dst_addr_hi = 400 U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); 401 dmae->len = rx_len; 402 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 403 dmae->comp_addr_hi = 0; 404 dmae->comp_val = 1; 405 } 406 407 /* NIG */ 408 if (!CHIP_IS_E3(bp)) { 409 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 410 dmae->opcode = opcode; 411 dmae->src_addr_lo = (port ? 
NIG_REG_STAT1_EGRESS_MAC_PKT0 : 412 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; 413 dmae->src_addr_hi = 0; 414 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + 415 offsetof(struct nig_stats, egress_mac_pkt0_lo)); 416 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + 417 offsetof(struct nig_stats, egress_mac_pkt0_lo)); 418 dmae->len = (2*sizeof(u32)) >> 2; 419 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 420 dmae->comp_addr_hi = 0; 421 dmae->comp_val = 1; 422 423 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 424 dmae->opcode = opcode; 425 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : 426 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; 427 dmae->src_addr_hi = 0; 428 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + 429 offsetof(struct nig_stats, egress_mac_pkt1_lo)); 430 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + 431 offsetof(struct nig_stats, egress_mac_pkt1_lo)); 432 dmae->len = (2*sizeof(u32)) >> 2; 433 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 434 dmae->comp_addr_hi = 0; 435 dmae->comp_val = 1; 436 } 437 438 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 439 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, 440 true, DMAE_COMP_PCI); 441 dmae->src_addr_lo = (port ? 
NIG_REG_STAT1_BRB_DISCARD : 442 NIG_REG_STAT0_BRB_DISCARD) >> 2; 443 dmae->src_addr_hi = 0; 444 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); 445 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); 446 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; 447 448 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); 449 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); 450 dmae->comp_val = DMAE_COMP_VAL; 451 452 *stats_comp = 0; 453 } 454 455 static void bnx2x_func_stats_init(struct bnx2x *bp) 456 { 457 struct dmae_command *dmae = &bp->stats_dmae; 458 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 459 460 /* sanity */ 461 if (!bp->func_stx) { 462 BNX2X_ERR("BUG!\n"); 463 return; 464 } 465 466 bp->executer_idx = 0; 467 memset(dmae, 0, sizeof(struct dmae_command)); 468 469 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, 470 true, DMAE_COMP_PCI); 471 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); 472 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); 473 dmae->dst_addr_lo = bp->func_stx >> 2; 474 dmae->dst_addr_hi = 0; 475 dmae->len = sizeof(struct host_func_stats) >> 2; 476 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); 477 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); 478 dmae->comp_val = DMAE_COMP_VAL; 479 480 *stats_comp = 0; 481 } 482 483 static void bnx2x_stats_start(struct bnx2x *bp) 484 { 485 if (bp->port.pmf) 486 bnx2x_port_stats_init(bp); 487 488 else if (bp->func_stx) 489 bnx2x_func_stats_init(bp); 490 491 bnx2x_hw_stats_post(bp); 492 bnx2x_storm_stats_post(bp); 493 } 494 495 static void bnx2x_stats_pmf_start(struct bnx2x *bp) 496 { 497 bnx2x_stats_comp(bp); 498 bnx2x_stats_pmf_update(bp); 499 bnx2x_stats_start(bp); 500 } 501 502 static void bnx2x_stats_restart(struct bnx2x *bp) 503 { 504 bnx2x_stats_comp(bp); 505 bnx2x_stats_start(bp); 506 } 507 508 static void bnx2x_bmac_stats_update(struct bnx2x *bp) 509 { 510 struct 
host_port_stats *pstats = bnx2x_sp(bp, port_stats); 511 struct bnx2x_eth_stats *estats = &bp->eth_stats; 512 struct { 513 u32 lo; 514 u32 hi; 515 } diff; 516 517 if (CHIP_IS_E1x(bp)) { 518 struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats); 519 520 /* the macros below will use "bmac1_stats" type */ 521 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); 522 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); 523 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); 524 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); 525 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); 526 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); 527 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); 528 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); 529 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); 530 531 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); 532 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); 533 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); 534 UPDATE_STAT64(tx_stat_gt127, 535 tx_stat_etherstatspkts65octetsto127octets); 536 UPDATE_STAT64(tx_stat_gt255, 537 tx_stat_etherstatspkts128octetsto255octets); 538 UPDATE_STAT64(tx_stat_gt511, 539 tx_stat_etherstatspkts256octetsto511octets); 540 UPDATE_STAT64(tx_stat_gt1023, 541 tx_stat_etherstatspkts512octetsto1023octets); 542 UPDATE_STAT64(tx_stat_gt1518, 543 tx_stat_etherstatspkts1024octetsto1522octets); 544 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); 545 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); 546 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); 547 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); 548 UPDATE_STAT64(tx_stat_gterr, 549 tx_stat_dot3statsinternalmactransmiterrors); 550 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); 551 552 } else { 553 struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats); 554 555 /* the macros below will use "bmac2_stats" type */ 556 UPDATE_STAT64(rx_stat_grerb, 
rx_stat_ifhcinbadoctets); 557 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); 558 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); 559 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); 560 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); 561 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); 562 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); 563 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); 564 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); 565 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); 566 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); 567 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); 568 UPDATE_STAT64(tx_stat_gt127, 569 tx_stat_etherstatspkts65octetsto127octets); 570 UPDATE_STAT64(tx_stat_gt255, 571 tx_stat_etherstatspkts128octetsto255octets); 572 UPDATE_STAT64(tx_stat_gt511, 573 tx_stat_etherstatspkts256octetsto511octets); 574 UPDATE_STAT64(tx_stat_gt1023, 575 tx_stat_etherstatspkts512octetsto1023octets); 576 UPDATE_STAT64(tx_stat_gt1518, 577 tx_stat_etherstatspkts1024octetsto1522octets); 578 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); 579 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); 580 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); 581 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); 582 UPDATE_STAT64(tx_stat_gterr, 583 tx_stat_dot3statsinternalmactransmiterrors); 584 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); 585 586 /* collect PFC stats */ 587 pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi; 588 pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo; 589 590 pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi; 591 pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo; 592 } 593 594 estats->pause_frames_received_hi = 595 pstats->mac_stx[1].rx_stat_mac_xpf_hi; 596 estats->pause_frames_received_lo = 597 pstats->mac_stx[1].rx_stat_mac_xpf_lo; 598 599 estats->pause_frames_sent_hi = 600 pstats->mac_stx[1].tx_stat_outxoffsent_hi; 601 
estats->pause_frames_sent_lo = 602 pstats->mac_stx[1].tx_stat_outxoffsent_lo; 603 604 estats->pfc_frames_received_hi = 605 pstats->pfc_frames_rx_hi; 606 estats->pfc_frames_received_lo = 607 pstats->pfc_frames_rx_lo; 608 estats->pfc_frames_sent_hi = 609 pstats->pfc_frames_tx_hi; 610 estats->pfc_frames_sent_lo = 611 pstats->pfc_frames_tx_lo; 612 } 613 614 static void bnx2x_mstat_stats_update(struct bnx2x *bp) 615 { 616 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); 617 struct bnx2x_eth_stats *estats = &bp->eth_stats; 618 619 struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats); 620 621 ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets); 622 ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors); 623 ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts); 624 ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong); 625 ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments); 626 ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived); 627 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered); 628 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf); 629 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); 630 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); 631 632 /* collect pfc stats */ 633 ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi, 634 pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo); 635 ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi, 636 pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo); 637 638 ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); 639 ADD_STAT64(stats_tx.tx_gt127, 640 tx_stat_etherstatspkts65octetsto127octets); 641 ADD_STAT64(stats_tx.tx_gt255, 642 tx_stat_etherstatspkts128octetsto255octets); 643 ADD_STAT64(stats_tx.tx_gt511, 644 tx_stat_etherstatspkts256octetsto511octets); 645 ADD_STAT64(stats_tx.tx_gt1023, 646 tx_stat_etherstatspkts512octetsto1023octets); 647 ADD_STAT64(stats_tx.tx_gt1518, 648 tx_stat_etherstatspkts1024octetsto1522octets); 649 
ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047); 650 651 ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095); 652 ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216); 653 ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383); 654 655 ADD_STAT64(stats_tx.tx_gterr, 656 tx_stat_dot3statsinternalmactransmiterrors); 657 ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); 658 659 estats->etherstatspkts1024octetsto1522octets_hi = 660 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi; 661 estats->etherstatspkts1024octetsto1522octets_lo = 662 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo; 663 664 estats->etherstatspktsover1522octets_hi = 665 pstats->mac_stx[1].tx_stat_mac_2047_hi; 666 estats->etherstatspktsover1522octets_lo = 667 pstats->mac_stx[1].tx_stat_mac_2047_lo; 668 669 ADD_64(estats->etherstatspktsover1522octets_hi, 670 pstats->mac_stx[1].tx_stat_mac_4095_hi, 671 estats->etherstatspktsover1522octets_lo, 672 pstats->mac_stx[1].tx_stat_mac_4095_lo); 673 674 ADD_64(estats->etherstatspktsover1522octets_hi, 675 pstats->mac_stx[1].tx_stat_mac_9216_hi, 676 estats->etherstatspktsover1522octets_lo, 677 pstats->mac_stx[1].tx_stat_mac_9216_lo); 678 679 ADD_64(estats->etherstatspktsover1522octets_hi, 680 pstats->mac_stx[1].tx_stat_mac_16383_hi, 681 estats->etherstatspktsover1522octets_lo, 682 pstats->mac_stx[1].tx_stat_mac_16383_lo); 683 684 estats->pause_frames_received_hi = 685 pstats->mac_stx[1].rx_stat_mac_xpf_hi; 686 estats->pause_frames_received_lo = 687 pstats->mac_stx[1].rx_stat_mac_xpf_lo; 688 689 estats->pause_frames_sent_hi = 690 pstats->mac_stx[1].tx_stat_outxoffsent_hi; 691 estats->pause_frames_sent_lo = 692 pstats->mac_stx[1].tx_stat_outxoffsent_lo; 693 694 estats->pfc_frames_received_hi = 695 pstats->pfc_frames_rx_hi; 696 estats->pfc_frames_received_lo = 697 pstats->pfc_frames_rx_lo; 698 estats->pfc_frames_sent_hi = 699 pstats->pfc_frames_tx_hi; 700 estats->pfc_frames_sent_lo = 701 pstats->pfc_frames_tx_lo; 702 } 703 704 static void 
bnx2x_emac_stats_update(struct bnx2x *bp) 705 { 706 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats); 707 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); 708 struct bnx2x_eth_stats *estats = &bp->eth_stats; 709 710 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); 711 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); 712 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); 713 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); 714 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); 715 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); 716 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); 717 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); 718 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); 719 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); 720 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); 721 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); 722 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); 723 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); 724 UPDATE_EXTEND_STAT(tx_stat_outxonsent); 725 UPDATE_EXTEND_STAT(tx_stat_outxoffsent); 726 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); 727 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); 728 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); 729 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); 730 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); 731 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); 732 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); 733 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); 734 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); 735 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); 736 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); 737 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); 738 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); 739 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); 740 
UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); 741 742 estats->pause_frames_received_hi = 743 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; 744 estats->pause_frames_received_lo = 745 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; 746 ADD_64(estats->pause_frames_received_hi, 747 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, 748 estats->pause_frames_received_lo, 749 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); 750 751 estats->pause_frames_sent_hi = 752 pstats->mac_stx[1].tx_stat_outxonsent_hi; 753 estats->pause_frames_sent_lo = 754 pstats->mac_stx[1].tx_stat_outxonsent_lo; 755 ADD_64(estats->pause_frames_sent_hi, 756 pstats->mac_stx[1].tx_stat_outxoffsent_hi, 757 estats->pause_frames_sent_lo, 758 pstats->mac_stx[1].tx_stat_outxoffsent_lo); 759 } 760 761 static int bnx2x_hw_stats_update(struct bnx2x *bp) 762 { 763 struct nig_stats *new = bnx2x_sp(bp, nig_stats); 764 struct nig_stats *old = &(bp->port.old_nig_stats); 765 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); 766 struct bnx2x_eth_stats *estats = &bp->eth_stats; 767 struct { 768 u32 lo; 769 u32 hi; 770 } diff; 771 772 switch (bp->link_vars.mac_type) { 773 case MAC_TYPE_BMAC: 774 bnx2x_bmac_stats_update(bp); 775 break; 776 777 case MAC_TYPE_EMAC: 778 bnx2x_emac_stats_update(bp); 779 break; 780 781 case MAC_TYPE_UMAC: 782 case MAC_TYPE_XMAC: 783 bnx2x_mstat_stats_update(bp); 784 break; 785 786 case MAC_TYPE_NONE: /* unreached */ 787 DP(BNX2X_MSG_STATS, 788 "stats updated by DMAE but no MAC active\n"); 789 return -1; 790 791 default: /* unreached */ 792 BNX2X_ERR("Unknown MAC type\n"); 793 } 794 795 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, 796 new->brb_discard - old->brb_discard); 797 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, 798 new->brb_truncate - old->brb_truncate); 799 800 if (!CHIP_IS_E3(bp)) { 801 UPDATE_STAT64_NIG(egress_mac_pkt0, 802 etherstatspkts1024octetsto1522octets); 803 
UPDATE_STAT64_NIG(egress_mac_pkt1, 804 etherstatspktsover1522octets); 805 } 806 807 memcpy(old, new, sizeof(struct nig_stats)); 808 809 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), 810 sizeof(struct mac_stx)); 811 estats->brb_drop_hi = pstats->brb_drop_hi; 812 estats->brb_drop_lo = pstats->brb_drop_lo; 813 814 pstats->host_port_stats_counter++; 815 816 if (CHIP_IS_E3(bp)) { 817 u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1 818 : MISC_REG_CPMU_LP_SM_ENT_CNT_P0; 819 estats->eee_tx_lpi += REG_RD(bp, lpi_reg); 820 } 821 822 if (!BP_NOMCP(bp)) { 823 u32 nig_timer_max = 824 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); 825 if (nig_timer_max != estats->nig_timer_max) { 826 estats->nig_timer_max = nig_timer_max; 827 BNX2X_ERR("NIG timer max (%u)\n", 828 estats->nig_timer_max); 829 } 830 } 831 832 return 0; 833 } 834 835 static int bnx2x_storm_stats_update(struct bnx2x *bp) 836 { 837 struct tstorm_per_port_stats *tport = 838 &bp->fw_stats_data->port.tstorm_port_statistics; 839 struct tstorm_per_pf_stats *tfunc = 840 &bp->fw_stats_data->pf.tstorm_pf_statistics; 841 struct host_func_stats *fstats = &bp->func_stats; 842 struct bnx2x_eth_stats *estats = &bp->eth_stats; 843 struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old; 844 struct stats_counter *counters = &bp->fw_stats_data->storm_counters; 845 int i; 846 u16 cur_stats_counter; 847 848 /* Make sure we use the value of the counter 849 * used for sending the last stats ramrod. 850 */ 851 spin_lock_bh(&bp->stats_lock); 852 cur_stats_counter = bp->stats_counter - 1; 853 spin_unlock_bh(&bp->stats_lock); 854 855 /* are storm stats valid? 
*/ 856 if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { 857 DP(BNX2X_MSG_STATS, 858 "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n", 859 le16_to_cpu(counters->xstats_counter), bp->stats_counter); 860 return -EAGAIN; 861 } 862 863 if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { 864 DP(BNX2X_MSG_STATS, 865 "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n", 866 le16_to_cpu(counters->ustats_counter), bp->stats_counter); 867 return -EAGAIN; 868 } 869 870 if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { 871 DP(BNX2X_MSG_STATS, 872 "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n", 873 le16_to_cpu(counters->cstats_counter), bp->stats_counter); 874 return -EAGAIN; 875 } 876 877 if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { 878 DP(BNX2X_MSG_STATS, 879 "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n", 880 le16_to_cpu(counters->tstats_counter), bp->stats_counter); 881 return -EAGAIN; 882 } 883 884 estats->error_bytes_received_hi = 0; 885 estats->error_bytes_received_lo = 0; 886 887 for_each_eth_queue(bp, i) { 888 struct bnx2x_fastpath *fp = &bp->fp[i]; 889 struct tstorm_per_queue_stats *tclient = 890 &bp->fw_stats_data->queue_stats[i]. 891 tstorm_queue_statistics; 892 struct tstorm_per_queue_stats *old_tclient = 893 &bnx2x_fp_stats(bp, fp)->old_tclient; 894 struct ustorm_per_queue_stats *uclient = 895 &bp->fw_stats_data->queue_stats[i]. 896 ustorm_queue_statistics; 897 struct ustorm_per_queue_stats *old_uclient = 898 &bnx2x_fp_stats(bp, fp)->old_uclient; 899 struct xstorm_per_queue_stats *xclient = 900 &bp->fw_stats_data->queue_stats[i]. 
901 xstorm_queue_statistics; 902 struct xstorm_per_queue_stats *old_xclient = 903 &bnx2x_fp_stats(bp, fp)->old_xclient; 904 struct bnx2x_eth_q_stats *qstats = 905 &bnx2x_fp_stats(bp, fp)->eth_q_stats; 906 struct bnx2x_eth_q_stats_old *qstats_old = 907 &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; 908 909 u32 diff; 910 911 DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n", 912 i, xclient->ucast_pkts_sent, 913 xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); 914 915 DP(BNX2X_MSG_STATS, "---------------\n"); 916 917 UPDATE_QSTAT(tclient->rcv_bcast_bytes, 918 total_broadcast_bytes_received); 919 UPDATE_QSTAT(tclient->rcv_mcast_bytes, 920 total_multicast_bytes_received); 921 UPDATE_QSTAT(tclient->rcv_ucast_bytes, 922 total_unicast_bytes_received); 923 924 /* 925 * sum to total_bytes_received all 926 * unicast/multicast/broadcast 927 */ 928 qstats->total_bytes_received_hi = 929 qstats->total_broadcast_bytes_received_hi; 930 qstats->total_bytes_received_lo = 931 qstats->total_broadcast_bytes_received_lo; 932 933 ADD_64(qstats->total_bytes_received_hi, 934 qstats->total_multicast_bytes_received_hi, 935 qstats->total_bytes_received_lo, 936 qstats->total_multicast_bytes_received_lo); 937 938 ADD_64(qstats->total_bytes_received_hi, 939 qstats->total_unicast_bytes_received_hi, 940 qstats->total_bytes_received_lo, 941 qstats->total_unicast_bytes_received_lo); 942 943 qstats->valid_bytes_received_hi = 944 qstats->total_bytes_received_hi; 945 qstats->valid_bytes_received_lo = 946 qstats->total_bytes_received_lo; 947 948 949 UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, 950 total_unicast_packets_received); 951 UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, 952 total_multicast_packets_received); 953 UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, 954 total_broadcast_packets_received); 955 UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard, 956 etherstatsoverrsizepkts); 957 UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard); 958 959 SUB_EXTEND_USTAT(ucast_no_buff_pkts, 960 
total_unicast_packets_received); 961 SUB_EXTEND_USTAT(mcast_no_buff_pkts, 962 total_multicast_packets_received); 963 SUB_EXTEND_USTAT(bcast_no_buff_pkts, 964 total_broadcast_packets_received); 965 UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard); 966 UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard); 967 UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard); 968 969 UPDATE_QSTAT(xclient->bcast_bytes_sent, 970 total_broadcast_bytes_transmitted); 971 UPDATE_QSTAT(xclient->mcast_bytes_sent, 972 total_multicast_bytes_transmitted); 973 UPDATE_QSTAT(xclient->ucast_bytes_sent, 974 total_unicast_bytes_transmitted); 975 976 /* 977 * sum to total_bytes_transmitted all 978 * unicast/multicast/broadcast 979 */ 980 qstats->total_bytes_transmitted_hi = 981 qstats->total_unicast_bytes_transmitted_hi; 982 qstats->total_bytes_transmitted_lo = 983 qstats->total_unicast_bytes_transmitted_lo; 984 985 ADD_64(qstats->total_bytes_transmitted_hi, 986 qstats->total_broadcast_bytes_transmitted_hi, 987 qstats->total_bytes_transmitted_lo, 988 qstats->total_broadcast_bytes_transmitted_lo); 989 990 ADD_64(qstats->total_bytes_transmitted_hi, 991 qstats->total_multicast_bytes_transmitted_hi, 992 qstats->total_bytes_transmitted_lo, 993 qstats->total_multicast_bytes_transmitted_lo); 994 995 UPDATE_EXTEND_XSTAT(ucast_pkts_sent, 996 total_unicast_packets_transmitted); 997 UPDATE_EXTEND_XSTAT(mcast_pkts_sent, 998 total_multicast_packets_transmitted); 999 UPDATE_EXTEND_XSTAT(bcast_pkts_sent, 1000 total_broadcast_packets_transmitted); 1001 1002 UPDATE_EXTEND_TSTAT(checksum_discard, 1003 total_packets_received_checksum_discarded); 1004 UPDATE_EXTEND_TSTAT(ttl0_discard, 1005 total_packets_received_ttl0_discarded); 1006 1007 UPDATE_EXTEND_XSTAT(error_drop_pkts, 1008 total_transmitted_dropped_packets_error); 1009 1010 /* TPA aggregations completed */ 1011 UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations); 1012 /* Number of network frames aggregated by TPA */ 1013 
UPDATE_EXTEND_E_USTAT(coalesced_pkts, 1014 total_tpa_aggregated_frames); 1015 /* Total number of bytes in completed TPA aggregations */ 1016 UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes); 1017 1018 UPDATE_ESTAT_QSTAT_64(total_tpa_bytes); 1019 1020 UPDATE_FSTAT_QSTAT(total_bytes_received); 1021 UPDATE_FSTAT_QSTAT(total_bytes_transmitted); 1022 UPDATE_FSTAT_QSTAT(total_unicast_packets_received); 1023 UPDATE_FSTAT_QSTAT(total_multicast_packets_received); 1024 UPDATE_FSTAT_QSTAT(total_broadcast_packets_received); 1025 UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted); 1026 UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted); 1027 UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted); 1028 UPDATE_FSTAT_QSTAT(valid_bytes_received); 1029 } 1030 1031 ADD_64(estats->total_bytes_received_hi, 1032 estats->rx_stat_ifhcinbadoctets_hi, 1033 estats->total_bytes_received_lo, 1034 estats->rx_stat_ifhcinbadoctets_lo); 1035 1036 ADD_64(estats->total_bytes_received_hi, 1037 le32_to_cpu(tfunc->rcv_error_bytes.hi), 1038 estats->total_bytes_received_lo, 1039 le32_to_cpu(tfunc->rcv_error_bytes.lo)); 1040 1041 ADD_64(estats->error_bytes_received_hi, 1042 le32_to_cpu(tfunc->rcv_error_bytes.hi), 1043 estats->error_bytes_received_lo, 1044 le32_to_cpu(tfunc->rcv_error_bytes.lo)); 1045 1046 UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong); 1047 1048 ADD_64(estats->error_bytes_received_hi, 1049 estats->rx_stat_ifhcinbadoctets_hi, 1050 estats->error_bytes_received_lo, 1051 estats->rx_stat_ifhcinbadoctets_lo); 1052 1053 if (bp->port.pmf) { 1054 struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old; 1055 UPDATE_FW_STAT(mac_filter_discard); 1056 UPDATE_FW_STAT(mf_tag_discard); 1057 UPDATE_FW_STAT(brb_truncate_discard); 1058 UPDATE_FW_STAT(mac_discard); 1059 } 1060 1061 fstats->host_func_stats_start = ++fstats->host_func_stats_end; 1062 1063 bp->stats_pending = 0; 1064 1065 return 0; 1066 } 1067 1068 static void bnx2x_net_stats_update(struct bnx2x *bp) 
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	/* Total packet counts are the sums of the per-type
	 * (unicast/multicast/broadcast) 64-bit counters kept in eth_stats.
	 */
	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* rx_dropped accumulates MAC discards plus the per-queue checksum
	 * discards, on top of the value saved before the last unload
	 * (net_stats_old).
	 */
	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i) {
		struct tstorm_per_queue_stats *old_tclient =
			&bp->fp_stats[i].old_tclient;
		tmp += le32_to_cpu(old_tclient->checksum_discard);
	}
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

/* Fold the driver-maintained per-queue counters (xoff events, discards,
 * skb allocation failures, HW checksum errors) into the global eth_stats.
 */
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
		UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt);
	}
}

/* Return true when the debug interface in shmem2 requests that
 * statistics collection be stopped.
 */
static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

/* Periodic statistics update: harvest HW statistics (PMF only) and
 * FW (storm) statistics, refresh the netdev/driver counters, then
 * re-post the DMAE commands and statistics ramrod for the next cycle.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	/* previous DMAE pass has not completed yet - try again later */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp)) {
		/* FW failed to deliver storm statistics for several
		 * consecutive periods - treat as fatal.
		 */
		if (bp->stats_pending++ == 3) {
			BNX2X_ERR("storm stats were not updated for 3 times\n");
			bnx2x_panic();
		}
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	/* kick off the next HW DMAE pass and FW statistics ramrod */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

/* Flush the latest port (and, if present, function) statistics to the
 * MCP scratchpad via DMAE before statistics collection is stopped.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a function stats command follows, complete to GRC so
		 * the loader chains to it; otherwise complete to PCI so
		 * stats_comp signals the end of the sequence
		 */
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

/* Final statistics harvest on the way down; if anything was updated,
 * also flush the port stats to the MCP (PMF only).
 */
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

/* Statistics state machine: for each (state, event) pair - the action
 * to execute and the next state to move to.
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start, STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart, STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update, STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop, STATS_STATE_DISABLED}
}
};

/* Drive the statistics state machine. The state transition itself is
 * done under stats_lock; the action (looked up from the pre-transition
 * state) runs outside the lock.
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;
	if (unlikely(bp->panic))
		return;

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

/* DMA the host port statistics buffer to the MCP scratchpad once so
 * that the management firmware starts from a known state. PMF only.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* This function will prepare the statistics ramrod data the way
 * we will only have to increment the statistics counter and
 * send the ramrod each time we have to.
 */
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare to the first stats ramrod (will be completed with
	 * the counters equal to zero) - init counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* first queue query index depends whether FCoE offloaded request will
	 * be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}

/* (Re)initialize statistics bookkeeping: read the management stats
 * addresses from shmem, reset the NIG/old baselines and prepare the
 * FW statistics ramrod data.
 */
void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* pmf should retrieve port statistics from SP on a non-init */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	port = BP_PORT(bp);
	/* port stats - snapshot the NIG counters as the new baseline */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* function stats - old storm snapshots are always reset; the
	 * accumulated queue stats are cleared only on a real init
	 */
	for_each_queue(bp, i) {
		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];

		memset(&fp_stats->old_tclient, 0,
		       sizeof(fp_stats->old_tclient));
		memset(&fp_stats->old_uclient, 0,
		       sizeof(fp_stats->old_uclient));
		memset(&fp_stats->old_xclient, 0,
		       sizeof(fp_stats->old_xclient));
		if (bp->stats_init) {
			memset(&fp_stats->eth_q_stats, 0,
			       sizeof(fp_stats->eth_q_stats));
			memset(&fp_stats->eth_q_stats_old, 0,
			       sizeof(fp_stats->eth_q_stats_old));
		}
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));

		/* Clean SP from previous statistics */
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}

/* Snapshot the statistics that must survive an unload/reload cycle
 * into the *_old buffers, so the accumulated values are preserved.
 */
void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}

	/* save net_device_stats statistics */
bp->net_stats_old.rx_dropped = nstats->rx_dropped; 1601 1602 /* store port firmware statistics */ 1603 if (bp->port.pmf && IS_MF(bp)) { 1604 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1605 struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old; 1606 UPDATE_FW_STAT_OLD(mac_filter_discard); 1607 UPDATE_FW_STAT_OLD(mf_tag_discard); 1608 UPDATE_FW_STAT_OLD(brb_truncate_discard); 1609 UPDATE_FW_STAT_OLD(mac_discard); 1610 } 1611 } 1612 1613 void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, 1614 u32 stats_type) 1615 { 1616 int i; 1617 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; 1618 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1619 struct per_queue_stats *fcoe_q_stats = 1620 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]; 1621 1622 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 1623 &fcoe_q_stats->tstorm_queue_statistics; 1624 1625 struct ustorm_per_queue_stats *fcoe_q_ustorm_stats = 1626 &fcoe_q_stats->ustorm_queue_statistics; 1627 1628 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = 1629 &fcoe_q_stats->xstorm_queue_statistics; 1630 1631 struct fcoe_statistics_params *fw_fcoe_stat = 1632 &bp->fw_stats_data->fcoe; 1633 1634 memset(afex_stats, 0, sizeof(struct afex_stats)); 1635 1636 for_each_eth_queue(bp, i) { 1637 struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; 1638 1639 ADD_64(afex_stats->rx_unicast_bytes_hi, 1640 qstats->total_unicast_bytes_received_hi, 1641 afex_stats->rx_unicast_bytes_lo, 1642 qstats->total_unicast_bytes_received_lo); 1643 1644 ADD_64(afex_stats->rx_broadcast_bytes_hi, 1645 qstats->total_broadcast_bytes_received_hi, 1646 afex_stats->rx_broadcast_bytes_lo, 1647 qstats->total_broadcast_bytes_received_lo); 1648 1649 ADD_64(afex_stats->rx_multicast_bytes_hi, 1650 qstats->total_multicast_bytes_received_hi, 1651 afex_stats->rx_multicast_bytes_lo, 1652 qstats->total_multicast_bytes_received_lo); 1653 1654 ADD_64(afex_stats->rx_unicast_frames_hi, 1655 
qstats->total_unicast_packets_received_hi, 1656 afex_stats->rx_unicast_frames_lo, 1657 qstats->total_unicast_packets_received_lo); 1658 1659 ADD_64(afex_stats->rx_broadcast_frames_hi, 1660 qstats->total_broadcast_packets_received_hi, 1661 afex_stats->rx_broadcast_frames_lo, 1662 qstats->total_broadcast_packets_received_lo); 1663 1664 ADD_64(afex_stats->rx_multicast_frames_hi, 1665 qstats->total_multicast_packets_received_hi, 1666 afex_stats->rx_multicast_frames_lo, 1667 qstats->total_multicast_packets_received_lo); 1668 1669 /* sum to rx_frames_discarded all discraded 1670 * packets due to size, ttl0 and checksum 1671 */ 1672 ADD_64(afex_stats->rx_frames_discarded_hi, 1673 qstats->total_packets_received_checksum_discarded_hi, 1674 afex_stats->rx_frames_discarded_lo, 1675 qstats->total_packets_received_checksum_discarded_lo); 1676 1677 ADD_64(afex_stats->rx_frames_discarded_hi, 1678 qstats->total_packets_received_ttl0_discarded_hi, 1679 afex_stats->rx_frames_discarded_lo, 1680 qstats->total_packets_received_ttl0_discarded_lo); 1681 1682 ADD_64(afex_stats->rx_frames_discarded_hi, 1683 qstats->etherstatsoverrsizepkts_hi, 1684 afex_stats->rx_frames_discarded_lo, 1685 qstats->etherstatsoverrsizepkts_lo); 1686 1687 ADD_64(afex_stats->rx_frames_dropped_hi, 1688 qstats->no_buff_discard_hi, 1689 afex_stats->rx_frames_dropped_lo, 1690 qstats->no_buff_discard_lo); 1691 1692 ADD_64(afex_stats->tx_unicast_bytes_hi, 1693 qstats->total_unicast_bytes_transmitted_hi, 1694 afex_stats->tx_unicast_bytes_lo, 1695 qstats->total_unicast_bytes_transmitted_lo); 1696 1697 ADD_64(afex_stats->tx_broadcast_bytes_hi, 1698 qstats->total_broadcast_bytes_transmitted_hi, 1699 afex_stats->tx_broadcast_bytes_lo, 1700 qstats->total_broadcast_bytes_transmitted_lo); 1701 1702 ADD_64(afex_stats->tx_multicast_bytes_hi, 1703 qstats->total_multicast_bytes_transmitted_hi, 1704 afex_stats->tx_multicast_bytes_lo, 1705 qstats->total_multicast_bytes_transmitted_lo); 1706 1707 
ADD_64(afex_stats->tx_unicast_frames_hi, 1708 qstats->total_unicast_packets_transmitted_hi, 1709 afex_stats->tx_unicast_frames_lo, 1710 qstats->total_unicast_packets_transmitted_lo); 1711 1712 ADD_64(afex_stats->tx_broadcast_frames_hi, 1713 qstats->total_broadcast_packets_transmitted_hi, 1714 afex_stats->tx_broadcast_frames_lo, 1715 qstats->total_broadcast_packets_transmitted_lo); 1716 1717 ADD_64(afex_stats->tx_multicast_frames_hi, 1718 qstats->total_multicast_packets_transmitted_hi, 1719 afex_stats->tx_multicast_frames_lo, 1720 qstats->total_multicast_packets_transmitted_lo); 1721 1722 ADD_64(afex_stats->tx_frames_dropped_hi, 1723 qstats->total_transmitted_dropped_packets_error_hi, 1724 afex_stats->tx_frames_dropped_lo, 1725 qstats->total_transmitted_dropped_packets_error_lo); 1726 } 1727 1728 /* now add FCoE statistics which are collected separately 1729 * (both offloaded and non offloaded) 1730 */ 1731 if (!NO_FCOE(bp)) { 1732 ADD_64_LE(afex_stats->rx_unicast_bytes_hi, 1733 LE32_0, 1734 afex_stats->rx_unicast_bytes_lo, 1735 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); 1736 1737 ADD_64_LE(afex_stats->rx_unicast_bytes_hi, 1738 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, 1739 afex_stats->rx_unicast_bytes_lo, 1740 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); 1741 1742 ADD_64_LE(afex_stats->rx_broadcast_bytes_hi, 1743 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, 1744 afex_stats->rx_broadcast_bytes_lo, 1745 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); 1746 1747 ADD_64_LE(afex_stats->rx_multicast_bytes_hi, 1748 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, 1749 afex_stats->rx_multicast_bytes_lo, 1750 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); 1751 1752 ADD_64_LE(afex_stats->rx_unicast_frames_hi, 1753 LE32_0, 1754 afex_stats->rx_unicast_frames_lo, 1755 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); 1756 1757 ADD_64_LE(afex_stats->rx_unicast_frames_hi, 1758 LE32_0, 1759 afex_stats->rx_unicast_frames_lo, 1760 fcoe_q_tstorm_stats->rcv_ucast_pkts); 1761 1762 
ADD_64_LE(afex_stats->rx_broadcast_frames_hi, 1763 LE32_0, 1764 afex_stats->rx_broadcast_frames_lo, 1765 fcoe_q_tstorm_stats->rcv_bcast_pkts); 1766 1767 ADD_64_LE(afex_stats->rx_multicast_frames_hi, 1768 LE32_0, 1769 afex_stats->rx_multicast_frames_lo, 1770 fcoe_q_tstorm_stats->rcv_ucast_pkts); 1771 1772 ADD_64_LE(afex_stats->rx_frames_discarded_hi, 1773 LE32_0, 1774 afex_stats->rx_frames_discarded_lo, 1775 fcoe_q_tstorm_stats->checksum_discard); 1776 1777 ADD_64_LE(afex_stats->rx_frames_discarded_hi, 1778 LE32_0, 1779 afex_stats->rx_frames_discarded_lo, 1780 fcoe_q_tstorm_stats->pkts_too_big_discard); 1781 1782 ADD_64_LE(afex_stats->rx_frames_discarded_hi, 1783 LE32_0, 1784 afex_stats->rx_frames_discarded_lo, 1785 fcoe_q_tstorm_stats->ttl0_discard); 1786 1787 ADD_64_LE16(afex_stats->rx_frames_dropped_hi, 1788 LE16_0, 1789 afex_stats->rx_frames_dropped_lo, 1790 fcoe_q_tstorm_stats->no_buff_discard); 1791 1792 ADD_64_LE(afex_stats->rx_frames_dropped_hi, 1793 LE32_0, 1794 afex_stats->rx_frames_dropped_lo, 1795 fcoe_q_ustorm_stats->ucast_no_buff_pkts); 1796 1797 ADD_64_LE(afex_stats->rx_frames_dropped_hi, 1798 LE32_0, 1799 afex_stats->rx_frames_dropped_lo, 1800 fcoe_q_ustorm_stats->mcast_no_buff_pkts); 1801 1802 ADD_64_LE(afex_stats->rx_frames_dropped_hi, 1803 LE32_0, 1804 afex_stats->rx_frames_dropped_lo, 1805 fcoe_q_ustorm_stats->bcast_no_buff_pkts); 1806 1807 ADD_64_LE(afex_stats->rx_frames_dropped_hi, 1808 LE32_0, 1809 afex_stats->rx_frames_dropped_lo, 1810 fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt); 1811 1812 ADD_64_LE(afex_stats->rx_frames_dropped_hi, 1813 LE32_0, 1814 afex_stats->rx_frames_dropped_lo, 1815 fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt); 1816 1817 ADD_64_LE(afex_stats->tx_unicast_bytes_hi, 1818 LE32_0, 1819 afex_stats->tx_unicast_bytes_lo, 1820 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); 1821 1822 ADD_64_LE(afex_stats->tx_unicast_bytes_hi, 1823 fcoe_q_xstorm_stats->ucast_bytes_sent.hi, 1824 afex_stats->tx_unicast_bytes_lo, 1825 
fcoe_q_xstorm_stats->ucast_bytes_sent.lo); 1826 1827 ADD_64_LE(afex_stats->tx_broadcast_bytes_hi, 1828 fcoe_q_xstorm_stats->bcast_bytes_sent.hi, 1829 afex_stats->tx_broadcast_bytes_lo, 1830 fcoe_q_xstorm_stats->bcast_bytes_sent.lo); 1831 1832 ADD_64_LE(afex_stats->tx_multicast_bytes_hi, 1833 fcoe_q_xstorm_stats->mcast_bytes_sent.hi, 1834 afex_stats->tx_multicast_bytes_lo, 1835 fcoe_q_xstorm_stats->mcast_bytes_sent.lo); 1836 1837 ADD_64_LE(afex_stats->tx_unicast_frames_hi, 1838 LE32_0, 1839 afex_stats->tx_unicast_frames_lo, 1840 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); 1841 1842 ADD_64_LE(afex_stats->tx_unicast_frames_hi, 1843 LE32_0, 1844 afex_stats->tx_unicast_frames_lo, 1845 fcoe_q_xstorm_stats->ucast_pkts_sent); 1846 1847 ADD_64_LE(afex_stats->tx_broadcast_frames_hi, 1848 LE32_0, 1849 afex_stats->tx_broadcast_frames_lo, 1850 fcoe_q_xstorm_stats->bcast_pkts_sent); 1851 1852 ADD_64_LE(afex_stats->tx_multicast_frames_hi, 1853 LE32_0, 1854 afex_stats->tx_multicast_frames_lo, 1855 fcoe_q_xstorm_stats->mcast_pkts_sent); 1856 1857 ADD_64_LE(afex_stats->tx_frames_dropped_hi, 1858 LE32_0, 1859 afex_stats->tx_frames_dropped_lo, 1860 fcoe_q_xstorm_stats->error_drop_pkts); 1861 } 1862 1863 /* if port stats are requested, add them to the PMF 1864 * stats, as anyway they will be accumulated by the 1865 * MCP before sent to the switch 1866 */ 1867 if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) { 1868 ADD_64(afex_stats->rx_frames_dropped_hi, 1869 0, 1870 afex_stats->rx_frames_dropped_lo, 1871 estats->mac_filter_discard); 1872 ADD_64(afex_stats->rx_frames_dropped_hi, 1873 0, 1874 afex_stats->rx_frames_dropped_lo, 1875 estats->brb_truncate_discard); 1876 ADD_64(afex_stats->rx_frames_discarded_hi, 1877 0, 1878 afex_stats->rx_frames_discarded_lo, 1879 estats->mac_discard); 1880 } 1881 } 1882