/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

/* Statistics */

/*
 * General service functions
 */

/* Fold a {hi, lo} pair of u32 counters (hi word first in memory) into a
 * single value.  On 64-bit builds the full 64-bit counter is returned; on
 * 32-bit builds only the low 32 bits fit in a long, so just those are
 * returned.
 */
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);	/* low word immediately follows the high word */
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/* Return the number of 32-bit dwords of struct host_port_stats that should
 * be DMAed to/from the MCP, depending on what the bootcode advertises.
 */
static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = 0;

	/* 'newest' convention - shmem2 contains the size of the port stats */
	if (SHMEM2_HAS(bp, sizeof_port_stats)) {
		u32 size = SHMEM2_RD(bp, sizeof_port_stats);
		if (size)
			res = size;

		/* prevent newer BC from causing buffer overflow */
		if (res > sizeof(struct host_port_stats))
			res = sizeof(struct host_port_stats);
	}

	/* Older convention - all BCs support the port stats' fields up until
	 * the 'not_used' field
	 */
	if (!res) {
		res = offsetof(struct host_port_stats, not_used) + 4;

		/* if PFC stats are supported by the MFW, DMA them as well */
		if (bp->flags & BC_SUPPORTS_PFC_STATS) {
			res += offsetof(struct host_port_stats,
					pfc_frames_rx_lo) -
			       offsetof(struct host_port_stats,
					pfc_frames_tx_hi) + 4 ;
		}
	}

	/* convert byte count to dword count */
	res >>= 2;

	WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
	return res;
}

/*
 * Init service functions
 */
81 82 static void bnx2x_dp_stats(struct bnx2x *bp) 83 { 84 int i; 85 86 DP(BNX2X_MSG_STATS, "dumping stats:\n" 87 "fw_stats_req\n" 88 " hdr\n" 89 " cmd_num %d\n" 90 " reserved0 %d\n" 91 " drv_stats_counter %d\n" 92 " reserved1 %d\n" 93 " stats_counters_addrs %x %x\n", 94 bp->fw_stats_req->hdr.cmd_num, 95 bp->fw_stats_req->hdr.reserved0, 96 bp->fw_stats_req->hdr.drv_stats_counter, 97 bp->fw_stats_req->hdr.reserved1, 98 bp->fw_stats_req->hdr.stats_counters_addrs.hi, 99 bp->fw_stats_req->hdr.stats_counters_addrs.lo); 100 101 for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) { 102 DP(BNX2X_MSG_STATS, 103 "query[%d]\n" 104 " kind %d\n" 105 " index %d\n" 106 " funcID %d\n" 107 " reserved %d\n" 108 " address %x %x\n", 109 i, bp->fw_stats_req->query[i].kind, 110 bp->fw_stats_req->query[i].index, 111 bp->fw_stats_req->query[i].funcID, 112 bp->fw_stats_req->query[i].reserved, 113 bp->fw_stats_req->query[i].address.hi, 114 bp->fw_stats_req->query[i].address.lo); 115 } 116 } 117 118 /* Post the next statistics ramrod. Protect it with the spin in 119 * order to ensure the strict order between statistics ramrods 120 * (each ramrod has a sequence number passed in a 121 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be 122 * sent in order). 
123 */ 124 static void bnx2x_storm_stats_post(struct bnx2x *bp) 125 { 126 if (!bp->stats_pending) { 127 int rc; 128 129 spin_lock_bh(&bp->stats_lock); 130 131 if (bp->stats_pending) { 132 spin_unlock_bh(&bp->stats_lock); 133 return; 134 } 135 136 bp->fw_stats_req->hdr.drv_stats_counter = 137 cpu_to_le16(bp->stats_counter++); 138 139 DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", 140 bp->fw_stats_req->hdr.drv_stats_counter); 141 142 /* adjust the ramrod to include VF queues statistics */ 143 bnx2x_iov_adjust_stats_req(bp); 144 bnx2x_dp_stats(bp); 145 146 /* send FW stats ramrod */ 147 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, 148 U64_HI(bp->fw_stats_req_mapping), 149 U64_LO(bp->fw_stats_req_mapping), 150 NONE_CONNECTION_TYPE); 151 if (rc == 0) 152 bp->stats_pending = 1; 153 154 spin_unlock_bh(&bp->stats_lock); 155 } 156 } 157 158 static void bnx2x_hw_stats_post(struct bnx2x *bp) 159 { 160 struct dmae_command *dmae = &bp->stats_dmae; 161 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 162 163 *stats_comp = DMAE_COMP_VAL; 164 if (CHIP_REV_IS_SLOW(bp)) 165 return; 166 167 /* Update MCP's statistics if possible */ 168 if (bp->func_stx) 169 memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, 170 sizeof(bp->func_stats)); 171 172 /* loader */ 173 if (bp->executer_idx) { 174 int loader_idx = PMF_DMAE_C(bp); 175 u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, 176 true, DMAE_COMP_GRC); 177 opcode = bnx2x_dmae_opcode_clr_src_reset(opcode); 178 179 memset(dmae, 0, sizeof(struct dmae_command)); 180 dmae->opcode = opcode; 181 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); 182 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); 183 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + 184 sizeof(struct dmae_command) * 185 (loader_idx + 1)) >> 2; 186 dmae->dst_addr_hi = 0; 187 dmae->len = sizeof(struct dmae_command) >> 2; 188 if (CHIP_IS_E1(bp)) 189 dmae->len--; 190 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; 191 dmae->comp_addr_hi 
= 0; 192 dmae->comp_val = 1; 193 194 *stats_comp = 0; 195 bnx2x_post_dmae(bp, dmae, loader_idx); 196 197 } else if (bp->func_stx) { 198 *stats_comp = 0; 199 bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp); 200 } 201 } 202 203 static int bnx2x_stats_comp(struct bnx2x *bp) 204 { 205 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 206 int cnt = 10; 207 208 might_sleep(); 209 while (*stats_comp != DMAE_COMP_VAL) { 210 if (!cnt) { 211 BNX2X_ERR("timeout waiting for stats finished\n"); 212 break; 213 } 214 cnt--; 215 usleep_range(1000, 2000); 216 } 217 return 1; 218 } 219 220 /* 221 * Statistics service functions 222 */ 223 224 /* should be called under stats_sema */ 225 static void __bnx2x_stats_pmf_update(struct bnx2x *bp) 226 { 227 struct dmae_command *dmae; 228 u32 opcode; 229 int loader_idx = PMF_DMAE_C(bp); 230 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 231 232 /* sanity */ 233 if (!bp->port.pmf || !bp->port.port_stx) { 234 BNX2X_ERR("BUG!\n"); 235 return; 236 } 237 238 bp->executer_idx = 0; 239 240 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0); 241 242 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 243 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); 244 dmae->src_addr_lo = bp->port.port_stx >> 2; 245 dmae->src_addr_hi = 0; 246 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 247 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); 248 dmae->len = DMAE_LEN32_RD_MAX; 249 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 250 dmae->comp_addr_hi = 0; 251 dmae->comp_val = 1; 252 253 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 254 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); 255 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; 256 dmae->src_addr_hi = 0; 257 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + 258 DMAE_LEN32_RD_MAX * 4); 259 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + 260 DMAE_LEN32_RD_MAX * 4); 261 dmae->len = 
bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX; 262 263 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); 264 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); 265 dmae->comp_val = DMAE_COMP_VAL; 266 267 *stats_comp = 0; 268 bnx2x_hw_stats_post(bp); 269 bnx2x_stats_comp(bp); 270 } 271 272 static void bnx2x_port_stats_init(struct bnx2x *bp) 273 { 274 struct dmae_command *dmae; 275 int port = BP_PORT(bp); 276 u32 opcode; 277 int loader_idx = PMF_DMAE_C(bp); 278 u32 mac_addr; 279 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 280 281 /* sanity */ 282 if (!bp->link_vars.link_up || !bp->port.pmf) { 283 BNX2X_ERR("BUG!\n"); 284 return; 285 } 286 287 bp->executer_idx = 0; 288 289 /* MCP */ 290 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, 291 true, DMAE_COMP_GRC); 292 293 if (bp->port.port_stx) { 294 295 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 296 dmae->opcode = opcode; 297 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 298 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); 299 dmae->dst_addr_lo = bp->port.port_stx >> 2; 300 dmae->dst_addr_hi = 0; 301 dmae->len = bnx2x_get_port_stats_dma_len(bp); 302 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 303 dmae->comp_addr_hi = 0; 304 dmae->comp_val = 1; 305 } 306 307 if (bp->func_stx) { 308 309 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 310 dmae->opcode = opcode; 311 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); 312 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); 313 dmae->dst_addr_lo = bp->func_stx >> 2; 314 dmae->dst_addr_hi = 0; 315 dmae->len = sizeof(struct host_func_stats) >> 2; 316 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 317 dmae->comp_addr_hi = 0; 318 dmae->comp_val = 1; 319 } 320 321 /* MAC */ 322 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, 323 true, DMAE_COMP_GRC); 324 325 /* EMAC is special */ 326 if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { 327 mac_addr = (port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0); 328 329 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ 330 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 331 dmae->opcode = opcode; 332 dmae->src_addr_lo = (mac_addr + 333 EMAC_REG_EMAC_RX_STAT_AC) >> 2; 334 dmae->src_addr_hi = 0; 335 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); 336 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); 337 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; 338 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 339 dmae->comp_addr_hi = 0; 340 dmae->comp_val = 1; 341 342 /* EMAC_REG_EMAC_RX_STAT_AC_28 */ 343 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 344 dmae->opcode = opcode; 345 dmae->src_addr_lo = (mac_addr + 346 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2; 347 dmae->src_addr_hi = 0; 348 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + 349 offsetof(struct emac_stats, rx_stat_falsecarriererrors)); 350 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + 351 offsetof(struct emac_stats, rx_stat_falsecarriererrors)); 352 dmae->len = 1; 353 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 354 dmae->comp_addr_hi = 0; 355 dmae->comp_val = 1; 356 357 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ 358 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 359 dmae->opcode = opcode; 360 dmae->src_addr_lo = (mac_addr + 361 EMAC_REG_EMAC_TX_STAT_AC) >> 2; 362 dmae->src_addr_hi = 0; 363 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + 364 offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); 365 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + 366 offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); 367 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; 368 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 369 dmae->comp_addr_hi = 0; 370 dmae->comp_val = 1; 371 } else { 372 u32 tx_src_addr_lo, rx_src_addr_lo; 373 u16 rx_len, tx_len; 374 375 /* configure the params according to MAC type */ 376 switch (bp->link_vars.mac_type) { 377 
case MAC_TYPE_BMAC: 378 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : 379 NIG_REG_INGRESS_BMAC0_MEM); 380 381 /* BIGMAC_REGISTER_TX_STAT_GTPKT .. 382 BIGMAC_REGISTER_TX_STAT_GTBYT */ 383 if (CHIP_IS_E1x(bp)) { 384 tx_src_addr_lo = (mac_addr + 385 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; 386 tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - 387 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; 388 rx_src_addr_lo = (mac_addr + 389 BIGMAC_REGISTER_RX_STAT_GR64) >> 2; 390 rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - 391 BIGMAC_REGISTER_RX_STAT_GR64) >> 2; 392 } else { 393 tx_src_addr_lo = (mac_addr + 394 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; 395 tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - 396 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; 397 rx_src_addr_lo = (mac_addr + 398 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; 399 rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - 400 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; 401 } 402 break; 403 404 case MAC_TYPE_UMAC: /* handled by MSTAT */ 405 case MAC_TYPE_XMAC: /* handled by MSTAT */ 406 default: 407 mac_addr = port ? 
GRCBASE_MSTAT1 : GRCBASE_MSTAT0; 408 tx_src_addr_lo = (mac_addr + 409 MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2; 410 rx_src_addr_lo = (mac_addr + 411 MSTAT_REG_RX_STAT_GR64_LO) >> 2; 412 tx_len = sizeof(bp->slowpath-> 413 mac_stats.mstat_stats.stats_tx) >> 2; 414 rx_len = sizeof(bp->slowpath-> 415 mac_stats.mstat_stats.stats_rx) >> 2; 416 break; 417 } 418 419 /* TX stats */ 420 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 421 dmae->opcode = opcode; 422 dmae->src_addr_lo = tx_src_addr_lo; 423 dmae->src_addr_hi = 0; 424 dmae->len = tx_len; 425 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); 426 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); 427 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 428 dmae->comp_addr_hi = 0; 429 dmae->comp_val = 1; 430 431 /* RX stats */ 432 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 433 dmae->opcode = opcode; 434 dmae->src_addr_hi = 0; 435 dmae->src_addr_lo = rx_src_addr_lo; 436 dmae->dst_addr_lo = 437 U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); 438 dmae->dst_addr_hi = 439 U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); 440 dmae->len = rx_len; 441 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 442 dmae->comp_addr_hi = 0; 443 dmae->comp_val = 1; 444 } 445 446 /* NIG */ 447 if (!CHIP_IS_E3(bp)) { 448 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 449 dmae->opcode = opcode; 450 dmae->src_addr_lo = (port ? 
NIG_REG_STAT1_EGRESS_MAC_PKT0 : 451 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; 452 dmae->src_addr_hi = 0; 453 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + 454 offsetof(struct nig_stats, egress_mac_pkt0_lo)); 455 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + 456 offsetof(struct nig_stats, egress_mac_pkt0_lo)); 457 dmae->len = (2*sizeof(u32)) >> 2; 458 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 459 dmae->comp_addr_hi = 0; 460 dmae->comp_val = 1; 461 462 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 463 dmae->opcode = opcode; 464 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : 465 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; 466 dmae->src_addr_hi = 0; 467 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + 468 offsetof(struct nig_stats, egress_mac_pkt1_lo)); 469 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + 470 offsetof(struct nig_stats, egress_mac_pkt1_lo)); 471 dmae->len = (2*sizeof(u32)) >> 2; 472 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 473 dmae->comp_addr_hi = 0; 474 dmae->comp_val = 1; 475 } 476 477 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 478 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, 479 true, DMAE_COMP_PCI); 480 dmae->src_addr_lo = (port ? 
NIG_REG_STAT1_BRB_DISCARD : 481 NIG_REG_STAT0_BRB_DISCARD) >> 2; 482 dmae->src_addr_hi = 0; 483 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); 484 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); 485 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; 486 487 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); 488 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); 489 dmae->comp_val = DMAE_COMP_VAL; 490 491 *stats_comp = 0; 492 } 493 494 static void bnx2x_func_stats_init(struct bnx2x *bp) 495 { 496 struct dmae_command *dmae = &bp->stats_dmae; 497 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 498 499 /* sanity */ 500 if (!bp->func_stx) { 501 BNX2X_ERR("BUG!\n"); 502 return; 503 } 504 505 bp->executer_idx = 0; 506 memset(dmae, 0, sizeof(struct dmae_command)); 507 508 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, 509 true, DMAE_COMP_PCI); 510 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); 511 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); 512 dmae->dst_addr_lo = bp->func_stx >> 2; 513 dmae->dst_addr_hi = 0; 514 dmae->len = sizeof(struct host_func_stats) >> 2; 515 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); 516 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); 517 dmae->comp_val = DMAE_COMP_VAL; 518 519 *stats_comp = 0; 520 } 521 522 /* should be called under stats_sema */ 523 static void __bnx2x_stats_start(struct bnx2x *bp) 524 { 525 if (IS_PF(bp)) { 526 if (bp->port.pmf) 527 bnx2x_port_stats_init(bp); 528 529 else if (bp->func_stx) 530 bnx2x_func_stats_init(bp); 531 532 bnx2x_hw_stats_post(bp); 533 bnx2x_storm_stats_post(bp); 534 } 535 536 bp->stats_started = true; 537 } 538 539 static void bnx2x_stats_start(struct bnx2x *bp) 540 { 541 if (down_timeout(&bp->stats_sema, HZ/10)) 542 BNX2X_ERR("Unable to acquire stats lock\n"); 543 __bnx2x_stats_start(bp); 544 up(&bp->stats_sema); 545 } 546 547 static void bnx2x_stats_pmf_start(struct 
bnx2x *bp) 548 { 549 if (down_timeout(&bp->stats_sema, HZ/10)) 550 BNX2X_ERR("Unable to acquire stats lock\n"); 551 bnx2x_stats_comp(bp); 552 __bnx2x_stats_pmf_update(bp); 553 __bnx2x_stats_start(bp); 554 up(&bp->stats_sema); 555 } 556 557 static void bnx2x_stats_pmf_update(struct bnx2x *bp) 558 { 559 if (down_timeout(&bp->stats_sema, HZ/10)) 560 BNX2X_ERR("Unable to acquire stats lock\n"); 561 __bnx2x_stats_pmf_update(bp); 562 up(&bp->stats_sema); 563 } 564 565 static void bnx2x_stats_restart(struct bnx2x *bp) 566 { 567 /* vfs travel through here as part of the statistics FSM, but no action 568 * is required 569 */ 570 if (IS_VF(bp)) 571 return; 572 if (down_timeout(&bp->stats_sema, HZ/10)) 573 BNX2X_ERR("Unable to acquire stats lock\n"); 574 bnx2x_stats_comp(bp); 575 __bnx2x_stats_start(bp); 576 up(&bp->stats_sema); 577 } 578 579 static void bnx2x_bmac_stats_update(struct bnx2x *bp) 580 { 581 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); 582 struct bnx2x_eth_stats *estats = &bp->eth_stats; 583 struct { 584 u32 lo; 585 u32 hi; 586 } diff; 587 588 if (CHIP_IS_E1x(bp)) { 589 struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats); 590 591 /* the macros below will use "bmac1_stats" type */ 592 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); 593 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); 594 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); 595 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); 596 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); 597 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); 598 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); 599 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); 600 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); 601 602 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); 603 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); 604 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); 605 UPDATE_STAT64(tx_stat_gt127, 
606 tx_stat_etherstatspkts65octetsto127octets); 607 UPDATE_STAT64(tx_stat_gt255, 608 tx_stat_etherstatspkts128octetsto255octets); 609 UPDATE_STAT64(tx_stat_gt511, 610 tx_stat_etherstatspkts256octetsto511octets); 611 UPDATE_STAT64(tx_stat_gt1023, 612 tx_stat_etherstatspkts512octetsto1023octets); 613 UPDATE_STAT64(tx_stat_gt1518, 614 tx_stat_etherstatspkts1024octetsto1522octets); 615 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); 616 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); 617 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); 618 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); 619 UPDATE_STAT64(tx_stat_gterr, 620 tx_stat_dot3statsinternalmactransmiterrors); 621 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); 622 623 } else { 624 struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats); 625 626 /* the macros below will use "bmac2_stats" type */ 627 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); 628 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); 629 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); 630 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); 631 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); 632 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); 633 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); 634 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); 635 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); 636 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); 637 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); 638 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); 639 UPDATE_STAT64(tx_stat_gt127, 640 tx_stat_etherstatspkts65octetsto127octets); 641 UPDATE_STAT64(tx_stat_gt255, 642 tx_stat_etherstatspkts128octetsto255octets); 643 UPDATE_STAT64(tx_stat_gt511, 644 tx_stat_etherstatspkts256octetsto511octets); 645 UPDATE_STAT64(tx_stat_gt1023, 646 tx_stat_etherstatspkts512octetsto1023octets); 647 UPDATE_STAT64(tx_stat_gt1518, 648 
tx_stat_etherstatspkts1024octetsto1522octets); 649 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); 650 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); 651 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); 652 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); 653 UPDATE_STAT64(tx_stat_gterr, 654 tx_stat_dot3statsinternalmactransmiterrors); 655 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); 656 657 /* collect PFC stats */ 658 pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi; 659 pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo; 660 661 pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi; 662 pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo; 663 } 664 665 estats->pause_frames_received_hi = 666 pstats->mac_stx[1].rx_stat_mac_xpf_hi; 667 estats->pause_frames_received_lo = 668 pstats->mac_stx[1].rx_stat_mac_xpf_lo; 669 670 estats->pause_frames_sent_hi = 671 pstats->mac_stx[1].tx_stat_outxoffsent_hi; 672 estats->pause_frames_sent_lo = 673 pstats->mac_stx[1].tx_stat_outxoffsent_lo; 674 675 estats->pfc_frames_received_hi = 676 pstats->pfc_frames_rx_hi; 677 estats->pfc_frames_received_lo = 678 pstats->pfc_frames_rx_lo; 679 estats->pfc_frames_sent_hi = 680 pstats->pfc_frames_tx_hi; 681 estats->pfc_frames_sent_lo = 682 pstats->pfc_frames_tx_lo; 683 } 684 685 static void bnx2x_mstat_stats_update(struct bnx2x *bp) 686 { 687 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); 688 struct bnx2x_eth_stats *estats = &bp->eth_stats; 689 690 struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats); 691 692 ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets); 693 ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors); 694 ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts); 695 ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong); 696 ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments); 697 ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived); 698 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered); 699 ADD_STAT64(stats_rx.rx_grxpf, 
rx_stat_mac_xpf); 700 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); 701 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); 702 703 /* collect pfc stats */ 704 ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi, 705 pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo); 706 ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi, 707 pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo); 708 709 ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); 710 ADD_STAT64(stats_tx.tx_gt127, 711 tx_stat_etherstatspkts65octetsto127octets); 712 ADD_STAT64(stats_tx.tx_gt255, 713 tx_stat_etherstatspkts128octetsto255octets); 714 ADD_STAT64(stats_tx.tx_gt511, 715 tx_stat_etherstatspkts256octetsto511octets); 716 ADD_STAT64(stats_tx.tx_gt1023, 717 tx_stat_etherstatspkts512octetsto1023octets); 718 ADD_STAT64(stats_tx.tx_gt1518, 719 tx_stat_etherstatspkts1024octetsto1522octets); 720 ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047); 721 722 ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095); 723 ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216); 724 ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383); 725 726 ADD_STAT64(stats_tx.tx_gterr, 727 tx_stat_dot3statsinternalmactransmiterrors); 728 ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); 729 730 estats->etherstatspkts1024octetsto1522octets_hi = 731 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi; 732 estats->etherstatspkts1024octetsto1522octets_lo = 733 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo; 734 735 estats->etherstatspktsover1522octets_hi = 736 pstats->mac_stx[1].tx_stat_mac_2047_hi; 737 estats->etherstatspktsover1522octets_lo = 738 pstats->mac_stx[1].tx_stat_mac_2047_lo; 739 740 ADD_64(estats->etherstatspktsover1522octets_hi, 741 pstats->mac_stx[1].tx_stat_mac_4095_hi, 742 estats->etherstatspktsover1522octets_lo, 743 pstats->mac_stx[1].tx_stat_mac_4095_lo); 744 745 ADD_64(estats->etherstatspktsover1522octets_hi, 746 pstats->mac_stx[1].tx_stat_mac_9216_hi, 747 
estats->etherstatspktsover1522octets_lo, 748 pstats->mac_stx[1].tx_stat_mac_9216_lo); 749 750 ADD_64(estats->etherstatspktsover1522octets_hi, 751 pstats->mac_stx[1].tx_stat_mac_16383_hi, 752 estats->etherstatspktsover1522octets_lo, 753 pstats->mac_stx[1].tx_stat_mac_16383_lo); 754 755 estats->pause_frames_received_hi = 756 pstats->mac_stx[1].rx_stat_mac_xpf_hi; 757 estats->pause_frames_received_lo = 758 pstats->mac_stx[1].rx_stat_mac_xpf_lo; 759 760 estats->pause_frames_sent_hi = 761 pstats->mac_stx[1].tx_stat_outxoffsent_hi; 762 estats->pause_frames_sent_lo = 763 pstats->mac_stx[1].tx_stat_outxoffsent_lo; 764 765 estats->pfc_frames_received_hi = 766 pstats->pfc_frames_rx_hi; 767 estats->pfc_frames_received_lo = 768 pstats->pfc_frames_rx_lo; 769 estats->pfc_frames_sent_hi = 770 pstats->pfc_frames_tx_hi; 771 estats->pfc_frames_sent_lo = 772 pstats->pfc_frames_tx_lo; 773 } 774 775 static void bnx2x_emac_stats_update(struct bnx2x *bp) 776 { 777 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats); 778 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); 779 struct bnx2x_eth_stats *estats = &bp->eth_stats; 780 781 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); 782 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); 783 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); 784 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); 785 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); 786 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); 787 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); 788 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); 789 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); 790 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); 791 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); 792 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); 793 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); 794 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); 795 UPDATE_EXTEND_STAT(tx_stat_outxonsent); 796 UPDATE_EXTEND_STAT(tx_stat_outxoffsent); 797 
UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); 798 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); 799 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); 800 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); 801 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); 802 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); 803 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); 804 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); 805 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); 806 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); 807 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); 808 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); 809 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); 810 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); 811 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); 812 813 estats->pause_frames_received_hi = 814 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; 815 estats->pause_frames_received_lo = 816 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; 817 ADD_64(estats->pause_frames_received_hi, 818 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, 819 estats->pause_frames_received_lo, 820 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); 821 822 estats->pause_frames_sent_hi = 823 pstats->mac_stx[1].tx_stat_outxonsent_hi; 824 estats->pause_frames_sent_lo = 825 pstats->mac_stx[1].tx_stat_outxonsent_lo; 826 ADD_64(estats->pause_frames_sent_hi, 827 pstats->mac_stx[1].tx_stat_outxoffsent_hi, 828 estats->pause_frames_sent_lo, 829 pstats->mac_stx[1].tx_stat_outxoffsent_lo); 830 } 831 832 static int bnx2x_hw_stats_update(struct bnx2x *bp) 833 { 834 struct nig_stats *new = bnx2x_sp(bp, nig_stats); 835 struct nig_stats *old = &(bp->port.old_nig_stats); 836 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); 837 struct bnx2x_eth_stats *estats = &bp->eth_stats; 838 struct { 839 u32 lo; 840 
u32 hi; 841 } diff; 842 843 switch (bp->link_vars.mac_type) { 844 case MAC_TYPE_BMAC: 845 bnx2x_bmac_stats_update(bp); 846 break; 847 848 case MAC_TYPE_EMAC: 849 bnx2x_emac_stats_update(bp); 850 break; 851 852 case MAC_TYPE_UMAC: 853 case MAC_TYPE_XMAC: 854 bnx2x_mstat_stats_update(bp); 855 break; 856 857 case MAC_TYPE_NONE: /* unreached */ 858 DP(BNX2X_MSG_STATS, 859 "stats updated by DMAE but no MAC active\n"); 860 return -1; 861 862 default: /* unreached */ 863 BNX2X_ERR("Unknown MAC type\n"); 864 } 865 866 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, 867 new->brb_discard - old->brb_discard); 868 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, 869 new->brb_truncate - old->brb_truncate); 870 871 if (!CHIP_IS_E3(bp)) { 872 UPDATE_STAT64_NIG(egress_mac_pkt0, 873 etherstatspkts1024octetsto1522octets); 874 UPDATE_STAT64_NIG(egress_mac_pkt1, 875 etherstatspktsover1522octets); 876 } 877 878 memcpy(old, new, sizeof(struct nig_stats)); 879 880 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), 881 sizeof(struct mac_stx)); 882 estats->brb_drop_hi = pstats->brb_drop_hi; 883 estats->brb_drop_lo = pstats->brb_drop_lo; 884 885 pstats->host_port_stats_counter++; 886 887 if (CHIP_IS_E3(bp)) { 888 u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1 889 : MISC_REG_CPMU_LP_SM_ENT_CNT_P0; 890 estats->eee_tx_lpi += REG_RD(bp, lpi_reg); 891 } 892 893 if (!BP_NOMCP(bp)) { 894 u32 nig_timer_max = 895 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); 896 if (nig_timer_max != estats->nig_timer_max) { 897 estats->nig_timer_max = nig_timer_max; 898 BNX2X_ERR("NIG timer max (%u)\n", 899 estats->nig_timer_max); 900 } 901 } 902 903 return 0; 904 } 905 906 static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) 907 { 908 struct stats_counter *counters = &bp->fw_stats_data->storm_counters; 909 u16 cur_stats_counter; 910 /* Make sure we use the value of the counter 911 * used for sending the last stats ramrod. 
912 */ 913 cur_stats_counter = bp->stats_counter - 1; 914 915 /* are storm stats valid? */ 916 if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { 917 DP(BNX2X_MSG_STATS, 918 "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n", 919 le16_to_cpu(counters->xstats_counter), bp->stats_counter); 920 return -EAGAIN; 921 } 922 923 if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { 924 DP(BNX2X_MSG_STATS, 925 "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n", 926 le16_to_cpu(counters->ustats_counter), bp->stats_counter); 927 return -EAGAIN; 928 } 929 930 if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { 931 DP(BNX2X_MSG_STATS, 932 "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n", 933 le16_to_cpu(counters->cstats_counter), bp->stats_counter); 934 return -EAGAIN; 935 } 936 937 if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { 938 DP(BNX2X_MSG_STATS, 939 "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n", 940 le16_to_cpu(counters->tstats_counter), bp->stats_counter); 941 return -EAGAIN; 942 } 943 return 0; 944 } 945 946 static int bnx2x_storm_stats_update(struct bnx2x *bp) 947 { 948 struct tstorm_per_port_stats *tport = 949 &bp->fw_stats_data->port.tstorm_port_statistics; 950 struct tstorm_per_pf_stats *tfunc = 951 &bp->fw_stats_data->pf.tstorm_pf_statistics; 952 struct host_func_stats *fstats = &bp->func_stats; 953 struct bnx2x_eth_stats *estats = &bp->eth_stats; 954 struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old; 955 int i; 956 957 /* vfs stat counter is managed by pf */ 958 if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp)) 959 return -EAGAIN; 960 961 estats->error_bytes_received_hi = 0; 962 estats->error_bytes_received_lo = 0; 963 964 for_each_eth_queue(bp, i) { 965 struct bnx2x_fastpath *fp = &bp->fp[i]; 966 struct tstorm_per_queue_stats *tclient = 967 
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient =
			&bnx2x_fp_stats(bp, fp)->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient =
			&bnx2x_fp_stats(bp, fp)->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient =
			&bnx2x_fp_stats(bp, fp)->old_xclient;
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		/* scratch variable used by the UPDATE_*/SUB_* macros */
		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		/* RX byte counters, split by cast type */
		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
			qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
			qstats->total_bytes_received_lo;

		/* RX packet counters, split by cast type */
		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
				    total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
				    total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
				    total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts, 32);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);

		/* Packets dropped for lack of buffers were counted as
		 * received above - subtract them back out, then account
		 * them as no_buff discards.
		 */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
				 total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
				 total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
				 total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* TX byte counters, split by cast type */
		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
			qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
			qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		/* TX packet counters, split by cast type */
		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
				    total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
				    total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
				    total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		/* Mirror the per-queue totals into the per-function stats */
		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	/* Error bytes are reported by the MAC / tstorm PF stats, not by the
	 * per-queue counters - add them on top of the queue totals.
	 */
	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64_LE(estats->total_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->total_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	ADD_64_LE(estats->error_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->error_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		/* Only the PMF maintains the port-wide FW discard counters */
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	/* Mark the func_stats snapshot as consistent (start == end) */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

/* Translate the driver's accumulated 64-bit ethernet statistics into the
 * standard struct net_device_stats exposed to the network stack.
 */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* rx_dropped = MAC discards + per-queue checksum discards +
	 * drops remembered from before the last stats reset
	 */
	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i) {
		struct tstorm_per_queue_stats *old_tclient =
			&bp->fp_stats[i].old_tclient;
		tmp += le32_to_cpu(old_tclient->checksum_discard);
	}
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

/* Fold the per-queue driver-maintained (software) counters into the global
 * ethernet statistics.
 */
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
		UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt);
	}
}

/* Returns true when the edebug tool has requested (via shmem2) that
 * statistics collection be suspended.
 */
static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

static void bnx2x_stats_update(struct bnx2x
*bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* we run update from timer context, so give up
	 * if somebody is in the middle of transition
	 */
	if (down_trylock(&bp->stats_sema))
		return;

	if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
		goto out;

	if (IS_PF(bp)) {
		/* Wait for the previous HW-stats DMAE to complete */
		if (*stats_comp != DMAE_COMP_VAL)
			goto out;

		if (bp->port.pmf)
			bnx2x_hw_stats_update(bp);

		if (bnx2x_storm_stats_update(bp)) {
			/* Storms did not complete the last ramrod; panic
			 * after three consecutive misses.
			 */
			if (bp->stats_pending++ == 3) {
				BNX2X_ERR("storm stats were not updated for 3 times\n");
				bnx2x_panic();
			}
			goto out;
		}
	} else {
		/* vf doesn't collect HW statistics, and doesn't get completions
		 * perform only update
		 */
		bnx2x_storm_stats_update(bp);
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* vf is done */
	if (IS_VF(bp))
		goto out;

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	/* Kick off the next HW DMAE and stats ramrod */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);

out:
	up(&bp->stats_sema);
}

/* Build (and for the port part, immediately chain) the DMAE commands that
 * flush the final port and function statistics snapshots to the MFW
 * scratchpad areas.  The commands are posted later via bnx2x_hw_stats_post().
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* When a function-stats DMAE follows, complete this one to
		 * GRC so it chains; otherwise complete to PCI memory.
		 */
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

/* Stop statistics collection: do one final update and, for the PMF, flush
 * the last port snapshot to the MFW.
 * NOTE(review): on down_timeout() failure this logs and continues, then
 * calls up() on a semaphore it never acquired - presumably deliberate
 * best-effort at teardown, but worth confirming against later upstream
 * reworks of the stats locking.
 */
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");

	bp->stats_started = false;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	up(&bp->stats_sema);
}

/* No-op action for state machine entries with nothing to do */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

/* Statistics state machine: indexed by [current state][event], yields the
 * action to run and the next state.
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
}
bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

/* Drive the statistics state machine: atomically (under stats_lock) read
 * the current state, transition to the next one, then run the associated
 * action outside the lock.
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;
	void (*action)(struct bnx2x *bp);
	if (unlikely(bp->panic))
		return;

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	action = bnx2x_stats_stm[state][event].action;
	spin_unlock_bh(&bp->stats_lock);

	action(bp);

	/* UPDATE events fire every timer tick - only log them on request */
	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

/* DMA the initial (zeroed) port statistics buffer to the MFW scratchpad so
 * the management FW starts from a clean baseline.  PMF-only.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* This function will prepare the statistics ramrod data the way
 * we will only have to increment the statistics counter and
 * send the ramrod each time we have to.
 */
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare to the first stats ramrod (will be completed with
	 * the counters equal to zero) - init counters to something different.
1494 */ 1495 memset(&bp->fw_stats_data->storm_counters, 0xff, 1496 sizeof(struct stats_counter)); 1497 1498 /**** Port FW statistics data ****/ 1499 cur_data_offset = bp->fw_stats_data_mapping + 1500 offsetof(struct bnx2x_fw_stats_data, port); 1501 1502 cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX]; 1503 1504 cur_query_entry->kind = STATS_TYPE_PORT; 1505 /* For port query index is a DONT CARE */ 1506 cur_query_entry->index = BP_PORT(bp); 1507 /* For port query funcID is a DONT CARE */ 1508 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); 1509 cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); 1510 cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); 1511 1512 /**** PF FW statistics data ****/ 1513 cur_data_offset = bp->fw_stats_data_mapping + 1514 offsetof(struct bnx2x_fw_stats_data, pf); 1515 1516 cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX]; 1517 1518 cur_query_entry->kind = STATS_TYPE_PF; 1519 /* For PF query index is a DONT CARE */ 1520 cur_query_entry->index = BP_PORT(bp); 1521 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); 1522 cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); 1523 cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); 1524 1525 /**** FCoE FW statistics data ****/ 1526 if (!NO_FCOE(bp)) { 1527 cur_data_offset = bp->fw_stats_data_mapping + 1528 offsetof(struct bnx2x_fw_stats_data, fcoe); 1529 1530 cur_query_entry = 1531 &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX]; 1532 1533 cur_query_entry->kind = STATS_TYPE_FCOE; 1534 /* For FCoE query index is a DONT CARE */ 1535 cur_query_entry->index = BP_PORT(bp); 1536 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); 1537 cur_query_entry->address.hi = 1538 cpu_to_le32(U64_HI(cur_data_offset)); 1539 cur_query_entry->address.lo = 1540 cpu_to_le32(U64_LO(cur_data_offset)); 1541 } 1542 1543 /**** Clients' queries ****/ 1544 cur_data_offset = bp->fw_stats_data_mapping + 1545 offsetof(struct 
bnx2x_fw_stats_data, queue_stats); 1546 1547 /* first queue query index depends whether FCoE offloaded request will 1548 * be included in the ramrod 1549 */ 1550 if (!NO_FCOE(bp)) 1551 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX; 1552 else 1553 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1; 1554 1555 for_each_eth_queue(bp, i) { 1556 cur_query_entry = 1557 &bp->fw_stats_req-> 1558 query[first_queue_query_index + i]; 1559 1560 cur_query_entry->kind = STATS_TYPE_QUEUE; 1561 cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]); 1562 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); 1563 cur_query_entry->address.hi = 1564 cpu_to_le32(U64_HI(cur_data_offset)); 1565 cur_query_entry->address.lo = 1566 cpu_to_le32(U64_LO(cur_data_offset)); 1567 1568 cur_data_offset += sizeof(struct per_queue_stats); 1569 } 1570 1571 /* add FCoE queue query if needed */ 1572 if (!NO_FCOE(bp)) { 1573 cur_query_entry = 1574 &bp->fw_stats_req-> 1575 query[first_queue_query_index + i]; 1576 1577 cur_query_entry->kind = STATS_TYPE_QUEUE; 1578 cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]); 1579 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); 1580 cur_query_entry->address.hi = 1581 cpu_to_le32(U64_HI(cur_data_offset)); 1582 cur_query_entry->address.lo = 1583 cpu_to_le32(U64_LO(cur_data_offset)); 1584 } 1585 } 1586 1587 void bnx2x_memset_stats(struct bnx2x *bp) 1588 { 1589 int i; 1590 1591 /* function stats */ 1592 for_each_queue(bp, i) { 1593 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i]; 1594 1595 memset(&fp_stats->old_tclient, 0, 1596 sizeof(fp_stats->old_tclient)); 1597 memset(&fp_stats->old_uclient, 0, 1598 sizeof(fp_stats->old_uclient)); 1599 memset(&fp_stats->old_xclient, 0, 1600 sizeof(fp_stats->old_xclient)); 1601 if (bp->stats_init) { 1602 memset(&fp_stats->eth_q_stats, 0, 1603 sizeof(fp_stats->eth_q_stats)); 1604 memset(&fp_stats->eth_q_stats_old, 0, 1605 sizeof(fp_stats->eth_q_stats_old)); 1606 } 1607 } 1608 1609 memset(&bp->dev->stats, 0, 
	       sizeof(bp->dev->stats));

	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}

/* Full statistics initialization: read the MFW scratchpad addresses,
 * snapshot the NIG baseline counters, prepare the FW stats ramrod data
 * and reset the driver bookkeeping.
 */
void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* pmf should retrieve port statistics from SP on a non-init*/
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	port = BP_PORT(bp);
	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		/* E1x/E2 only: the NIG egress MAC packet counters exist */
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	/* Clean SP from previous statistics */
	if (bp->stats_init) {
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bnx2x_memset_stats(bp);
}

/* Snapshot the statistics that must survive a function reload into the
 * '*_old' buffers so the totals stay monotonic across the reload.
 */
void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}

	/* save net_device_stats statistics */
	bp->net_stats_old.rx_dropped = nstats->rx_dropped;

	/* store port firmware statistics */
	if (bp->port.pmf && IS_MF(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT_OLD(mac_filter_discard);
		UPDATE_FW_STAT_OLD(mf_tag_discard);
1722 UPDATE_FW_STAT_OLD(brb_truncate_discard); 1723 UPDATE_FW_STAT_OLD(mac_discard); 1724 } 1725 } 1726 1727 void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, 1728 u32 stats_type) 1729 { 1730 int i; 1731 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; 1732 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1733 struct per_queue_stats *fcoe_q_stats = 1734 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]; 1735 1736 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 1737 &fcoe_q_stats->tstorm_queue_statistics; 1738 1739 struct ustorm_per_queue_stats *fcoe_q_ustorm_stats = 1740 &fcoe_q_stats->ustorm_queue_statistics; 1741 1742 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = 1743 &fcoe_q_stats->xstorm_queue_statistics; 1744 1745 struct fcoe_statistics_params *fw_fcoe_stat = 1746 &bp->fw_stats_data->fcoe; 1747 1748 memset(afex_stats, 0, sizeof(struct afex_stats)); 1749 1750 for_each_eth_queue(bp, i) { 1751 struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; 1752 1753 ADD_64(afex_stats->rx_unicast_bytes_hi, 1754 qstats->total_unicast_bytes_received_hi, 1755 afex_stats->rx_unicast_bytes_lo, 1756 qstats->total_unicast_bytes_received_lo); 1757 1758 ADD_64(afex_stats->rx_broadcast_bytes_hi, 1759 qstats->total_broadcast_bytes_received_hi, 1760 afex_stats->rx_broadcast_bytes_lo, 1761 qstats->total_broadcast_bytes_received_lo); 1762 1763 ADD_64(afex_stats->rx_multicast_bytes_hi, 1764 qstats->total_multicast_bytes_received_hi, 1765 afex_stats->rx_multicast_bytes_lo, 1766 qstats->total_multicast_bytes_received_lo); 1767 1768 ADD_64(afex_stats->rx_unicast_frames_hi, 1769 qstats->total_unicast_packets_received_hi, 1770 afex_stats->rx_unicast_frames_lo, 1771 qstats->total_unicast_packets_received_lo); 1772 1773 ADD_64(afex_stats->rx_broadcast_frames_hi, 1774 qstats->total_broadcast_packets_received_hi, 1775 afex_stats->rx_broadcast_frames_lo, 1776 qstats->total_broadcast_packets_received_lo); 1777 1778 
ADD_64(afex_stats->rx_multicast_frames_hi, 1779 qstats->total_multicast_packets_received_hi, 1780 afex_stats->rx_multicast_frames_lo, 1781 qstats->total_multicast_packets_received_lo); 1782 1783 /* sum to rx_frames_discarded all discraded 1784 * packets due to size, ttl0 and checksum 1785 */ 1786 ADD_64(afex_stats->rx_frames_discarded_hi, 1787 qstats->total_packets_received_checksum_discarded_hi, 1788 afex_stats->rx_frames_discarded_lo, 1789 qstats->total_packets_received_checksum_discarded_lo); 1790 1791 ADD_64(afex_stats->rx_frames_discarded_hi, 1792 qstats->total_packets_received_ttl0_discarded_hi, 1793 afex_stats->rx_frames_discarded_lo, 1794 qstats->total_packets_received_ttl0_discarded_lo); 1795 1796 ADD_64(afex_stats->rx_frames_discarded_hi, 1797 qstats->etherstatsoverrsizepkts_hi, 1798 afex_stats->rx_frames_discarded_lo, 1799 qstats->etherstatsoverrsizepkts_lo); 1800 1801 ADD_64(afex_stats->rx_frames_dropped_hi, 1802 qstats->no_buff_discard_hi, 1803 afex_stats->rx_frames_dropped_lo, 1804 qstats->no_buff_discard_lo); 1805 1806 ADD_64(afex_stats->tx_unicast_bytes_hi, 1807 qstats->total_unicast_bytes_transmitted_hi, 1808 afex_stats->tx_unicast_bytes_lo, 1809 qstats->total_unicast_bytes_transmitted_lo); 1810 1811 ADD_64(afex_stats->tx_broadcast_bytes_hi, 1812 qstats->total_broadcast_bytes_transmitted_hi, 1813 afex_stats->tx_broadcast_bytes_lo, 1814 qstats->total_broadcast_bytes_transmitted_lo); 1815 1816 ADD_64(afex_stats->tx_multicast_bytes_hi, 1817 qstats->total_multicast_bytes_transmitted_hi, 1818 afex_stats->tx_multicast_bytes_lo, 1819 qstats->total_multicast_bytes_transmitted_lo); 1820 1821 ADD_64(afex_stats->tx_unicast_frames_hi, 1822 qstats->total_unicast_packets_transmitted_hi, 1823 afex_stats->tx_unicast_frames_lo, 1824 qstats->total_unicast_packets_transmitted_lo); 1825 1826 ADD_64(afex_stats->tx_broadcast_frames_hi, 1827 qstats->total_broadcast_packets_transmitted_hi, 1828 afex_stats->tx_broadcast_frames_lo, 1829 
qstats->total_broadcast_packets_transmitted_lo); 1830 1831 ADD_64(afex_stats->tx_multicast_frames_hi, 1832 qstats->total_multicast_packets_transmitted_hi, 1833 afex_stats->tx_multicast_frames_lo, 1834 qstats->total_multicast_packets_transmitted_lo); 1835 1836 ADD_64(afex_stats->tx_frames_dropped_hi, 1837 qstats->total_transmitted_dropped_packets_error_hi, 1838 afex_stats->tx_frames_dropped_lo, 1839 qstats->total_transmitted_dropped_packets_error_lo); 1840 } 1841 1842 /* now add FCoE statistics which are collected separately 1843 * (both offloaded and non offloaded) 1844 */ 1845 if (!NO_FCOE(bp)) { 1846 ADD_64_LE(afex_stats->rx_unicast_bytes_hi, 1847 LE32_0, 1848 afex_stats->rx_unicast_bytes_lo, 1849 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); 1850 1851 ADD_64_LE(afex_stats->rx_unicast_bytes_hi, 1852 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, 1853 afex_stats->rx_unicast_bytes_lo, 1854 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); 1855 1856 ADD_64_LE(afex_stats->rx_broadcast_bytes_hi, 1857 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, 1858 afex_stats->rx_broadcast_bytes_lo, 1859 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); 1860 1861 ADD_64_LE(afex_stats->rx_multicast_bytes_hi, 1862 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, 1863 afex_stats->rx_multicast_bytes_lo, 1864 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); 1865 1866 ADD_64_LE(afex_stats->rx_unicast_frames_hi, 1867 LE32_0, 1868 afex_stats->rx_unicast_frames_lo, 1869 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); 1870 1871 ADD_64_LE(afex_stats->rx_unicast_frames_hi, 1872 LE32_0, 1873 afex_stats->rx_unicast_frames_lo, 1874 fcoe_q_tstorm_stats->rcv_ucast_pkts); 1875 1876 ADD_64_LE(afex_stats->rx_broadcast_frames_hi, 1877 LE32_0, 1878 afex_stats->rx_broadcast_frames_lo, 1879 fcoe_q_tstorm_stats->rcv_bcast_pkts); 1880 1881 ADD_64_LE(afex_stats->rx_multicast_frames_hi, 1882 LE32_0, 1883 afex_stats->rx_multicast_frames_lo, 1884 fcoe_q_tstorm_stats->rcv_ucast_pkts); 1885 1886 ADD_64_LE(afex_stats->rx_frames_discarded_hi, 1887 LE32_0, 1888 
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->checksum_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->pkts_too_big_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->ttl0_discard);

		/* no_buff_discard is a 16-bit storm counter */
		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
			    LE16_0,
			    afex_stats->rx_frames_dropped_lo,
			    fcoe_q_tstorm_stats->no_buff_discard);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->tx_unicast_bytes_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
			  afex_stats->tx_unicast_bytes_lo,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
			  afex_stats->tx_broadcast_bytes_lo,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
			  afex_stats->tx_multicast_bytes_lo,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->tx_broadcast_frames_lo,
			  fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_multicast_frames_lo,
			  fcoe_q_xstorm_stats->mcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->tx_frames_dropped_lo,
			  fcoe_q_xstorm_stats->error_drop_pkts);
	}

	/* if port stats are requested, add them to the PMF
	 * stats, as anyway they will be accumulated by the
	 * MCP before sent to the switch
	 */
	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->mac_filter_discard);
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->brb_truncate_discard);
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       0,
		       afex_stats->rx_frames_discarded_lo,
		       estats->mac_discard);
	}
}

/* Run @func_to_exec with statistics activity quiesced: wait for pending
 * completions, execute the callback, then restart statistics.
 * NOTE(review): on down_timeout() failure this logs and proceeds anyway,
 * and the final up() then releases a semaphore that was never acquired -
 * presumably accepted as best-effort here, but worth confirming against
 * later upstream reworks of the stats locking.
 */
void bnx2x_stats_safe_exec(struct bnx2x *bp,
			   void (func_to_exec)(void *cookie),
			   void *cookie){
	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");
	bnx2x_stats_comp(bp);
	func_to_exec(cookie);
	__bnx2x_stats_start(bp);
	up(&bp->stats_sema);
}