// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/rtnetlink.h>

#include "fbnic.h"

/* Latch the current value of a 32-bit HW counter register as the new
 * baseline, so subsequent fbnic_hw_stat_rd32() calls accumulate only the
 * change since this reset.
 */
static void fbnic_hw_stat_rst32(struct fbnic_dev *fbd, u32 reg,
				struct fbnic_stat_counter *stat)
{
	/* We do not touch the "value" field here.
	 * It gets zeroed out on fbd structure allocation.
	 * After that we want it to grow continuously
	 * through device resets and power state changes.
	 */
	stat->u.old_reg_value_32 = rd32(fbd, reg);
}

/* Fold the change in a 32-bit HW counter since the last read into the
 * 64-bit software accumulator. The unsigned 32-bit subtraction yields the
 * correct delta across (at most one) register wraparound between reads.
 */
static void fbnic_hw_stat_rd32(struct fbnic_dev *fbd, u32 reg,
			       struct fbnic_stat_counter *stat)
{
	u32 new_reg_value;

	new_reg_value = rd32(fbd, reg);
	stat->value += new_reg_value - stat->u.old_reg_value_32;
	stat->u.old_reg_value_32 = new_reg_value;
}

/* Read a 64-bit HW counter that is split across two 32-bit registers.
 * @reg:    register holding the lower 32 bits
 * @offset: distance from @reg to the register holding the upper 32 bits
 *          (in register units; the warning below prints reg * 4, which
 *          suggests registers are word-indexed — byte address = reg * 4)
 *
 * The upper half is sampled before and after the lower half so that a
 * carry from lower into upper between the two reads can be detected.
 */
u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset)
{
	u32 prev_upper, upper, lower, diff;

	prev_upper = rd32(fbd, reg + offset);
	lower = rd32(fbd, reg);
	upper = rd32(fbd, reg + offset);

	diff = upper - prev_upper;
	if (!diff)
		/* Upper half stable across the reads: lower is coherent */
		return ((u64)upper << 32) | lower;

	if (diff > 1)
		dev_warn_once(fbd->dev,
			      "Stats inconsistent, upper 32b of %#010x updating too quickly\n",
			      reg * 4);

	/* Return only the upper bits as we cannot guarantee
	 * the accuracy of the lower bits. We will add them in
	 * when the counter slows down enough that we can get
	 * a snapshot with both upper values being the same
	 * between reads.
	 */
	return ((u64)upper << 32);
}

/* 64-bit counterpart of fbnic_hw_stat_rst32(): latch the split-register
 * counter value as the new baseline without touching the accumulator.
 */
static void fbnic_hw_stat_rst64(struct fbnic_dev *fbd, u32 reg, s32 offset,
				struct fbnic_stat_counter *stat)
{
	/* Record initial counter values and compute deltas from there to ensure
	 * stats start at 0 after reboot/reset. This avoids exposing absolute
	 * hardware counter values to userspace.
	 */
	stat->u.old_reg_value_64 = fbnic_stat_rd64(fbd, reg, offset);
}

/* 64-bit counterpart of fbnic_hw_stat_rd32(): accumulate the delta of a
 * split-register counter since the previous read.
 */
static void fbnic_hw_stat_rd64(struct fbnic_dev *fbd, u32 reg, s32 offset,
			       struct fbnic_stat_counter *stat)
{
	u64 new_reg_value;

	new_reg_value = fbnic_stat_rd64(fbd, reg, offset);
	stat->value += new_reg_value - stat->u.old_reg_value_64;
	stat->u.old_reg_value_64 = new_reg_value;
}

/* Re-baseline all TMI (Tx MAC interface, presumably — confirm against HW
 * docs) counters: drop frames/bytes and PTP timestamp counters.
 */
static void fbnic_reset_tmi_stats(struct fbnic_dev *fbd,
				  struct fbnic_tmi_stats *tmi)
{
	fbnic_hw_stat_rst32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);
	fbnic_hw_stat_rst64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);

	fbnic_hw_stat_rst32(fbd,
			    FBNIC_TMI_ILLEGAL_PTP_REQS,
			    &tmi->ptp_illegal_req);
	fbnic_hw_stat_rst32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
	fbnic_hw_stat_rst32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
}

/* Accumulate the 32-bit TMI counters only (64-bit byte counters are
 * handled separately by fbnic_get_tmi_stats()).
 */
static void fbnic_get_tmi_stats32(struct fbnic_dev *fbd,
				  struct fbnic_tmi_stats *tmi)
{
	fbnic_hw_stat_rd32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);

	fbnic_hw_stat_rd32(fbd,
			   FBNIC_TMI_ILLEGAL_PTP_REQS,
			   &tmi->ptp_illegal_req);
	fbnic_hw_stat_rd32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
	fbnic_hw_stat_rd32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
}

/* Accumulate the 64-bit (split-register) TMI counters. */
static void fbnic_get_tmi_stats(struct fbnic_dev *fbd,
				struct fbnic_tmi_stats *tmi)
{
	fbnic_hw_stat_rd64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);
}

/* Re-baseline the TTI drop counters (CM drop, frame drop, TBI drop). */
static void fbnic_reset_tti_stats(struct fbnic_dev *fbd,
				  struct fbnic_tti_stats *tti)
{
	fbnic_hw_stat_rst32(fbd,
			    FBNIC_TCE_TTI_CM_DROP_PKTS,
			    &tti->cm_drop.frames);
	fbnic_hw_stat_rst64(fbd,
			    FBNIC_TCE_TTI_CM_DROP_BYTE_L,
			    1,
			    &tti->cm_drop.bytes);

	fbnic_hw_stat_rst32(fbd,
			    FBNIC_TCE_TTI_FRAME_DROP_PKTS,
			    &tti->frame_drop.frames);
	fbnic_hw_stat_rst64(fbd,
			    FBNIC_TCE_TTI_FRAME_DROP_BYTE_L,
			    1,
			    &tti->frame_drop.bytes);

	fbnic_hw_stat_rst32(fbd,
			    FBNIC_TCE_TBI_DROP_PKTS,
			    &tti->tbi_drop.frames);
	fbnic_hw_stat_rst64(fbd,
			    FBNIC_TCE_TBI_DROP_BYTE_L,
			    1,
			    &tti->tbi_drop.bytes);
}

/* Accumulate the 32-bit TTI frame-drop counters. */
static void fbnic_get_tti_stats32(struct fbnic_dev *fbd,
				  struct fbnic_tti_stats *tti)
{
	fbnic_hw_stat_rd32(fbd,
			   FBNIC_TCE_TTI_CM_DROP_PKTS,
			   &tti->cm_drop.frames);

	fbnic_hw_stat_rd32(fbd,
			   FBNIC_TCE_TTI_FRAME_DROP_PKTS,
			   &tti->frame_drop.frames);

	fbnic_hw_stat_rd32(fbd,
			   FBNIC_TCE_TBI_DROP_PKTS,
			   &tti->tbi_drop.frames);
}

/* Accumulate the 64-bit TTI byte-drop counters. */
static void fbnic_get_tti_stats(struct fbnic_dev *fbd,
				struct fbnic_tti_stats *tti)
{
	fbnic_hw_stat_rd64(fbd,
			   FBNIC_TCE_TTI_CM_DROP_BYTE_L,
			   1,
			   &tti->cm_drop.bytes);

	fbnic_hw_stat_rd64(fbd,
			   FBNIC_TCE_TTI_FRAME_DROP_BYTE_L,
			   1,
			   &tti->frame_drop.bytes);

	fbnic_hw_stat_rd64(fbd,
			   FBNIC_TCE_TBI_DROP_BYTE_L,
			   1,
			   &tti->tbi_drop.bytes);
}

/* Re-baseline the RPC (Rx parser/classifier, presumably — confirm)
 * exception counters. All are 32-bit.
 */
static void fbnic_reset_rpc_stats(struct fbnic_dev *fbd,
				  struct fbnic_rpc_stats *rpc)
{
	fbnic_hw_stat_rst32(fbd,
			    FBNIC_RPC_CNTR_UNKN_ETYPE,
			    &rpc->unkn_etype);
	fbnic_hw_stat_rst32(fbd,
			    FBNIC_RPC_CNTR_UNKN_EXT_HDR,
			    &rpc->unkn_ext_hdr);
	fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV4_FRAG, &rpc->ipv4_frag);
	fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV6_FRAG, &rpc->ipv6_frag);
	fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV4_ESP, &rpc->ipv4_esp);
	fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV6_ESP, &rpc->ipv6_esp);
	fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_TCP_OPT_ERR, &rpc->tcp_opt_err);
	fbnic_hw_stat_rst32(fbd,
			    FBNIC_RPC_CNTR_OUT_OF_HDR_ERR,
			    &rpc->out_of_hdr_err);
	fbnic_hw_stat_rst32(fbd,
			    FBNIC_RPC_CNTR_OVR_SIZE_ERR,
			    &rpc->ovr_size_err);
}

/* Accumulate all RPC exception counters (32-bit only; RPC has no
 * split-register 64-bit counters in this file).
 */
static void fbnic_get_rpc_stats32(struct fbnic_dev *fbd,
				  struct fbnic_rpc_stats *rpc)
{
	fbnic_hw_stat_rd32(fbd,
			   FBNIC_RPC_CNTR_UNKN_ETYPE,
			   &rpc->unkn_etype);
	fbnic_hw_stat_rd32(fbd,
			   FBNIC_RPC_CNTR_UNKN_EXT_HDR,
			   &rpc->unkn_ext_hdr);

	fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV4_FRAG, &rpc->ipv4_frag);
	fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV6_FRAG, &rpc->ipv6_frag);

	fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV4_ESP, &rpc->ipv4_esp);
	fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV6_ESP, &rpc->ipv6_esp);

	fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_TCP_OPT_ERR, &rpc->tcp_opt_err);
	fbnic_hw_stat_rd32(fbd,
			   FBNIC_RPC_CNTR_OUT_OF_HDR_ERR,
			   &rpc->out_of_hdr_err);
	fbnic_hw_stat_rd32(fbd,
			   FBNIC_RPC_CNTR_OVR_SIZE_ERR,
			   &rpc->ovr_size_err);
}

/* Re-baseline one RXB FIFO's drop/truncate/transition counters.
 * @i: FIFO index, 0 .. FBNIC_RXB_FIFO_INDICES - 1
 */
static void fbnic_reset_rxb_fifo_stats(struct fbnic_dev *fbd, int i,
				       struct fbnic_rxb_fifo_stats *fifo)
{
	fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DROP_FRMS_STS(i),
			    &fifo->drop.frames);
	fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1,
			    &fifo->drop.bytes);

	fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i),
			    &fifo->trunc.frames);
	fbnic_hw_stat_rst64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1,
			    &fifo->trunc.bytes);

	fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_DROP_STS(i),
			    &fifo->trans_drop);
	fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_ECN_STS(i),
			    &fifo->trans_ecn);

	/* level is reported as an instantaneous gauge (see
	 * fbnic_get_rxb_fifo_stats32()), not a delta-accumulated counter,
	 * so only its baseline field is cleared here.
	 */
	fifo->level.u.old_reg_value_32 = 0;
}

/* Re-baseline one RXB enqueue interface's counters.
 * @i: interface index, 0 .. FBNIC_RXB_INTF_INDICES - 1
 */
static void fbnic_reset_rxb_enq_stats(struct fbnic_dev *fbd, int i,
				      struct fbnic_rxb_enqueue_stats *enq)
{
	fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i),
			    &enq->drbo.frames);
	/* Note: byte counter high/low registers are 4 apart here, unlike
	 * the offset-1 pairs used elsewhere in this file.
	 */
	fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4,
			    &enq->drbo.bytes);

	fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTEGRITY_ERR(i),
			    &enq->integrity_err);
	fbnic_hw_stat_rst32(fbd, FBNIC_RXB_MAC_ERR(i),
			    &enq->mac_err);
	fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PARSER_ERR(i),
			    &enq->parser_err);
	fbnic_hw_stat_rst32(fbd, FBNIC_RXB_FRM_ERR(i),
			    &enq->frm_err);
}

/* Re-baseline one RXB dequeue interface's frame/byte counters. */
static void fbnic_reset_rxb_deq_stats(struct fbnic_dev *fbd, int i,
				      struct fbnic_rxb_dequeue_stats *deq)
{
	fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i),
			    &deq->intf.frames);
	fbnic_hw_stat_rst64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4,
			    &deq->intf.bytes);

	fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i),
			    &deq->pbuf.frames);
	fbnic_hw_stat_rst64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4,
			    &deq->pbuf.bytes);
}

/* Re-baseline every RXB FIFO and enqueue/dequeue interface. */
static void fbnic_reset_rxb_stats(struct fbnic_dev *fbd,
				  struct fbnic_rxb_stats *rxb)
{
	int i;

	for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
		fbnic_reset_rxb_fifo_stats(fbd, i, &rxb->fifo[i]);

	for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
		fbnic_reset_rxb_enq_stats(fbd, i, &rxb->enq[i]);
		fbnic_reset_rxb_deq_stats(fbd, i, &rxb->deq[i]);
	}
}

/* Accumulate 32-bit counters for one RXB FIFO, and snapshot the current
 * packet-buffer FIFO level as an absolute value.
 */
static void fbnic_get_rxb_fifo_stats32(struct fbnic_dev *fbd, int i,
				       struct fbnic_rxb_fifo_stats *fifo)
{
	fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DROP_FRMS_STS(i),
			   &fifo->drop.frames);
	fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i),
			   &fifo->trunc.frames);

	fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_DROP_STS(i),
			   &fifo->trans_drop);
	fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_ECN_STS(i),
			   &fifo->trans_ecn);

	/* Gauge, not a counter: overwrite rather than accumulate */
	fifo->level.value = rd32(fbd, FBNIC_RXB_PBUF_FIFO_LEVEL(i));
}

/* Full (64-bit + 32-bit) stats collection for one RXB FIFO. */
static void fbnic_get_rxb_fifo_stats(struct fbnic_dev *fbd, int i,
				     struct fbnic_rxb_fifo_stats *fifo)
{
	fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1,
			   &fifo->drop.bytes);
	fbnic_hw_stat_rd64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1,
			   &fifo->trunc.bytes);

	fbnic_get_rxb_fifo_stats32(fbd, i, fifo);
}

/* Accumulate 32-bit counters for one RXB enqueue interface. */
static void fbnic_get_rxb_enq_stats32(struct fbnic_dev *fbd, int i,
				      struct fbnic_rxb_enqueue_stats *enq)
{
	fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i),
			   &enq->drbo.frames);

	fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTEGRITY_ERR(i),
			   &enq->integrity_err);
	fbnic_hw_stat_rd32(fbd, FBNIC_RXB_MAC_ERR(i),
			   &enq->mac_err);
	fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PARSER_ERR(i),
			   &enq->parser_err);
	fbnic_hw_stat_rd32(fbd, FBNIC_RXB_FRM_ERR(i),
			   &enq->frm_err);
}

/* Full (64-bit + 32-bit) stats collection for one RXB enqueue interface. */
static void fbnic_get_rxb_enq_stats(struct fbnic_dev *fbd, int i,
				    struct fbnic_rxb_enqueue_stats *enq)
{
	fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4,
			   &enq->drbo.bytes);

	fbnic_get_rxb_enq_stats32(fbd, i, enq);
}

/* Accumulate 32-bit counters for one RXB dequeue interface. */
static void fbnic_get_rxb_deq_stats32(struct fbnic_dev *fbd, int i,
				      struct fbnic_rxb_dequeue_stats *deq)
{
	fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i),
			   &deq->intf.frames);
	fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i),
			   &deq->pbuf.frames);
}

/* Full (64-bit + 32-bit) stats collection for one RXB dequeue interface. */
static void fbnic_get_rxb_deq_stats(struct fbnic_dev *fbd, int i,
				    struct fbnic_rxb_dequeue_stats *deq)
{
	fbnic_hw_stat_rd64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4,
			   &deq->intf.bytes);
	fbnic_hw_stat_rd64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4,
			   &deq->pbuf.bytes);

	fbnic_get_rxb_deq_stats32(fbd, i, deq);
}

/* 32-bit-only sweep of all RXB FIFOs and interfaces. */
static void fbnic_get_rxb_stats32(struct fbnic_dev *fbd,
				  struct fbnic_rxb_stats *rxb)
{
	int i;

	for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
		fbnic_get_rxb_fifo_stats32(fbd, i, &rxb->fifo[i]);

	for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
		fbnic_get_rxb_enq_stats32(fbd, i, &rxb->enq[i]);
		fbnic_get_rxb_deq_stats32(fbd, i, &rxb->deq[i]);
	}
}

/* Full sweep of all RXB FIFOs and interfaces (64-bit + 32-bit). */
static void fbnic_get_rxb_stats(struct fbnic_dev *fbd,
				struct fbnic_rxb_stats *rxb)
{
	int i;

	for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
		fbnic_get_rxb_fifo_stats(fbd, i, &rxb->fifo[i]);

	for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
		fbnic_get_rxb_enq_stats(fbd, i, &rxb->enq[i]);
		fbnic_get_rxb_deq_stats(fbd, i, &rxb->deq[i]);
	}
}

/* Re-baseline the per-Rx-queue RDE error/drop counters.
 * @hw_q: array of at least fbd->max_num_queues entries (advanced per loop)
 */
static void fbnic_reset_hw_rxq_stats(struct fbnic_dev *fbd,
				     struct fbnic_hw_q_stats *hw_q)
{
	int i;

	for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
		u32 base = FBNIC_QUEUE(i);

		fbnic_hw_stat_rst32(fbd,
				    base + FBNIC_QUEUE_RDE_PKT_ERR_CNT,
				    &hw_q->rde_pkt_err);
		fbnic_hw_stat_rst32(fbd,
				    base + FBNIC_QUEUE_RDE_CQ_DROP_CNT,
				    &hw_q->rde_pkt_cq_drop);
		fbnic_hw_stat_rst32(fbd,
				    base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT,
				    &hw_q->rde_pkt_bdq_drop);
	}
}

/* Accumulate the per-Rx-queue RDE error/drop counters.
 * Caller must hold fbd->hw_stats.lock (see fbnic_get_hw_q_stats()).
 */
static void fbnic_get_hw_rxq_stats32(struct fbnic_dev *fbd,
				     struct fbnic_hw_q_stats *hw_q)
{
	int i;

	for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
		u32 base = FBNIC_QUEUE(i);

		fbnic_hw_stat_rd32(fbd,
				   base + FBNIC_QUEUE_RDE_PKT_ERR_CNT,
				   &hw_q->rde_pkt_err);
		fbnic_hw_stat_rd32(fbd,
				   base + FBNIC_QUEUE_RDE_CQ_DROP_CNT,
				   &hw_q->rde_pkt_cq_drop);
		fbnic_hw_stat_rd32(fbd,
				   base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT,
				   &hw_q->rde_pkt_bdq_drop);
	}
}

/* Public entry point: update per-queue stats under the hw_stats lock. */
void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
			  struct fbnic_hw_q_stats *hw_q)
{
	spin_lock(&fbd->hw_stats.lock);
	fbnic_get_hw_rxq_stats32(fbd, hw_q);
	spin_unlock(&fbd->hw_stats.lock);
}

/* Re-baseline the PCIe PUL user outbound TLP/dword counters and the
 * read-starvation debug counters (no tag / no completion credit / no
 * non-posted credit).
 */
static void fbnic_reset_pcie_stats_asic(struct fbnic_dev *fbd,
					struct fbnic_pcie_stats *pcie)
{
	fbnic_hw_stat_rst64(fbd,
			    FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0,
			    1,
			    &pcie->ob_rd_tlp);
	fbnic_hw_stat_rst64(fbd,
			    FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0,
			    1,
			    &pcie->ob_rd_dword);
	fbnic_hw_stat_rst64(fbd,
			    FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0,
			    1,
			    &pcie->ob_cpl_tlp);
	fbnic_hw_stat_rst64(fbd,
			    FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0,
			    1,
			    &pcie->ob_cpl_dword);
	fbnic_hw_stat_rst64(fbd,
			    FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0,
			    1,
			    &pcie->ob_wr_tlp);
	fbnic_hw_stat_rst64(fbd,
			    FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0,
			    1,
			    &pcie->ob_wr_dword);

	fbnic_hw_stat_rst64(fbd,
			    FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0,
			    1,
			    &pcie->ob_rd_no_tag);
	fbnic_hw_stat_rst64(fbd,
			    FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0,
			    1,
			    &pcie->ob_rd_no_cpl_cred);
	fbnic_hw_stat_rst64(fbd,
			    FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0,
			    1,
			    &pcie->ob_rd_no_np_cred);
}

/* Accumulate the PCIe PUL user outbound counters (all 64-bit). */
static void fbnic_get_pcie_stats_asic64(struct fbnic_dev *fbd,
					struct fbnic_pcie_stats *pcie)
{
	fbnic_hw_stat_rd64(fbd,
			   FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0,
			   1,
			   &pcie->ob_rd_tlp);
	fbnic_hw_stat_rd64(fbd,
			   FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0,
			   1,
			   &pcie->ob_rd_dword);
	fbnic_hw_stat_rd64(fbd,
			   FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0,
			   1,
			   &pcie->ob_wr_tlp);
	fbnic_hw_stat_rd64(fbd,
			   FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0,
			   1,
			   &pcie->ob_wr_dword);
	fbnic_hw_stat_rd64(fbd,
			   FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0,
			   1,
			   &pcie->ob_cpl_tlp);
	fbnic_hw_stat_rd64(fbd,
			   FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0,
			   1,
			   &pcie->ob_cpl_dword);

	fbnic_hw_stat_rd64(fbd,
			   FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0,
			   1,
			   &pcie->ob_rd_no_tag);
	fbnic_hw_stat_rd64(fbd,
			   FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0,
			   1,
			   &pcie->ob_rd_no_cpl_cred);
	fbnic_hw_stat_rd64(fbd,
			   FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0,
			   1,
			   &pcie->ob_rd_no_np_cred);
}

/* Reset PHY (FEC + PCS) stats via the MAC ops; the second argument
 * selects reset (true) vs. accumulate (false) in the callee.
 */
static void fbnic_reset_phy_stats(struct fbnic_dev *fbd,
				  struct fbnic_phy_stats *phy_stats)
{
	const struct fbnic_mac *mac = fbd->mac;

	mac->get_fec_stats(fbd, true, &phy_stats->fec);
	mac->get_pcs_stats(fbd, true, &phy_stats->pcs);
}

/* Accumulate PHY (FEC + PCS) stats via the MAC ops. */
static void fbnic_get_phy_stats32(struct fbnic_dev *fbd,
				  struct fbnic_phy_stats *phy_stats)
{
	const struct fbnic_mac *mac = fbd->mac;

	mac->get_fec_stats(fbd, false, &phy_stats->fec);
	mac->get_pcs_stats(fbd, false, &phy_stats->pcs);
}

/* Reset all MAC stat groups (eth MAC, pause, eth ctrl, RMON) via MAC ops. */
static void fbnic_reset_hw_mac_stats(struct fbnic_dev *fbd,
				     struct fbnic_mac_stats *mac_stats)
{
	const struct fbnic_mac *mac = fbd->mac;

	mac->get_eth_mac_stats(fbd, true, &mac_stats->eth_mac);
	mac->get_pause_stats(fbd, true, &mac_stats->pause);
	mac->get_eth_ctrl_stats(fbd, true, &mac_stats->eth_ctrl);
	mac->get_rmon_stats(fbd, true, &mac_stats->rmon);
}

/* Re-baseline every HW stats group. Non-MAC groups are reset under the
 * hw_stats spinlock; MAC stats rely on the RTNL (see comment below).
 */
void fbnic_reset_hw_stats(struct fbnic_dev *fbd)
{
	spin_lock(&fbd->hw_stats.lock);
	fbnic_reset_phy_stats(fbd, &fbd->hw_stats.phy);
	fbnic_reset_tmi_stats(fbd, &fbd->hw_stats.tmi);
	fbnic_reset_tti_stats(fbd, &fbd->hw_stats.tti);
	fbnic_reset_rpc_stats(fbd, &fbd->hw_stats.rpc);
	fbnic_reset_rxb_stats(fbd, &fbd->hw_stats.rxb);
	fbnic_reset_hw_rxq_stats(fbd, fbd->hw_stats.hw_q);
	fbnic_reset_pcie_stats_asic(fbd, &fbd->hw_stats.pcie);
	spin_unlock(&fbd->hw_stats.lock);

	/* Once registered, the only other access to MAC stats is via the
	 * ethtool API which is protected by the rtnl_lock. The call to
	 * fbnic_reset_hw_stats() during PCI recovery is also protected
	 * by the rtnl_lock hence, we don't need the spinlock to access
	 * the MAC stats.
	 */
	if (fbd->netdev)
		ASSERT_RTNL();
	fbnic_reset_hw_mac_stats(fbd, &fbd->hw_stats.mac);
}

/* One-time init: set up the stats lock, then take initial baselines. */
void fbnic_init_hw_stats(struct fbnic_dev *fbd)
{
	spin_lock_init(&fbd->hw_stats.lock);

	fbnic_reset_hw_stats(fbd);
}

/* Accumulate all 32-bit stats groups. Caller holds fbd->hw_stats.lock. */
static void __fbnic_get_hw_stats32(struct fbnic_dev *fbd)
{
	fbnic_get_phy_stats32(fbd, &fbd->hw_stats.phy);
	fbnic_get_tmi_stats32(fbd, &fbd->hw_stats.tmi);
	fbnic_get_tti_stats32(fbd, &fbd->hw_stats.tti);
	fbnic_get_rpc_stats32(fbd, &fbd->hw_stats.rpc);
	fbnic_get_rxb_stats32(fbd, &fbd->hw_stats.rxb);
	fbnic_get_hw_rxq_stats32(fbd, fbd->hw_stats.hw_q);
}

/* Public entry point: 32-bit stats refresh under the hw_stats lock. */
void fbnic_get_hw_stats32(struct fbnic_dev *fbd)
{
	spin_lock(&fbd->hw_stats.lock);
	__fbnic_get_hw_stats32(fbd);
	spin_unlock(&fbd->hw_stats.lock);
}

/* Public entry point: full stats refresh (32-bit groups plus the 64-bit
 * split-register groups) under the hw_stats lock.
 */
void fbnic_get_hw_stats(struct fbnic_dev *fbd)
{
	spin_lock(&fbd->hw_stats.lock);
	__fbnic_get_hw_stats32(fbd);

	fbnic_get_tmi_stats(fbd, &fbd->hw_stats.tmi);
	fbnic_get_tti_stats(fbd, &fbd->hw_stats.tti);
	fbnic_get_rxb_stats(fbd, &fbd->hw_stats.rxb);
	fbnic_get_pcie_stats_asic64(fbd, &fbd->hw_stats.pcie);
	spin_unlock(&fbd->hw_stats.lock);
}