1 /******************************************************************* 2 * This file is part of the Emulex RoCE Device Driver for * 3 * RoCE (RDMA over Converged Ethernet) adapters. * 4 * Copyright (C) 2008-2014 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * * 8 * This program is free software; you can redistribute it and/or * 9 * modify it under the terms of version 2 of the GNU General * 10 * Public License as published by the Free Software Foundation. * 11 * This program is distributed in the hope that it will be useful. * 12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 * more details, a copy of which can be found in the file COPYING * 18 * included with this package. * 19 * 20 * Contact Information: 21 * linux-drivers@emulex.com 22 * 23 * Emulex 24 * 3333 Susan Street 25 * Costa Mesa, CA 92626 26 *******************************************************************/ 27 28 #include <rdma/ib_addr.h> 29 #include <rdma/ib_pma.h> 30 #include "ocrdma_stats.h" 31 32 static struct dentry *ocrdma_dbgfs_dir; 33 34 static int ocrdma_add_stat(char *start, char *pcur, 35 char *name, u64 count) 36 { 37 char buff[128] = {0}; 38 int cpy_len = 0; 39 40 snprintf(buff, 128, "%s: %llu\n", name, count); 41 cpy_len = strlen(buff); 42 43 if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) { 44 pr_err("%s: No space in stats buff\n", __func__); 45 return 0; 46 } 47 48 memcpy(pcur, buff, cpy_len); 49 return cpy_len; 50 } 51 52 static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev) 53 { 54 struct stats_mem *mem = &dev->stats_mem; 55 56 /* Alloc mbox command mem*/ 57 mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), 58 sizeof(struct ocrdma_rdma_stats_resp)); 59 60 mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size, 61 &mem->pa, GFP_KERNEL); 62 if (!mem->va) { 63 pr_err("%s: stats mbox allocation failed\n", __func__); 64 return false; 65 } 66 67 memset(mem->va, 0, mem->size); 68 69 /* Alloc debugfs mem */ 70 mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL); 71 if (!mem->debugfs_mem) { 72 pr_err("%s: stats debugfs mem allocation failed\n", __func__); 73 return false; 74 } 75 76 return true; 77 } 78 79 static void ocrdma_release_stats_mem(struct ocrdma_dev *dev) 80 { 81 struct stats_mem *mem = &dev->stats_mem; 82 83 if (mem->va) 84 dma_free_coherent(&dev->nic_info.pdev->dev, mem->size, 85 mem->va, mem->pa); 86 kfree(mem->debugfs_mem); 87 } 88 89 static char *ocrdma_resource_stats(struct ocrdma_dev *dev) 90 { 91 char *stats = dev->stats_mem.debugfs_mem, *pcur; 92 struct ocrdma_rdma_stats_resp *rdma_stats = 93 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 94 struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats; 95 96 memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 97 98 pcur = stats; 99 pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds", 100 (u64)rsrc_stats->dpp_pds); 101 pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds", 102 (u64)rsrc_stats->non_dpp_pds); 103 pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps", 104 (u64)rsrc_stats->rc_dpp_qps); 105 pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps", 106 (u64)rsrc_stats->uc_dpp_qps); 107 pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps", 108 (u64)rsrc_stats->ud_dpp_qps); 109 pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps", 110 (u64)rsrc_stats->rc_non_dpp_qps); 111 pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps", 112 (u64)rsrc_stats->uc_non_dpp_qps); 113 pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps", 114 (u64)rsrc_stats->ud_non_dpp_qps); 115 pcur += ocrdma_add_stat(stats, pcur, "active_srqs", 116 (u64)rsrc_stats->srqs); 117 pcur += ocrdma_add_stat(stats, pcur, "active_rbqs", 118 (u64)rsrc_stats->rbqs); 119 pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr", 120 (u64)rsrc_stats->r64K_nsmr); 121 pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr", 122 (u64)rsrc_stats->r64K_to_2M_nsmr); 123 pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr", 124 (u64)rsrc_stats->r2M_to_44M_nsmr); 125 pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr", 126 (u64)rsrc_stats->r44M_to_1G_nsmr); 127 pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr", 128 (u64)rsrc_stats->r1G_to_4G_nsmr); 129 pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G", 130 (u64)rsrc_stats->nsmr_count_4G_to_32G); 131 pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr", 132 (u64)rsrc_stats->r32G_to_64G_nsmr); 133 pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr", 134 (u64)rsrc_stats->r64G_to_128G_nsmr); 135 pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr", 136 (u64)rsrc_stats->r128G_to_higher_nsmr); 137 pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr", 138 (u64)rsrc_stats->embedded_nsmr); 139 pcur += ocrdma_add_stat(stats, pcur, "active_frmr", 140 (u64)rsrc_stats->frmr); 141 pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps", 142 (u64)rsrc_stats->prefetch_qps); 143 pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps", 144 (u64)rsrc_stats->ondemand_qps); 145 pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr", 146 (u64)rsrc_stats->phy_mr); 147 pcur += ocrdma_add_stat(stats, pcur, "active_mw", 148 (u64)rsrc_stats->mw); 149 150 /* Print the threshold stats */ 151 rsrc_stats = &rdma_stats->th_rsrc_stats; 152 153 pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds", 154 (u64)rsrc_stats->dpp_pds); 155 pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds", 156 (u64)rsrc_stats->non_dpp_pds); 157 pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps", 158 (u64)rsrc_stats->rc_dpp_qps); 159 pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps", 160 (u64)rsrc_stats->uc_dpp_qps); 161 pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps", 162 (u64)rsrc_stats->ud_dpp_qps); 163 pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps", 164 (u64)rsrc_stats->rc_non_dpp_qps); 165 pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps", 166 (u64)rsrc_stats->uc_non_dpp_qps); 167 pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps", 168 (u64)rsrc_stats->ud_non_dpp_qps); 169 pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs", 170 (u64)rsrc_stats->srqs); 171 pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs", 172 (u64)rsrc_stats->rbqs); 173 pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr", 174 (u64)rsrc_stats->r64K_nsmr); 175 pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr", 176 (u64)rsrc_stats->r64K_to_2M_nsmr); 177 pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr", 178 (u64)rsrc_stats->r2M_to_44M_nsmr); 179 pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr", 180 (u64)rsrc_stats->r44M_to_1G_nsmr); 181 pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr", 182 (u64)rsrc_stats->r1G_to_4G_nsmr); 183 pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G", 184 (u64)rsrc_stats->nsmr_count_4G_to_32G); 185 pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr", 186 (u64)rsrc_stats->r32G_to_64G_nsmr); 187 pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr", 188 (u64)rsrc_stats->r64G_to_128G_nsmr); 189 pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr", 190 (u64)rsrc_stats->r128G_to_higher_nsmr); 191 pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr", 192 (u64)rsrc_stats->embedded_nsmr); 193 pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr", 194 (u64)rsrc_stats->frmr); 195 pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps", 196 (u64)rsrc_stats->prefetch_qps); 197 pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps", 198 (u64)rsrc_stats->ondemand_qps); 199 pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr", 200 (u64)rsrc_stats->phy_mr); 201 pcur += ocrdma_add_stat(stats, pcur, "threshold_mw", 202 (u64)rsrc_stats->mw); 203 return stats; 204 } 205 206 static char *ocrdma_rx_stats(struct ocrdma_dev *dev) 207 { 208 char *stats = dev->stats_mem.debugfs_mem, *pcur; 209 struct ocrdma_rdma_stats_resp *rdma_stats = 210 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 211 struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; 212 213 memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 214 215 pcur = stats; 216 pcur += ocrdma_add_stat 217 (stats, pcur, "roce_frame_bytes", 218 convert_to_64bit(rx_stats->roce_frame_bytes_lo, 219 rx_stats->roce_frame_bytes_hi)); 220 pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops", 221 (u64)rx_stats->roce_frame_icrc_drops); 222 pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops", 223 (u64)rx_stats->roce_frame_payload_len_drops); 224 pcur += ocrdma_add_stat(stats, pcur, "ud_drops", 225 (u64)rx_stats->ud_drops); 226 pcur += ocrdma_add_stat(stats, pcur, "qp1_drops", 227 (u64)rx_stats->qp1_drops); 228 pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets", 229 (u64)rx_stats->psn_error_request_packets); 230 pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets", 231 (u64)rx_stats->psn_error_resp_packets); 232 pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts", 233 (u64)rx_stats->rnr_nak_timeouts); 234 pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives", 235 (u64)rx_stats->rnr_nak_receives); 236 pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops", 237 (u64)rx_stats->roce_frame_rxmt_drops); 238 pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors", 239 (u64)rx_stats->nak_count_psn_sequence_errors); 240 pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors", 241 (u64)rx_stats->rc_drop_count_lookup_errors); 242 pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks", 243 (u64)rx_stats->rq_rnr_naks); 244 pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks", 245 (u64)rx_stats->srq_rnr_naks); 246 pcur += ocrdma_add_stat(stats, pcur, "roce_frames", 247 convert_to_64bit(rx_stats->roce_frames_lo, 248 rx_stats->roce_frames_hi)); 249 250 return stats; 251 } 252 253 static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev) 254 { 255 struct ocrdma_rdma_stats_resp *rdma_stats = 256 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 257 struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; 258 259 return convert_to_64bit(rx_stats->roce_frames_lo, 260 rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops 261 + (u64)rx_stats->roce_frame_payload_len_drops; 262 } 263 264 static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev) 265 { 266 struct ocrdma_rdma_stats_resp *rdma_stats = 267 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 268 struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; 269 270 return (convert_to_64bit(rx_stats->roce_frame_bytes_lo, 271 rx_stats->roce_frame_bytes_hi))/4; 272 } 273 274 static char *ocrdma_tx_stats(struct ocrdma_dev *dev) 275 { 276 char *stats = dev->stats_mem.debugfs_mem, *pcur; 277 struct ocrdma_rdma_stats_resp *rdma_stats = 278 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 279 struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; 280 281 memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 282 283 pcur = stats; 284 pcur += ocrdma_add_stat(stats, pcur, "send_pkts", 285 convert_to_64bit(tx_stats->send_pkts_lo, 286 tx_stats->send_pkts_hi)); 287 pcur += ocrdma_add_stat(stats, pcur, "write_pkts", 288 convert_to_64bit(tx_stats->write_pkts_lo, 289 tx_stats->write_pkts_hi)); 290 pcur += ocrdma_add_stat(stats, pcur, "read_pkts", 291 convert_to_64bit(tx_stats->read_pkts_lo, 292 tx_stats->read_pkts_hi)); 293 pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts", 294 convert_to_64bit(tx_stats->read_rsp_pkts_lo, 295 tx_stats->read_rsp_pkts_hi)); 296 pcur += ocrdma_add_stat(stats, pcur, "ack_pkts", 297 convert_to_64bit(tx_stats->ack_pkts_lo, 298 tx_stats->ack_pkts_hi)); 299 pcur += ocrdma_add_stat(stats, pcur, "send_bytes", 300 convert_to_64bit(tx_stats->send_bytes_lo, 301 tx_stats->send_bytes_hi)); 302 pcur += ocrdma_add_stat(stats, pcur, "write_bytes", 303 convert_to_64bit(tx_stats->write_bytes_lo, 304 tx_stats->write_bytes_hi)); 305 pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes", 306 convert_to_64bit(tx_stats->read_req_bytes_lo, 307 tx_stats->read_req_bytes_hi)); 308 pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes", 309 convert_to_64bit(tx_stats->read_rsp_bytes_lo, 310 tx_stats->read_rsp_bytes_hi)); 311 pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts", 312 (u64)tx_stats->ack_timeouts); 313 314 return stats; 315 } 316 317 static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev) 318 { 319 struct ocrdma_rdma_stats_resp *rdma_stats = 320 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 321 struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; 322 323 return (convert_to_64bit(tx_stats->send_pkts_lo, 324 tx_stats->send_pkts_hi) + 325 convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) + 326 convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) + 327 convert_to_64bit(tx_stats->read_rsp_pkts_lo, 328 tx_stats->read_rsp_pkts_hi) + 329 convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi)); 330 } 331 332 static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev) 333 { 334 struct ocrdma_rdma_stats_resp *rdma_stats = 335 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 336 struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; 337 338 return (convert_to_64bit(tx_stats->send_bytes_lo, 339 tx_stats->send_bytes_hi) + 340 convert_to_64bit(tx_stats->write_bytes_lo, 341 tx_stats->write_bytes_hi) + 342 convert_to_64bit(tx_stats->read_req_bytes_lo, 343 tx_stats->read_req_bytes_hi) + 344 convert_to_64bit(tx_stats->read_rsp_bytes_lo, 345 tx_stats->read_rsp_bytes_hi))/4; 346 } 347 348 static char *ocrdma_wqe_stats(struct ocrdma_dev *dev) 349 { 350 char *stats = dev->stats_mem.debugfs_mem, *pcur; 351 struct ocrdma_rdma_stats_resp *rdma_stats = 352 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 353 struct ocrdma_wqe_stats *wqe_stats = &rdma_stats->wqe_stats; 354 355 memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 356 357 pcur = stats; 358 pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes", 359 convert_to_64bit(wqe_stats->large_send_rc_wqes_lo, 360 wqe_stats->large_send_rc_wqes_hi)); 361 pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes", 362 convert_to_64bit(wqe_stats->large_write_rc_wqes_lo, 363 wqe_stats->large_write_rc_wqes_hi)); 364 pcur += ocrdma_add_stat(stats, pcur, "read_wqes", 365 convert_to_64bit(wqe_stats->read_wqes_lo, 366 wqe_stats->read_wqes_hi)); 367 pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes", 368 convert_to_64bit(wqe_stats->frmr_wqes_lo, 369 wqe_stats->frmr_wqes_hi)); 370 pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes", 371 convert_to_64bit(wqe_stats->mw_bind_wqes_lo, 372 wqe_stats->mw_bind_wqes_hi)); 373 pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes", 374 convert_to_64bit(wqe_stats->invalidate_wqes_lo, 375 wqe_stats->invalidate_wqes_hi)); 376 pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops", 377 (u64)wqe_stats->dpp_wqe_drops); 378 return stats; 379 } 380 381 static char *ocrdma_db_errstats(struct ocrdma_dev *dev) 382 { 383 char *stats = dev->stats_mem.debugfs_mem, *pcur; 384 struct ocrdma_rdma_stats_resp *rdma_stats = 385 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 386 struct ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats; 387 388 memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 389 390 pcur = stats; 391 pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors", 392 (u64)db_err_stats->sq_doorbell_errors); 393 pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors", 394 (u64)db_err_stats->cq_doorbell_errors); 395 pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors", 396 (u64)db_err_stats->rq_srq_doorbell_errors); 397 pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors", 398 (u64)db_err_stats->cq_overflow_errors); 399 return stats; 400 } 401 402 static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev) 403 { 404 char *stats = dev->stats_mem.debugfs_mem, *pcur; 405 struct ocrdma_rdma_stats_resp *rdma_stats = 406 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 407 struct ocrdma_rx_qp_err_stats *rx_qp_err_stats = 408 &rdma_stats->rx_qp_err_stats; 409 410 memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 411 412 pcur = stats; 413 pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors", 414 (u64)rx_qp_err_stats->nak_invalid_requst_errors); 415 pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors", 416 (u64)rx_qp_err_stats->nak_remote_operation_errors); 417 pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors", 418 (u64)rx_qp_err_stats->nak_count_remote_access_errors); 419 pcur += ocrdma_add_stat(stats, pcur, "local_length_errors", 420 (u64)rx_qp_err_stats->local_length_errors); 421 pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors", 422 (u64)rx_qp_err_stats->local_protection_errors); 423 pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors", 424 (u64)rx_qp_err_stats->local_qp_operation_errors); 425 return stats; 426 } 427 428 static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev) 429 { 430 char *stats = dev->stats_mem.debugfs_mem, *pcur; 431 struct ocrdma_rdma_stats_resp *rdma_stats = 432 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 433 struct ocrdma_tx_qp_err_stats *tx_qp_err_stats = 434 &rdma_stats->tx_qp_err_stats; 435 436 memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 437 438 pcur = stats; 439 pcur += ocrdma_add_stat(stats, pcur, "local_length_errors", 440 (u64)tx_qp_err_stats->local_length_errors); 441 pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors", 442 (u64)tx_qp_err_stats->local_protection_errors); 443 pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors", 444 (u64)tx_qp_err_stats->local_qp_operation_errors); 445 pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors", 446 (u64)tx_qp_err_stats->retry_count_exceeded_errors); 447 pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors", 448 (u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors); 449 return stats; 450 } 451 452 static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev) 453 { 454 int i; 455 char *pstats = dev->stats_mem.debugfs_mem; 456 struct ocrdma_rdma_stats_resp *rdma_stats = 457 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 458 struct ocrdma_tx_dbg_stats *tx_dbg_stats = 459 &rdma_stats->tx_dbg_stats; 460 461 memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM)); 462 463 for (i = 0; i < 100; i++) 464 pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i, 465 tx_dbg_stats->data[i]); 466 467 return dev->stats_mem.debugfs_mem; 468 } 469 470 static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev) 471 { 472 int i; 473 char *pstats = dev->stats_mem.debugfs_mem; 474 struct ocrdma_rdma_stats_resp *rdma_stats = 475 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 476 struct ocrdma_rx_dbg_stats *rx_dbg_stats = 477 &rdma_stats->rx_dbg_stats; 478 479 memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM)); 480 481 for (i = 0; i < 200; i++) 482 pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i, 483 rx_dbg_stats->data[i]); 484 485 return dev->stats_mem.debugfs_mem; 486 } 487 488 static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev) 489 { 490 char *stats = dev->stats_mem.debugfs_mem, *pcur; 491 492 493 memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 494 495 pcur = stats; 496 pcur += ocrdma_add_stat(stats, pcur, "async_cq_err", 497 (u64)(dev->async_err_stats 498 [OCRDMA_CQ_ERROR].counter)); 499 pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err", 500 (u64)dev->async_err_stats 501 [OCRDMA_CQ_OVERRUN_ERROR].counter); 502 pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err", 503 (u64)dev->async_err_stats 504 [OCRDMA_CQ_QPCAT_ERROR].counter); 505 pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err", 506 (u64)dev->async_err_stats 507 [OCRDMA_QP_ACCESS_ERROR].counter); 508 pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt", 509 (u64)dev->async_err_stats 510 [OCRDMA_QP_COMM_EST_EVENT].counter); 511 pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt", 512 (u64)dev->async_err_stats 513 [OCRDMA_SQ_DRAINED_EVENT].counter); 514 pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt", 515 (u64)dev->async_err_stats 516 [OCRDMA_DEVICE_FATAL_EVENT].counter); 517 pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err", 518 (u64)dev->async_err_stats 519 [OCRDMA_SRQCAT_ERROR].counter); 520 pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt", 521 (u64)dev->async_err_stats 522 [OCRDMA_SRQ_LIMIT_EVENT].counter); 523 pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt", 524 (u64)dev->async_err_stats 525 [OCRDMA_QP_LAST_WQE_EVENT].counter); 526 527 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err", 528 (u64)dev->cqe_err_stats 529 [OCRDMA_CQE_LOC_LEN_ERR].counter); 530 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err", 531 (u64)dev->cqe_err_stats 532 [OCRDMA_CQE_LOC_QP_OP_ERR].counter); 533 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err", 534 (u64)dev->cqe_err_stats 535 [OCRDMA_CQE_LOC_EEC_OP_ERR].counter); 536 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err", 537 (u64)dev->cqe_err_stats 538 [OCRDMA_CQE_LOC_PROT_ERR].counter); 539 pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err", 540 (u64)dev->cqe_err_stats 541 [OCRDMA_CQE_WR_FLUSH_ERR].counter); 542 pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err", 543 (u64)dev->cqe_err_stats 544 [OCRDMA_CQE_MW_BIND_ERR].counter); 545 pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err", 546 (u64)dev->cqe_err_stats 547 [OCRDMA_CQE_BAD_RESP_ERR].counter); 548 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err", 549 (u64)dev->cqe_err_stats 550 [OCRDMA_CQE_LOC_ACCESS_ERR].counter); 551 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err", 552 (u64)dev->cqe_err_stats 553 [OCRDMA_CQE_REM_INV_REQ_ERR].counter); 554 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err", 555 (u64)dev->cqe_err_stats 556 [OCRDMA_CQE_REM_ACCESS_ERR].counter); 557 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err", 558 (u64)dev->cqe_err_stats 559 [OCRDMA_CQE_REM_OP_ERR].counter); 560 pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err", 561 (u64)dev->cqe_err_stats 562 [OCRDMA_CQE_RETRY_EXC_ERR].counter); 563 pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err", 564 (u64)dev->cqe_err_stats 565 [OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter); 566 pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err", 567 (u64)dev->cqe_err_stats 568 [OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter); 569 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err", 570 (u64)dev->cqe_err_stats 571 [OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter); 572 pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err", 573 (u64)dev->cqe_err_stats 574 [OCRDMA_CQE_REM_ABORT_ERR].counter); 575 pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err", 576 (u64)dev->cqe_err_stats 577 [OCRDMA_CQE_INV_EECN_ERR].counter); 578 pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err", 579 (u64)dev->cqe_err_stats 580 [OCRDMA_CQE_INV_EEC_STATE_ERR].counter); 581 pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err", 582 (u64)dev->cqe_err_stats 583 [OCRDMA_CQE_FATAL_ERR].counter); 584 pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err", 585 (u64)dev->cqe_err_stats 586 [OCRDMA_CQE_RESP_TIMEOUT_ERR].counter); 587 pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err", 588 (u64)dev->cqe_err_stats 589 [OCRDMA_CQE_GENERAL_ERR].counter); 590 return stats; 591 } 592 593 static void ocrdma_update_stats(struct ocrdma_dev *dev) 594 { 595 ulong now = jiffies, secs; 596 int status = 0; 597 struct ocrdma_rdma_stats_resp *rdma_stats = 598 (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 599 struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats; 600 601 secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U; 602 if (secs) { 603 /* update */ 604 status = ocrdma_mbx_rdma_stats(dev, false); 605 if (status) 606 pr_err("%s: stats mbox failed with status = %d\n", 607 __func__, status); 608 /* Update PD counters from PD resource manager */ 609 if (dev->pd_mgr->pd_prealloc_valid) { 610 rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count; 611 rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count; 612 /* Threshold stata*/ 613 rsrc_stats = &rdma_stats->th_rsrc_stats; 614 rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh; 615 rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh; 616 } 617 dev->last_stats_time = jiffies; 618 } 619 } 620 621 static ssize_t ocrdma_dbgfs_ops_write(struct file *filp, 622 const char __user *buffer, 623 size_t count, loff_t *ppos) 624 { 625 char tmp_str[32]; 626 long reset; 627 int status = 0; 628 struct ocrdma_stats *pstats = filp->private_data; 629 struct ocrdma_dev *dev = pstats->dev; 630 631 if (count > 32) 632 goto err; 633 634 if (copy_from_user(tmp_str, buffer, count)) 635 goto err; 636 637 tmp_str[count-1] = '\0'; 638 if (kstrtol(tmp_str, 10, &reset)) 639 goto err; 640 641 switch (pstats->type) { 642 case OCRDMA_RESET_STATS: 643 if (reset) { 644 status = ocrdma_mbx_rdma_stats(dev, true); 645 if (status) { 646 pr_err("Failed to reset stats = %d", status); 647 goto err; 648 } 649 } 650 break; 651 default: 652 goto err; 653 } 654 655 return count; 656 err: 657 return -EFAULT; 658 } 659 660 int ocrdma_pma_counters(struct ocrdma_dev *dev, 661 struct ib_mad *out_mad) 662 { 663 struct ib_pma_portcounters *pma_cnt; 664 665 memset(out_mad->data, 0, sizeof out_mad->data); 666 pma_cnt = (void *)(out_mad->data + 40); 667 ocrdma_update_stats(dev); 668 669 pma_cnt->port_xmit_data = cpu_to_be32(ocrdma_sysfs_xmit_data(dev)); 670 pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev)); 671 pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev)); 672 pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev)); 673 return 0; 674 } 675 676 static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer, 677 size_t usr_buf_len, loff_t *ppos) 678 { 679 struct ocrdma_stats *pstats = filp->private_data; 680 struct ocrdma_dev *dev = pstats->dev; 681 ssize_t status = 0; 682 char *data = NULL; 683 684 /* No partial reads */ 685 if (*ppos != 0) 686 return 0; 687 688 mutex_lock(&dev->stats_lock); 689 690 ocrdma_update_stats(dev); 691 692 switch (pstats->type) { 693 case OCRDMA_RSRC_STATS: 694 data = ocrdma_resource_stats(dev); 695 break; 696 case OCRDMA_RXSTATS: 697 data = ocrdma_rx_stats(dev); 698 break; 699 case OCRDMA_WQESTATS: 700 data = ocrdma_wqe_stats(dev); 701 break; 702 case OCRDMA_TXSTATS: 703 data = ocrdma_tx_stats(dev); 704 break; 705 case OCRDMA_DB_ERRSTATS: 706 data = ocrdma_db_errstats(dev); 707 break; 708 case OCRDMA_RXQP_ERRSTATS: 709 data = ocrdma_rxqp_errstats(dev); 710 break; 711 case OCRDMA_TXQP_ERRSTATS: 712 data = ocrdma_txqp_errstats(dev); 713 break; 714 case OCRDMA_TX_DBG_STATS: 715 data = ocrdma_tx_dbg_stats(dev); 716 break; 717 case OCRDMA_RX_DBG_STATS: 718 data = ocrdma_rx_dbg_stats(dev); 719 break; 720 case OCRDMA_DRV_STATS: 721 data = ocrdma_driver_dbg_stats(dev); 722 break; 723 724 default: 725 status = -EFAULT; 726 goto exit; 727 } 728 729 if (usr_buf_len < strlen(data)) { 730 status = -ENOSPC; 731 goto exit; 732 } 733 734 status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data, 735 strlen(data)); 736 exit: 737 mutex_unlock(&dev->stats_lock); 738 return status; 739 } 740 741 static const struct file_operations ocrdma_dbg_ops = { 742 .owner = THIS_MODULE, 743 .open = simple_open, 744 .read = ocrdma_dbgfs_ops_read, 745 .write = ocrdma_dbgfs_ops_write, 746 }; 747 748 void ocrdma_add_port_stats(struct ocrdma_dev *dev) 749 { 750 if (!ocrdma_dbgfs_dir) 751 return; 752 753 /* Create post stats base dir */ 754 dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir); 755 if (!dev->dir) 756 goto err; 757 758 dev->rsrc_stats.type = OCRDMA_RSRC_STATS; 759 dev->rsrc_stats.dev = dev; 760 if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir, 761 &dev->rsrc_stats, &ocrdma_dbg_ops)) 762 goto err; 763 764 dev->rx_stats.type = OCRDMA_RXSTATS; 765 dev->rx_stats.dev = dev; 766 if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir, 767 &dev->rx_stats, &ocrdma_dbg_ops)) 768 goto err; 769 770 dev->wqe_stats.type = OCRDMA_WQESTATS; 771 dev->wqe_stats.dev = dev; 772 if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir, 773 &dev->wqe_stats, &ocrdma_dbg_ops)) 774 goto err; 775 776 dev->tx_stats.type = OCRDMA_TXSTATS; 777 dev->tx_stats.dev = dev; 778 if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir, 779 &dev->tx_stats, &ocrdma_dbg_ops)) 780 goto err; 781 782 dev->db_err_stats.type = OCRDMA_DB_ERRSTATS; 783 dev->db_err_stats.dev = dev; 784 if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir, 785 &dev->db_err_stats, &ocrdma_dbg_ops)) 786 goto err; 787 788 789 dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS; 790 dev->tx_qp_err_stats.dev = dev; 791 if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir, 792 &dev->tx_qp_err_stats, &ocrdma_dbg_ops)) 793 goto err; 794 795 dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS; 796 dev->rx_qp_err_stats.dev = dev; 797 if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir, 798 &dev->rx_qp_err_stats, &ocrdma_dbg_ops)) 799 goto err; 800 801 802 dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS; 803 dev->tx_dbg_stats.dev = dev; 804 if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir, 805 &dev->tx_dbg_stats, &ocrdma_dbg_ops)) 806 goto err; 807 808 dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS; 809 dev->rx_dbg_stats.dev = dev; 810 if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir, 811 &dev->rx_dbg_stats, &ocrdma_dbg_ops)) 812 goto err; 813 814 dev->driver_stats.type = OCRDMA_DRV_STATS; 815 dev->driver_stats.dev = dev; 816 if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir, 817 &dev->driver_stats, &ocrdma_dbg_ops)) 818 goto err; 819 820 dev->reset_stats.type = OCRDMA_RESET_STATS; 821 dev->reset_stats.dev = dev; 822 if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir, 823 &dev->reset_stats, &ocrdma_dbg_ops)) 824 goto err; 825 826 /* Now create dma_mem for stats mbx command */ 827 if (!ocrdma_alloc_stats_mem(dev)) 828 goto err; 829 830 mutex_init(&dev->stats_lock); 831 832 return; 833 err: 834 ocrdma_release_stats_mem(dev); 835 debugfs_remove_recursive(dev->dir); 836 dev->dir = NULL; 837 } 838 839 void ocrdma_rem_port_stats(struct ocrdma_dev *dev) 840 { 841 if (!dev->dir) 842 return; 843 mutex_destroy(&dev->stats_lock); 844 ocrdma_release_stats_mem(dev); 845 debugfs_remove(dev->dir); 846 } 847 848 void ocrdma_init_debugfs(void) 849 { 850 /* Create base dir in debugfs root dir */ 851 ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL); 852 } 853 854 void ocrdma_rem_debugfs(void) 855 { 856 debugfs_remove_recursive(ocrdma_dbgfs_dir); 857 } 858