// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024, Microsoft Corporation. All rights reserved.
 */

#include "counters.h"

static const struct rdma_stat_desc mana_ib_port_stats_desc[] = {
	[MANA_IB_REQUESTER_TIMEOUT].name = "requester_timeout",
	[MANA_IB_REQUESTER_OOS_NAK].name = "requester_oos_nak",
	[MANA_IB_REQUESTER_RNR_NAK].name = "requester_rnr_nak",
	[MANA_IB_RESPONDER_RNR_NAK].name = "responder_rnr_nak",
	[MANA_IB_RESPONDER_OOS].name = "responder_oos",
	[MANA_IB_RESPONDER_DUP_REQUEST].name = "responder_dup_request",
	[MANA_IB_REQUESTER_IMPLICIT_NAK].name = "requester_implicit_nak",
	[MANA_IB_REQUESTER_READRESP_PSN_MISMATCH].name = "requester_readresp_psn_mismatch",
	[MANA_IB_NAK_INV_REQ].name = "nak_inv_req",
	[MANA_IB_NAK_ACCESS_ERR].name = "nak_access_error",
	[MANA_IB_NAK_OPP_ERR].name = "nak_opp_error",
	[MANA_IB_NAK_INV_READ].name = "nak_inv_read",
	[MANA_IB_RESPONDER_LOCAL_LEN_ERR].name = "responder_local_len_error",
	[MANA_IB_REQUESTOR_LOCAL_PROT_ERR].name = "requestor_local_prot_error",
	[MANA_IB_RESPONDER_REM_ACCESS_ERR].name = "responder_rem_access_error",
	[MANA_IB_RESPONDER_LOCAL_QP_ERR].name = "responder_local_qp_error",
	[MANA_IB_RESPONDER_MALFORMED_WQE].name = "responder_malformed_wqe",
	[MANA_IB_GENERAL_HW_ERR].name = "general_hw_error",
	[MANA_IB_REQUESTER_RNR_NAK_RETRIES_EXCEEDED].name = "requester_rnr_nak_retries_exceeded",
	[MANA_IB_REQUESTER_RETRIES_EXCEEDED].name = "requester_retries_exceeded",
	[MANA_IB_TOTAL_FATAL_ERR].name = "total_fatal_error",
	[MANA_IB_RECEIVED_CNPS].name = "received_cnps",
	[MANA_IB_NUM_QPS_CONGESTED].name = "num_qps_congested",
	[MANA_IB_RATE_INC_EVENTS].name = "rate_inc_events",
	[MANA_IB_NUM_QPS_RECOVERED].name = "num_qps_recovered",
	[MANA_IB_CURRENT_RATE].name = "current_rate",
	[MANA_IB_DUP_RX_REQ].name = "dup_rx_requests",
	[MANA_IB_TX_BYTES].name = "tx_bytes",
	[MANA_IB_RX_BYTES].name = "rx_bytes",
	[MANA_IB_RX_SEND_REQ].name = "rx_send_requests",
	[MANA_IB_RX_WRITE_REQ].name = "rx_write_requests",
	[MANA_IB_RX_READ_REQ].name = "rx_read_requests",
	[MANA_IB_TX_PKT].name = "tx_packets",
	[MANA_IB_RX_PKT].name = "rx_packets",
};

static const struct rdma_stat_desc mana_ib_device_stats_desc[] = {
	[MANA_IB_SENT_CNPS].name = "sent_cnps",
	[MANA_IB_RECEIVED_ECNS].name = "received_ecns",
	[MANA_IB_RECEIVED_CNP_COUNT].name = "received_cnp_count",
	[MANA_IB_QP_CONGESTED_EVENTS].name = "qp_congested_events",
	[MANA_IB_QP_RECOVERED_EVENTS].name = "qp_recovered_events",
	[MANA_IB_DEV_RATE_INC_EVENTS].name = "rate_inc_events",
};

struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev)
{
	return rdma_alloc_hw_stats_struct(mana_ib_device_stats_desc,
					  ARRAY_SIZE(mana_ib_device_stats_desc),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
						  u32 port_num)
{
	return rdma_alloc_hw_stats_struct(mana_ib_port_stats_desc,
					  ARRAY_SIZE(mana_ib_port_stats_desc),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

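/* Device-wide counters, reported when the core queries port 0 (see mana_ib_get_hw_stats()). */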
static int mana_ib_get_hw_device_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats)
{
	struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
						ib_dev);
	struct mana_rnic_query_device_cntrs_resp resp = {};
	struct mana_rnic_query_device_cntrs_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_DEVICE_COUNTERS,
			     sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;

	err = mana_gd_send_request(mdev_to_gc(mdev), sizeof(req), &req,
				   sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to query device counters err %d",
			  err);
		return err;
	}

	stats->value[MANA_IB_SENT_CNPS] = resp.sent_cnps;
	stats->value[MANA_IB_RECEIVED_ECNS] = resp.received_ecns;
	stats->value[MANA_IB_RECEIVED_CNP_COUNT] = resp.received_cnp_count;
	stats->value[MANA_IB_QP_CONGESTED_EVENTS] = resp.qp_congested_events;
	stats->value[MANA_IB_QP_RECOVERED_EVENTS] = resp.qp_recovered_events;
	stats->value[MANA_IB_DEV_RATE_INC_EVENTS] = resp.rate_inc_events;

	return ARRAY_SIZE(mana_ib_device_stats_desc);
}

static int mana_ib_get_hw_port_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
				     u32 port_num)
{
	struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
						ib_dev);
	struct mana_rnic_query_vf_cntrs_resp resp = {};
	struct mana_rnic_query_vf_cntrs_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_VF_COUNTERS,
			     sizeof(req), sizeof(resp));
	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;

	err = mana_gd_send_request(mdev_to_gc(mdev), sizeof(req), &req,
				   sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to query vf counters err %d",
			  err);
		return err;
	}

	stats->value[MANA_IB_REQUESTER_TIMEOUT] = resp.requester_timeout;
	stats->value[MANA_IB_REQUESTER_OOS_NAK] = resp.requester_oos_nak;
	stats->value[MANA_IB_REQUESTER_RNR_NAK] = resp.requester_rnr_nak;
	stats->value[MANA_IB_RESPONDER_RNR_NAK] = resp.responder_rnr_nak;
	stats->value[MANA_IB_RESPONDER_OOS] = resp.responder_oos;
	stats->value[MANA_IB_RESPONDER_DUP_REQUEST] = resp.responder_dup_request;
	stats->value[MANA_IB_REQUESTER_IMPLICIT_NAK] =
		resp.requester_implicit_nak;
	stats->value[MANA_IB_REQUESTER_READRESP_PSN_MISMATCH] =
		resp.requester_readresp_psn_mismatch;
	stats->value[MANA_IB_NAK_INV_REQ] = resp.nak_inv_req;
	stats->value[MANA_IB_NAK_ACCESS_ERR] = resp.nak_access_err;
	stats->value[MANA_IB_NAK_OPP_ERR] = resp.nak_opp_err;
	stats->value[MANA_IB_NAK_INV_READ] = resp.nak_inv_read;
	stats->value[MANA_IB_RESPONDER_LOCAL_LEN_ERR] =
		resp.responder_local_len_err;
	stats->value[MANA_IB_REQUESTOR_LOCAL_PROT_ERR] =
		resp.requestor_local_prot_err;
	stats->value[MANA_IB_RESPONDER_REM_ACCESS_ERR] =
		resp.responder_rem_access_err;
	stats->value[MANA_IB_RESPONDER_LOCAL_QP_ERR] =
		resp.responder_local_qp_err;
	stats->value[MANA_IB_RESPONDER_MALFORMED_WQE] =
		resp.responder_malformed_wqe;
	stats->value[MANA_IB_GENERAL_HW_ERR] = resp.general_hw_err;
	stats->value[MANA_IB_REQUESTER_RNR_NAK_RETRIES_EXCEEDED] =
		resp.requester_rnr_nak_retries_exceeded;
	stats->value[MANA_IB_REQUESTER_RETRIES_EXCEEDED] =
		resp.requester_retries_exceeded;
	stats->value[MANA_IB_TOTAL_FATAL_ERR] = resp.total_fatal_err;

	stats->value[MANA_IB_RECEIVED_CNPS] = resp.received_cnps;
	stats->value[MANA_IB_NUM_QPS_CONGESTED] = resp.num_qps_congested;
	stats->value[MANA_IB_RATE_INC_EVENTS] = resp.rate_inc_events;
	stats->value[MANA_IB_NUM_QPS_RECOVERED] = resp.num_qps_recovered;
	stats->value[MANA_IB_CURRENT_RATE] = resp.current_rate;

	stats->value[MANA_IB_DUP_RX_REQ] = resp.dup_rx_req;
	stats->value[MANA_IB_TX_BYTES] = resp.tx_bytes;
	stats->value[MANA_IB_RX_BYTES] = resp.rx_bytes;
	stats->value[MANA_IB_RX_SEND_REQ] = resp.rx_send_req;
	stats->value[MANA_IB_RX_WRITE_REQ] = resp.rx_write_req;
	stats->value[MANA_IB_RX_READ_REQ] = resp.rx_read_req;
	stats->value[MANA_IB_TX_PKT] = resp.tx_pkt;
	stats->value[MANA_IB_RX_PKT] = resp.rx_pkt;

	return ARRAY_SIZE(mana_ib_port_stats_desc);
}

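/*
 * Port 0 selects the device-wide counters; any other port number
 * returns the per-port (VF) counters.
 */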
int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
			 u32 port_num, int index)
{
	if (!port_num)
		return mana_ib_get_hw_device_stats(ibdev, stats);
	else
		return mana_ib_get_hw_port_stats(ibdev, stats, port_num);
}