drivers/infiniband/hw/ocrdma/ocrdma_stats.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <rdma/ib_addr.h>
#include <rdma/ib_pma.h>
#include "ocrdma_stats.h"

static struct dentry *ocrdma_dbgfs_dir;

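/*
 * Append one "name: value" line at pcur within the shared debugfs
 * buffer.  Returns the number of bytes written, or 0 (with an error
 * logged) if the line would overflow OCRDMA_MAX_DBGFS_MEM.
 */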
static int ocrdma_add_stat(char *start, char *pcur,
			   char *name, u64 count)
{
	char buff[128] = {0};
	int cpy_len = 0;

	snprintf(buff, 128, "%s: %llu\n", name, count);
	cpy_len = strlen(buff);

	if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) {
		pr_err("%s: No space in stats buff\n", __func__);
		return 0;
	}

	memcpy(pcur, buff, cpy_len);
	return cpy_len;
}

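/*
 * Allocate the DMA-coherent buffer shared with the firmware for the
 * stats mailbox command/response, plus a regular kernel buffer used
 * to format the debugfs output.
 */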
static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
{
	struct stats_mem *mem = &dev->stats_mem;

	/* Alloc mbox command mem */
	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
			  sizeof(struct ocrdma_rdma_stats_resp));

	mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
				     &mem->pa, GFP_KERNEL);
	if (!mem->va) {
		pr_err("%s: stats mbox allocation failed\n", __func__);
		return false;
	}

	memset(mem->va, 0, mem->size);

	/* Alloc debugfs mem */
	mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
	if (!mem->debugfs_mem) {
		pr_err("%s: stats debugfs mem allocation failed\n", __func__);
		return false;
	}

	return true;
}

static void ocrdma_release_stats_mem(struct ocrdma_dev *dev)
{
	struct stats_mem *mem = &dev->stats_mem;

	if (mem->va)
		dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
				  mem->va, mem->pa);
	kfree(mem->debugfs_mem);
}

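/*
 * Format the active and threshold resource counters (PDs, QPs, SRQs,
 * NSMRs by size bucket, etc.) from the last stats mailbox response
 * into the debugfs buffer.
 */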
static char *ocrdma_resource_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
			(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds",
				(u64)rsrc_stats->dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds",
				(u64)rsrc_stats->non_dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps",
				(u64)rsrc_stats->rc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps",
				(u64)rsrc_stats->uc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps",
				(u64)rsrc_stats->ud_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps",
				(u64)rsrc_stats->rc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps",
				(u64)rsrc_stats->uc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps",
				(u64)rsrc_stats->ud_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_srqs",
				(u64)rsrc_stats->srqs);
	pcur += ocrdma_add_stat(stats, pcur, "active_rbqs",
				(u64)rsrc_stats->rbqs);
	pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr",
				(u64)rsrc_stats->r64K_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr",
				(u64)rsrc_stats->r64K_to_2M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr",
				(u64)rsrc_stats->r2M_to_44M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr",
				(u64)rsrc_stats->r44M_to_1G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr",
				(u64)rsrc_stats->r1G_to_4G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G",
				(u64)rsrc_stats->nsmr_count_4G_to_32G);
	pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr",
				(u64)rsrc_stats->r32G_to_64G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr",
				(u64)rsrc_stats->r64G_to_128G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr",
				(u64)rsrc_stats->r128G_to_higher_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr",
				(u64)rsrc_stats->embedded_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_frmr",
				(u64)rsrc_stats->frmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps",
				(u64)rsrc_stats->prefetch_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps",
				(u64)rsrc_stats->ondemand_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr",
				(u64)rsrc_stats->phy_mr);
	pcur += ocrdma_add_stat(stats, pcur, "active_mw",
				(u64)rsrc_stats->mw);

	/* Print the threshold stats */
	rsrc_stats = &rdma_stats->th_rsrc_stats;

	pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds",
				(u64)rsrc_stats->dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds",
				(u64)rsrc_stats->non_dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps",
				(u64)rsrc_stats->rc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps",
				(u64)rsrc_stats->uc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps",
				(u64)rsrc_stats->ud_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps",
				(u64)rsrc_stats->rc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps",
				(u64)rsrc_stats->uc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps",
				(u64)rsrc_stats->ud_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs",
				(u64)rsrc_stats->srqs);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs",
				(u64)rsrc_stats->rbqs);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr",
				(u64)rsrc_stats->r64K_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr",
				(u64)rsrc_stats->r64K_to_2M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr",
				(u64)rsrc_stats->r2M_to_44M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr",
				(u64)rsrc_stats->r44M_to_1G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr",
				(u64)rsrc_stats->r1G_to_4G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G",
				(u64)rsrc_stats->nsmr_count_4G_to_32G);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr",
				(u64)rsrc_stats->r32G_to_64G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr",
				(u64)rsrc_stats->r64G_to_128G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr",
				(u64)rsrc_stats->r128G_to_higher_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr",
				(u64)rsrc_stats->embedded_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr",
				(u64)rsrc_stats->frmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps",
				(u64)rsrc_stats->prefetch_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps",
				(u64)rsrc_stats->ondemand_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr",
				(u64)rsrc_stats->phy_mr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_mw",
				(u64)rsrc_stats->mw);
	return stats;
}

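/*
 * Format the RoCE receive-path counters.  64-bit firmware counters
 * arrive split into _lo/_hi halves and are recombined with
 * convert_to_64bit().
 */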
static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_bytes",
				convert_to_64bit(rx_stats->roce_frame_bytes_lo,
						 rx_stats->roce_frame_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops",
				(u64)rx_stats->roce_frame_icrc_drops);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops",
				(u64)rx_stats->roce_frame_payload_len_drops);
	pcur += ocrdma_add_stat(stats, pcur, "ud_drops",
				(u64)rx_stats->ud_drops);
	pcur += ocrdma_add_stat(stats, pcur, "qp1_drops",
				(u64)rx_stats->qp1_drops);
	pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets",
				(u64)rx_stats->psn_error_request_packets);
	pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets",
				(u64)rx_stats->psn_error_resp_packets);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts",
				(u64)rx_stats->rnr_nak_timeouts);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives",
				(u64)rx_stats->rnr_nak_receives);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops",
				(u64)rx_stats->roce_frame_rxmt_drops);
	pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors",
				(u64)rx_stats->nak_count_psn_sequence_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors",
				(u64)rx_stats->rc_drop_count_lookup_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks",
				(u64)rx_stats->rq_rnr_naks);
	pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks",
				(u64)rx_stats->srq_rnr_naks);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frames",
				convert_to_64bit(rx_stats->roce_frames_lo,
						 rx_stats->roce_frames_hi));

	return stats;
}

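/*
 * PMA PortRcvPackets: every received RoCE frame, including frames
 * dropped for ICRC or payload-length errors.
 */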
static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev)
{
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;

	return convert_to_64bit(rx_stats->roce_frames_lo,
		rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops
		+ (u64)rx_stats->roce_frame_payload_len_drops;
}

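/*
 * PMA PortRcvData: received bytes divided by 4, since the IB PMA data
 * counters are defined in units of 32-bit words.
 */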
static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev)
{
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;

	return convert_to_64bit(rx_stats->roce_frame_bytes_lo,
				rx_stats->roce_frame_bytes_hi) / 4;
}

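/* Format the RoCE transmit-path packet and byte counters. */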
static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "send_pkts",
				convert_to_64bit(tx_stats->send_pkts_lo,
						 tx_stats->send_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "write_pkts",
				convert_to_64bit(tx_stats->write_pkts_lo,
						 tx_stats->write_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_pkts",
				convert_to_64bit(tx_stats->read_pkts_lo,
						 tx_stats->read_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts",
				convert_to_64bit(tx_stats->read_rsp_pkts_lo,
						 tx_stats->read_rsp_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "ack_pkts",
				convert_to_64bit(tx_stats->ack_pkts_lo,
						 tx_stats->ack_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "send_bytes",
				convert_to_64bit(tx_stats->send_bytes_lo,
						 tx_stats->send_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "write_bytes",
				convert_to_64bit(tx_stats->write_bytes_lo,
						 tx_stats->write_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes",
				convert_to_64bit(tx_stats->read_req_bytes_lo,
						 tx_stats->read_req_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes",
				convert_to_64bit(tx_stats->read_rsp_bytes_lo,
						 tx_stats->read_rsp_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts",
				(u64)tx_stats->ack_timeouts);

	return stats;
}

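/*
 * PMA PortXmitPackets: sum of send, write, read, read-response and
 * ack packets.
 */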
static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev)
{
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;

	return (convert_to_64bit(tx_stats->send_pkts_lo,
				 tx_stats->send_pkts_hi) +
	convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) +
	convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) +
	convert_to_64bit(tx_stats->read_rsp_pkts_lo,
			 tx_stats->read_rsp_pkts_hi) +
	convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi));
}

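/* PMA PortXmitData: transmitted bytes expressed in 32-bit words. */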
static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev)
{
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;

	return (convert_to_64bit(tx_stats->send_bytes_lo,
				 tx_stats->send_bytes_hi) +
		convert_to_64bit(tx_stats->write_bytes_lo,
				 tx_stats->write_bytes_hi) +
		convert_to_64bit(tx_stats->read_req_bytes_lo,
				 tx_stats->read_req_bytes_hi) +
		convert_to_64bit(tx_stats->read_rsp_bytes_lo,
				 tx_stats->read_rsp_bytes_hi)) / 4;
}

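/*
 * Format the work-queue-entry counters: large RC sends/writes, reads,
 * FRMR, MW bind, invalidate WQEs, and DPP WQE drops.
 */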
static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_wqe_stats *wqe_stats = &rdma_stats->wqe_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes",
		convert_to_64bit(wqe_stats->large_send_rc_wqes_lo,
				 wqe_stats->large_send_rc_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes",
		convert_to_64bit(wqe_stats->large_write_rc_wqes_lo,
				 wqe_stats->large_write_rc_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_wqes",
				convert_to_64bit(wqe_stats->read_wqes_lo,
						 wqe_stats->read_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes",
				convert_to_64bit(wqe_stats->frmr_wqes_lo,
						 wqe_stats->frmr_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes",
				convert_to_64bit(wqe_stats->mw_bind_wqes_lo,
						 wqe_stats->mw_bind_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes",
		convert_to_64bit(wqe_stats->invalidate_wqes_lo,
				 wqe_stats->invalidate_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops",
				(u64)wqe_stats->dpp_wqe_drops);
	return stats;
}

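/* Doorbell and CQ overflow error counters. */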
static char *ocrdma_db_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors",
				(u64)db_err_stats->sq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors",
				(u64)db_err_stats->cq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors",
				(u64)db_err_stats->rq_srq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors",
				(u64)db_err_stats->cq_overflow_errors);
	return stats;
}

static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_qp_err_stats *rx_qp_err_stats =
		 &rdma_stats->rx_qp_err_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors",
			(u64)rx_qp_err_stats->nak_invalid_requst_errors);
	pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
			(u64)rx_qp_err_stats->nak_remote_operation_errors);
	pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
			(u64)rx_qp_err_stats->nak_count_remote_access_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
			(u64)rx_qp_err_stats->local_length_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
			(u64)rx_qp_err_stats->local_protection_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
			(u64)rx_qp_err_stats->local_qp_operation_errors);
	return stats;
}

static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_qp_err_stats *tx_qp_err_stats =
		&rdma_stats->tx_qp_err_stats;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
			(u64)tx_qp_err_stats->local_length_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
			(u64)tx_qp_err_stats->local_protection_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
			(u64)tx_qp_err_stats->local_qp_operation_errors);
	pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors",
			(u64)tx_qp_err_stats->retry_count_exceeded_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors",
			(u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors);
	return stats;
}

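/*
 * Dump the raw firmware TX debug area as 32-bit words, one "DW[i]"
 * line each; ocrdma_rx_dbg_stats below does the same for the larger
 * (200-dword) RX debug area.
 */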
static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev)
{
	int i;
	char *pstats = dev->stats_mem.debugfs_mem;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_dbg_stats *tx_dbg_stats =
		&rdma_stats->tx_dbg_stats;

	memset(pstats, 0, OCRDMA_MAX_DBGFS_MEM);

	for (i = 0; i < 100; i++)
		pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
				   tx_dbg_stats->data[i]);

	return dev->stats_mem.debugfs_mem;
}

static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
{
	int i;
	char *pstats = dev->stats_mem.debugfs_mem;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_dbg_stats *rx_dbg_stats =
		&rdma_stats->rx_dbg_stats;

	memset(pstats, 0, OCRDMA_MAX_DBGFS_MEM);

	for (i = 0; i < 200; i++)
		pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
				   rx_dbg_stats->data[i]);

	return dev->stats_mem.debugfs_mem;
}

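/*
 * Format the driver-maintained counters: async-event and CQE-status
 * error totals accumulated by the driver itself rather than read
 * from firmware.
 */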
static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;

	memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "async_cq_err",
				(u64)dev->async_err_stats
				[OCRDMA_CQ_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err",
				(u64)dev->async_err_stats
				[OCRDMA_CQ_OVERRUN_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err",
				(u64)dev->async_err_stats
				[OCRDMA_CQ_QPCAT_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err",
				(u64)dev->async_err_stats
				[OCRDMA_QP_ACCESS_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt",
				(u64)dev->async_err_stats
				[OCRDMA_QP_COMM_EST_EVENT].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt",
				(u64)dev->async_err_stats
				[OCRDMA_SQ_DRAINED_EVENT].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt",
				(u64)dev->async_err_stats
				[OCRDMA_DEVICE_FATAL_EVENT].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err",
				(u64)dev->async_err_stats
				[OCRDMA_SRQCAT_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt",
				(u64)dev->async_err_stats
				[OCRDMA_SRQ_LIMIT_EVENT].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt",
				(u64)dev->async_err_stats
				[OCRDMA_QP_LAST_WQE_EVENT].counter);

	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_LEN_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_QP_OP_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_EEC_OP_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_PROT_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_WR_FLUSH_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_MW_BIND_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_BAD_RESP_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_ACCESS_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_INV_REQ_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_ACCESS_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_OP_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_RETRY_EXC_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_ABORT_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_INV_EECN_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_INV_EEC_STATE_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_FATAL_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_RESP_TIMEOUT_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_GENERAL_ERR].counter);
	return stats;
}

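/*
 * Refresh the firmware stats via mailbox at most once per second, and
 * overlay the PD counters with the driver's PD resource-manager values
 * when pre-allocated PDs are in use.
 */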
static void ocrdma_update_stats(struct ocrdma_dev *dev)
{
	ulong now = jiffies, secs;
	int status = 0;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		      (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;

	secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
	if (secs) {
		/* update */
		status = ocrdma_mbx_rdma_stats(dev, false);
		if (status)
			pr_err("%s: stats mbox failed with status = %d\n",
			       __func__, status);
		/* Update PD counters from PD resource manager */
		if (dev->pd_mgr->pd_prealloc_valid) {
			rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count;
			rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count;
			/* Threshold stats */
			rsrc_stats = &rdma_stats->th_rsrc_stats;
			rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh;
			rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh;
		}
		dev->last_stats_time = jiffies;
	}
}

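/*
 * debugfs write handler.  Only the reset_stats file accepts input:
 * writing a non-zero value asks the firmware to reset its counters.
 */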
static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
				      const char __user *buffer,
				      size_t count, loff_t *ppos)
{
	char tmp_str[32];
	long reset;
	int status = 0;
	struct ocrdma_stats *pstats = filp->private_data;
	struct ocrdma_dev *dev = pstats->dev;

	/* Reject empty or oversized input before indexing tmp_str below */
	if (!count || count > sizeof(tmp_str))
		goto err;

	if (copy_from_user(tmp_str, buffer, count))
		goto err;

	tmp_str[count - 1] = '\0';
	if (kstrtol(tmp_str, 10, &reset))
		goto err;

	switch (pstats->type) {
	case OCRDMA_RESET_STATS:
		if (reset) {
			status = ocrdma_mbx_rdma_stats(dev, true);
			if (status) {
				pr_err("Failed to reset stats = %d\n", status);
				goto err;
			}
		}
		break;
	default:
		goto err;
	}

	return count;
err:
	return -EFAULT;
}

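/*
 * Fill the IB PMA PortCounters block for this device.  The counter
 * block begins 40 bytes into the MAD data; values are refreshed from
 * firmware first.
 */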
int ocrdma_pma_counters(struct ocrdma_dev *dev,
			struct ib_mad *out_mad)
{
	struct ib_pma_portcounters *pma_cnt;

	memset(out_mad->data, 0, sizeof(out_mad->data));
	pma_cnt = (void *)(out_mad->data + 40);
	ocrdma_update_stats(dev);

	pma_cnt->port_xmit_data    = cpu_to_be32(ocrdma_sysfs_xmit_data(dev));
	pma_cnt->port_rcv_data     = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
	pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
	pma_cnt->port_rcv_packets  = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
	return 0;
}

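/*
 * debugfs read handler: refresh the stats from firmware, format the
 * file selected by pstats->type, and copy the text to user space.
 * Partial reads are not supported.
 */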
static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
				     size_t usr_buf_len, loff_t *ppos)
{
	struct ocrdma_stats *pstats = filp->private_data;
	struct ocrdma_dev *dev = pstats->dev;
	ssize_t status = 0;
	char *data = NULL;

	/* No partial reads */
	if (*ppos != 0)
		return 0;

	mutex_lock(&dev->stats_lock);

	ocrdma_update_stats(dev);

	switch (pstats->type) {
	case OCRDMA_RSRC_STATS:
		data = ocrdma_resource_stats(dev);
		break;
	case OCRDMA_RXSTATS:
		data = ocrdma_rx_stats(dev);
		break;
	case OCRDMA_WQESTATS:
		data = ocrdma_wqe_stats(dev);
		break;
	case OCRDMA_TXSTATS:
		data = ocrdma_tx_stats(dev);
		break;
	case OCRDMA_DB_ERRSTATS:
		data = ocrdma_db_errstats(dev);
		break;
	case OCRDMA_RXQP_ERRSTATS:
		data = ocrdma_rxqp_errstats(dev);
		break;
	case OCRDMA_TXQP_ERRSTATS:
		data = ocrdma_txqp_errstats(dev);
		break;
	case OCRDMA_TX_DBG_STATS:
		data = ocrdma_tx_dbg_stats(dev);
		break;
	case OCRDMA_RX_DBG_STATS:
		data = ocrdma_rx_dbg_stats(dev);
		break;
	case OCRDMA_DRV_STATS:
		data = ocrdma_driver_dbg_stats(dev);
		break;
	default:
		status = -EFAULT;
		goto exit;
	}

	if (usr_buf_len < strlen(data)) {
		status = -ENOSPC;
		goto exit;
	}

	status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data,
					 strlen(data));
exit:
	mutex_unlock(&dev->stats_lock);
	return status;
}

static const struct file_operations ocrdma_dbg_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ocrdma_dbgfs_ops_read,
	.write = ocrdma_dbgfs_ops_write,
};

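/*
 * Create the per-device debugfs directory with one file per stats
 * type, then allocate the stats buffers.  On any failure the whole
 * tree is torn down again.
 */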
void ocrdma_add_port_stats(struct ocrdma_dev *dev)
{
	if (!ocrdma_dbgfs_dir)
		return;

	/* Create port stats base dir */
	dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir);
	if (!dev->dir)
		goto err;

	dev->rsrc_stats.type = OCRDMA_RSRC_STATS;
	dev->rsrc_stats.dev = dev;
	if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
				 &dev->rsrc_stats, &ocrdma_dbg_ops))
		goto err;

	dev->rx_stats.type = OCRDMA_RXSTATS;
	dev->rx_stats.dev = dev;
	if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir,
				 &dev->rx_stats, &ocrdma_dbg_ops))
		goto err;

	dev->wqe_stats.type = OCRDMA_WQESTATS;
	dev->wqe_stats.dev = dev;
	if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir,
				 &dev->wqe_stats, &ocrdma_dbg_ops))
		goto err;

	dev->tx_stats.type = OCRDMA_TXSTATS;
	dev->tx_stats.dev = dev;
	if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir,
				 &dev->tx_stats, &ocrdma_dbg_ops))
		goto err;

	dev->db_err_stats.type = OCRDMA_DB_ERRSTATS;
	dev->db_err_stats.dev = dev;
	if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
				 &dev->db_err_stats, &ocrdma_dbg_ops))
		goto err;

	dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS;
	dev->tx_qp_err_stats.dev = dev;
	if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
				 &dev->tx_qp_err_stats, &ocrdma_dbg_ops))
		goto err;

	dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS;
	dev->rx_qp_err_stats.dev = dev;
	if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
				 &dev->rx_qp_err_stats, &ocrdma_dbg_ops))
		goto err;

	dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS;
	dev->tx_dbg_stats.dev = dev;
	if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
				 &dev->tx_dbg_stats, &ocrdma_dbg_ops))
		goto err;

	dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS;
	dev->rx_dbg_stats.dev = dev;
	if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
				 &dev->rx_dbg_stats, &ocrdma_dbg_ops))
		goto err;

	dev->driver_stats.type = OCRDMA_DRV_STATS;
	dev->driver_stats.dev = dev;
	if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
				 &dev->driver_stats, &ocrdma_dbg_ops))
		goto err;

	dev->reset_stats.type = OCRDMA_RESET_STATS;
	dev->reset_stats.dev = dev;
	if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
				 &dev->reset_stats, &ocrdma_dbg_ops))
		goto err;

	/* Now create dma_mem for stats mbx command */
	if (!ocrdma_alloc_stats_mem(dev))
		goto err;

	mutex_init(&dev->stats_lock);

	return;
err:
	ocrdma_release_stats_mem(dev);
	debugfs_remove_recursive(dev->dir);
	dev->dir = NULL;
}

void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
{
	if (!dev->dir)
		return;
	mutex_destroy(&dev->stats_lock);
	ocrdma_release_stats_mem(dev);
	debugfs_remove(dev->dir);
}

void ocrdma_init_debugfs(void)
{
	/* Create base dir in debugfs root dir */
	ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL);
}

void ocrdma_rem_debugfs(void)
{
	debugfs_remove_recursive(ocrdma_dbgfs_dir);
}