xref: /linux/drivers/crypto/hisilicon/debugfs.c (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2022 HiSilicon Limited. */
3 #include <linux/hisi_acc_qm.h>
4 #include "qm_common.h"
5 
6 #define QM_DFX_BASE			0x0100000
7 #define QM_DFX_STATE1			0x0104000
8 #define QM_DFX_STATE2			0x01040C8
9 #define QM_DFX_COMMON			0x0000
10 #define QM_DFX_BASE_LEN			0x5A
11 #define QM_DFX_STATE1_LEN		0x2E
12 #define QM_DFX_STATE2_LEN		0x11
13 #define QM_DFX_COMMON_LEN		0xC3
14 #define QM_DFX_REGS_LEN			4UL
15 #define QM_DBG_TMP_BUF_LEN		22
16 #define QM_XQC_ADDR_MASK		GENMASK(31, 0)
17 #define CURRENT_FUN_MASK		GENMASK(5, 0)
18 #define CURRENT_Q_MASK			GENMASK(31, 16)
19 #define QM_SQE_ADDR_MASK		GENMASK(7, 0)
20 
21 #define QM_DFX_MB_CNT_VF		0x104010
22 #define QM_DFX_DB_CNT_VF		0x104020
23 #define QM_DFX_SQE_CNT_VF_SQN		0x104030
24 #define QM_DFX_CQE_CNT_VF_CQN		0x104040
25 #define QM_DFX_QN_SHIFT			16
26 #define QM_DFX_CNT_CLR_CE		0x100118
27 #define QM_DBG_WRITE_LEN		1024
28 #define QM_IN_IDLE_ST_REG		0x1040e4
29 #define QM_IN_IDLE_STATE		0x1
30 
31 static const char * const qm_debug_file_name[] = {
32 	[CURRENT_QM]   = "current_qm",
33 	[CURRENT_Q]    = "current_q",
34 	[CLEAR_ENABLE] = "clear_enable",
35 };
36 
37 static const char * const qm_s[] = {
38 	"work", "stop",
39 };
40 
41 struct qm_dfx_item {
42 	const char *name;
43 	u32 offset;
44 };
45 
46 struct qm_cmd_dump_item {
47 	const char *cmd;
48 	char *info_name;
49 	int (*dump_fn)(struct hisi_qm *qm, char *cmd, char *info_name);
50 };
51 
52 static struct qm_dfx_item qm_dfx_files[] = {
53 	{"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
54 	{"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
55 	{"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
56 	{"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
57 	{"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
58 };
59 
60 #define CNT_CYC_REGS_NUM		10
61 static const struct debugfs_reg32 qm_dfx_regs[] = {
62 	/* XXX_CNT registers are read-clear */
63 	{"QM_ECC_1BIT_CNT               ",  0x104000},
64 	{"QM_ECC_MBIT_CNT               ",  0x104008},
65 	{"QM_DFX_MB_CNT                 ",  0x104018},
66 	{"QM_DFX_DB_CNT                 ",  0x104028},
67 	{"QM_DFX_SQE_CNT                ",  0x104038},
68 	{"QM_DFX_CQE_CNT                ",  0x104048},
69 	{"QM_DFX_SEND_SQE_TO_ACC_CNT    ",  0x104050},
70 	{"QM_DFX_WB_SQE_FROM_ACC_CNT    ",  0x104058},
71 	{"QM_DFX_ACC_FINISH_CNT         ",  0x104060},
72 	{"QM_DFX_CQE_ERR_CNT            ",  0x1040b4},
73 	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200},
74 	{"QM_ECC_1BIT_INF               ",  0x104004},
75 	{"QM_ECC_MBIT_INF               ",  0x10400c},
76 	{"QM_DFX_ACC_RDY_VLD0           ",  0x1040a0},
77 	{"QM_DFX_ACC_RDY_VLD1           ",  0x1040a4},
78 	{"QM_DFX_AXI_RDY_VLD            ",  0x1040a8},
79 	{"QM_DFX_FF_ST0                 ",  0x1040c8},
80 	{"QM_DFX_FF_ST1                 ",  0x1040cc},
81 	{"QM_DFX_FF_ST2                 ",  0x1040d0},
82 	{"QM_DFX_FF_ST3                 ",  0x1040d4},
83 	{"QM_DFX_FF_ST4                 ",  0x1040d8},
84 	{"QM_DFX_FF_ST5                 ",  0x1040dc},
85 	{"QM_DFX_FF_ST6                 ",  0x1040e0},
86 	{"QM_IN_IDLE_ST                 ",  0x1040e4},
87 	{"QM_CACHE_CTL                  ",  0x100050},
88 	{"QM_TIMEOUT_CFG                ",  0x100070},
89 	{"QM_DB_TIMEOUT_CFG             ",  0x100074},
90 	{"QM_FLR_PENDING_TIME_CFG       ",  0x100078},
91 	{"QM_ARUSR_MCFG1                ",  0x100088},
92 	{"QM_AWUSR_MCFG1                ",  0x100098},
93 	{"QM_AXI_M_CFG_ENABLE           ",  0x1000B0},
94 	{"QM_RAS_CE_THRESHOLD           ",  0x1000F8},
95 	{"QM_AXI_TIMEOUT_CTRL           ",  0x100120},
96 	{"QM_AXI_TIMEOUT_STATUS         ",  0x100124},
97 	{"QM_CQE_AGGR_TIMEOUT_CTRL      ",  0x100144},
98 	{"ACC_RAS_MSI_INT_SEL           ",  0x1040fc},
99 	{"QM_CQE_OUT                    ",  0x104100},
100 	{"QM_EQE_OUT                    ",  0x104104},
101 	{"QM_AEQE_OUT                   ",  0x104108},
102 	{"QM_DB_INFO0                   ",  0x104180},
103 	{"QM_DB_INFO1                   ",  0x104184},
104 	{"QM_AM_CTRL_GLOBAL             ",  0x300000},
105 	{"QM_AM_CURR_PORT_STS           ",  0x300100},
106 	{"QM_AM_CURR_TRANS_RETURN       ",  0x300150},
107 	{"QM_AM_CURR_RD_MAX_TXID        ",  0x300154},
108 	{"QM_AM_CURR_WR_MAX_TXID        ",  0x300158},
109 	{"QM_AM_ALARM_RRESP             ",  0x300180},
110 	{"QM_AM_ALARM_BRESP             ",  0x300184},
111 };
112 
113 static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
114 	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200},
115 };
116 
117 /* Define the QM dfx register regions and their lengths */
118 static struct dfx_diff_registers qm_diff_regs[] = {
119 	{
120 		.reg_offset = QM_DFX_BASE,
121 		.reg_len = QM_DFX_BASE_LEN,
122 	}, {
123 		.reg_offset = QM_DFX_STATE1,
124 		.reg_len = QM_DFX_STATE1_LEN,
125 	}, {
126 		.reg_offset = QM_DFX_STATE2,
127 		.reg_len = QM_DFX_STATE2_LEN,
128 	}, {
129 		.reg_offset = QM_DFX_COMMON,
130 		.reg_len = QM_DFX_COMMON_LEN,
131 	},
132 };
133 
134 static struct hisi_qm *file_to_qm(struct debugfs_file *file)
135 {
136 	struct qm_debug *debug = file->debug;
137 
138 	return container_of(debug, struct hisi_qm, debug);
139 }
140 
141 static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
142 			   size_t count, loff_t *pos)
143 {
144 	char buf[QM_DBG_READ_LEN];
145 	int len;
146 
147 	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
148 			"Please echo help to cmd to get help information");
149 
150 	return simple_read_from_buffer(buffer, count, pos, buf, len);
151 }
152 
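/*
 * Print @info_size bytes of @info to the kernel log as 32-bit words.
 * For a word whose bytes in memory are b0 b1 b2 b3 (b0 at the lowest
 * address), the line printed is "DWn: b3b2 b1b0".
 */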
153 static void dump_show(struct hisi_qm *qm, void *info,
154 		     unsigned int info_size, char *info_name)
155 {
156 	struct device *dev = &qm->pdev->dev;
157 	u8 *info_curr = info;
158 	u32 i;
159 #define BYTE_PER_DW	4
160 
161 	dev_info(dev, "%s DUMP\n", info_name);
162 	for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
163 		pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
164 			*(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
165 	}
166 }
167 
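/*
 * Dump the SQC of the queue pair named in the command string. The context is
 * fetched from the hardware via mailbox; if that fails, the driver's cached
 * copy is dumped as "SOFT SQC" instead. Base addresses are masked out before
 * printing.
 */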
168 static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
169 {
170 	struct device *dev = &qm->pdev->dev;
171 	struct qm_sqc sqc;
172 	u32 qp_id;
173 	int ret;
174 
175 	if (!s)
176 		return -EINVAL;
177 
178 	ret = kstrtou32(s, 0, &qp_id);
179 	if (ret || qp_id >= qm->qp_num) {
180 		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
181 		return -EINVAL;
182 	}
183 
184 	ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
185 	if (!ret) {
186 		sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
187 		sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
188 		dump_show(qm, &sqc, sizeof(struct qm_sqc), name);
189 
190 		return 0;
191 	}
192 
193 	down_read(&qm->qps_lock);
194 	if (qm->sqc) {
195 		memcpy(&sqc, qm->sqc + qp_id * sizeof(struct qm_sqc), sizeof(struct qm_sqc));
196 		sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
197 		sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
198 		dump_show(qm, &sqc, sizeof(struct qm_sqc), "SOFT SQC");
199 	}
200 	up_read(&qm->qps_lock);
201 
202 	return 0;
203 }
204 
205 static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
206 {
207 	struct device *dev = &qm->pdev->dev;
208 	struct qm_cqc cqc;
209 	u32 qp_id;
210 	int ret;
211 
212 	if (!s)
213 		return -EINVAL;
214 
215 	ret = kstrtou32(s, 0, &qp_id);
216 	if (ret || qp_id >= qm->qp_num) {
217 		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
218 		return -EINVAL;
219 	}
220 
221 	ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
222 	if (!ret) {
223 		cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
224 		cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
225 		dump_show(qm, &cqc, sizeof(struct qm_cqc), name);
226 
227 		return 0;
228 	}
229 
230 	down_read(&qm->qps_lock);
231 	if (qm->cqc) {
232 		memcpy(&cqc, qm->cqc + qp_id * sizeof(struct qm_cqc), sizeof(struct qm_cqc));
233 		cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
234 		cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
235 		dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC");
236 	}
237 	up_read(&qm->qps_lock);
238 
239 	return 0;
240 }
241 
242 static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
243 {
244 	struct device *dev = &qm->pdev->dev;
245 	struct qm_aeqc aeqc;
246 	struct qm_eqc eqc;
247 	size_t size;
248 	void *xeqc;
249 	int ret;
250 	u8 cmd;
251 
252 	if (strsep(&s, " ")) {
253 		dev_err(dev, "Please do not input extra characters!\n");
254 		return -EINVAL;
255 	}
256 
257 	if (!strcmp(name, "EQC")) {
258 		cmd = QM_MB_CMD_EQC;
259 		size = sizeof(struct qm_eqc);
260 		xeqc = &eqc;
261 	} else {
262 		cmd = QM_MB_CMD_AEQC;
263 		size = sizeof(struct qm_aeqc);
264 		xeqc = &aeqc;
265 	}
266 
267 	ret = qm_set_and_get_xqc(qm, cmd, xeqc, 0, 1);
268 	if (ret)
269 		return ret;
270 
271 	aeqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
272 	aeqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
273 	eqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
274 	eqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
275 	dump_show(qm, xeqc, size, name);
276 
277 	return ret;
278 }
279 
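/*
 * Parse "<qp number> <sqe/cqe number>" from the command string and check both
 * values against the queue pair count and the queue depth.
 */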
280 static int q_dump_param_parse(struct hisi_qm *qm, char *s,
281 			      u32 *e_id, u32 *q_id, u16 q_depth)
282 {
283 	struct device *dev = &qm->pdev->dev;
284 	unsigned int qp_num = qm->qp_num;
285 	char *presult;
286 	int ret;
287 
288 	presult = strsep(&s, " ");
289 	if (!presult) {
290 		dev_err(dev, "Please input qp number!\n");
291 		return -EINVAL;
292 	}
293 
294 	ret = kstrtou32(presult, 0, q_id);
295 	if (ret || *q_id >= qp_num) {
296 		dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
297 		return -EINVAL;
298 	}
299 
300 	presult = strsep(&s, " ");
301 	if (!presult) {
302 		dev_err(dev, "Please input sqe number!\n");
303 		return -EINVAL;
304 	}
305 
306 	ret = kstrtou32(presult, 0, e_id);
307 	if (ret || *e_id >= q_depth) {
308 		dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
309 		return -EINVAL;
310 	}
311 
312 	if (strsep(&s, " ")) {
313 		dev_err(dev, "Please do not input extra characters!\n");
314 		return -EINVAL;
315 	}
316 
317 	return 0;
318 }
319 
320 static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name)
321 {
322 	u16 sq_depth = qm->qp_array->sq_depth;
323 	struct hisi_qp *qp;
324 	u32 qp_id, sqe_id;
325 	void *sqe;
326 	int ret;
327 
328 	ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
329 	if (ret)
330 		return ret;
331 
332 	sqe = kzalloc(qm->sqe_size, GFP_KERNEL);
333 	if (!sqe)
334 		return -ENOMEM;
335 
336 	qp = &qm->qp_array[qp_id];
337 	memcpy(sqe, qp->sqe + sqe_id * qm->sqe_size, qm->sqe_size);
338 	memset(sqe + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
339 	       qm->debug.sqe_mask_len);
340 
341 	dump_show(qm, sqe, qm->sqe_size, name);
342 
343 	kfree(sqe);
344 
345 	return 0;
346 }
347 
348 static int qm_cq_dump(struct hisi_qm *qm, char *s, char *name)
349 {
350 	struct qm_cqe *cqe_curr;
351 	struct hisi_qp *qp;
352 	u32 qp_id, cqe_id;
353 	int ret;
354 
355 	ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
356 	if (ret)
357 		return ret;
358 
359 	qp = &qm->qp_array[qp_id];
360 	cqe_curr = qp->cqe + cqe_id;
361 	dump_show(qm, cqe_curr, sizeof(struct qm_cqe), name);
362 
363 	return 0;
364 }
365 
366 static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, char *name)
367 {
368 	struct device *dev = &qm->pdev->dev;
369 	u16 xeq_depth;
370 	size_t size;
371 	void *xeqe;
372 	u32 xeqe_id;
373 	int ret;
374 
375 	if (!s)
376 		return -EINVAL;
377 
378 	ret = kstrtou32(s, 0, &xeqe_id);
379 	if (ret)
380 		return -EINVAL;
381 
382 	if (!strcmp(name, "EQE")) {
383 		xeq_depth = qm->eq_depth;
384 		size = sizeof(struct qm_eqe);
385 	} else {
386 		xeq_depth = qm->aeq_depth;
387 		size = sizeof(struct qm_aeqe);
388 	}
389 
390 	if (xeqe_id >= xeq_depth) {
391 		dev_err(dev, "Please input eqe or aeqe num (0-%u)", xeq_depth - 1);
392 		return -EINVAL;
393 	}
394 
395 	down_read(&qm->qps_lock);
396 
397 	if (qm->eqe && !strcmp(name, "EQE")) {
398 		xeqe = qm->eqe + xeqe_id;
399 	} else if (qm->aeqe && !strcmp(name, "AEQE")) {
400 		xeqe = qm->aeqe + xeqe_id;
401 	} else {
402 		ret = -EINVAL;
403 		goto err_unlock;
404 	}
405 
406 	dump_show(qm, xeqe, size, name);
407 
408 err_unlock:
409 	up_read(&qm->qps_lock);
410 	return ret;
411 }
412 
413 static int qm_dbg_help(struct hisi_qm *qm, char *s)
414 {
415 	struct device *dev = &qm->pdev->dev;
416 
417 	if (strsep(&s, " ")) {
418 		dev_err(dev, "Please do not input extra characters!\n");
419 		return -EINVAL;
420 	}
421 
422 	dev_info(dev, "available commands:\n");
423 	dev_info(dev, "sqc <num>\n");
424 	dev_info(dev, "cqc <num>\n");
425 	dev_info(dev, "eqc\n");
426 	dev_info(dev, "aeqc\n");
427 	dev_info(dev, "sq <num> <e>\n");
428 	dev_info(dev, "cq <num> <e>\n");
429 	dev_info(dev, "eq <e>\n");
430 	dev_info(dev, "aeq <e>\n");
431 
432 	return 0;
433 }
434 
435 static const struct qm_cmd_dump_item qm_cmd_dump_table[] = {
436 	{
437 		.cmd = "sqc",
438 		.info_name = "SQC",
439 		.dump_fn = qm_sqc_dump,
440 	}, {
441 		.cmd = "cqc",
442 		.info_name = "CQC",
443 		.dump_fn = qm_cqc_dump,
444 	}, {
445 		.cmd = "eqc",
446 		.info_name = "EQC",
447 		.dump_fn = qm_eqc_aeqc_dump,
448 	}, {
449 		.cmd = "aeqc",
450 		.info_name = "AEQC",
451 		.dump_fn = qm_eqc_aeqc_dump,
452 	}, {
453 		.cmd = "sq",
454 		.info_name = "SQE",
455 		.dump_fn = qm_sq_dump,
456 	}, {
457 		.cmd = "cq",
458 		.info_name = "CQE",
459 		.dump_fn = qm_cq_dump,
460 	}, {
461 		.cmd = "eq",
462 		.info_name = "EQE",
463 		.dump_fn = qm_eq_aeq_dump,
464 	}, {
465 		.cmd = "aeq",
466 		.info_name = "AEQE",
467 		.dump_fn = qm_eq_aeq_dump,
468 	},
469 };
470 
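/*
 * Parse one command written to the "cmd" debugfs file and run the matching
 * dump handler from qm_cmd_dump_table. Typical usage (the debugfs root below
 * depends on the parent accelerator driver):
 *
 *   echo "help" > <debugfs mount>/<driver>/<device>/qm/cmd
 *   echo "sqc 0" > <debugfs mount>/<driver>/<device>/qm/cmd
 *
 * The dump output goes to the kernel log.
 */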
471 static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
472 {
473 	struct device *dev = &qm->pdev->dev;
474 	char *presult, *s, *s_tmp;
475 	int table_size, i, ret;
476 
477 	s = kstrdup(cmd_buf, GFP_KERNEL);
478 	if (!s)
479 		return -ENOMEM;
480 
481 	s_tmp = s;
482 	presult = strsep(&s, " ");
483 	if (!presult) {
484 		ret = -EINVAL;
485 		goto err_buffer_free;
486 	}
487 
488 	if (!strcmp(presult, "help")) {
489 		ret = qm_dbg_help(qm, s);
490 		goto err_buffer_free;
491 	}
492 
493 	table_size = ARRAY_SIZE(qm_cmd_dump_table);
494 	for (i = 0; i < table_size; i++) {
495 		if (!strcmp(presult, qm_cmd_dump_table[i].cmd)) {
496 			ret = qm_cmd_dump_table[i].dump_fn(qm, s,
497 				qm_cmd_dump_table[i].info_name);
498 			break;
499 		}
500 	}
501 
502 	if (i == table_size) {
503 		dev_info(dev, "Please echo help\n");
504 		ret = -EINVAL;
505 	}
506 
507 err_buffer_free:
508 	kfree(s_tmp);
509 
510 	return ret;
511 }
512 
513 static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
514 			    size_t count, loff_t *pos)
515 {
516 	struct hisi_qm *qm = filp->private_data;
517 	char *cmd_buf, *cmd_buf_tmp;
518 	int ret;
519 
520 	if (*pos)
521 		return 0;
522 
523 	ret = hisi_qm_get_dfx_access(qm);
524 	if (ret)
525 		return ret;
526 
527 	/* Check whether the instance is being reset. */
528 	if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
529 		ret = 0;
530 		goto put_dfx_access;
531 	}
532 
533 	if (count > QM_DBG_WRITE_LEN) {
534 		ret = -ENOSPC;
535 		goto put_dfx_access;
536 	}
537 
538 	cmd_buf = memdup_user_nul(buffer, count);
539 	if (IS_ERR(cmd_buf)) {
540 		ret = PTR_ERR(cmd_buf);
541 		goto put_dfx_access;
542 	}
543 
544 	cmd_buf_tmp = strchr(cmd_buf, '\n');
545 	if (cmd_buf_tmp) {
546 		*cmd_buf_tmp = '\0';
547 		count = cmd_buf_tmp - cmd_buf + 1;
548 	}
549 
550 	ret = qm_cmd_write_dump(qm, cmd_buf);
551 	if (ret) {
552 		kfree(cmd_buf);
553 		goto put_dfx_access;
554 	}
555 
556 	kfree(cmd_buf);
557 
558 	ret = count;
559 
560 put_dfx_access:
561 	hisi_qm_put_dfx_access(qm);
562 	return ret;
563 }
564 
565 static const struct file_operations qm_cmd_fops = {
566 	.owner = THIS_MODULE,
567 	.open = simple_open,
568 	.read = qm_cmd_read,
569 	.write = qm_cmd_write,
570 };
571 
572 /**
573  * hisi_qm_regs_dump() - Dump register values.
574  * @s: debugfs file handle.
575  * @regset: accelerator registers information.
576  *
577  * Dump accelerator registers.
578  */
579 void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
580 {
581 	struct pci_dev *pdev = to_pci_dev(regset->dev);
582 	struct hisi_qm *qm = pci_get_drvdata(pdev);
583 	const struct debugfs_reg32 *regs = regset->regs;
584 	int regs_len = regset->nregs;
585 	int i, ret;
586 	u32 val;
587 
588 	ret = hisi_qm_get_dfx_access(qm);
589 	if (ret)
590 		return;
591 
592 	for (i = 0; i < regs_len; i++) {
593 		val = readl(regset->base + regs[i].offset);
594 		seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
595 	}
596 
597 	hisi_qm_put_dfx_access(qm);
598 }
599 EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
600 
601 static int qm_regs_show(struct seq_file *s, void *unused)
602 {
603 	struct hisi_qm *qm = s->private;
604 	struct debugfs_regset32 regset;
605 
606 	if (qm->fun_type == QM_HW_PF) {
607 		regset.regs = qm_dfx_regs;
608 		regset.nregs = ARRAY_SIZE(qm_dfx_regs);
609 	} else {
610 		regset.regs = qm_vf_dfx_regs;
611 		regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
612 	}
613 
614 	regset.base = qm->io_base;
615 	regset.dev = &qm->pdev->dev;
616 
617 	hisi_qm_regs_dump(s, &regset);
618 
619 	return 0;
620 }
621 
622 DEFINE_SHOW_ATTRIBUTE(qm_regs);
623 
624 static u32 current_q_read(struct hisi_qm *qm)
625 {
626 	return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
627 }
628 
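/*
 * In QM_DFX_SQE_CNT_VF_SQN and QM_DFX_CQE_CNT_VF_CQN the selected function
 * sits in bits [5:0] and the selected queue in bits [31:16]; the writes below
 * preserve the other field via CURRENT_FUN_MASK / CURRENT_Q_MASK.
 */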
629 static int current_q_write(struct hisi_qm *qm, u32 val)
630 {
631 	u32 tmp;
632 
633 	if (val >= qm->debug.curr_qm_qp_num)
634 		return -EINVAL;
635 
636 	tmp = val << QM_DFX_QN_SHIFT |
637 	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
638 	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
639 
640 	tmp = val << QM_DFX_QN_SHIFT |
641 	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
642 	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
643 
644 	return 0;
645 }
646 
647 static u32 clear_enable_read(struct hisi_qm *qm)
648 {
649 	return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
650 }
651 
652 /* rd_clr_ctrl: 1 enables read-clear, 0 disables it */
653 static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
654 {
655 	if (rd_clr_ctrl > 1)
656 		return -EINVAL;
657 
658 	writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
659 
660 	return 0;
661 }
662 
663 static u32 current_qm_read(struct hisi_qm *qm)
664 {
665 	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
666 }
667 
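/*
 * Illustrative example of the queue split below: with ctrl_qp_num = 100,
 * qp_num = 20, vfs_num = 3 and max_qp_num = 40, vfq_num = 26 and
 * remain_q_num = 2, so VF1 and VF2 get 26 queues and the last VF gets 28.
 */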
668 static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
669 {
670 	u32 remain_q_num, vfq_num;
671 	u32 num_vfs = qm->vfs_num;
672 
673 	vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
674 	if (vfq_num >= qm->max_qp_num)
675 		return qm->max_qp_num;
676 
677 	remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
678 	if (vfq_num + remain_q_num <= qm->max_qp_num)
679 		return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
680 
681 	/*
682 	 * If vfq_num + remain_q_num > max_qp_num, each of the last
683 	 * remain_q_num VFs gets one more queue.
684 	 */
685 	return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
686 }
687 
688 static int current_qm_write(struct hisi_qm *qm, u32 val)
689 {
690 	u32 tmp;
691 
692 	if (val > qm->vfs_num)
693 		return -EINVAL;
694 
695 	/* Calculate and store curr_qm_qp_num according to the PF or VF device ID */
696 	if (!val)
697 		qm->debug.curr_qm_qp_num = qm->qp_num;
698 	else
699 		qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
700 
701 	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
702 	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
703 
704 	tmp = val |
705 	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
706 	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
707 
708 	tmp = val |
709 	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
710 	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
711 
712 	return 0;
713 }
714 
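/* Read handler for the current_qm, current_q and clear_enable debugfs files. */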
715 static ssize_t qm_debug_read(struct file *filp, char __user *buf,
716 			     size_t count, loff_t *pos)
717 {
718 	struct debugfs_file *file = filp->private_data;
719 	enum qm_debug_file index = file->index;
720 	struct hisi_qm *qm = file_to_qm(file);
721 	char tbuf[QM_DBG_TMP_BUF_LEN];
722 	u32 val;
723 	int ret;
724 
725 	ret = hisi_qm_get_dfx_access(qm);
726 	if (ret)
727 		return ret;
728 
729 	mutex_lock(&file->lock);
730 	switch (index) {
731 	case CURRENT_QM:
732 		val = current_qm_read(qm);
733 		break;
734 	case CURRENT_Q:
735 		val = current_q_read(qm);
736 		break;
737 	case CLEAR_ENABLE:
738 		val = clear_enable_read(qm);
739 		break;
740 	default:
741 		goto err_input;
742 	}
743 	mutex_unlock(&file->lock);
744 
745 	hisi_qm_put_dfx_access(qm);
746 	ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
747 	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
748 
749 err_input:
750 	mutex_unlock(&file->lock);
751 	hisi_qm_put_dfx_access(qm);
752 	return -EINVAL;
753 }
754 
755 static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
756 			      size_t count, loff_t *pos)
757 {
758 	struct debugfs_file *file = filp->private_data;
759 	enum qm_debug_file index = file->index;
760 	struct hisi_qm *qm = file_to_qm(file);
761 	unsigned long val;
762 	char tbuf[QM_DBG_TMP_BUF_LEN];
763 	int len, ret;
764 
765 	if (*pos != 0)
766 		return 0;
767 
768 	if (count >= QM_DBG_TMP_BUF_LEN)
769 		return -ENOSPC;
770 
771 	len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
772 				     count);
773 	if (len < 0)
774 		return len;
775 
776 	tbuf[len] = '\0';
777 	if (kstrtoul(tbuf, 0, &val))
778 		return -EFAULT;
779 
780 	ret = hisi_qm_get_dfx_access(qm);
781 	if (ret)
782 		return ret;
783 
784 	mutex_lock(&file->lock);
785 	switch (index) {
786 	case CURRENT_QM:
787 		ret = current_qm_write(qm, val);
788 		break;
789 	case CURRENT_Q:
790 		ret = current_q_write(qm, val);
791 		break;
792 	case CLEAR_ENABLE:
793 		ret = clear_enable_write(qm, val);
794 		break;
795 	default:
796 		ret = -EINVAL;
797 	}
798 	mutex_unlock(&file->lock);
799 
800 	hisi_qm_put_dfx_access(qm);
801 
802 	if (ret)
803 		return ret;
804 
805 	return count;
806 }
807 
808 static const struct file_operations qm_debug_fops = {
809 	.owner = THIS_MODULE,
810 	.open = simple_open,
811 	.read = qm_debug_read,
812 	.write = qm_debug_write,
813 };
814 
815 static void dfx_regs_uninit(struct hisi_qm *qm,
816 		struct dfx_diff_registers *dregs, int reg_len)
817 {
818 	int i;
819 
820 	if (!dregs)
821 		return;
822 
823 	/* Set the pointer to NULL to prevent double free */
824 	for (i = 0; i < reg_len; i++) {
825 		if (!dregs[i].regs)
826 			continue;
827 
828 		kfree(dregs[i].regs);
829 		dregs[i].regs = NULL;
830 	}
831 	kfree(dregs);
832 }
833 
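/*
 * Allocate the diff-register regions and snapshot their current hardware
 * values so a later dump can report only the registers that have changed.
 */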
834 static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
835 	const struct dfx_diff_registers *cregs, u32 reg_len)
836 {
837 	struct dfx_diff_registers *diff_regs;
838 	u32 j, base_offset;
839 	int i;
840 
841 	diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL);
842 	if (!diff_regs)
843 		return ERR_PTR(-ENOMEM);
844 
845 	for (i = 0; i < reg_len; i++) {
846 		if (!cregs[i].reg_len)
847 			continue;
848 
849 		diff_regs[i].reg_offset = cregs[i].reg_offset;
850 		diff_regs[i].reg_len = cregs[i].reg_len;
851 		diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len,
852 					 GFP_KERNEL);
853 		if (!diff_regs[i].regs)
854 			goto alloc_error;
855 
856 		for (j = 0; j < diff_regs[i].reg_len; j++) {
857 			base_offset = diff_regs[i].reg_offset +
858 					j * QM_DFX_REGS_LEN;
859 			diff_regs[i].regs[j] = readl(qm->io_base + base_offset);
860 		}
861 	}
862 
863 	return diff_regs;
864 
865 alloc_error:
866 	while (i > 0) {
867 		i--;
868 		kfree(diff_regs[i].regs);
869 	}
870 	kfree(diff_regs);
871 	return ERR_PTR(-ENOMEM);
872 }
873 
874 static int qm_diff_regs_init(struct hisi_qm *qm,
875 		struct dfx_diff_registers *dregs, u32 reg_len)
876 {
877 	int ret;
878 
879 	qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
880 	if (IS_ERR(qm->debug.qm_diff_regs)) {
881 		ret = PTR_ERR(qm->debug.qm_diff_regs);
882 		qm->debug.qm_diff_regs = NULL;
883 		return ret;
884 	}
885 
886 	qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
887 	if (IS_ERR(qm->debug.acc_diff_regs)) {
888 		dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
889 		ret = PTR_ERR(qm->debug.acc_diff_regs);
890 		qm->debug.acc_diff_regs = NULL;
891 		return ret;
892 	}
893 
894 	return 0;
895 }
896 
897 static void qm_last_regs_uninit(struct hisi_qm *qm)
898 {
899 	struct qm_debug *debug = &qm->debug;
900 
901 	if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
902 		return;
903 
904 	kfree(debug->qm_last_words);
905 	debug->qm_last_words = NULL;
906 }
907 
908 static int qm_last_regs_init(struct hisi_qm *qm)
909 {
910 	int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs);
911 	struct qm_debug *debug = &qm->debug;
912 	int i;
913 
914 	if (qm->fun_type == QM_HW_VF)
915 		return 0;
916 
917 	debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
918 	if (!debug->qm_last_words)
919 		return -ENOMEM;
920 
921 	for (i = 0; i < dfx_regs_num; i++) {
922 		debug->qm_last_words[i] = readl_relaxed(qm->io_base +
923 			qm_dfx_regs[i].offset);
924 	}
925 
926 	return 0;
927 }
928 
929 static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
930 {
931 	dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
932 	qm->debug.acc_diff_regs = NULL;
933 	dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
934 	qm->debug.qm_diff_regs = NULL;
935 }
936 
937 /**
938  * hisi_qm_regs_debugfs_init() - Allocate memory for registers.
939  * @qm: device qm handle.
940  * @dregs: diff registers handle.
941  * @reg_len: diff registers region length.
942  */
943 int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
944 		struct dfx_diff_registers *dregs, u32 reg_len)
945 {
946 	int ret;
947 
948 	if (!qm || !dregs)
949 		return -EINVAL;
950 
951 	if (qm->fun_type != QM_HW_PF)
952 		return 0;
953 
954 	ret = qm_last_regs_init(qm);
955 	if (ret) {
956 		dev_info(&qm->pdev->dev, "failed to init qm words memory!\n");
957 		return ret;
958 	}
959 
960 	ret = qm_diff_regs_init(qm, dregs, reg_len);
961 	if (ret) {
962 		qm_last_regs_uninit(qm);
963 		return ret;
964 	}
965 
966 	return 0;
967 }
968 EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_init);
969 
970 /**
971  * hisi_qm_regs_debugfs_uninit() - Free memory for registers.
972  * @qm: device qm handle.
973  * @reg_len: diff registers region length.
974  */
975 void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len)
976 {
977 	if (!qm || qm->fun_type != QM_HW_PF)
978 		return;
979 
980 	qm_diff_regs_uninit(qm, reg_len);
981 	qm_last_regs_uninit(qm);
982 }
983 EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_uninit);
984 
985 /**
986  * hisi_qm_acc_diff_regs_dump() - Dump the diff registers that have changed.
987  * @qm: device qm handle.
988  * @s: Debugfs file handle.
989  * @dregs: diff registers handle.
990  * @regs_len: diff registers region length.
991  */
992 void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
993 	struct dfx_diff_registers *dregs, u32 regs_len)
994 {
995 	u32 j, val, base_offset;
996 	int i, ret;
997 
998 	if (!qm || !s || !dregs)
999 		return;
1000 
1001 	ret = hisi_qm_get_dfx_access(qm);
1002 	if (ret)
1003 		return;
1004 
1005 	down_read(&qm->qps_lock);
1006 	for (i = 0; i < regs_len; i++) {
1007 		if (!dregs[i].reg_len)
1008 			continue;
1009 
1010 		for (j = 0; j < dregs[i].reg_len; j++) {
1011 			base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN;
1012 			val = readl(qm->io_base + base_offset);
1013 			if (val != dregs[i].regs[j])
1014 				seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n",
1015 					   base_offset, dregs[i].regs[j], val);
1016 		}
1017 	}
1018 	up_read(&qm->qps_lock);
1019 
1020 	hisi_qm_put_dfx_access(qm);
1021 }
1022 EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump);
1023 
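/*
 * Compare the current QM dfx registers with the snapshot taken in
 * qm_last_regs_init() and print every register that has changed.
 */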
1024 void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm)
1025 {
1026 	struct qm_debug *debug = &qm->debug;
1027 	struct pci_dev *pdev = qm->pdev;
1028 	u32 val;
1029 	int i;
1030 
1031 	if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
1032 		return;
1033 
1034 	for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) {
1035 		val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset);
1036 		if (debug->qm_last_words[i] != val)
1037 			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
1038 			qm_dfx_regs[i].name, debug->qm_last_words[i], val);
1039 	}
1040 }
1041 
1042 static int qm_diff_regs_show(struct seq_file *s, void *unused)
1043 {
1044 	struct hisi_qm *qm = s->private;
1045 
1046 	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs,
1047 					ARRAY_SIZE(qm_diff_regs));
1048 
1049 	return 0;
1050 }
1051 DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
1052 
1053 static int qm_state_show(struct seq_file *s, void *unused)
1054 {
1055 	struct hisi_qm *qm = s->private;
1056 	u32 val;
1057 	int ret;
1058 
1059 	/* If the device is suspended, directly report the idle state. */
1060 	ret = hisi_qm_get_dfx_access(qm);
1061 	if (!ret) {
1062 		val = readl(qm->io_base + QM_IN_IDLE_ST_REG);
1063 		hisi_qm_put_dfx_access(qm);
1064 	} else if (ret == -EAGAIN) {
1065 		val = QM_IN_IDLE_STATE;
1066 	} else {
1067 		return ret;
1068 	}
1069 
1070 	seq_printf(s, "%u\n", val);
1071 
1072 	return 0;
1073 }
1074 
1075 DEFINE_SHOW_ATTRIBUTE(qm_state);
1076 
1077 static ssize_t qm_status_read(struct file *filp, char __user *buffer,
1078 			      size_t count, loff_t *pos)
1079 {
1080 	struct hisi_qm *qm = filp->private_data;
1081 	char buf[QM_DBG_READ_LEN];
1082 	int val, len;
1083 
1084 	val = atomic_read(&qm->status.flags);
1085 	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
1086 
1087 	return simple_read_from_buffer(buffer, count, pos, buf, len);
1088 }
1089 
1090 static const struct file_operations qm_status_fops = {
1091 	.owner = THIS_MODULE,
1092 	.open = simple_open,
1093 	.read = qm_status_read,
1094 };
1095 
1096 static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
1097 				   enum qm_debug_file index)
1098 {
1099 	struct debugfs_file *file = qm->debug.files + index;
1100 
1101 	file->index = index;
1102 	mutex_init(&file->lock);
1103 	file->debug = &qm->debug;
1104 
1105 	debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
1106 			    &qm_debug_fops);
1107 }
1108 
1109 static int qm_debugfs_atomic64_set(void *data, u64 val)
1110 {
1111 	if (val)
1112 		return -EINVAL;
1113 
1114 	atomic64_set((atomic64_t *)data, 0);
1115 
1116 	return 0;
1117 }
1118 
1119 static int qm_debugfs_atomic64_get(void *data, u64 *val)
1120 {
1121 	*val = atomic64_read((atomic64_t *)data);
1122 
1123 	return 0;
1124 }
1125 
1126 DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
1127 			 qm_debugfs_atomic64_set, "%llu\n");
1128 
1129 /**
1130  * hisi_qm_debug_init() - Initialize qm related debugfs files.
1131  * @qm: The qm for which we want to add debugfs files.
1132  *
1133  * Create qm related debugfs files.
1134  */
1135 void hisi_qm_debug_init(struct hisi_qm *qm)
1136 {
1137 	struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
1138 	struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx;
1139 	struct qm_dfx *dfx = &qm->debug.dfx;
1140 	struct dentry *qm_d;
1141 	void *data;
1142 	int i;
1143 
1144 	qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
1145 	qm->debug.qm_d = qm_d;
1146 
1147 	/* only show this in PF */
1148 	if (qm->fun_type == QM_HW_PF) {
1149 		debugfs_create_file("qm_state", 0444, qm->debug.qm_d,
1150 					qm, &qm_state_fops);
1151 
1152 		qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
1153 		for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
1154 			qm_create_debugfs_file(qm, qm->debug.qm_d, i);
1155 	}
1156 
1157 	if (qm_regs)
1158 		debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
1159 					qm, &qm_diff_regs_fops);
1160 
1161 	debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
1162 
1163 	debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
1164 
1165 	debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
1166 			&qm_status_fops);
1167 
1168 	debugfs_create_u32("dev_state", 0444, qm->debug.qm_d, &dev_dfx->dev_state);
1169 	debugfs_create_u32("dev_timeout", 0644, qm->debug.qm_d, &dev_dfx->dev_timeout);
1170 
1171 	for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
1172 		data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
1173 		debugfs_create_file(qm_dfx_files[i].name,
1174 			0644,
1175 			qm_d,
1176 			data,
1177 			&qm_atomic64_ops);
1178 	}
1179 
1180 	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
1181 		hisi_qm_set_algqos_init(qm);
1182 }
1183 EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
1184 
1185 /**
1186  * hisi_qm_debug_regs_clear() - clear qm debug related registers.
1187  * @qm: The qm for which we want to clear its debug registers.
1188  */
1189 void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
1190 {
1191 	const struct debugfs_reg32 *regs;
1192 	int i;
1193 
1194 	/* clear current_qm */
1195 	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
1196 	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
1197 
1198 	/* clear current_q */
1199 	writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
1200 	writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
1201 
1202 	/*
1203 	 * These registers are read-clear: enable clearing, read them so
1204 	 * they are cleared, then disable clearing again.
1205 	 */
1206 	writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
1207 
1208 	regs = qm_dfx_regs;
1209 	for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
1210 		readl(qm->io_base + regs->offset);
1211 		regs++;
1212 	}
1213 
1214 	/* clear clear_enable */
1215 	writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
1216 }
1217 EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
1218