xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c (revision a4989fa91110508b64eea7ccde63d062113988ff)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #ifdef CONFIG_DEBUG_FS
12 
13 #include <linux/fs.h>
14 #include <linux/debugfs.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 
18 #include "rvu_struct.h"
19 #include "rvu_reg.h"
20 #include "rvu.h"
21 #include "cgx.h"
22 #include "npc.h"
23 
24 #define DEBUGFS_DIR_NAME "octeontx2"
25 
/* Generic indices into the CGX statistics name tables below.
 * CGX_STATx corresponds to hardware statistics counter x; the meaning of
 * a given index differs between the RX and TX tables.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
47 
/* NIX TX stats: per-LF transmit statistic indices (values match the
 * hardware stat register offsets, hence the explicit assignments).
 */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,
};

/* NIX RX stats: per-LF receive statistic indices */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,
};
74 
/* Display names for CGX RX statistics counters; the array index matches
 * the hardware RX stat counter number (CGX_STATx).
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
90 
91 static char *cgx_tx_stats_fields[] = {
92 	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
93 	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
94 	[CGX_STAT2]	= "Multiple collisions before successful transmission",
95 	[CGX_STAT3]	= "Single collisions before successful transmission",
96 	[CGX_STAT4]	= "Total octets sent on the interface",
97 	[CGX_STAT5]	= "Total frames sent on the interface",
98 	[CGX_STAT6]	= "Packets sent with an octet count < 64",
99 	[CGX_STAT7]	= "Packets sent with an octet count == 64",
100 	[CGX_STAT8]	= "Packets sent with an octet count of 65–127",
101 	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
102 	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
103 	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
104 	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
105 	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
106 	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
107 	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
108 	[CGX_STAT16]	= "Transmit underflow and were truncated",
109 	[CGX_STAT17]	= "Control/PAUSE packets sent",
110 };
111 
/* CPT engine types. NOTE(review): AE/SE/IE presumably stand for the
 * asymmetric, symmetric and inline-IPsec crypto engines — confirm against
 * the CPT hardware manual; only the numeric values are used here.
 */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
117 
/* Number of NDC banks is advertised in the low byte of NDC_AF_CONST */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
						blk_addr, NDC_AF_CONST) & 0xFF)

/* Expand to NULL so the FOPS-generating macros below can be invoked with
 * a literal NULL for a missing read or write handler.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Defines rvu_dbg_<name>_fops backed by seq_file: reads go through the
 * rvu_dbg_<read_op> show callback; writes (optional) go straight to
 * rvu_dbg_<write_op>.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Defines rvu_dbg_<name>_fops with raw read/write handlers (no seq_file) */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
145 
146 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
147 
148 /* Dumps current provisioning status of all RVU block LFs */
149 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
150 					  char __user *buffer,
151 					  size_t count, loff_t *ppos)
152 {
153 	int index, off = 0, flag = 0, go_back = 0, off_prev;
154 	struct rvu *rvu = filp->private_data;
155 	int lf, pf, vf, pcifunc;
156 	struct rvu_block block;
157 	int bytes_not_copied;
158 	int buf_size = 2048;
159 	char *buf;
160 
161 	/* don't allow partial reads */
162 	if (*ppos != 0)
163 		return 0;
164 
165 	buf = kzalloc(buf_size, GFP_KERNEL);
166 	if (!buf)
167 		return -ENOSPC;
168 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
169 	for (index = 0; index < BLK_COUNT; index++)
170 		if (strlen(rvu->hw->block[index].name))
171 			off +=	scnprintf(&buf[off], buf_size - 1 - off,
172 					  "%*s\t", (index - 1) * 2,
173 					  rvu->hw->block[index].name);
174 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
175 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
176 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
177 			pcifunc = pf << 10 | vf;
178 			if (!pcifunc)
179 				continue;
180 
181 			if (vf) {
182 				go_back = scnprintf(&buf[off],
183 						    buf_size - 1 - off,
184 						    "PF%d:VF%d\t\t", pf,
185 						    vf - 1);
186 			} else {
187 				go_back = scnprintf(&buf[off],
188 						    buf_size - 1 - off,
189 						    "PF%d\t\t", pf);
190 			}
191 
192 			off += go_back;
193 			for (index = 0; index < BLKTYPE_MAX; index++) {
194 				block = rvu->hw->block[index];
195 				if (!strlen(block.name))
196 					continue;
197 				off_prev = off;
198 				for (lf = 0; lf < block.lf.max; lf++) {
199 					if (block.fn_map[lf] != pcifunc)
200 						continue;
201 					flag = 1;
202 					off += scnprintf(&buf[off], buf_size - 1
203 							- off, "%3d,", lf);
204 				}
205 				if (flag && off_prev != off)
206 					off--;
207 				else
208 					go_back++;
209 				off += scnprintf(&buf[off], buf_size - 1 - off,
210 						"\t");
211 			}
212 			if (!flag)
213 				off -= go_back;
214 			else
215 				flag = 0;
216 			off--;
217 			off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
218 		}
219 	}
220 
221 	bytes_not_copied = copy_to_user(buffer, buf, off);
222 	kfree(buf);
223 
224 	if (bytes_not_copied)
225 		return -EFAULT;
226 
227 	*ppos = off;
228 	return off;
229 }
230 
231 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
232 
233 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
234 {
235 	struct rvu *rvu = filp->private;
236 	struct pci_dev *pdev = NULL;
237 	char cgx[10], lmac[10];
238 	struct rvu_pfvf *pfvf;
239 	int pf, domain, blkid;
240 	u8 cgx_id, lmac_id;
241 	u16 pcifunc;
242 
243 	domain = 2;
244 	seq_puts(filp, "PCI dev\t\tRVU PF Func\tNIX block\tCGX\tLMAC\n");
245 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
246 		if (!is_pf_cgxmapped(rvu, pf))
247 			continue;
248 
249 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
250 		if (!pdev)
251 			continue;
252 
253 		cgx[0] = 0;
254 		lmac[0] = 0;
255 		pcifunc = pf << 10;
256 		pfvf = rvu_get_pfvf(rvu, pcifunc);
257 
258 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
259 			blkid = 0;
260 		else
261 			blkid = 1;
262 
263 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
264 				    &lmac_id);
265 		sprintf(cgx, "CGX%d", cgx_id);
266 		sprintf(lmac, "LMAC%d", lmac_id);
267 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
268 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
269 	}
270 	return 0;
271 }
272 
273 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
274 
275 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
276 				u16 *pcifunc)
277 {
278 	struct rvu_block *block;
279 	struct rvu_hwinfo *hw;
280 
281 	hw = rvu->hw;
282 	block = &hw->block[blkaddr];
283 
284 	if (lf < 0 || lf >= block->lf.max) {
285 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
286 			 block->lf.max - 1);
287 		return false;
288 	}
289 
290 	*pcifunc = block->fn_map[lf];
291 	if (!*pcifunc) {
292 		dev_warn(rvu->dev,
293 			 "This LF is not attached to any RVU PFFUNC\n");
294 		return false;
295 	}
296 	return true;
297 }
298 
299 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
300 {
301 	char *buf;
302 
303 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
304 	if (!buf)
305 		return;
306 
307 	if (!pfvf->aura_ctx) {
308 		seq_puts(m, "Aura context is not initialized\n");
309 	} else {
310 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
311 					pfvf->aura_ctx->qsize);
312 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
313 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
314 	}
315 
316 	if (!pfvf->pool_ctx) {
317 		seq_puts(m, "Pool context is not initialized\n");
318 	} else {
319 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
320 					pfvf->pool_ctx->qsize);
321 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
322 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
323 	}
324 	kfree(buf);
325 }
326 
327 /* The 'qsize' entry dumps current Aura/Pool context Qsize
328  * and each context's current enable/disable status in a bitmap.
329  */
330 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
331 				 int blktype)
332 {
333 	void (*print_qsize)(struct seq_file *filp,
334 			    struct rvu_pfvf *pfvf) = NULL;
335 	struct dentry *current_dir;
336 	struct rvu_pfvf *pfvf;
337 	struct rvu *rvu;
338 	int qsize_id;
339 	u16 pcifunc;
340 	int blkaddr;
341 
342 	rvu = filp->private;
343 	switch (blktype) {
344 	case BLKTYPE_NPA:
345 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
346 		print_qsize = print_npa_qsize;
347 		break;
348 
349 	case BLKTYPE_NIX:
350 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
351 		print_qsize = print_nix_qsize;
352 		break;
353 
354 	default:
355 		return -EINVAL;
356 	}
357 
358 	if (blktype == BLKTYPE_NPA) {
359 		blkaddr = BLKADDR_NPA;
360 	} else {
361 		current_dir = filp->file->f_path.dentry->d_parent;
362 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
363 				   BLKADDR_NIX1 : BLKADDR_NIX0);
364 	}
365 
366 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
367 		return -EINVAL;
368 
369 	pfvf = rvu_get_pfvf(rvu, pcifunc);
370 	print_qsize(filp, pfvf);
371 
372 	return 0;
373 }
374 
375 static ssize_t rvu_dbg_qsize_write(struct file *filp,
376 				   const char __user *buffer, size_t count,
377 				   loff_t *ppos, int blktype)
378 {
379 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
380 	struct seq_file *seqfile = filp->private_data;
381 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
382 	struct rvu *rvu = seqfile->private;
383 	struct dentry *current_dir;
384 	int blkaddr;
385 	u16 pcifunc;
386 	int ret, lf;
387 
388 	cmd_buf = memdup_user(buffer, count);
389 	if (IS_ERR(cmd_buf))
390 		return -ENOMEM;
391 
392 	cmd_buf[count] = '\0';
393 
394 	cmd_buf_tmp = strchr(cmd_buf, '\n');
395 	if (cmd_buf_tmp) {
396 		*cmd_buf_tmp = '\0';
397 		count = cmd_buf_tmp - cmd_buf + 1;
398 	}
399 
400 	cmd_buf_tmp = cmd_buf;
401 	subtoken = strsep(&cmd_buf, " ");
402 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
403 	if (cmd_buf)
404 		ret = -EINVAL;
405 
406 	if (!strncmp(subtoken, "help", 4) || ret < 0) {
407 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
408 		goto qsize_write_done;
409 	}
410 
411 	if (blktype == BLKTYPE_NPA) {
412 		blkaddr = BLKADDR_NPA;
413 	} else {
414 		current_dir = filp->f_path.dentry->d_parent;
415 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
416 				   BLKADDR_NIX1 : BLKADDR_NIX0);
417 	}
418 
419 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
420 		ret = -EINVAL;
421 		goto qsize_write_done;
422 	}
423 	if (blktype  == BLKTYPE_NPA)
424 		rvu->rvu_dbg.npa_qsize_id = lf;
425 	else
426 		rvu->rvu_dbg.nix_qsize_id = lf;
427 
428 qsize_write_done:
429 	kfree(cmd_buf_tmp);
430 	return ret ? ret : count;
431 }
432 
/* debugfs 'qsize' write handler for the NPA block */
static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
					    BLKTYPE_NPA);
}

/* debugfs 'qsize' read handler for the NPA block */
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}

RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
447 
/* Dumps given NPA Aura's context, as returned in an AQ read response.
 * Each line is prefixed with the context word (W0-W6) the field lives in.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
}
489 
/* Dumps given NPA Pool's context, as returned in an AQ read response.
 * Each line is prefixed with the context word (W0-W8) the field lives in.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
}
531 
/* Reads aura/pool's ctx from admin queue and dumps it. The LF, context id
 * and the "dump all" flag were cached by the matching _ctx_write handler.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Pick up the selection previously written to this debugfs file */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build a read instruction for the NPA admin queue */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* 'all' dumps every context [0, qsize); otherwise just 'id' */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
612 
613 static int write_npa_ctx(struct rvu *rvu, bool all,
614 			 int npalf, int id, int ctype)
615 {
616 	struct rvu_pfvf *pfvf;
617 	int max_id = 0;
618 	u16 pcifunc;
619 
620 	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
621 		return -EINVAL;
622 
623 	pfvf = rvu_get_pfvf(rvu, pcifunc);
624 
625 	if (ctype == NPA_AQ_CTYPE_AURA) {
626 		if (!pfvf->aura_ctx) {
627 			dev_warn(rvu->dev, "Aura context is not initialized\n");
628 			return -EINVAL;
629 		}
630 		max_id = pfvf->aura_ctx->qsize;
631 	} else if (ctype == NPA_AQ_CTYPE_POOL) {
632 		if (!pfvf->pool_ctx) {
633 			dev_warn(rvu->dev, "Pool context is not initialized\n");
634 			return -EINVAL;
635 		}
636 		max_id = pfvf->pool_ctx->qsize;
637 	}
638 
639 	if (id < 0 || id >= max_id) {
640 		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
641 			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
642 			max_id - 1);
643 		return -EINVAL;
644 	}
645 
646 	switch (ctype) {
647 	case NPA_AQ_CTYPE_AURA:
648 		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
649 		rvu->rvu_dbg.npa_aura_ctx.id = id;
650 		rvu->rvu_dbg.npa_aura_ctx.all = all;
651 		break;
652 
653 	case NPA_AQ_CTYPE_POOL:
654 		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
655 		rvu->rvu_dbg.npa_pool_ctx.id = id;
656 		rvu->rvu_dbg.npa_pool_ctx.all = all;
657 		break;
658 	default:
659 		return -EINVAL;
660 	}
661 	return 0;
662 }
663 
/* Parses "<npalf> [<id>|all]" from a debugfs write into *npalf, *id, *all.
 * NOTE: cmd_buf[*count] is written below, so the caller must pass a buffer
 * of at least *count + 1 bytes.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';
	/* accept the trailing newline appended by 'echo' */
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	/* first token: the NPA LF number */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	/* optional second token: a context id or the literal "all" */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	/* anything after the second token is a usage error */
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
701 
702 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
703 				     const char __user *buffer,
704 				     size_t count, loff_t *ppos, int ctype)
705 {
706 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
707 					"aura" : "pool";
708 	struct seq_file *seqfp = filp->private_data;
709 	struct rvu *rvu = seqfp->private;
710 	int npalf, id = 0, ret;
711 	bool all = false;
712 
713 	if ((*ppos != 0) || !count)
714 		return -EINVAL;
715 
716 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
717 	if (!cmd_buf)
718 		return count;
719 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
720 				   &npalf, &id, &all);
721 	if (ret < 0) {
722 		dev_info(rvu->dev,
723 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
724 			 ctype_string, ctype_string);
725 		goto done;
726 	} else {
727 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
728 	}
729 done:
730 	kfree(cmd_buf);
731 	return ret ? ret : count;
732 }
733 
/* debugfs 'npa_aura_ctx' write handler: selects which aura(s) to dump */
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_AURA);
}

/* debugfs 'npa_aura_ctx' read handler: dumps the selected aura context(s) */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);

/* debugfs 'npa_pool_ctx' write handler: selects which pool(s) to dump */
static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_POOL);
}

/* debugfs 'npa_pool_ctx' read handler: dumps the selected pool context(s) */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}

RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
763 
764 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
765 			    int ctype, int transaction)
766 {
767 	u64 req, out_req, lat, cant_alloc;
768 	struct nix_hw *nix_hw;
769 	struct rvu *rvu;
770 	int port;
771 
772 	if (blk_addr == BLKADDR_NDC_NPA0) {
773 		rvu = s->private;
774 	} else {
775 		nix_hw = s->private;
776 		rvu = nix_hw->rvu;
777 	}
778 
779 	for (port = 0; port < NDC_MAX_PORT; port++) {
780 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
781 						(port, ctype, transaction));
782 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
783 						(port, ctype, transaction));
784 		out_req = rvu_read64(rvu, blk_addr,
785 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
786 				     (port, ctype, transaction));
787 		cant_alloc = rvu_read64(rvu, blk_addr,
788 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
789 					(port, transaction));
790 		seq_printf(s, "\nPort:%d\n", port);
791 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
792 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
793 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
794 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
795 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
796 	}
797 }
798 
799 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
800 {
801 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
802 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
803 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
804 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
805 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
806 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
807 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
808 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
809 	return 0;
810 }
811 
/* debugfs read handler: NDC cache stats of the NPA block */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
818 
819 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
820 {
821 	struct nix_hw *nix_hw;
822 	struct rvu *rvu;
823 	int bank, max_bank;
824 
825 	if (blk_addr == BLKADDR_NDC_NPA0) {
826 		rvu = s->private;
827 	} else {
828 		nix_hw = s->private;
829 		rvu = nix_hw->rvu;
830 	}
831 
832 	max_bank = NDC_MAX_BANK(rvu, blk_addr);
833 	for (bank = 0; bank < max_bank; bank++) {
834 		seq_printf(s, "BANK:%d\n", bank);
835 		seq_printf(s, "\tHits:\t%lld\n",
836 			   (u64)rvu_read64(rvu, blk_addr,
837 			   NDC_AF_BANKX_HIT_PC(bank)));
838 		seq_printf(s, "\tMiss:\t%lld\n",
839 			   (u64)rvu_read64(rvu, blk_addr,
840 			    NDC_AF_BANKX_MISS_PC(bank)));
841 	}
842 	return 0;
843 }
844 
/* debugfs read handler: NDC RX cache stats of the owning NIX block */
static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int blkaddr = 0;
	int ndc_idx = 0;

	/* map the NIX block this file belongs to onto its RX NDC */
	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);

	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
859 
/* debugfs read handler: NDC TX cache stats of the owning NIX block */
static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int blkaddr = 0;
	int ndc_idx = 0;

	/* map the NIX block this file belongs to onto its TX NDC */
	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);

	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
874 
/* debugfs read handler: NDC hit/miss stats of the NPA block */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
882 
/* debugfs read handler: NDC RX hit/miss stats of the owning NIX block.
 * NOTE(review): ndc_idx is initialized to NPA0_U here although this is a
 * NIX file; harmless since ndc_blk_hits_miss_stats() ignores its idx
 * argument, but it looks like a copy-paste leftover — confirm.
 */
static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
						void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int ndc_idx = NPA0_U;
	int blkaddr = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);

	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
897 
/* debugfs read handler: NDC TX hit/miss stats of the owning NIX block.
 * NOTE(review): as in the RX variant, ndc_idx = NPA0_U is unused by the
 * callee and appears to be a copy-paste leftover — confirm.
 */
static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
						void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int ndc_idx = NPA0_U;
	int blkaddr = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);

	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
912 
/* Dumps given nix_sq's context, as returned in an AQ read response.
 * Each line is prefixed with the context word (W0-W15) the field lives in.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;

	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
976 
/* Dumps given nix_rq's context, one "Wn:" group per 64-bit context word.
 * 'rsp' must hold the result of a successful NIX_AQ_INSTOP_READ for an RQ.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;

	/* W0: basic queue config (aura, target CQ, enables) */
	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	/* W1: drop/caching policy and buffer auras */
	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	/* W2: packet placement (skips, buffer sizes) */
	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	/* W3: backpressure/drop thresholds */
	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	/* W4: interrupt state and LPB thresholds */
	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	/* W5: flow tagging */
	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6-W10: statistics counters */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1038 
1039 /* Dumps given nix_cq's context */
1040 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1041 {
1042 	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
1043 
1044 	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
1045 
1046 	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
1047 	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
1048 		   cq_ctx->avg_con, cq_ctx->cint_idx);
1049 	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
1050 		   cq_ctx->cq_err, cq_ctx->qint_idx);
1051 	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
1052 		   cq_ctx->bpid, cq_ctx->bp_ena);
1053 
1054 	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
1055 		   cq_ctx->update_time, cq_ctx->avg_level);
1056 	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
1057 		   cq_ctx->head, cq_ctx->tail);
1058 
1059 	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
1060 		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
1061 	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
1062 		   cq_ctx->qsize, cq_ctx->caching);
1063 	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
1064 		   cq_ctx->substream, cq_ctx->ena);
1065 	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
1066 		   cq_ctx->drop_ena, cq_ctx->drop);
1067 	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
1068 }
1069 
/* Dumps the NIX SQ/RQ/CQ context(s) (selected by 'ctype') last requested
 * through the matching debugfs write handler.  The nixlf/id/all selection
 * is read back from rvu->rvu_dbg, where write_nix_queue_ctx() stored it
 * after validation.  Contexts are fetched from hardware via the AF
 * admin-queue mailbox READ op and pretty-printed per queue.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Recover the selection saved by the corresponding write handler */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	/* The LF owner must have actually initialized the queue contexts */
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick per-type queue count, label and printer */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* Dump all queues [0, qsize) or just the single requested one */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1158 
1159 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1160 			       int id, int ctype, char *ctype_string,
1161 			       struct seq_file *m)
1162 {
1163 	struct nix_hw *nix_hw = m->private;
1164 	struct rvu_pfvf *pfvf;
1165 	int max_id = 0;
1166 	u16 pcifunc;
1167 
1168 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1169 		return -EINVAL;
1170 
1171 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1172 
1173 	if (ctype == NIX_AQ_CTYPE_SQ) {
1174 		if (!pfvf->sq_ctx) {
1175 			dev_warn(rvu->dev, "SQ context is not initialized\n");
1176 			return -EINVAL;
1177 		}
1178 		max_id = pfvf->sq_ctx->qsize;
1179 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
1180 		if (!pfvf->rq_ctx) {
1181 			dev_warn(rvu->dev, "RQ context is not initialized\n");
1182 			return -EINVAL;
1183 		}
1184 		max_id = pfvf->rq_ctx->qsize;
1185 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
1186 		if (!pfvf->cq_ctx) {
1187 			dev_warn(rvu->dev, "CQ context is not initialized\n");
1188 			return -EINVAL;
1189 		}
1190 		max_id = pfvf->cq_ctx->qsize;
1191 	}
1192 
1193 	if (id < 0 || id >= max_id) {
1194 		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1195 			 ctype_string, max_id - 1);
1196 		return -EINVAL;
1197 	}
1198 	switch (ctype) {
1199 	case NIX_AQ_CTYPE_CQ:
1200 		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1201 		rvu->rvu_dbg.nix_cq_ctx.id = id;
1202 		rvu->rvu_dbg.nix_cq_ctx.all = all;
1203 		break;
1204 
1205 	case NIX_AQ_CTYPE_SQ:
1206 		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1207 		rvu->rvu_dbg.nix_sq_ctx.id = id;
1208 		rvu->rvu_dbg.nix_sq_ctx.all = all;
1209 		break;
1210 
1211 	case NIX_AQ_CTYPE_RQ:
1212 		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1213 		rvu->rvu_dbg.nix_rq_ctx.id = id;
1214 		rvu->rvu_dbg.nix_rq_ctx.all = all;
1215 		break;
1216 	default:
1217 		return -EINVAL;
1218 	}
1219 	return 0;
1220 }
1221 
1222 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1223 					   const char __user *buffer,
1224 					   size_t count, loff_t *ppos,
1225 					   int ctype)
1226 {
1227 	struct seq_file *m = filp->private_data;
1228 	struct nix_hw *nix_hw = m->private;
1229 	struct rvu *rvu = nix_hw->rvu;
1230 	char *cmd_buf, *ctype_string;
1231 	int nixlf, id = 0, ret;
1232 	bool all = false;
1233 
1234 	if ((*ppos != 0) || !count)
1235 		return -EINVAL;
1236 
1237 	switch (ctype) {
1238 	case NIX_AQ_CTYPE_SQ:
1239 		ctype_string = "sq";
1240 		break;
1241 	case NIX_AQ_CTYPE_RQ:
1242 		ctype_string = "rq";
1243 		break;
1244 	case NIX_AQ_CTYPE_CQ:
1245 		ctype_string = "cq";
1246 		break;
1247 	default:
1248 		return -EINVAL;
1249 	}
1250 
1251 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1252 
1253 	if (!cmd_buf)
1254 		return count;
1255 
1256 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1257 				   &nixlf, &id, &all);
1258 	if (ret < 0) {
1259 		dev_info(rvu->dev,
1260 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1261 			 ctype_string, ctype_string);
1262 		goto done;
1263 	} else {
1264 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1265 					  ctype_string, m);
1266 	}
1267 done:
1268 	kfree(cmd_buf);
1269 	return ret ? ret : count;
1270 }
1271 
/* debugfs write handler for <debugfs>/octeontx2/nix*/sq_ctx */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_SQ);
}
1279 
/* debugfs read (seq_file show) handler for <debugfs>/octeontx2/nix*/sq_ctx */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}
1284 
1285 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1286 
/* debugfs write handler for <debugfs>/octeontx2/nix*/rq_ctx */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_RQ);
}
1294 
/* debugfs read (seq_file show) handler for <debugfs>/octeontx2/nix*/rq_ctx */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
}
1299 
1300 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1301 
/* debugfs write handler for <debugfs>/octeontx2/nix*/cq_ctx */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_CQ);
}
1309 
/* debugfs read (seq_file show) handler for <debugfs>/octeontx2/nix*/cq_ctx */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}
1314 
1315 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1316 
1317 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1318 				 unsigned long *bmap, char *qtype)
1319 {
1320 	char *buf;
1321 
1322 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1323 	if (!buf)
1324 		return;
1325 
1326 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1327 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1328 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1329 		   qtype, buf);
1330 	kfree(buf);
1331 }
1332 
1333 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1334 {
1335 	if (!pfvf->cq_ctx)
1336 		seq_puts(filp, "cq context is not initialized\n");
1337 	else
1338 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1339 				     "cq");
1340 
1341 	if (!pfvf->rq_ctx)
1342 		seq_puts(filp, "rq context is not initialized\n");
1343 	else
1344 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1345 				     "rq");
1346 
1347 	if (!pfvf->sq_ctx)
1348 		seq_puts(filp, "sq context is not initialized\n");
1349 	else
1350 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1351 				     "sq");
1352 }
1353 
/* debugfs write handler for <debugfs>/octeontx2/nix*/qsize */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}
1361 
/* debugfs read (seq_file show) handler for <debugfs>/octeontx2/nix*/qsize */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}
1366 
1367 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1368 
1369 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1370 {
1371 	const struct device *dev = &rvu->pdev->dev;
1372 	struct nix_hw *nix_hw;
1373 	struct dentry *pfile;
1374 
1375 	if (!is_block_implemented(rvu->hw, blkaddr))
1376 		return;
1377 
1378 	if (blkaddr == BLKADDR_NIX0) {
1379 		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1380 		if (!rvu->rvu_dbg.nix) {
1381 			dev_err(rvu->dev, "create debugfs dir failed for nix\n");
1382 			return;
1383 		}
1384 		nix_hw = &rvu->hw->nix[0];
1385 	} else {
1386 		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1387 						      rvu->rvu_dbg.root);
1388 		if (!rvu->rvu_dbg.nix) {
1389 			dev_err(rvu->dev,
1390 				"create debugfs dir failed for nix1\n");
1391 			return;
1392 		}
1393 		nix_hw = &rvu->hw->nix[1];
1394 	}
1395 
1396 	pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1397 				    &rvu_dbg_nix_sq_ctx_fops);
1398 	if (!pfile)
1399 		goto create_failed;
1400 
1401 	pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1402 				    &rvu_dbg_nix_rq_ctx_fops);
1403 	if (!pfile)
1404 		goto create_failed;
1405 
1406 	pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1407 				    &rvu_dbg_nix_cq_ctx_fops);
1408 	if (!pfile)
1409 		goto create_failed;
1410 
1411 	pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix,
1412 				    nix_hw, &rvu_dbg_nix_ndc_tx_cache_fops);
1413 	if (!pfile)
1414 		goto create_failed;
1415 
1416 	pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix,
1417 				    nix_hw, &rvu_dbg_nix_ndc_rx_cache_fops);
1418 	if (!pfile)
1419 		goto create_failed;
1420 
1421 	pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix,
1422 				    nix_hw,
1423 				    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1424 	if (!pfile)
1425 		goto create_failed;
1426 
1427 	pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix,
1428 				    nix_hw,
1429 				    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1430 	if (!pfile)
1431 		goto create_failed;
1432 
1433 	pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1434 				    &rvu_dbg_nix_qsize_fops);
1435 	if (!pfile)
1436 		goto create_failed;
1437 
1438 	return;
1439 create_failed:
1440 	dev_err(dev,
1441 		"Failed to create debugfs dir/file for NIX blk\n");
1442 	debugfs_remove_recursive(rvu->rvu_dbg.nix);
1443 }
1444 
1445 static void rvu_dbg_npa_init(struct rvu *rvu)
1446 {
1447 	const struct device *dev = &rvu->pdev->dev;
1448 	struct dentry *pfile;
1449 
1450 	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
1451 	if (!rvu->rvu_dbg.npa)
1452 		return;
1453 
1454 	pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
1455 				    &rvu_dbg_npa_qsize_fops);
1456 	if (!pfile)
1457 		goto create_failed;
1458 
1459 	pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1460 				    &rvu_dbg_npa_aura_ctx_fops);
1461 	if (!pfile)
1462 		goto create_failed;
1463 
1464 	pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1465 				    &rvu_dbg_npa_pool_ctx_fops);
1466 	if (!pfile)
1467 		goto create_failed;
1468 
1469 	pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
1470 				    &rvu_dbg_npa_ndc_cache_fops);
1471 	if (!pfile)
1472 		goto create_failed;
1473 
1474 	pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa,
1475 				    rvu, &rvu_dbg_npa_ndc_hits_miss_fops);
1476 	if (!pfile)
1477 		goto create_failed;
1478 
1479 	return;
1480 
1481 create_failed:
1482 	dev_err(dev, "Failed to create debugfs dir/file for NPA\n");
1483 	debugfs_remove_recursive(rvu->rvu_dbg.npa);
1484 }
1485 
/* Reads the cumulative NIX RX stat 'idx' for the current CGX LMAC and, on
 * success, prints it to seq_file 's' as "name: value".  The statement
 * expression evaluates to the counter value.  NOTE: relies on 'rvu',
 * 'cgxd', 'lmac_id', 'err' and 's' being in scope at the expansion site;
 * 'err' is written as a side effect and must be checked by the caller.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
1495 
/* TX counterpart of PRINT_CGX_CUML_NIXRX_STATUS: reads cumulative NIX TX
 * stat 'idx' and prints "name: value" on success.  Same hidden scope
 * dependencies ('rvu', 'cgxd', 'lmac_id', 'err', 's'); sets 'err'.
 */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
1505 
1506 static int cgx_print_stats(struct seq_file *s, int lmac_id)
1507 {
1508 	struct cgx_link_user_info linfo;
1509 	void *cgxd = s->private;
1510 	u64 ucast, mcast, bcast;
1511 	int stat = 0, err = 0;
1512 	u64 tx_stat, rx_stat;
1513 	struct rvu *rvu;
1514 
1515 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
1516 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
1517 	if (!rvu)
1518 		return -ENODEV;
1519 
1520 	/* Link status */
1521 	seq_puts(s, "\n=======Link Status======\n\n");
1522 	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
1523 	if (err)
1524 		seq_puts(s, "Failed to read link status\n");
1525 	seq_printf(s, "\nLink is %s %d Mbps\n\n",
1526 		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
1527 
1528 	/* Rx stats */
1529 	seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n");
1530 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
1531 	if (err)
1532 		return err;
1533 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
1534 	if (err)
1535 		return err;
1536 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
1537 	if (err)
1538 		return err;
1539 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
1540 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
1541 	if (err)
1542 		return err;
1543 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
1544 	if (err)
1545 		return err;
1546 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
1547 	if (err)
1548 		return err;
1549 
1550 	/* Tx stats */
1551 	seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n");
1552 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
1553 	if (err)
1554 		return err;
1555 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
1556 	if (err)
1557 		return err;
1558 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
1559 	if (err)
1560 		return err;
1561 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
1562 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
1563 	if (err)
1564 		return err;
1565 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
1566 	if (err)
1567 		return err;
1568 
1569 	/* Rx stats */
1570 	seq_puts(s, "\n=======CGX RX_STATS======\n\n");
1571 	while (stat < CGX_RX_STATS_COUNT) {
1572 		err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
1573 		if (err)
1574 			return err;
1575 		seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat);
1576 		stat++;
1577 	}
1578 
1579 	/* Tx stats */
1580 	stat = 0;
1581 	seq_puts(s, "\n=======CGX TX_STATS======\n\n");
1582 	while (stat < CGX_TX_STATS_COUNT) {
1583 		err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
1584 		if (err)
1585 			return err;
1586 		seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat);
1587 		stat++;
1588 	}
1589 
1590 	return err;
1591 }
1592 
1593 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
1594 {
1595 	struct dentry *current_dir;
1596 	int err, lmac_id;
1597 	char *buf;
1598 
1599 	current_dir = filp->file->f_path.dentry->d_parent;
1600 	buf = strrchr(current_dir->d_name.name, 'c');
1601 	if (!buf)
1602 		return -EINVAL;
1603 
1604 	err = kstrtoint(buf + 1, 10, &lmac_id);
1605 	if (!err) {
1606 		err = cgx_print_stats(filp, lmac_id);
1607 		if (err)
1608 			return err;
1609 	}
1610 	return err;
1611 }
1612 
1613 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
1614 
1615 static void rvu_dbg_cgx_init(struct rvu *rvu)
1616 {
1617 	const struct device *dev = &rvu->pdev->dev;
1618 	struct dentry *pfile;
1619 	int i, lmac_id;
1620 	char dname[20];
1621 	void *cgx;
1622 
1623 	rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root);
1624 
1625 	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
1626 		cgx = rvu_cgx_pdata(i, rvu);
1627 		if (!cgx)
1628 			continue;
1629 		/* cgx debugfs dir */
1630 		sprintf(dname, "cgx%d", i);
1631 		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
1632 						      rvu->rvu_dbg.cgx_root);
1633 		for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) {
1634 			/* lmac debugfs dir */
1635 			sprintf(dname, "lmac%d", lmac_id);
1636 			rvu->rvu_dbg.lmac =
1637 				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
1638 
1639 			pfile =	debugfs_create_file("stats", 0600,
1640 						    rvu->rvu_dbg.lmac, cgx,
1641 						    &rvu_dbg_cgx_stat_fops);
1642 			if (!pfile)
1643 				goto create_failed;
1644 		}
1645 	}
1646 	return;
1647 
1648 create_failed:
1649 	dev_err(dev, "Failed to create debugfs dir/file for CGX\n");
1650 	debugfs_remove_recursive(rvu->rvu_dbg.cgx_root);
1651 }
1652 
1653 /* NPC debugfs APIs */
1654 static void rvu_print_npc_mcam_info(struct seq_file *s,
1655 				    u16 pcifunc, int blkaddr)
1656 {
1657 	struct rvu *rvu = s->private;
1658 	int entry_acnt, entry_ecnt;
1659 	int cntr_acnt, cntr_ecnt;
1660 
1661 	/* Skip PF0 */
1662 	if (!pcifunc)
1663 		return;
1664 	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
1665 					  &entry_acnt, &entry_ecnt);
1666 	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
1667 					    &cntr_acnt, &cntr_ecnt);
1668 	if (!entry_acnt && !cntr_acnt)
1669 		return;
1670 
1671 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
1672 		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
1673 			   rvu_get_pf(pcifunc));
1674 	else
1675 		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
1676 			   rvu_get_pf(pcifunc),
1677 			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
1678 
1679 	if (entry_acnt) {
1680 		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
1681 		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
1682 	}
1683 	if (cntr_acnt) {
1684 		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
1685 		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
1686 	}
1687 }
1688 
/* seq_file show handler for <debugfs>/octeontx2/npc/mcam_info: dumps
 * global MCAM geometry (key widths, entry/counter totals and free counts)
 * followed by the per-PF/VF allocation breakdown.
 * NOTE(review): parameter name "unsued" is a typo for "unused" (harmless).
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
	struct rvu *rvu = filp->private;
	int pf, vf, numvfs, blkaddr;
	struct npc_mcam *mcam;
	u16 pcifunc, counters;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;
	counters = rvu->hw->npc_counters;

	seq_puts(filp, "\nNPC MCAM info:\n");
	/* MCAM keywidth on receive and transmit sides */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));

	/* mcam->lock protects the bitmap/counter bookkeeping read below */
	mutex_lock(&mcam->lock);
	/* MCAM entries */
	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   mcam->total_entries - mcam->bmap_entries);
	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

	/* MCAM counters */
	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   counters - mcam->counters.max);
	seq_printf(filp, "\t\t Available \t: %d\n",
		   rvu_rsrc_free_count(&mcam->counters));

	/* Nothing allocated yet: skip the per-function breakdown */
	if (mcam->bmap_entries == mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return 0;
	}

	seq_puts(filp, "\n\t\t Current allocation\n");
	seq_puts(filp, "\t\t====================\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

		/* Number of enabled VFs comes from the PF's RVU config */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		numvfs = (cfg >> 12) & 0xFF;
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}
1753 
1754 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
1755 
1756 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
1757 					     void *unused)
1758 {
1759 	struct rvu *rvu = filp->private;
1760 	struct npc_mcam *mcam;
1761 	int blkaddr;
1762 
1763 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1764 	if (blkaddr < 0)
1765 		return -ENODEV;
1766 
1767 	mcam = &rvu->hw->mcam;
1768 
1769 	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
1770 	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
1771 		   rvu_read64(rvu, blkaddr,
1772 			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
1773 
1774 	return 0;
1775 }
1776 
1777 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
1778 
/* Prints every match field programmed in an MCAM rule, one "name value
 * mask" line per feature bit set in rule->features.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	/* NOTE(review): the cast assumes 'features' is a 64-bit field that
	 * can be scanned as an unsigned long bitmap -- confirm on 32-bit.
	 */
	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			/* Multi-byte packet fields are stored big-endian */
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "%d ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		/* L4 ports share one packet field regardless of protocol */
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		default:
			break;
		}
	}
}
1841 
1842 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
1843 					 struct rvu_npc_mcam_rule *rule)
1844 {
1845 	if (rule->intf == NIX_INTF_TX) {
1846 		switch (rule->tx_action.op) {
1847 		case NIX_TX_ACTIONOP_DROP:
1848 			seq_puts(s, "\taction: Drop\n");
1849 			break;
1850 		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
1851 			seq_puts(s, "\taction: Unicast to default channel\n");
1852 			break;
1853 		case NIX_TX_ACTIONOP_UCAST_CHAN:
1854 			seq_printf(s, "\taction: Unicast to channel %d\n",
1855 				   rule->tx_action.index);
1856 			break;
1857 		case NIX_TX_ACTIONOP_MCAST:
1858 			seq_puts(s, "\taction: Multicast\n");
1859 			break;
1860 		case NIX_TX_ACTIONOP_DROP_VIOL:
1861 			seq_puts(s, "\taction: Lockdown Violation Drop\n");
1862 			break;
1863 		default:
1864 			break;
1865 		};
1866 	} else {
1867 		switch (rule->rx_action.op) {
1868 		case NIX_RX_ACTIONOP_DROP:
1869 			seq_puts(s, "\taction: Drop\n");
1870 			break;
1871 		case NIX_RX_ACTIONOP_UCAST:
1872 			seq_printf(s, "\taction: Direct to queue %d\n",
1873 				   rule->rx_action.index);
1874 			break;
1875 		case NIX_RX_ACTIONOP_RSS:
1876 			seq_puts(s, "\taction: RSS\n");
1877 			break;
1878 		case NIX_RX_ACTIONOP_UCAST_IPSEC:
1879 			seq_puts(s, "\taction: Unicast ipsec\n");
1880 			break;
1881 		case NIX_RX_ACTIONOP_MCAST:
1882 			seq_puts(s, "\taction: Multicast\n");
1883 			break;
1884 		default:
1885 			break;
1886 		};
1887 	}
1888 }
1889 
/* Maps an NPC interface id to a human-readable name for debugfs output;
 * returns "unknown" for ids outside the four NIX0/NIX1 RX/TX interfaces.
 */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
1907 
1908 static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
1909 {
1910 	struct rvu_npc_mcam_rule *iter;
1911 	struct rvu *rvu = s->private;
1912 	struct npc_mcam *mcam;
1913 	int pf, vf = -1;
1914 	int blkaddr;
1915 	u16 target;
1916 	u64 hits;
1917 
1918 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1919 	if (blkaddr < 0)
1920 		return 0;
1921 
1922 	mcam = &rvu->hw->mcam;
1923 
1924 	mutex_lock(&mcam->lock);
1925 	list_for_each_entry(iter, &mcam->mcam_rules, list) {
1926 		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
1927 		seq_printf(s, "\n\tInstalled by: PF%d ", pf);
1928 
1929 		if (iter->owner & RVU_PFVF_FUNC_MASK) {
1930 			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
1931 			seq_printf(s, "VF%d", vf);
1932 		}
1933 		seq_puts(s, "\n");
1934 
1935 		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
1936 						    "RX" : "TX");
1937 		seq_printf(s, "\tinterface: %s\n",
1938 			   rvu_dbg_get_intf_name(iter->intf));
1939 		seq_printf(s, "\tmcam entry: %d\n", iter->entry);
1940 
1941 		rvu_dbg_npc_mcam_show_flows(s, iter);
1942 		if (iter->intf == NIX_INTF_RX) {
1943 			target = iter->rx_action.pf_func;
1944 			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
1945 			seq_printf(s, "\tForward to: PF%d ", pf);
1946 
1947 			if (target & RVU_PFVF_FUNC_MASK) {
1948 				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
1949 				seq_printf(s, "VF%d", vf);
1950 			}
1951 			seq_puts(s, "\n");
1952 		}
1953 
1954 		rvu_dbg_npc_mcam_show_action(s, iter);
1955 		seq_printf(s, "\tenabled: %s\n", iter->enable ? "yes" : "no");
1956 
1957 		if (!iter->has_cntr)
1958 			continue;
1959 		seq_printf(s, "\tcounter: %d\n", iter->cntr);
1960 
1961 		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
1962 		seq_printf(s, "\thits: %lld\n", hits);
1963 	}
1964 	mutex_unlock(&mcam->lock);
1965 
1966 	return 0;
1967 }
1968 
1969 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
1970 
1971 static void rvu_dbg_npc_init(struct rvu *rvu)
1972 {
1973 	const struct device *dev = &rvu->pdev->dev;
1974 	struct dentry *pfile;
1975 
1976 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
1977 	if (!rvu->rvu_dbg.npc)
1978 		return;
1979 
1980 	pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc,
1981 				    rvu, &rvu_dbg_npc_mcam_info_fops);
1982 	if (!pfile)
1983 		goto create_failed;
1984 
1985 	pfile = debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc,
1986 				    rvu, &rvu_dbg_npc_mcam_rules_fops);
1987 	if (!pfile)
1988 		goto create_failed;
1989 
1990 	pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
1991 				    rvu, &rvu_dbg_npc_rx_miss_act_fops);
1992 	if (!pfile)
1993 		goto create_failed;
1994 
1995 	return;
1996 
1997 create_failed:
1998 	dev_err(dev, "Failed to create debugfs dir/file for NPC\n");
1999 	debugfs_remove_recursive(rvu->rvu_dbg.npc);
2000 }
2001 
2002 /* CPT debugfs APIs */
2003 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
2004 {
2005 	struct rvu *rvu = filp->private;
2006 	u64 busy_sts = 0, free_sts = 0;
2007 	u32 e_min = 0, e_max = 0, e, i;
2008 	u16 max_ses, max_ies, max_aes;
2009 	int blkaddr;
2010 	u64 reg;
2011 
2012 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
2013 	if (blkaddr < 0)
2014 		return -ENODEV;
2015 
2016 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2017 	max_ses = reg & 0xffff;
2018 	max_ies = (reg >> 16) & 0xffff;
2019 	max_aes = (reg >> 32) & 0xffff;
2020 
2021 	switch (eng_type) {
2022 	case CPT_AE_TYPE:
2023 		e_min = max_ses + max_ies;
2024 		e_max = max_ses + max_ies + max_aes;
2025 		break;
2026 	case CPT_SE_TYPE:
2027 		e_min = 0;
2028 		e_max = max_ses;
2029 		break;
2030 	case CPT_IE_TYPE:
2031 		e_min = max_ses;
2032 		e_max = max_ses + max_ies;
2033 		break;
2034 	default:
2035 		return -EINVAL;
2036 	}
2037 
2038 	for (e = e_min, i = 0; e < e_max; e++, i++) {
2039 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
2040 		if (reg & 0x1)
2041 			busy_sts |= 1ULL << i;
2042 
2043 		if (reg & 0x2)
2044 			free_sts |= 1ULL << i;
2045 	}
2046 	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
2047 	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
2048 
2049 	return 0;
2050 }
2051 
/* debugfs show handler: free/busy status of the CPT AE engine group. */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
2058 
/* debugfs show handler: free/busy status of the CPT SE engine group. */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
2065 
/* debugfs show handler: free/busy status of the CPT IE engine group. */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2072 
2073 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
2074 {
2075 	struct rvu *rvu = filp->private;
2076 	u16 max_ses, max_ies, max_aes;
2077 	u32 e_max, e;
2078 	int blkaddr;
2079 	u64 reg;
2080 
2081 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
2082 	if (blkaddr < 0)
2083 		return -ENODEV;
2084 
2085 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2086 	max_ses = reg & 0xffff;
2087 	max_ies = (reg >> 16) & 0xffff;
2088 	max_aes = (reg >> 32) & 0xffff;
2089 
2090 	e_max = max_ses + max_ies + max_aes;
2091 
2092 	seq_puts(filp, "===========================================\n");
2093 	for (e = 0; e < e_max; e++) {
2094 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
2095 		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
2096 			   reg & 0xff);
2097 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
2098 		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
2099 			   reg);
2100 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
2101 		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
2102 			   reg);
2103 		seq_puts(filp, "===========================================\n");
2104 	}
2105 	return 0;
2106 }
2107 
2108 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2109 
2110 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
2111 {
2112 	struct rvu *rvu = filp->private;
2113 	struct rvu_hwinfo *hw = rvu->hw;
2114 	struct rvu_block *block;
2115 	int blkaddr;
2116 	u64 reg;
2117 	u32 lf;
2118 
2119 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
2120 	if (blkaddr < 0)
2121 		return -ENODEV;
2122 
2123 	block = &hw->block[blkaddr];
2124 	if (!block->lf.bmap)
2125 		return -ENODEV;
2126 
2127 	seq_puts(filp, "===========================================\n");
2128 	for (lf = 0; lf < block->lf.max; lf++) {
2129 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
2130 		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
2131 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
2132 		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
2133 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
2134 		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
2135 		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
2136 				(lf << block->lfshift));
2137 		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
2138 		seq_puts(filp, "===========================================\n");
2139 	}
2140 	return 0;
2141 }
2142 
2143 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2144 
2145 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
2146 {
2147 	struct rvu *rvu = filp->private;
2148 	u64 reg0, reg1;
2149 	int blkaddr;
2150 
2151 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
2152 	if (blkaddr < 0)
2153 		return -ENODEV;
2154 
2155 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
2156 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
2157 	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
2158 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
2159 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
2160 	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
2161 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
2162 	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
2163 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
2164 	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
2165 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
2166 	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
2167 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
2168 	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);
2169 
2170 	return 0;
2171 }
2172 
2173 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2174 
2175 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2176 {
2177 	struct rvu *rvu;
2178 	int blkaddr;
2179 	u64 reg;
2180 
2181 	rvu = filp->private;
2182 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
2183 	if (blkaddr < 0)
2184 		return -ENODEV;
2185 
2186 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2187 	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
2188 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2189 	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
2190 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2191 	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
2192 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2193 	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
2194 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2195 	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
2196 	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2197 	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
2198 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2199 	seq_printf(filp, "CPT clock count pc         %llu\n", reg);
2200 
2201 	return 0;
2202 }
2203 
2204 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2205 
2206 static void rvu_dbg_cpt_init(struct rvu *rvu)
2207 {
2208 	const struct device *dev = &rvu->pdev->dev;
2209 	struct dentry *pfile;
2210 
2211 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
2212 		return;
2213 
2214 	rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2215 	if (!rvu->rvu_dbg.cpt)
2216 		return;
2217 
2218 	pfile = debugfs_create_file("cpt_pc", 0600,
2219 				    rvu->rvu_dbg.cpt, rvu,
2220 				    &rvu_dbg_cpt_pc_fops);
2221 	if (!pfile)
2222 		goto create_failed;
2223 
2224 	pfile = debugfs_create_file("cpt_ae_sts", 0600,
2225 				    rvu->rvu_dbg.cpt, rvu,
2226 				    &rvu_dbg_cpt_ae_sts_fops);
2227 	if (!pfile)
2228 		goto create_failed;
2229 
2230 	pfile = debugfs_create_file("cpt_se_sts", 0600,
2231 				    rvu->rvu_dbg.cpt, rvu,
2232 				    &rvu_dbg_cpt_se_sts_fops);
2233 	if (!pfile)
2234 		goto create_failed;
2235 
2236 	pfile = debugfs_create_file("cpt_ie_sts", 0600,
2237 				    rvu->rvu_dbg.cpt, rvu,
2238 				    &rvu_dbg_cpt_ie_sts_fops);
2239 	if (!pfile)
2240 		goto create_failed;
2241 
2242 	pfile = debugfs_create_file("cpt_engines_info", 0600,
2243 				    rvu->rvu_dbg.cpt, rvu,
2244 				    &rvu_dbg_cpt_engines_info_fops);
2245 	if (!pfile)
2246 		goto create_failed;
2247 
2248 	pfile = debugfs_create_file("cpt_lfs_info", 0600,
2249 				    rvu->rvu_dbg.cpt, rvu,
2250 				    &rvu_dbg_cpt_lfs_info_fops);
2251 	if (!pfile)
2252 		goto create_failed;
2253 
2254 	pfile = debugfs_create_file("cpt_err_info", 0600,
2255 				    rvu->rvu_dbg.cpt, rvu,
2256 				    &rvu_dbg_cpt_err_info_fops);
2257 	if (!pfile)
2258 		goto create_failed;
2259 
2260 	return;
2261 
2262 create_failed:
2263 	dev_err(dev, "Failed to create debugfs dir/file for CPT\n");
2264 	debugfs_remove_recursive(rvu->rvu_dbg.cpt);
2265 }
2266 
2267 void rvu_dbg_init(struct rvu *rvu)
2268 {
2269 	struct device *dev = &rvu->pdev->dev;
2270 	struct dentry *pfile;
2271 
2272 	rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
2273 	if (!rvu->rvu_dbg.root) {
2274 		dev_err(rvu->dev, "%s failed\n", __func__);
2275 		return;
2276 	}
2277 	pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
2278 				    &rvu_dbg_rsrc_status_fops);
2279 	if (!pfile)
2280 		goto create_failed;
2281 
2282 	pfile = debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
2283 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2284 	if (!pfile)
2285 		goto create_failed;
2286 
2287 	rvu_dbg_npa_init(rvu);
2288 	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
2289 
2290 	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
2291 	rvu_dbg_cgx_init(rvu);
2292 	rvu_dbg_npc_init(rvu);
2293 	rvu_dbg_cpt_init(rvu);
2294 
2295 	return;
2296 
2297 create_failed:
2298 	dev_err(dev, "Failed to create debugfs dir\n");
2299 	debugfs_remove_recursive(rvu->rvu_dbg.root);
2300 }
2301 
/* Tear down the driver's entire debugfs tree (recursively removes the
 * root directory and everything created under it).
 */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
2306 
2307 #endif /* CONFIG_DEBUG_FS */
2308