xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7 
8 #ifdef CONFIG_DEBUG_FS
9 
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 
22 #define DEBUGFS_DIR_NAME "octeontx2"
23 
/* Indices into the cgx_rx_stats_fields[]/cgx_tx_stats_fields[] name
 * tables below; CGX_STATn corresponds to hardware CGX stat register n.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
45 
/* NIX TX stats: per-LF TX statistic indices used when reading NIX
 * LF transmit counters.
 */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,
};
55 
/* NIX RX stats: per-LF RX statistic indices used when reading NIX
 * LF receive counters.
 */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,
};
72 
/* Human-readable names for CGX RX stats, indexed by CGX_STATn */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
88 
/* Human-readable names for CGX TX stats, indexed by CGX_STATn */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
109 
110 static char *rpm_rx_stats_fields[] = {
111 	"Octets of received packets",
112 	"Octets of received packets with out error",
113 	"Received packets with alignment errors",
114 	"Control/PAUSE packets received",
115 	"Packets received with Frame too long Errors",
116 	"Packets received with a1nrange length Errors",
117 	"Received packets",
118 	"Packets received with FrameCheckSequenceErrors",
119 	"Packets received with VLAN header",
120 	"Error packets",
121 	"Packets received with unicast DMAC",
122 	"Packets received with multicast DMAC",
123 	"Packets received with broadcast DMAC",
124 	"Dropped packets",
125 	"Total frames received on interface",
126 	"Packets received with an octet count < 64",
127 	"Packets received with an octet count == 64",
128 	"Packets received with an octet count of 65-127",
129 	"Packets received with an octet count of 128-255",
130 	"Packets received with an octet count of 256-511",
131 	"Packets received with an octet count of 512-1023",
132 	"Packets received with an octet count of 1024-1518",
133 	"Packets received with an octet count of > 1518",
134 	"Oversized Packets",
135 	"Jabber Packets",
136 	"Fragmented Packets",
137 	"CBFC(class based flow control) pause frames received for class 0",
138 	"CBFC pause frames received for class 1",
139 	"CBFC pause frames received for class 2",
140 	"CBFC pause frames received for class 3",
141 	"CBFC pause frames received for class 4",
142 	"CBFC pause frames received for class 5",
143 	"CBFC pause frames received for class 6",
144 	"CBFC pause frames received for class 7",
145 	"CBFC pause frames received for class 8",
146 	"CBFC pause frames received for class 9",
147 	"CBFC pause frames received for class 10",
148 	"CBFC pause frames received for class 11",
149 	"CBFC pause frames received for class 12",
150 	"CBFC pause frames received for class 13",
151 	"CBFC pause frames received for class 14",
152 	"CBFC pause frames received for class 15",
153 	"MAC control packets received",
154 };
155 
/* Human-readable names for RPM TX stats, in hardware register order */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
192 
/* CPT crypto engine types (AE = asymmetric, SE = symmetric, IE = IPsec) */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
198 
/* Number of banks in an NDC block, as reported by its NDC_AF_CONST register */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
						blk_addr, NDC_AF_CONST) & 0xFF)

/* Placeholders so NULL can be passed as the read/write op to the
 * RVU_DEBUG_*_FOPS() macros below (they token-paste rvu_dbg_##op).
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL
204 
/* Define seq_file-based debugfs file_operations: generates an open
 * helper wrapping single_open() around rvu_dbg_##read_op and a
 * rvu_dbg_##name##_fops structure wiring it together with an optional
 * rvu_dbg_##write_op write handler.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}
218 
/* Define raw (non-seq_file) debugfs file_operations with simple_open()
 * and direct rvu_dbg_##read_op / rvu_dbg_##write_op handlers.
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
226 
227 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
228 
229 #define LMT_MAPTBL_ENTRY_SIZE 16
230 /* Dump LMTST map table */
231 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
232 					       char __user *buffer,
233 					       size_t count, loff_t *ppos)
234 {
235 	struct rvu *rvu = filp->private_data;
236 	u64 lmt_addr, val, tbl_base;
237 	int pf, vf, num_vfs, hw_vfs;
238 	void __iomem *lmt_map_base;
239 	int buf_size = 10240;
240 	size_t off = 0;
241 	int index = 0;
242 	char *buf;
243 	int ret;
244 
245 	/* don't allow partial reads */
246 	if (*ppos != 0)
247 		return 0;
248 
249 	buf = kzalloc(buf_size, GFP_KERNEL);
250 	if (!buf)
251 		return -ENOSPC;
252 
253 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
254 
255 	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
256 	if (!lmt_map_base) {
257 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
258 		kfree(buf);
259 		return false;
260 	}
261 
262 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
263 			  "\n\t\t\t\t\tLmtst Map Table Entries");
264 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
265 			  "\n\t\t\t\t\t=======================");
266 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
267 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
268 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
269 			  "Lmtline Base (word 0)\t\t");
270 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
271 			  "Lmt Map Entry (word 1)");
272 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
273 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
274 		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
275 				    pf);
276 
277 		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
278 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
279 				 (tbl_base + index));
280 		lmt_addr = readq(lmt_map_base + index);
281 		off += scnprintf(&buf[off], buf_size - 1 - off,
282 				 " 0x%016llx\t\t", lmt_addr);
283 		index += 8;
284 		val = readq(lmt_map_base + index);
285 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
286 				 val);
287 		/* Reading num of VFs per PF */
288 		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
289 		for (vf = 0; vf < num_vfs; vf++) {
290 			index = (pf * rvu->hw->total_vfs * 16) +
291 				((vf + 1)  * LMT_MAPTBL_ENTRY_SIZE);
292 			off += scnprintf(&buf[off], buf_size - 1 - off,
293 					    "PF%d:VF%d  \t\t", pf, vf);
294 			off += scnprintf(&buf[off], buf_size - 1 - off,
295 					 " 0x%llx\t\t", (tbl_base + index));
296 			lmt_addr = readq(lmt_map_base + index);
297 			off += scnprintf(&buf[off], buf_size - 1 - off,
298 					 " 0x%016llx\t\t", lmt_addr);
299 			index += 8;
300 			val = readq(lmt_map_base + index);
301 			off += scnprintf(&buf[off], buf_size - 1 - off,
302 					 " 0x%016llx\n", val);
303 		}
304 	}
305 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
306 
307 	ret = min(off, count);
308 	if (copy_to_user(buffer, buf, ret))
309 		ret = -EFAULT;
310 	kfree(buf);
311 
312 	iounmap(lmt_map_base);
313 	if (ret < 0)
314 		return ret;
315 
316 	*ppos = ret;
317 	return ret;
318 }
319 
320 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
321 
322 static void get_lf_str_list(struct rvu_block block, int pcifunc,
323 			    char *lfs)
324 {
325 	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
326 
327 	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
328 		if (lf >= block.lf.max)
329 			break;
330 
331 		if (block.fn_map[lf] != pcifunc)
332 			continue;
333 
334 		if (lf == prev_lf + 1) {
335 			prev_lf = lf;
336 			seq = 1;
337 			continue;
338 		}
339 
340 		if (seq)
341 			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
342 		else
343 			len += (len ? sprintf(lfs + len, ",%d", lf) :
344 				      sprintf(lfs + len, "%d", lf));
345 
346 		prev_lf = lf;
347 		seq = 0;
348 	}
349 
350 	if (seq)
351 		len += sprintf(lfs + len, "-%d", prev_lf);
352 
353 	lfs[len] = '\0';
354 }
355 
356 static int get_max_column_width(struct rvu *rvu)
357 {
358 	int index, pf, vf, lf_str_size = 12, buf_size = 256;
359 	struct rvu_block block;
360 	u16 pcifunc;
361 	char *buf;
362 
363 	buf = kzalloc(buf_size, GFP_KERNEL);
364 	if (!buf)
365 		return -ENOMEM;
366 
367 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
368 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
369 			pcifunc = pf << 10 | vf;
370 			if (!pcifunc)
371 				continue;
372 
373 			for (index = 0; index < BLK_COUNT; index++) {
374 				block = rvu->hw->block[index];
375 				if (!strlen(block.name))
376 					continue;
377 
378 				get_lf_str_list(block, pcifunc, buf);
379 				if (lf_str_size <= strlen(buf))
380 					lf_str_size = strlen(buf) + 1;
381 			}
382 		}
383 	}
384 
385 	kfree(buf);
386 	return lf_str_size;
387 }
388 
389 /* Dumps current provisioning status of all RVU block LFs */
390 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
391 					  char __user *buffer,
392 					  size_t count, loff_t *ppos)
393 {
394 	int index, off = 0, flag = 0, len = 0, i = 0;
395 	struct rvu *rvu = filp->private_data;
396 	int bytes_not_copied = 0;
397 	struct rvu_block block;
398 	int pf, vf, pcifunc;
399 	int buf_size = 2048;
400 	int lf_str_size;
401 	char *lfs;
402 	char *buf;
403 
404 	/* don't allow partial reads */
405 	if (*ppos != 0)
406 		return 0;
407 
408 	buf = kzalloc(buf_size, GFP_KERNEL);
409 	if (!buf)
410 		return -ENOSPC;
411 
412 	/* Get the maximum width of a column */
413 	lf_str_size = get_max_column_width(rvu);
414 
415 	lfs = kzalloc(lf_str_size, GFP_KERNEL);
416 	if (!lfs) {
417 		kfree(buf);
418 		return -ENOMEM;
419 	}
420 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
421 			  "pcifunc");
422 	for (index = 0; index < BLK_COUNT; index++)
423 		if (strlen(rvu->hw->block[index].name)) {
424 			off += scnprintf(&buf[off], buf_size - 1 - off,
425 					 "%-*s", lf_str_size,
426 					 rvu->hw->block[index].name);
427 		}
428 
429 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
430 	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
431 	if (bytes_not_copied)
432 		goto out;
433 
434 	i++;
435 	*ppos += off;
436 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
437 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
438 			off = 0;
439 			flag = 0;
440 			pcifunc = pf << 10 | vf;
441 			if (!pcifunc)
442 				continue;
443 
444 			if (vf) {
445 				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
446 				off = scnprintf(&buf[off],
447 						buf_size - 1 - off,
448 						"%-*s", lf_str_size, lfs);
449 			} else {
450 				sprintf(lfs, "PF%d", pf);
451 				off = scnprintf(&buf[off],
452 						buf_size - 1 - off,
453 						"%-*s", lf_str_size, lfs);
454 			}
455 
456 			for (index = 0; index < BLK_COUNT; index++) {
457 				block = rvu->hw->block[index];
458 				if (!strlen(block.name))
459 					continue;
460 				len = 0;
461 				lfs[len] = '\0';
462 				get_lf_str_list(block, pcifunc, lfs);
463 				if (strlen(lfs))
464 					flag = 1;
465 
466 				off += scnprintf(&buf[off], buf_size - 1 - off,
467 						 "%-*s", lf_str_size, lfs);
468 			}
469 			if (flag) {
470 				off +=	scnprintf(&buf[off],
471 						  buf_size - 1 - off, "\n");
472 				bytes_not_copied = copy_to_user(buffer +
473 								(i * off),
474 								buf, off);
475 				if (bytes_not_copied)
476 					goto out;
477 
478 				i++;
479 				*ppos += off;
480 			}
481 		}
482 	}
483 
484 out:
485 	kfree(lfs);
486 	kfree(buf);
487 	if (bytes_not_copied)
488 		return -EFAULT;
489 
490 	return *ppos;
491 }
492 
493 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
494 
495 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
496 {
497 	struct rvu *rvu = filp->private;
498 	struct pci_dev *pdev = NULL;
499 	struct mac_ops *mac_ops;
500 	char cgx[10], lmac[10];
501 	struct rvu_pfvf *pfvf;
502 	int pf, domain, blkid;
503 	u8 cgx_id, lmac_id;
504 	u16 pcifunc;
505 
506 	domain = 2;
507 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
508 	/* There can be no CGX devices at all */
509 	if (!mac_ops)
510 		return 0;
511 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
512 		   mac_ops->name);
513 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
514 		if (!is_pf_cgxmapped(rvu, pf))
515 			continue;
516 
517 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
518 		if (!pdev)
519 			continue;
520 
521 		cgx[0] = 0;
522 		lmac[0] = 0;
523 		pcifunc = pf << 10;
524 		pfvf = rvu_get_pfvf(rvu, pcifunc);
525 
526 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
527 			blkid = 0;
528 		else
529 			blkid = 1;
530 
531 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
532 				    &lmac_id);
533 		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
534 		sprintf(lmac, "LMAC%d", lmac_id);
535 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
536 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
537 	}
538 	return 0;
539 }
540 
541 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
542 
543 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
544 				u16 *pcifunc)
545 {
546 	struct rvu_block *block;
547 	struct rvu_hwinfo *hw;
548 
549 	hw = rvu->hw;
550 	block = &hw->block[blkaddr];
551 
552 	if (lf < 0 || lf >= block->lf.max) {
553 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
554 			 block->lf.max - 1);
555 		return false;
556 	}
557 
558 	*pcifunc = block->fn_map[lf];
559 	if (!*pcifunc) {
560 		dev_warn(rvu->dev,
561 			 "This LF is not attached to any RVU PFFUNC\n");
562 		return false;
563 	}
564 	return true;
565 }
566 
/* Print a PF/VF's NPA Aura and Pool counts plus a bitmap showing which
 * aura/pool contexts are currently enabled.
 */
static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
{
	char *buf;

	/* scratch page for bitmap_print_to_pagebuf() output */
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	if (!pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
	} else {
		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
					pfvf->aura_ctx->qsize);
		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
	}

	if (!pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
	} else {
		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
					pfvf->pool_ctx->qsize);
		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
	}
	kfree(buf);
}
594 
595 /* The 'qsize' entry dumps current Aura/Pool context Qsize
596  * and each context's current enable/disable status in a bitmap.
597  */
598 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
599 				 int blktype)
600 {
601 	void (*print_qsize)(struct seq_file *filp,
602 			    struct rvu_pfvf *pfvf) = NULL;
603 	struct dentry *current_dir;
604 	struct rvu_pfvf *pfvf;
605 	struct rvu *rvu;
606 	int qsize_id;
607 	u16 pcifunc;
608 	int blkaddr;
609 
610 	rvu = filp->private;
611 	switch (blktype) {
612 	case BLKTYPE_NPA:
613 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
614 		print_qsize = print_npa_qsize;
615 		break;
616 
617 	case BLKTYPE_NIX:
618 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
619 		print_qsize = print_nix_qsize;
620 		break;
621 
622 	default:
623 		return -EINVAL;
624 	}
625 
626 	if (blktype == BLKTYPE_NPA) {
627 		blkaddr = BLKADDR_NPA;
628 	} else {
629 		current_dir = filp->file->f_path.dentry->d_parent;
630 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
631 				   BLKADDR_NIX1 : BLKADDR_NIX0);
632 	}
633 
634 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
635 		return -EINVAL;
636 
637 	pfvf = rvu_get_pfvf(rvu, pcifunc);
638 	print_qsize(filp, pfvf);
639 
640 	return 0;
641 }
642 
643 static ssize_t rvu_dbg_qsize_write(struct file *filp,
644 				   const char __user *buffer, size_t count,
645 				   loff_t *ppos, int blktype)
646 {
647 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
648 	struct seq_file *seqfile = filp->private_data;
649 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
650 	struct rvu *rvu = seqfile->private;
651 	struct dentry *current_dir;
652 	int blkaddr;
653 	u16 pcifunc;
654 	int ret, lf;
655 
656 	cmd_buf = memdup_user(buffer, count + 1);
657 	if (IS_ERR(cmd_buf))
658 		return -ENOMEM;
659 
660 	cmd_buf[count] = '\0';
661 
662 	cmd_buf_tmp = strchr(cmd_buf, '\n');
663 	if (cmd_buf_tmp) {
664 		*cmd_buf_tmp = '\0';
665 		count = cmd_buf_tmp - cmd_buf + 1;
666 	}
667 
668 	cmd_buf_tmp = cmd_buf;
669 	subtoken = strsep(&cmd_buf, " ");
670 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
671 	if (cmd_buf)
672 		ret = -EINVAL;
673 
674 	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
675 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
676 		goto qsize_write_done;
677 	}
678 
679 	if (blktype == BLKTYPE_NPA) {
680 		blkaddr = BLKADDR_NPA;
681 	} else {
682 		current_dir = filp->f_path.dentry->d_parent;
683 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
684 				   BLKADDR_NIX1 : BLKADDR_NIX0);
685 	}
686 
687 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
688 		ret = -EINVAL;
689 		goto qsize_write_done;
690 	}
691 	if (blktype  == BLKTYPE_NPA)
692 		rvu->rvu_dbg.npa_qsize_id = lf;
693 	else
694 		rvu->rvu_dbg.nix_qsize_id = lf;
695 
696 qsize_write_done:
697 	kfree(cmd_buf_tmp);
698 	return ret ? ret : count;
699 }
700 
701 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
702 				       const char __user *buffer,
703 				       size_t count, loff_t *ppos)
704 {
705 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
706 					    BLKTYPE_NPA);
707 }
708 
/* debugfs read: show the selected NPA LF's qsize info */
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}
713 
714 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
715 
/* Dumps given NPA Aura's context, one "Wn: field" line per context
 * word; fields that exist only on newer (non-OcteonTx2) silicon are
 * gated on !is_rvu_otx2().
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be exists only on newer silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst exists only on newer silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
763 
/* Dumps given NPA Pool's context, one "Wn: field" line per context
 * word; fields that exist only on newer (non-OcteonTx2) silicon are
 * gated on !is_rvu_otx2().
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be exists only on newer silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst exists only on newer silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
810 
/* Reads aura/pool's ctx from admin queue and dumps it.  Which LF and
 * which id (or "all") to dump was stored in rvu->rvu_dbg by the
 * matching *_ctx_write handler.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Pick up the LF/id/all selection stored by the write handler */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an admin-queue READ request for the selected context type */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps every context [0, max_id); otherwise just [id, id+1) */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
891 
892 static int write_npa_ctx(struct rvu *rvu, bool all,
893 			 int npalf, int id, int ctype)
894 {
895 	struct rvu_pfvf *pfvf;
896 	int max_id = 0;
897 	u16 pcifunc;
898 
899 	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
900 		return -EINVAL;
901 
902 	pfvf = rvu_get_pfvf(rvu, pcifunc);
903 
904 	if (ctype == NPA_AQ_CTYPE_AURA) {
905 		if (!pfvf->aura_ctx) {
906 			dev_warn(rvu->dev, "Aura context is not initialized\n");
907 			return -EINVAL;
908 		}
909 		max_id = pfvf->aura_ctx->qsize;
910 	} else if (ctype == NPA_AQ_CTYPE_POOL) {
911 		if (!pfvf->pool_ctx) {
912 			dev_warn(rvu->dev, "Pool context is not initialized\n");
913 			return -EINVAL;
914 		}
915 		max_id = pfvf->pool_ctx->qsize;
916 	}
917 
918 	if (id < 0 || id >= max_id) {
919 		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
920 			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
921 			max_id - 1);
922 		return -EINVAL;
923 	}
924 
925 	switch (ctype) {
926 	case NPA_AQ_CTYPE_AURA:
927 		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
928 		rvu->rvu_dbg.npa_aura_ctx.id = id;
929 		rvu->rvu_dbg.npa_aura_ctx.all = all;
930 		break;
931 
932 	case NPA_AQ_CTYPE_POOL:
933 		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
934 		rvu->rvu_dbg.npa_pool_ctx.id = id;
935 		rvu->rvu_dbg.npa_pool_ctx.all = all;
936 		break;
937 	default:
938 		return -EINVAL;
939 	}
940 	return 0;
941 }
942 
943 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
944 				const char __user *buffer, int *npalf,
945 				int *id, bool *all)
946 {
947 	int bytes_not_copied;
948 	char *cmd_buf_tmp;
949 	char *subtoken;
950 	int ret;
951 
952 	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
953 	if (bytes_not_copied)
954 		return -EFAULT;
955 
956 	cmd_buf[*count] = '\0';
957 	cmd_buf_tmp = strchr(cmd_buf, '\n');
958 
959 	if (cmd_buf_tmp) {
960 		*cmd_buf_tmp = '\0';
961 		*count = cmd_buf_tmp - cmd_buf + 1;
962 	}
963 
964 	subtoken = strsep(&cmd_buf, " ");
965 	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
966 	if (ret < 0)
967 		return ret;
968 	subtoken = strsep(&cmd_buf, " ");
969 	if (subtoken && strcmp(subtoken, "all") == 0) {
970 		*all = true;
971 	} else {
972 		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
973 		if (ret < 0)
974 			return ret;
975 	}
976 	if (cmd_buf)
977 		return -EINVAL;
978 	return ret;
979 }
980 
981 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
982 				     const char __user *buffer,
983 				     size_t count, loff_t *ppos, int ctype)
984 {
985 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
986 					"aura" : "pool";
987 	struct seq_file *seqfp = filp->private_data;
988 	struct rvu *rvu = seqfp->private;
989 	int npalf, id = 0, ret;
990 	bool all = false;
991 
992 	if ((*ppos != 0) || !count)
993 		return -EINVAL;
994 
995 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
996 	if (!cmd_buf)
997 		return count;
998 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
999 				   &npalf, &id, &all);
1000 	if (ret < 0) {
1001 		dev_info(rvu->dev,
1002 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1003 			 ctype_string, ctype_string);
1004 		goto done;
1005 	} else {
1006 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1007 	}
1008 done:
1009 	kfree(cmd_buf);
1010 	return ret ? ret : count;
1011 }
1012 
/* debugfs write handler for the NPA aura_ctx file */
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_AURA);
}
1020 
/* debugfs read handler for the NPA aura_ctx file */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}
1025 
1026 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1027 
1028 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1029 					  const char __user *buffer,
1030 					  size_t count, loff_t *ppos)
1031 {
1032 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1033 				     NPA_AQ_CTYPE_POOL);
1034 }
1035 
1036 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
1037 {
1038 	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
1039 }
1040 
1041 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1042 
1043 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1044 			    int ctype, int transaction)
1045 {
1046 	u64 req, out_req, lat, cant_alloc;
1047 	struct nix_hw *nix_hw;
1048 	struct rvu *rvu;
1049 	int port;
1050 
1051 	if (blk_addr == BLKADDR_NDC_NPA0) {
1052 		rvu = s->private;
1053 	} else {
1054 		nix_hw = s->private;
1055 		rvu = nix_hw->rvu;
1056 	}
1057 
1058 	for (port = 0; port < NDC_MAX_PORT; port++) {
1059 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1060 						(port, ctype, transaction));
1061 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1062 						(port, ctype, transaction));
1063 		out_req = rvu_read64(rvu, blk_addr,
1064 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1065 				     (port, ctype, transaction));
1066 		cant_alloc = rvu_read64(rvu, blk_addr,
1067 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1068 					(port, transaction));
1069 		seq_printf(s, "\nPort:%d\n", port);
1070 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1071 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1072 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1073 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1074 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1075 	}
1076 }
1077 
1078 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1079 {
1080 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
1081 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1082 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
1083 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1084 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1085 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1086 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1087 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1088 	return 0;
1089 }
1090 
1091 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
1092 {
1093 	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1094 }
1095 
1096 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1097 
1098 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1099 {
1100 	struct nix_hw *nix_hw;
1101 	struct rvu *rvu;
1102 	int bank, max_bank;
1103 
1104 	if (blk_addr == BLKADDR_NDC_NPA0) {
1105 		rvu = s->private;
1106 	} else {
1107 		nix_hw = s->private;
1108 		rvu = nix_hw->rvu;
1109 	}
1110 
1111 	max_bank = NDC_MAX_BANK(rvu, blk_addr);
1112 	for (bank = 0; bank < max_bank; bank++) {
1113 		seq_printf(s, "BANK:%d\n", bank);
1114 		seq_printf(s, "\tHits:\t%lld\n",
1115 			   (u64)rvu_read64(rvu, blk_addr,
1116 			   NDC_AF_BANKX_HIT_PC(bank)));
1117 		seq_printf(s, "\tMiss:\t%lld\n",
1118 			   (u64)rvu_read64(rvu, blk_addr,
1119 			    NDC_AF_BANKX_MISS_PC(bank)));
1120 	}
1121 	return 0;
1122 }
1123 
1124 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1125 {
1126 	struct nix_hw *nix_hw = filp->private;
1127 	int blkaddr = 0;
1128 	int ndc_idx = 0;
1129 
1130 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1131 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1132 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1133 
1134 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1135 }
1136 
1137 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1138 
1139 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1140 {
1141 	struct nix_hw *nix_hw = filp->private;
1142 	int blkaddr = 0;
1143 	int ndc_idx = 0;
1144 
1145 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1146 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1147 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1148 
1149 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1150 }
1151 
1152 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1153 
1154 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
1155 					     void *unused)
1156 {
1157 	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1158 }
1159 
1160 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1161 
1162 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1163 						void *unused)
1164 {
1165 	struct nix_hw *nix_hw = filp->private;
1166 	int ndc_idx = NPA0_U;
1167 	int blkaddr = 0;
1168 
1169 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1170 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1171 
1172 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1173 }
1174 
1175 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1176 
1177 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1178 						void *unused)
1179 {
1180 	struct nix_hw *nix_hw = filp->private;
1181 	int ndc_idx = NPA0_U;
1182 	int blkaddr = 0;
1183 
1184 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1185 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1186 
1187 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1188 }
1189 
1190 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1191 
1192 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1193 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1194 {
1195 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1196 		   sq_ctx->ena, sq_ctx->qint_idx);
1197 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1198 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1199 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1200 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1201 
1202 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1203 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1204 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1205 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1206 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1207 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1208 
1209 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1210 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1211 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1212 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1213 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1214 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1215 
1216 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1217 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1218 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1219 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1220 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1221 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1222 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1223 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1224 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1225 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1226 
1227 	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1228 		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1229 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1230 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1231 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1232 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1233 		   sq_ctx->smenq_next_sqb);
1234 
1235 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1236 
1237 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1238 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1239 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1240 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1241 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1242 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1243 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1244 
1245 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1246 		   (u64)sq_ctx->scm_lso_rem);
1247 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1248 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1249 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1250 		   (u64)sq_ctx->dropped_octs);
1251 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1252 		   (u64)sq_ctx->dropped_pkts);
1253 }
1254 
/* Dumps given nix_sq's context words (W0-W15) to the debugfs seq_file.
 * CN10K silicon uses a different SQ context layout, so dumping is
 * delegated to the CN10K-specific printer in that case.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* Non-OTx2 (CN10K) hardware: reinterpret the response buffer as
	 * the CN10K context layout and print that instead.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
1324 
1325 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1326 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
1327 {
1328 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1329 		   rq_ctx->ena, rq_ctx->sso_ena);
1330 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1331 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1332 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1333 		   rq_ctx->cq, rq_ctx->lenerr_dis);
1334 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1335 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1336 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1337 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1338 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1339 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1340 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1341 
1342 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1343 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
1344 	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1345 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1346 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
1347 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1348 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
1349 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1350 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1351 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1352 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1353 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1354 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1355 
1356 	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1357 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1358 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1359 	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1360 		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
1361 	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1362 		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1363 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1364 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1365 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1366 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1367 
1368 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1369 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1370 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1371 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1372 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1373 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1374 	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1375 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1376 
1377 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1378 		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1379 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1380 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1381 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1382 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
1383 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1384 
1385 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1386 		   rq_ctx->ltag, rq_ctx->good_utag);
1387 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1388 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
1389 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1390 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1391 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1392 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1393 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1394 
1395 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1396 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1397 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1398 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1399 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1400 }
1401 
/* Dumps given nix_rq's context words (W0-W10) to the debugfs seq_file.
 * CN10K silicon uses a different RQ context layout, so dumping is
 * delegated to the CN10K-specific printer in that case.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* Non-OTx2 (CN10K) hardware: reinterpret the response buffer as
	 * the CN10K context layout and print that instead.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1470 
/* Dumps given nix_cq's context words (W0-W3) to the debugfs seq_file.
 * NOTE(review): a few labels below ("W2:avg_level", "W2:tail",
 * "W3:cq_err_int", "W3:caching") are missing the space after the colon -
 * cosmetic only, left as-is to keep the output format unchanged.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
1501 
/* debugfs read handler shared by the NIX sq/rq/cq context files: dump
 * the context(s) previously selected via the matching write handler.
 * Contexts are fetched from hardware through the NIX admin queue (AQ)
 * mailbox and pretty-printed per ctype.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Fetch the lf/id/all selection stored by write_nix_queue_ctx(). */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick the queue count and the printer matching the context type. */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all" dumps queues [0, qsize); otherwise dump just queue @id. */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1590 
1591 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1592 			       int id, int ctype, char *ctype_string,
1593 			       struct seq_file *m)
1594 {
1595 	struct nix_hw *nix_hw = m->private;
1596 	struct rvu_pfvf *pfvf;
1597 	int max_id = 0;
1598 	u16 pcifunc;
1599 
1600 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1601 		return -EINVAL;
1602 
1603 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1604 
1605 	if (ctype == NIX_AQ_CTYPE_SQ) {
1606 		if (!pfvf->sq_ctx) {
1607 			dev_warn(rvu->dev, "SQ context is not initialized\n");
1608 			return -EINVAL;
1609 		}
1610 		max_id = pfvf->sq_ctx->qsize;
1611 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
1612 		if (!pfvf->rq_ctx) {
1613 			dev_warn(rvu->dev, "RQ context is not initialized\n");
1614 			return -EINVAL;
1615 		}
1616 		max_id = pfvf->rq_ctx->qsize;
1617 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
1618 		if (!pfvf->cq_ctx) {
1619 			dev_warn(rvu->dev, "CQ context is not initialized\n");
1620 			return -EINVAL;
1621 		}
1622 		max_id = pfvf->cq_ctx->qsize;
1623 	}
1624 
1625 	if (id < 0 || id >= max_id) {
1626 		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1627 			 ctype_string, max_id - 1);
1628 		return -EINVAL;
1629 	}
1630 	switch (ctype) {
1631 	case NIX_AQ_CTYPE_CQ:
1632 		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1633 		rvu->rvu_dbg.nix_cq_ctx.id = id;
1634 		rvu->rvu_dbg.nix_cq_ctx.all = all;
1635 		break;
1636 
1637 	case NIX_AQ_CTYPE_SQ:
1638 		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1639 		rvu->rvu_dbg.nix_sq_ctx.id = id;
1640 		rvu->rvu_dbg.nix_sq_ctx.all = all;
1641 		break;
1642 
1643 	case NIX_AQ_CTYPE_RQ:
1644 		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1645 		rvu->rvu_dbg.nix_rq_ctx.id = id;
1646 		rvu->rvu_dbg.nix_rq_ctx.all = all;
1647 		break;
1648 	default:
1649 		return -EINVAL;
1650 	}
1651 	return 0;
1652 }
1653 
1654 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1655 					   const char __user *buffer,
1656 					   size_t count, loff_t *ppos,
1657 					   int ctype)
1658 {
1659 	struct seq_file *m = filp->private_data;
1660 	struct nix_hw *nix_hw = m->private;
1661 	struct rvu *rvu = nix_hw->rvu;
1662 	char *cmd_buf, *ctype_string;
1663 	int nixlf, id = 0, ret;
1664 	bool all = false;
1665 
1666 	if ((*ppos != 0) || !count)
1667 		return -EINVAL;
1668 
1669 	switch (ctype) {
1670 	case NIX_AQ_CTYPE_SQ:
1671 		ctype_string = "sq";
1672 		break;
1673 	case NIX_AQ_CTYPE_RQ:
1674 		ctype_string = "rq";
1675 		break;
1676 	case NIX_AQ_CTYPE_CQ:
1677 		ctype_string = "cq";
1678 		break;
1679 	default:
1680 		return -EINVAL;
1681 	}
1682 
1683 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1684 
1685 	if (!cmd_buf)
1686 		return count;
1687 
1688 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1689 				   &nixlf, &id, &all);
1690 	if (ret < 0) {
1691 		dev_info(rvu->dev,
1692 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1693 			 ctype_string, ctype_string);
1694 		goto done;
1695 	} else {
1696 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1697 					  ctype_string, m);
1698 	}
1699 done:
1700 	kfree(cmd_buf);
1701 	return ret ? ret : count;
1702 }
1703 
1704 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
1705 					const char __user *buffer,
1706 					size_t count, loff_t *ppos)
1707 {
1708 	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1709 					    NIX_AQ_CTYPE_SQ);
1710 }
1711 
1712 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
1713 {
1714 	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
1715 }
1716 
1717 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1718 
1719 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
1720 					const char __user *buffer,
1721 					size_t count, loff_t *ppos)
1722 {
1723 	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1724 					    NIX_AQ_CTYPE_RQ);
1725 }
1726 
1727 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
1728 {
1729 	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
1730 }
1731 
1732 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1733 
1734 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
1735 					const char __user *buffer,
1736 					size_t count, loff_t *ppos)
1737 {
1738 	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1739 					    NIX_AQ_CTYPE_CQ);
1740 }
1741 
1742 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
1743 {
1744 	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
1745 }
1746 
1747 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1748 
1749 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1750 				 unsigned long *bmap, char *qtype)
1751 {
1752 	char *buf;
1753 
1754 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1755 	if (!buf)
1756 		return;
1757 
1758 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1759 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1760 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1761 		   qtype, buf);
1762 	kfree(buf);
1763 }
1764 
1765 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1766 {
1767 	if (!pfvf->cq_ctx)
1768 		seq_puts(filp, "cq context is not initialized\n");
1769 	else
1770 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1771 				     "cq");
1772 
1773 	if (!pfvf->rq_ctx)
1774 		seq_puts(filp, "rq context is not initialized\n");
1775 	else
1776 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1777 				     "rq");
1778 
1779 	if (!pfvf->sq_ctx)
1780 		seq_puts(filp, "sq context is not initialized\n");
1781 	else
1782 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1783 				     "sq");
1784 }
1785 
1786 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
1787 				       const char __user *buffer,
1788 				       size_t count, loff_t *ppos)
1789 {
1790 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1791 				   BLKTYPE_NIX);
1792 }
1793 
1794 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
1795 {
1796 	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
1797 }
1798 
1799 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1800 
1801 static void print_band_prof_ctx(struct seq_file *m,
1802 				struct nix_bandprof_s *prof)
1803 {
1804 	char *str;
1805 
1806 	switch (prof->pc_mode) {
1807 	case NIX_RX_PC_MODE_VLAN:
1808 		str = "VLAN";
1809 		break;
1810 	case NIX_RX_PC_MODE_DSCP:
1811 		str = "DSCP";
1812 		break;
1813 	case NIX_RX_PC_MODE_GEN:
1814 		str = "Generic";
1815 		break;
1816 	case NIX_RX_PC_MODE_RSVD:
1817 		str = "Reserved";
1818 		break;
1819 	}
1820 	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
1821 	str = (prof->icolor == 3) ? "Color blind" :
1822 		(prof->icolor == 0) ? "Green" :
1823 		(prof->icolor == 1) ? "Yellow" : "Red";
1824 	seq_printf(m, "W0: icolor\t\t%s\n", str);
1825 	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
1826 	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
1827 	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
1828 	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
1829 	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
1830 	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
1831 	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
1832 	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
1833 
1834 	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
1835 	str = (prof->lmode == 0) ? "byte" : "packet";
1836 	seq_printf(m, "W1: lmode\t\t%s\n", str);
1837 	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
1838 	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
1839 	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
1840 	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
1841 	str = (prof->gc_action == 0) ? "PASS" :
1842 		(prof->gc_action == 1) ? "DROP" : "RED";
1843 	seq_printf(m, "W1: gc_action\t\t%s\n", str);
1844 	str = (prof->yc_action == 0) ? "PASS" :
1845 		(prof->yc_action == 1) ? "DROP" : "RED";
1846 	seq_printf(m, "W1: yc_action\t\t%s\n", str);
1847 	str = (prof->rc_action == 0) ? "PASS" :
1848 		(prof->rc_action == 1) ? "DROP" : "RED";
1849 	seq_printf(m, "W1: rc_action\t\t%s\n", str);
1850 	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
1851 	seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
1852 	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
1853 
1854 	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
1855 	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
1856 	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
1857 	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
1858 		   (u64)prof->green_pkt_pass);
1859 	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
1860 		   (u64)prof->yellow_pkt_pass);
1861 	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
1862 	seq_printf(m, "W7: green_octs_pass\t%lld\n",
1863 		   (u64)prof->green_octs_pass);
1864 	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
1865 		   (u64)prof->yellow_octs_pass);
1866 	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
1867 	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
1868 		   (u64)prof->green_pkt_drop);
1869 	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
1870 		   (u64)prof->yellow_pkt_drop);
1871 	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
1872 	seq_printf(m, "W13: green_octs_drop\t%lld\n",
1873 		   (u64)prof->green_octs_drop);
1874 	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
1875 		   (u64)prof->yellow_octs_drop);
1876 	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
1877 	seq_puts(m, "==============================\n");
1878 }
1879 
/* Dump every in-use ingress-policer (bandwidth profile) context on this
 * NIX block, walking the Leaf/Mid/Top layers.  Contexts are fetched from
 * hardware through the admin queue via nix_aq_context_read().
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Skip profiles not allocated to anyone */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* HW profile index: low 14 bits index, layer in [15:14] */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				/* Abort the dump but still return success so
				 * the partial output remains readable.
				 */
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}
1934 
1935 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
1936 
/* Show max/free counts of bandwidth-profile resources per policer layer. */
static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_ipolicer *ipolicer;
	int layer;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	seq_puts(m, "\nBandwidth profile resource free count\n");
	seq_puts(m, "=====================================\n");
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		/* "Mid "/"Top " padded to keep the columns aligned */
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";

		ipolicer = &nix_hw->ipolicer[layer];
		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
			   ipolicer->band_prof.max,
			   rvu_rsrc_free_count(&ipolicer->band_prof));
	}
	seq_puts(m, "=====================================\n");

	return 0;
}
1965 
1966 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
1967 
1968 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1969 {
1970 	struct nix_hw *nix_hw;
1971 
1972 	if (!is_block_implemented(rvu->hw, blkaddr))
1973 		return;
1974 
1975 	if (blkaddr == BLKADDR_NIX0) {
1976 		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1977 		nix_hw = &rvu->hw->nix[0];
1978 	} else {
1979 		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1980 						      rvu->rvu_dbg.root);
1981 		nix_hw = &rvu->hw->nix[1];
1982 	}
1983 
1984 	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1985 			    &rvu_dbg_nix_sq_ctx_fops);
1986 	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1987 			    &rvu_dbg_nix_rq_ctx_fops);
1988 	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1989 			    &rvu_dbg_nix_cq_ctx_fops);
1990 	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1991 			    &rvu_dbg_nix_ndc_tx_cache_fops);
1992 	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1993 			    &rvu_dbg_nix_ndc_rx_cache_fops);
1994 	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1995 			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1996 	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1997 			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1998 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1999 			    &rvu_dbg_nix_qsize_fops);
2000 	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2001 			    &rvu_dbg_nix_band_prof_ctx_fops);
2002 	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2003 			    &rvu_dbg_nix_band_prof_rsrc_fops);
2004 }
2005 
2006 static void rvu_dbg_npa_init(struct rvu *rvu)
2007 {
2008 	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2009 
2010 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2011 			    &rvu_dbg_npa_qsize_fops);
2012 	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2013 			    &rvu_dbg_npa_aura_ctx_fops);
2014 	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2015 			    &rvu_dbg_npa_pool_ctx_fops);
2016 	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2017 			    &rvu_dbg_npa_ndc_cache_fops);
2018 	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2019 			    &rvu_dbg_npa_ndc_hits_miss_fops);
2020 }
2021 
/* Read the cumulative NIX RX stat 'idx' for the current cgxd/lmac_id,
 * print it as "name: value" when the read succeeds, and yield the value.
 * Relies on 'rvu', 'cgxd', 'lmac_id', 'err' and 's' from the caller's scope;
 * callers must check 'err' after each use.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
2031 
/* TX counterpart of PRINT_CGX_CUML_NIXRX_STATUS: reads cumulative NIX TX
 * stat 'idx', prints "name: value" on success and yields the value.
 * Same implicit dependencies on the caller's 'rvu'/'cgxd'/'lmac_id'/'err'/'s'.
 */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
2041 
2042 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2043 {
2044 	struct cgx_link_user_info linfo;
2045 	struct mac_ops *mac_ops;
2046 	void *cgxd = s->private;
2047 	u64 ucast, mcast, bcast;
2048 	int stat = 0, err = 0;
2049 	u64 tx_stat, rx_stat;
2050 	struct rvu *rvu;
2051 
2052 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2053 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2054 	if (!rvu)
2055 		return -ENODEV;
2056 
2057 	mac_ops = get_mac_ops(cgxd);
2058 	/* There can be no CGX devices at all */
2059 	if (!mac_ops)
2060 		return 0;
2061 
2062 	/* Link status */
2063 	seq_puts(s, "\n=======Link Status======\n\n");
2064 	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2065 	if (err)
2066 		seq_puts(s, "Failed to read link status\n");
2067 	seq_printf(s, "\nLink is %s %d Mbps\n\n",
2068 		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
2069 
2070 	/* Rx stats */
2071 	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2072 		   mac_ops->name);
2073 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2074 	if (err)
2075 		return err;
2076 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2077 	if (err)
2078 		return err;
2079 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2080 	if (err)
2081 		return err;
2082 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2083 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2084 	if (err)
2085 		return err;
2086 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2087 	if (err)
2088 		return err;
2089 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2090 	if (err)
2091 		return err;
2092 
2093 	/* Tx stats */
2094 	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2095 		   mac_ops->name);
2096 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2097 	if (err)
2098 		return err;
2099 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2100 	if (err)
2101 		return err;
2102 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2103 	if (err)
2104 		return err;
2105 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2106 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2107 	if (err)
2108 		return err;
2109 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2110 	if (err)
2111 		return err;
2112 
2113 	/* Rx stats */
2114 	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2115 	while (stat < mac_ops->rx_stats_cnt) {
2116 		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2117 		if (err)
2118 			return err;
2119 		if (is_rvu_otx2(rvu))
2120 			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2121 				   rx_stat);
2122 		else
2123 			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2124 				   rx_stat);
2125 		stat++;
2126 	}
2127 
2128 	/* Tx stats */
2129 	stat = 0;
2130 	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2131 	while (stat < mac_ops->tx_stats_cnt) {
2132 		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2133 		if (err)
2134 			return err;
2135 
2136 		if (is_rvu_otx2(rvu))
2137 			seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2138 				   tx_stat);
2139 		else
2140 			seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2141 				   tx_stat);
2142 		stat++;
2143 	}
2144 
2145 	return err;
2146 }
2147 
2148 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2149 {
2150 	struct dentry *current_dir;
2151 	char *buf;
2152 
2153 	current_dir = filp->file->f_path.dentry->d_parent;
2154 	buf = strrchr(current_dir->d_name.name, 'c');
2155 	if (!buf)
2156 		return -EINVAL;
2157 
2158 	return kstrtoint(buf + 1, 10, lmac_id);
2159 }
2160 
/* debugfs "stats" read handler: resolve the LMAC id from the directory
 * name, then dump that LMAC's statistics.
 */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id;
	int err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_stats(filp, lmac_id);
}
2171 
2172 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2173 
2174 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2175 {
2176 	struct pci_dev *pdev = NULL;
2177 	void *cgxd = s->private;
2178 	char *bcast, *mcast;
2179 	u16 index, domain;
2180 	u8 dmac[ETH_ALEN];
2181 	struct rvu *rvu;
2182 	u64 cfg, mac;
2183 	int pf;
2184 
2185 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2186 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2187 	if (!rvu)
2188 		return -ENODEV;
2189 
2190 	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2191 	domain = 2;
2192 
2193 	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2194 	if (!pdev)
2195 		return 0;
2196 
2197 	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2198 	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2199 	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2200 
2201 	seq_puts(s,
2202 		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
2203 	seq_printf(s, "%s  PF%d  %9s  %9s",
2204 		   dev_name(&pdev->dev), pf, bcast, mcast);
2205 	if (cfg & CGX_DMAC_CAM_ACCEPT)
2206 		seq_printf(s, "%12s\n\n", "UNICAST");
2207 	else
2208 		seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2209 
2210 	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");
2211 
2212 	for (index = 0 ; index < 32 ; index++) {
2213 		cfg = cgx_read_dmac_entry(cgxd, index);
2214 		/* Display enabled dmac entries associated with current lmac */
2215 		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2216 		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2217 			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2218 			u64_to_ether_addr(mac, dmac);
2219 			seq_printf(s, "%7d     %pM\n", index, dmac);
2220 		}
2221 	}
2222 
2223 	return 0;
2224 }
2225 
/* debugfs "mac_filter" read handler: resolve the LMAC id from the
 * directory name, then dump that LMAC's DMAC filter table.
 */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int lmac_id;
	int err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_dmac_flt(filp, lmac_id);
}
2236 
2237 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2238 
/* Build the CGX/RPM debugfs tree: one directory per MAC instance
 * ("cgxN"/"rpmN") each containing one directory per enabled LMAC
 * ("lmacN") with "stats" and "mac_filter" files inside.
 */
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	unsigned long lmac_bmap;
	int i, lmac_id;
	char dname[20];	/* holds "cgx%d"/"rpm%d"/"lmac%d" names */
	void *cgx;

	if (!cgx_get_cgxcnt_max())
		return;

	/* mac_ops->name ("cgx" or "rpm") names the root directory */
	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	if (!mac_ops)
		return;

	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
						   rvu->rvu_dbg.root);

	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
		cgx = rvu_cgx_pdata(i, rvu);
		if (!cgx)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgx);
		/* cgx debugfs dir */
		sprintf(dname, "%s%d", mac_ops->name, i);
		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
						      rvu->rvu_dbg.cgx_root);

		for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
			/* lmac debugfs dir */
			sprintf(dname, "lmac%d", lmac_id);
			rvu->rvu_dbg.lmac =
				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);

			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
					    cgx, &rvu_dbg_cgx_stat_fops);
			debugfs_create_file("mac_filter", 0600,
					    rvu->rvu_dbg.lmac, cgx,
					    &rvu_dbg_cgx_dmac_flt_fops);
		}
	}
}
2281 
2282 /* NPC debugfs APIs */
/* NPC debugfs APIs */
/* Print MCAM entry/counter allocation info for one PF or VF.
 * Silently skips functions with nothing allocated.
 */
static void rvu_print_npc_mcam_info(struct seq_file *s,
				    u16 pcifunc, int blkaddr)
{
	struct rvu *rvu = s->private;
	int entry_acnt, entry_ecnt;
	int cntr_acnt, cntr_ecnt;

	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
					  &entry_acnt, &entry_ecnt);
	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
					    &cntr_acnt, &cntr_ecnt);
	/* Nothing allocated to this function, nothing to show */
	if (!entry_acnt && !cntr_acnt)
		return;

	/* FUNC bits zero => PF itself; non-zero => VF (index = FUNC - 1) */
	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
			   rvu_get_pf(pcifunc));
	else
		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
			   rvu_get_pf(pcifunc),
			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);

	if (entry_acnt) {
		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
	}
	if (cntr_acnt) {
		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
	}
}
2314 
/* debugfs "mcam_info" read handler: dump global MCAM configuration
 * (keywidths, entry/counter totals) and the per-PF/VF allocations.
 * mcam->lock serializes against allocation changes while iterating.
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
	struct rvu *rvu = filp->private;
	int pf, vf, numvfs, blkaddr;
	struct npc_mcam *mcam;
	u16 pcifunc, counters;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;
	counters = rvu->hw->npc_counters;

	seq_puts(filp, "\nNPC MCAM info:\n");
	/* MCAM keywidth on receive and transmit sides */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));

	mutex_lock(&mcam->lock);
	/* MCAM entries */
	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   mcam->total_entries - mcam->bmap_entries);
	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

	/* MCAM counters */
	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   counters - mcam->counters.max);
	seq_printf(filp, "\t\t Available \t: %d\n",
		   rvu_rsrc_free_count(&mcam->counters));

	/* All entries free => nothing allocated, skip per-function dump */
	if (mcam->bmap_entries == mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return 0;
	}

	seq_puts(filp, "\n\t\t Current allocation\n");
	seq_puts(filp, "\t\t====================\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

		/* Number of VFs under this PF from the privileged PF config */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		numvfs = (cfg >> 12) & 0xFF;
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}
2379 
2380 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2381 
/* debugfs "rx_miss_act_stats" read handler: show the match counter
 * bound to the RX miss action.
 */
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
					     void *unused)
{
	struct rvu *rvu = filp->private;
	struct npc_mcam *mcam;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;

	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
		   rvu_read64(rvu, blkaddr,
			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));

	return 0;
}
2402 
2403 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2404 
/* Print the match fields (value and mask) of one MCAM rule, one line per
 * feature bit set in rule->features.  Network-order fields are converted
 * with ntohs() before printing.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		/* TCP/UDP/SCTP share the same sport/dport storage */
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		default:
			/* Field name already printed; no value formatter */
			seq_puts(s, "\n");
			break;
		}
	}
}
2468 
/* Print a human-readable description of an MCAM rule's action, picking
 * the TX or RX action set based on the rule's interface.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (is_npc_intf_tx(rule->intf)) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			/* Unknown op: print nothing */
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			/* Unknown op: print nothing */
			break;
		}
	}
}
2516 
/* Map an NPC interface id to its printable name; "unknown" for anything
 * outside NIX0/NIX1 RX/TX.
 */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
2534 
/* debugfs "mcam_rules" read handler: walk the installed MCAM rule list
 * (under mcam->lock) and print owner, direction, match fields, action,
 * enable state and hit counter for each rule.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		/* Non-zero FUNC bits => installed by a VF (index = FUNC - 1) */
		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules additionally carry a forwarding target and channel */
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		/* Hit stats only exist when a counter is attached */
		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}
2600 
2601 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2602 
2603 static void rvu_dbg_npc_init(struct rvu *rvu)
2604 {
2605 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2606 
2607 	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2608 			    &rvu_dbg_npc_mcam_info_fops);
2609 	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2610 			    &rvu_dbg_npc_mcam_rules_fops);
2611 	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2612 			    &rvu_dbg_npc_rx_miss_act_fops);
2613 }
2614 
/* Dump busy/free status bitmaps for one class of CPT engines (AE/SE/IE).
 * Engine index ranges are derived from CPT_AF_CONSTANTS1: SEs occupy
 * [0, max_ses), IEs [max_ses, max_ses+max_ies), AEs the range after that.
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct cpt_ctx *ctx = filp->private;
	u64 busy_sts = 0, free_sts = 0;
	u32 e_min = 0, e_max = 0, e, i;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	switch (eng_type) {
	case CPT_AE_TYPE:
		e_min = max_ses + max_ies;
		e_max = max_ses + max_ies + max_aes;
		break;
	case CPT_SE_TYPE:
		e_min = 0;
		e_max = max_ses;
		break;
	case CPT_IE_TYPE:
		e_min = max_ses;
		e_max = max_ses + max_ies;
		break;
	default:
		return -EINVAL;
	}

	/* Bit 0 of CPT_AF_EXEX_STS = busy, bit 1 = free; collect one bit
	 * per engine (i is the engine's offset within its class).
	 */
	for (e = e_min, i = 0; e < e_max; e++, i++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
		if (reg & 0x1)
			busy_sts |= 1ULL << i;

		if (reg & 0x2)
			free_sts |= 1ULL << i;
	}
	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);

	return 0;
}
2660 
/* debugfs read handler: engine status for asymmetric (AE) engines. */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}
2665 
2666 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
2667 
/* debugfs read handler: engine status for symmetric (SE) engines. */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}
2672 
2673 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
2674 
/* debugfs read handler: engine status for IPsec (IE) engines. */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}
2679 
2680 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2681 
/* Dump group-enable, active-info and control registers for every CPT
 * engine (all SE/IE/AE classes combined).
 */
static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u32 e_max, e;
	u64 reg;

	/* Engine counts per class from CPT_AF_CONSTANTS1 */
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	e_max = max_ses + max_ies + max_aes;

	seq_puts(filp, "===========================================\n");
	for (e = 0; e < e_max; e++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
			   reg & 0xff);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
			   reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
			   reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}
2713 
2714 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2715 
/* Dump the control/config registers of every CPT local function (LF). */
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	int blkaddr = ctx->blkaddr;
	struct rvu *rvu = ctx->rvu;
	struct rvu_block *block;
	struct rvu_hwinfo *hw;
	u64 reg;
	u32 lf;

	hw = rvu->hw;
	block = &hw->block[blkaddr];
	/* No LF bitmap => block has no LFs provisioned */
	if (!block->lf.bmap)
		return -ENODEV;

	seq_puts(filp, "===========================================\n");
	for (lf = 0; lf < block->lf.max; lf++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
		/* Per-LF config register address is block-relative */
		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
				(lf << block->lfshift));
		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}
2746 
2747 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2748 
/* Dump the CPT fault/poison/interrupt and error-info registers. */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);

	return 0;
}
2773 
2774 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2775 
2776 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2777 {
2778 	struct cpt_ctx *ctx = filp->private;
2779 	struct rvu *rvu = ctx->rvu;
2780 	int blkaddr = ctx->blkaddr;
2781 	u64 reg;
2782 
2783 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2784 	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
2785 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2786 	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
2787 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2788 	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
2789 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2790 	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
2791 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2792 	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
2793 	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2794 	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
2795 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2796 	seq_printf(filp, "CPT clock count pc         %llu\n", reg);
2797 
2798 	return 0;
2799 }
2800 
2801 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2802 
2803 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2804 {
2805 	struct cpt_ctx *ctx;
2806 
2807 	if (!is_block_implemented(rvu->hw, blkaddr))
2808 		return;
2809 
2810 	if (blkaddr == BLKADDR_CPT0) {
2811 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2812 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
2813 		ctx->blkaddr = BLKADDR_CPT0;
2814 		ctx->rvu = rvu;
2815 	} else {
2816 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2817 						      rvu->rvu_dbg.root);
2818 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
2819 		ctx->blkaddr = BLKADDR_CPT1;
2820 		ctx->rvu = rvu;
2821 	}
2822 
2823 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
2824 			    &rvu_dbg_cpt_pc_fops);
2825 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2826 			    &rvu_dbg_cpt_ae_sts_fops);
2827 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2828 			    &rvu_dbg_cpt_se_sts_fops);
2829 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2830 			    &rvu_dbg_cpt_ie_sts_fops);
2831 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
2832 			    &rvu_dbg_cpt_engines_info_fops);
2833 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
2834 			    &rvu_dbg_cpt_lfs_info_fops);
2835 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
2836 			    &rvu_dbg_cpt_err_info_fops);
2837 }
2838 
2839 static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
2840 {
2841 	if (!is_rvu_otx2(rvu))
2842 		return "cn10k";
2843 	else
2844 		return "octeontx2";
2845 }
2846 
2847 void rvu_dbg_init(struct rvu *rvu)
2848 {
2849 	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
2850 
2851 	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
2852 			    &rvu_dbg_rsrc_status_fops);
2853 
2854 	if (!is_rvu_otx2(rvu))
2855 		debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
2856 				    rvu, &rvu_dbg_lmtst_map_table_fops);
2857 
2858 	if (!cgx_get_cgxcnt_max())
2859 		goto create;
2860 
2861 	if (is_rvu_otx2(rvu))
2862 		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
2863 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2864 	else
2865 		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
2866 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2867 
2868 create:
2869 	rvu_dbg_npa_init(rvu);
2870 	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
2871 
2872 	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
2873 	rvu_dbg_cgx_init(rvu);
2874 	rvu_dbg_npc_init(rvu);
2875 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
2876 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
2877 }
2878 
/* Remove the entire debugfs tree created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
2883 
2884 #endif /* CONFIG_DEBUG_FS */
2885