xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c (revision 7354eb7f1558466e92e926802d36e69e42938ea9)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7 
8 #ifdef CONFIG_DEBUG_FS
9 
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 #include "rvu_npc_hash.h"
22 #include "mcs.h"
23 
24 #define DEBUGFS_DIR_NAME "octeontx2"
25 
26 enum {
27 	CGX_STAT0,
28 	CGX_STAT1,
29 	CGX_STAT2,
30 	CGX_STAT3,
31 	CGX_STAT4,
32 	CGX_STAT5,
33 	CGX_STAT6,
34 	CGX_STAT7,
35 	CGX_STAT8,
36 	CGX_STAT9,
37 	CGX_STAT10,
38 	CGX_STAT11,
39 	CGX_STAT12,
40 	CGX_STAT13,
41 	CGX_STAT14,
42 	CGX_STAT15,
43 	CGX_STAT16,
44 	CGX_STAT17,
45 	CGX_STAT18,
46 };
47 
48 /* NIX TX stats */
49 enum nix_stat_lf_tx {
50 	TX_UCAST	= 0x0,
51 	TX_BCAST	= 0x1,
52 	TX_MCAST	= 0x2,
53 	TX_DROP		= 0x3,
54 	TX_OCTS		= 0x4,
55 	TX_STATS_ENUM_LAST,
56 };
57 
58 /* NIX RX stats */
59 enum nix_stat_lf_rx {
60 	RX_OCTS		= 0x0,
61 	RX_UCAST	= 0x1,
62 	RX_BCAST	= 0x2,
63 	RX_MCAST	= 0x3,
64 	RX_DROP		= 0x4,
65 	RX_DROP_OCTS	= 0x5,
66 	RX_FCS		= 0x6,
67 	RX_ERR		= 0x7,
68 	RX_DRP_BCAST	= 0x8,
69 	RX_DRP_MCAST	= 0x9,
70 	RX_DRP_L3BCAST	= 0xa,
71 	RX_DRP_L3MCAST	= 0xb,
72 	RX_STATS_ENUM_LAST,
73 };
74 
75 static char *cgx_rx_stats_fields[] = {
76 	[CGX_STAT0]	= "Received packets",
77 	[CGX_STAT1]	= "Octets of received packets",
78 	[CGX_STAT2]	= "Received PAUSE packets",
79 	[CGX_STAT3]	= "Received PAUSE and control packets",
80 	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
81 	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
82 	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
83 	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
84 	[CGX_STAT8]	= "Error packets",
85 	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
86 	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
87 	[CGX_STAT11]	= "NCSI-bound packets dropped",
88 	[CGX_STAT12]	= "NCSI-bound octets dropped",
89 };
90 
91 static char *cgx_tx_stats_fields[] = {
92 	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
93 	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
94 	[CGX_STAT2]	= "Multiple collisions before successful transmission",
95 	[CGX_STAT3]	= "Single collisions before successful transmission",
96 	[CGX_STAT4]	= "Total octets sent on the interface",
97 	[CGX_STAT5]	= "Total frames sent on the interface",
98 	[CGX_STAT6]	= "Packets sent with an octet count < 64",
99 	[CGX_STAT7]	= "Packets sent with an octet count == 64",
100 	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
101 	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
102 	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
103 	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
104 	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
105 	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
106 	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
107 	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
108 	[CGX_STAT16]	= "Packets that underflowed during transmit and were truncated",
109 	[CGX_STAT17]	= "Control/PAUSE packets sent",
110 };
111 
112 static char *rpm_rx_stats_fields[] = {
113 	"Octets of received packets",
114 	"Octets of received packets without error",
115 	"Received packets with alignment errors",
116 	"Control/PAUSE packets received",
117 	"Packets received with frame-too-long errors",
118 	"Packets received with in-range length errors",
119 	"Received packets",
120 	"Packets received with frame check sequence errors",
121 	"Packets received with VLAN header",
122 	"Error packets",
123 	"Packets received with unicast DMAC",
124 	"Packets received with multicast DMAC",
125 	"Packets received with broadcast DMAC",
126 	"Dropped packets",
127 	"Total frames received on interface",
128 	"Packets received with an octet count < 64",
129 	"Packets received with an octet count == 64",
130 	"Packets received with an octet count of 65-127",
131 	"Packets received with an octet count of 128-255",
132 	"Packets received with an octet count of 256-511",
133 	"Packets received with an octet count of 512-1023",
134 	"Packets received with an octet count of 1024-1518",
135 	"Packets received with an octet count of > 1518",
136 	"Oversized Packets",
137 	"Jabber Packets",
138 	"Fragmented Packets",
139 	"CBFC(class based flow control) pause frames received for class 0",
140 	"CBFC pause frames received for class 1",
141 	"CBFC pause frames received for class 2",
142 	"CBFC pause frames received for class 3",
143 	"CBFC pause frames received for class 4",
144 	"CBFC pause frames received for class 5",
145 	"CBFC pause frames received for class 6",
146 	"CBFC pause frames received for class 7",
147 	"CBFC pause frames received for class 8",
148 	"CBFC pause frames received for class 9",
149 	"CBFC pause frames received for class 10",
150 	"CBFC pause frames received for class 11",
151 	"CBFC pause frames received for class 12",
152 	"CBFC pause frames received for class 13",
153 	"CBFC pause frames received for class 14",
154 	"CBFC pause frames received for class 15",
155 	"MAC control packets received",
156 };
157 
158 static char *rpm_tx_stats_fields[] = {
159 	"Total octets sent on the interface",
160 	"Total octets transmitted OK",
161 	"Control/Pause frames sent",
162 	"Total frames transmitted OK",
163 	"Total frames sent with VLAN header",
164 	"Error Packets",
165 	"Packets sent to unicast DMAC",
166 	"Packets sent to the multicast DMAC",
167 	"Packets sent to a broadcast DMAC",
168 	"Packets sent with an octet count == 64",
169 	"Packets sent with an octet count of 65-127",
170 	"Packets sent with an octet count of 128-255",
171 	"Packets sent with an octet count of 256-511",
172 	"Packets sent with an octet count of 512-1023",
173 	"Packets sent with an octet count of 1024-1518",
174 	"Packets sent with an octet count of > 1518",
175 	"CBFC(class based flow control) pause frames transmitted for class 0",
176 	"CBFC pause frames transmitted for class 1",
177 	"CBFC pause frames transmitted for class 2",
178 	"CBFC pause frames transmitted for class 3",
179 	"CBFC pause frames transmitted for class 4",
180 	"CBFC pause frames transmitted for class 5",
181 	"CBFC pause frames transmitted for class 6",
182 	"CBFC pause frames transmitted for class 7",
183 	"CBFC pause frames transmitted for class 8",
184 	"CBFC pause frames transmitted for class 9",
185 	"CBFC pause frames transmitted for class 10",
186 	"CBFC pause frames transmitted for class 11",
187 	"CBFC pause frames transmitted for class 12",
188 	"CBFC pause frames transmitted for class 13",
189 	"CBFC pause frames transmitted for class 14",
190 	"CBFC pause frames transmitted for class 15",
191 	"MAC control packets sent",
192 	"Total frames sent on the interface"
193 };
194 
195 enum cpt_eng_type {
196 	CPT_AE_TYPE = 1,
197 	CPT_SE_TYPE = 2,
198 	CPT_IE_TYPE = 3,
199 };
200 
201 #define rvu_dbg_NULL NULL
202 #define rvu_dbg_open_NULL NULL
203 
204 #define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
205 static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
206 { \
207 	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
208 } \
209 static const struct file_operations rvu_dbg_##name##_fops = { \
210 	.owner		= THIS_MODULE, \
211 	.open		= rvu_dbg_open_##name, \
212 	.read		= seq_read, \
213 	.write		= rvu_dbg_##write_op, \
214 	.llseek		= seq_lseek, \
215 	.release	= single_release, \
216 }
217 
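/* Minimal sketch of what RVU_DEBUG_SEQ_FOPS(foo, foo_display, NULL)
 * expands to; "foo" is a hypothetical name used only for illustration:
 *
 *	static int rvu_dbg_open_foo(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, rvu_dbg_foo_display,
 *				   inode->i_private);
 *	}
 *	static const struct file_operations rvu_dbg_foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= rvu_dbg_open_foo,
 *		.read		= seq_read,
 *		.write		= rvu_dbg_NULL,	(i.e. NULL, via the define)
 *		.llseek		= seq_lseek,
 *		.release	= single_release,
 *	};
 *
 * The rvu_dbg_NULL/rvu_dbg_open_NULL defines above let callers pass a
 * literal NULL for an unused hook despite the rvu_dbg_## token pasting.
 */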
218 #define RVU_DEBUG_FOPS(name, read_op, write_op) \
219 static const struct file_operations rvu_dbg_##name##_fops = { \
220 	.owner = THIS_MODULE, \
221 	.open = simple_open, \
222 	.read = rvu_dbg_##read_op, \
223 	.write = rvu_dbg_##write_op \
224 }
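/* Unlike the seq_file variant above, handlers registered through
 * RVU_DEBUG_FOPS (lmtst_map_table and rsrc_status below) manage their own
 * kernel buffer, copy_to_user() calls and *ppos bookkeeping.
 */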
225 
226 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
227 
228 static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
229 {
230 	struct mcs *mcs = filp->private;
231 	struct mcs_port_stats stats;
232 	int lmac;
233 
234 	seq_puts(filp, "\n port stats\n");
235 	mutex_lock(&mcs->stats_lock);
236 	for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
237 		mcs_get_port_stats(mcs, &stats, lmac, dir);
238 		seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
239 		seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
240 
241 		if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
242 			seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
243 				   stats.preempt_err_cnt);
244 		if (dir == MCS_TX)
245 			seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
246 				   stats.sectag_insert_err_cnt);
247 	}
248 	mutex_unlock(&mcs->stats_lock);
249 	return 0;
250 }
251 
252 static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
253 {
254 	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
255 }
256 
257 RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
258 
259 static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
260 {
261 	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
262 }
263 
264 RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
265 
266 static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
267 {
268 	struct mcs *mcs = filp->private;
269 	struct mcs_sa_stats stats;
270 	struct rsrc_bmap *map;
271 	int sa_id;
272 
273 	if (dir == MCS_TX) {
274 		map = &mcs->tx.sa;
275 		seq_puts(filp, "\n TX SA stats\n");
276 		mutex_lock(&mcs->stats_lock);
277 		for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
278 			mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
279 			seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
280 				   stats.pkt_encrypt_cnt);
281 
282 			seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
283 				   stats.pkt_protected_cnt);
284 		}
285 		mutex_unlock(&mcs->stats_lock);
286 		return 0;
287 	}
288 
289 	/* RX stats */
290 	map = &mcs->rx.sa;
291 	seq_puts(filp, "\n RX SA stats\n");
292 	mutex_lock(&mcs->stats_lock);
293 	for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
294 		mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
295 		seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
296 		seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
297 		seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
298 		seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
299 		seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
300 	}
301 	mutex_unlock(&mcs->stats_lock);
302 	return 0;
303 }
304 
305 static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
306 {
307 	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
308 }
309 
310 RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
311 
312 static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
313 {
314 	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
315 }
316 
317 RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
318 
319 static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
320 {
321 	struct mcs *mcs = filp->private;
322 	struct mcs_sc_stats stats;
323 	struct rsrc_bmap *map;
324 	int sc_id;
325 
326 	map = &mcs->tx.sc;
327 	seq_puts(filp, "\n SC stats\n");
328 
329 	mutex_lock(&mcs->stats_lock);
330 	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
331 		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
332 		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
333 		seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
334 		seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
335 
336 		if (mcs->hw->mcs_blks == 1) {
337 			seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
338 				   stats.octet_encrypt_cnt);
339 			seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
340 				   stats.octet_protected_cnt);
341 		}
342 	}
343 	mutex_unlock(&mcs->stats_lock);
344 	return 0;
345 }
346 
347 RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
348 
349 static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
350 {
351 	struct mcs *mcs = filp->private;
352 	struct mcs_sc_stats stats;
353 	struct rsrc_bmap *map;
354 	int sc_id;
355 
356 	map = &mcs->rx.sc;
357 	seq_puts(filp, "\n SC stats\n");
358 
359 	mutex_lock(&mcs->stats_lock);
360 	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
361 		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
362 		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
363 		seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
364 		seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
365 		seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
366 		seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
367 		seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);
368 
369 		if (mcs->hw->mcs_blks > 1) {
370 			seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
371 			seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
372 		}
373 		if (mcs->hw->mcs_blks == 1) {
374 			seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
375 				   stats.octet_decrypt_cnt);
376 			seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
377 				   stats.octet_validate_cnt);
378 		}
379 	}
380 	mutex_unlock(&mcs->stats_lock);
381 	return 0;
382 }
383 
384 RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
385 
386 static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
387 {
388 	struct mcs *mcs = filp->private;
389 	struct mcs_flowid_stats stats;
390 	struct rsrc_bmap *map;
391 	int flow_id;
392 
393 	seq_puts(filp, "\n Flowid stats\n");
394 
395 	if (dir == MCS_RX)
396 		map = &mcs->rx.flow_ids;
397 	else
398 		map = &mcs->tx.flow_ids;
399 
400 	mutex_lock(&mcs->stats_lock);
401 	for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
402 		mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
403 		seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
404 	}
405 	mutex_unlock(&mcs->stats_lock);
406 	return 0;
407 }
408 
409 static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
410 {
411 	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
412 }
413 
414 RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
415 
416 static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
417 {
418 	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
419 }
420 
421 RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
422 
423 static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
424 {
425 	struct mcs *mcs = filp->private;
426 	struct mcs_secy_stats stats;
427 	struct rsrc_bmap *map;
428 	int secy_id;
429 
430 	map = &mcs->tx.secy;
431 	seq_puts(filp, "\n MCS TX secy stats\n");
432 
433 	mutex_lock(&mcs->stats_lock);
434 	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
435 		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
436 		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
437 		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
438 			   stats.ctl_pkt_bcast_cnt);
439 		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
440 			   stats.ctl_pkt_mcast_cnt);
441 		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
442 			   stats.ctl_pkt_ucast_cnt);
443 		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
444 		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
445 			   stats.unctl_pkt_bcast_cnt);
446 		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
447 			   stats.unctl_pkt_mcast_cnt);
448 		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
449 			   stats.unctl_pkt_ucast_cnt);
450 		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
451 		seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
452 			   stats.octet_encrypted_cnt);
453 		seq_printf(filp, "secy%d: Octet protected: %lld\n", secy_id,
454 			   stats.octet_protected_cnt);
455 		seq_printf(filp, "secy%d: Pkts with no active SA: %lld\n", secy_id,
456 			   stats.pkt_noactivesa_cnt);
457 		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
458 		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
459 	}
460 	mutex_unlock(&mcs->stats_lock);
461 	return 0;
462 }
463 
464 RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
465 
466 static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
467 {
468 	struct mcs *mcs = filp->private;
469 	struct mcs_secy_stats stats;
470 	struct rsrc_bmap *map;
471 	int secy_id;
472 
473 	map = &mcs->rx.secy;
474 	seq_puts(filp, "\n MCS RX secy stats\n");
475 
476 	mutex_lock(&mcs->stats_lock);
477 	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
478 		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
479 		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
480 		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
481 			   stats.ctl_pkt_bcast_cnt);
482 		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
483 			   stats.ctl_pkt_mcast_cnt);
484 		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
485 			   stats.ctl_pkt_ucast_cnt);
486 		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
487 		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
488 			   stats.unctl_pkt_bcast_cnt);
489 		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
490 			   stats.unctl_pkt_mcast_cnt);
491 		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
492 			   stats.unctl_pkt_ucast_cnt);
493 		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
494 		seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
495 			   stats.octet_decrypted_cnt);
496 		seq_printf(filp, "secy%d: Octet validated: %lld\n", secy_id,
497 			   stats.octet_validated_cnt);
498 		seq_printf(filp, "secy%d: Pkts on disabled port: %lld\n", secy_id,
499 			   stats.pkt_port_disabled_cnt);
500 		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
501 		seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
502 			   stats.pkt_nosa_cnt);
503 		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
504 			   stats.pkt_nosaerror_cnt);
505 		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
506 			   stats.pkt_tagged_ctl_cnt);
507 		seq_printf(filp, "secy%d: Untagged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
508 		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
509 		if (mcs->hw->mcs_blks > 1)
510 			seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
511 				   stats.pkt_notag_cnt);
512 	}
513 	mutex_unlock(&mcs->stats_lock);
514 	return 0;
515 }
516 
517 RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
518 
519 static void rvu_dbg_mcs_init(struct rvu *rvu)
520 {
521 	struct mcs *mcs;
522 	char dname[10];
523 	int i;
524 
525 	if (!rvu->mcs_blk_cnt)
526 		return;
527 
528 	rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
529 
530 	for (i = 0; i < rvu->mcs_blk_cnt; i++) {
531 		mcs = mcs_get_pdata(i);
532 
533 		sprintf(dname, "mcs%d", i);
534 		rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
535 						      rvu->rvu_dbg.mcs_root);
536 
537 		rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
538 
539 		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
540 				    &rvu_dbg_mcs_rx_flowid_stats_fops);
541 
542 		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
543 				    &rvu_dbg_mcs_rx_secy_stats_fops);
544 
545 		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
546 				    &rvu_dbg_mcs_rx_sc_stats_fops);
547 
548 		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
549 				    &rvu_dbg_mcs_rx_sa_stats_fops);
550 
551 		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
552 				    &rvu_dbg_mcs_rx_port_stats_fops);
553 
554 		rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
555 
556 		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
557 				    &rvu_dbg_mcs_tx_flowid_stats_fops);
558 
559 		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
560 				    &rvu_dbg_mcs_tx_secy_stats_fops);
561 
562 		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
563 				    &rvu_dbg_mcs_tx_sc_stats_fops);
564 
565 		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
566 				    &rvu_dbg_mcs_tx_sa_stats_fops);
567 
568 		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
569 				    &rvu_dbg_mcs_tx_port_stats_fops);
570 	}
571 }
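/* Resulting hierarchy, assuming rvu_dbg.root was created with
 * DEBUGFS_DIR_NAME ("octeontx2") and debugfs is mounted at the usual
 * location (illustrative):
 *
 *	/sys/kernel/debug/octeontx2/mcs/mcs0/rx_stats/{flowid,secy,sc,sa,port}
 *	/sys/kernel/debug/octeontx2/mcs/mcs0/tx_stats/{flowid,secy,sc,sa,port}
 *
 * e.g. "cat .../mcs0/rx_stats/port" runs rvu_dbg_mcs_rx_port_stats_display().
 */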
572 
573 #define LMT_MAPTBL_ENTRY_SIZE 16
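/* Layout sketch of the table dumped below: each entry is
 * LMT_MAPTBL_ENTRY_SIZE (16) bytes, i.e. two 64-bit words. Word 0 holds
 * the LMT line base address and word 1 the map entry, hence the paired
 * readq() at index and index + 8. A PF's slot starts at
 * pf * total_vfs * 16, and its VF n sits at offset (n + 1) * 16 within
 * that slot.
 */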
574 /* Dump LMTST map table */
575 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
576 					       char __user *buffer,
577 					       size_t count, loff_t *ppos)
578 {
579 	struct rvu *rvu = filp->private_data;
580 	u64 lmt_addr, val, tbl_base;
581 	int pf, vf, num_vfs, hw_vfs;
582 	void __iomem *lmt_map_base;
583 	int buf_size = 10240;
584 	size_t off = 0;
585 	int index = 0;
586 	char *buf;
587 	int ret;
588 
589 	/* don't allow partial reads */
590 	if (*ppos != 0)
591 		return 0;
592 
593 	buf = kzalloc(buf_size, GFP_KERNEL);
594 	if (!buf)
595 		return -ENOMEM;
596 
597 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
598 
599 	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
600 	if (!lmt_map_base) {
601 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
602 		kfree(buf);
603 		return -ENOMEM;
604 	}
605 
606 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
607 			  "\n\t\t\t\t\tLmtst Map Table Entries");
608 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
609 			  "\n\t\t\t\t\t=======================");
610 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
611 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
612 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
613 			  "Lmtline Base (word 0)\t\t");
614 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
615 			  "Lmt Map Entry (word 1)");
616 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
617 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
618 		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
619 				    pf);
620 
621 		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
622 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
623 				 (tbl_base + index));
624 		lmt_addr = readq(lmt_map_base + index);
625 		off += scnprintf(&buf[off], buf_size - 1 - off,
626 				 " 0x%016llx\t\t", lmt_addr);
627 		index += 8;
628 		val = readq(lmt_map_base + index);
629 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
630 				 val);
631 		/* Reading num of VFs per PF */
632 		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
633 		for (vf = 0; vf < num_vfs; vf++) {
634 			index = (pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE) +
635 				((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
636 			off += scnprintf(&buf[off], buf_size - 1 - off,
637 					    "PF%d:VF%d  \t\t", pf, vf);
638 			off += scnprintf(&buf[off], buf_size - 1 - off,
639 					 " 0x%llx\t\t", (tbl_base + index));
640 			lmt_addr = readq(lmt_map_base + index);
641 			off += scnprintf(&buf[off], buf_size - 1 - off,
642 					 " 0x%016llx\t\t", lmt_addr);
643 			index += 8;
644 			val = readq(lmt_map_base + index);
645 			off += scnprintf(&buf[off], buf_size - 1 - off,
646 					 " 0x%016llx\n", val);
647 		}
648 	}
649 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
650 
651 	ret = min(off, count);
652 	if (copy_to_user(buffer, buf, ret))
653 		ret = -EFAULT;
654 	kfree(buf);
655 
656 	iounmap(lmt_map_base);
657 	if (ret < 0)
658 		return ret;
659 
660 	*ppos = ret;
661 	return ret;
662 }
663 
664 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
665 
666 static void get_lf_str_list(struct rvu_block block, int pcifunc,
667 			    char *lfs)
668 {
669 	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
670 
671 	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
672 		if (lf >= block.lf.max)
673 			break;
674 
675 		if (block.fn_map[lf] != pcifunc)
676 			continue;
677 
678 		if (lf == prev_lf + 1) {
679 			prev_lf = lf;
680 			seq = 1;
681 			continue;
682 		}
683 
684 		if (seq)
685 			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
686 		else
687 			len += (len ? sprintf(lfs + len, ",%d", lf) :
688 				      sprintf(lfs + len, "%d", lf));
689 
690 		prev_lf = lf;
691 		seq = 0;
692 	}
693 
694 	if (seq)
695 		len += sprintf(lfs + len, "-%d", prev_lf);
696 
697 	lfs[len] = '\0';
698 }
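/* Example: if the pcifunc owns LFs 0, 1, 2 and 5 of a block, the string
 * built above is "0-2,5"; consecutive LFs are folded into a range via the
 * 'seq' flag and a trailing open range is flushed after the loop.
 */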
699 
700 static int get_max_column_width(struct rvu *rvu)
701 {
702 	int index, pf, vf, lf_str_size = 12, buf_size = 256;
703 	struct rvu_block block;
704 	u16 pcifunc;
705 	char *buf;
706 
707 	buf = kzalloc(buf_size, GFP_KERNEL);
708 	if (!buf)
709 		return -ENOMEM;
710 
711 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
712 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
713 			pcifunc = pf << 10 | vf;
714 			if (!pcifunc)
715 				continue;
716 
717 			for (index = 0; index < BLK_COUNT; index++) {
718 				block = rvu->hw->block[index];
719 				if (!strlen(block.name))
720 					continue;
721 
722 				get_lf_str_list(block, pcifunc, buf);
723 				if (lf_str_size <= strlen(buf))
724 					lf_str_size = strlen(buf) + 1;
725 			}
726 		}
727 	}
728 
729 	kfree(buf);
730 	return lf_str_size;
731 }
732 
733 /* Dumps current provisioning status of all RVU block LFs */
734 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
735 					  char __user *buffer,
736 					  size_t count, loff_t *ppos)
737 {
738 	int index, off = 0, flag = 0, len = 0;
739 	struct rvu *rvu = filp->private_data;
740 	int bytes_not_copied = 0;
741 	struct rvu_block block;
742 	int pf, vf, pcifunc;
743 	int buf_size = 2048;
744 	int lf_str_size;
745 	char *lfs;
746 	char *buf;
747 
748 	/* don't allow partial reads */
749 	if (*ppos != 0)
750 		return 0;
751 
752 	buf = kzalloc(buf_size, GFP_KERNEL);
753 	if (!buf)
754 		return -ENOMEM;
755 
756 	/* Get the maximum width of a column */
757 	lf_str_size = get_max_column_width(rvu);
758 
759 	lfs = kzalloc(lf_str_size, GFP_KERNEL);
760 	if (!lfs) {
761 		kfree(buf);
762 		return -ENOMEM;
763 	}
764 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
765 			  "pcifunc");
766 	for (index = 0; index < BLK_COUNT; index++)
767 		if (strlen(rvu->hw->block[index].name)) {
768 			off += scnprintf(&buf[off], buf_size - 1 - off,
769 					 "%-*s", lf_str_size,
770 					 rvu->hw->block[index].name);
771 		}
772 
773 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
774 	bytes_not_copied = copy_to_user(buffer + *ppos, buf, off);
775 	if (bytes_not_copied)
776 		goto out;
777 
779 	*ppos += off;
780 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
781 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
782 			off = 0;
783 			flag = 0;
784 			pcifunc = pf << 10 | vf;
785 			if (!pcifunc)
786 				continue;
787 
788 			if (vf) {
789 				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
790 				off = scnprintf(&buf[off],
791 						buf_size - 1 - off,
792 						"%-*s", lf_str_size, lfs);
793 			} else {
794 				sprintf(lfs, "PF%d", pf);
795 				off = scnprintf(&buf[off],
796 						buf_size - 1 - off,
797 						"%-*s", lf_str_size, lfs);
798 			}
799 
800 			for (index = 0; index < BLK_COUNT; index++) {
801 				block = rvu->hw->block[index];
802 				if (!strlen(block.name))
803 					continue;
804 				len = 0;
805 				lfs[len] = '\0';
806 				get_lf_str_list(block, pcifunc, lfs);
807 				if (strlen(lfs))
808 					flag = 1;
809 
810 				off += scnprintf(&buf[off], buf_size - 1 - off,
811 						 "%-*s", lf_str_size, lfs);
812 			}
813 			if (flag) {
814 				off +=	scnprintf(&buf[off],
815 						  buf_size - 1 - off, "\n");
816 				bytes_not_copied = copy_to_user(buffer + *ppos,
817 								buf, off);
819 				if (bytes_not_copied)
820 					goto out;
821 
823 				*ppos += off;
824 			}
825 		}
826 	}
827 
828 out:
829 	kfree(lfs);
830 	kfree(buf);
831 	if (bytes_not_copied)
832 		return -EFAULT;
833 
834 	return *ppos;
835 }
836 
837 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
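/* Illustrative shape of the rsrc_status read output (the column width
 * comes from get_max_column_width(); the set of block columns depends on
 * the silicon, and pcifunc 0 is skipped):
 *
 *	pcifunc      NPA          NIX0         ...
 *	PF1          0            0
 *	PF1:VF0      1            1-2
 */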
838 
839 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
840 {
841 	struct rvu *rvu = filp->private;
842 	struct pci_dev *pdev = NULL;
843 	struct mac_ops *mac_ops;
844 	char cgx[10], lmac[10];
845 	struct rvu_pfvf *pfvf;
846 	int pf, domain, blkid;
847 	u8 cgx_id, lmac_id;
848 	u16 pcifunc;
849 
850 	domain = 2;
851 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
852 	/* There can be no CGX devices at all */
853 	if (!mac_ops)
854 		return 0;
855 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
856 		   mac_ops->name);
857 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
858 		if (!is_pf_cgxmapped(rvu, pf))
859 			continue;
860 
861 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
862 		if (!pdev)
863 			continue;
864 
865 		cgx[0] = 0;
866 		lmac[0] = 0;
867 		pcifunc = pf << 10;
868 		pfvf = rvu_get_pfvf(rvu, pcifunc);
869 
870 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
871 			blkid = 0;
872 		else
873 			blkid = 1;
874 
875 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
876 				    &lmac_id);
877 		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
878 		sprintf(lmac, "LMAC%d", lmac_id);
879 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
880 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
881 
882 		pci_dev_put(pdev);
883 	}
884 	return 0;
885 }
886 
887 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
888 
889 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
890 				u16 *pcifunc)
891 {
892 	struct rvu_block *block;
893 	struct rvu_hwinfo *hw;
894 
895 	hw = rvu->hw;
896 	block = &hw->block[blkaddr];
897 
898 	if (lf < 0 || lf >= block->lf.max) {
899 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
900 			 block->lf.max - 1);
901 		return false;
902 	}
903 
904 	*pcifunc = block->fn_map[lf];
905 	if (!*pcifunc) {
906 		dev_warn(rvu->dev,
907 			 "This LF is not attached to any RVU PFFUNC\n");
908 		return false;
909 	}
910 	return true;
911 }
912 
913 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
914 {
915 	char *buf;
916 
917 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
918 	if (!buf)
919 		return;
920 
921 	if (!pfvf->aura_ctx) {
922 		seq_puts(m, "Aura context is not initialized\n");
923 	} else {
924 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
925 					pfvf->aura_ctx->qsize);
926 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
927 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
928 	}
929 
930 	if (!pfvf->pool_ctx) {
931 		seq_puts(m, "Pool context is not initialized\n");
932 	} else {
933 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
934 					pfvf->pool_ctx->qsize);
935 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
936 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
937 	}
938 	kfree(buf);
939 }
940 
941 /* The 'qsize' entry dumps current Aura/Pool context Qsize
942  * and each context's current enable/disable status in a bitmap.
943  */
944 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
945 				 int blktype)
946 {
947 	void (*print_qsize)(struct seq_file *filp,
948 			    struct rvu_pfvf *pfvf) = NULL;
949 	struct dentry *current_dir;
950 	struct rvu_pfvf *pfvf;
951 	struct rvu *rvu;
952 	int qsize_id;
953 	u16 pcifunc;
954 	int blkaddr;
955 
956 	rvu = filp->private;
957 	switch (blktype) {
958 	case BLKTYPE_NPA:
959 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
960 		print_qsize = print_npa_qsize;
961 		break;
962 
963 	case BLKTYPE_NIX:
964 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
965 		print_qsize = print_nix_qsize;
966 		break;
967 
968 	default:
969 		return -EINVAL;
970 	}
971 
972 	if (blktype == BLKTYPE_NPA) {
973 		blkaddr = BLKADDR_NPA;
974 	} else {
975 		current_dir = filp->file->f_path.dentry->d_parent;
976 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
977 				   BLKADDR_NIX1 : BLKADDR_NIX0);
978 	}
979 
980 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
981 		return -EINVAL;
982 
983 	pfvf = rvu_get_pfvf(rvu, pcifunc);
984 	print_qsize(filp, pfvf);
985 
986 	return 0;
987 }
988 
989 static ssize_t rvu_dbg_qsize_write(struct file *filp,
990 				   const char __user *buffer, size_t count,
991 				   loff_t *ppos, int blktype)
992 {
993 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
994 	struct seq_file *seqfile = filp->private_data;
995 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
996 	struct rvu *rvu = seqfile->private;
997 	struct dentry *current_dir;
998 	int blkaddr;
999 	u16 pcifunc;
1000 	int ret, lf;
1001 
1002 	cmd_buf = memdup_user_nul(buffer, count);
1003 	if (IS_ERR(cmd_buf))
1004 		return PTR_ERR(cmd_buf);
1005 
1006 	cmd_buf_tmp = strchr(cmd_buf, '\n');
1007 	if (cmd_buf_tmp) {
1008 		*cmd_buf_tmp = '\0';
1009 		count = cmd_buf_tmp - cmd_buf + 1;
1010 	}
1011 
1012 	cmd_buf_tmp = cmd_buf;
1013 	subtoken = strsep(&cmd_buf, " ");
1014 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
1015 	if (cmd_buf)
1016 		ret = -EINVAL;
1017 
1018 	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
1019 		dev_info(rvu->dev, "Usage: echo <%s-lf> > qsize\n", blk_string);
1020 		goto qsize_write_done;
1021 	}
1022 
1023 	if (blktype == BLKTYPE_NPA) {
1024 		blkaddr = BLKADDR_NPA;
1025 	} else {
1026 		current_dir = filp->f_path.dentry->d_parent;
1027 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
1028 				   BLKADDR_NIX1 : BLKADDR_NIX0);
1029 	}
1030 
1031 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
1032 		ret = -EINVAL;
1033 		goto qsize_write_done;
1034 	}
1035 	if (blktype  == BLKTYPE_NPA)
1036 		rvu->rvu_dbg.npa_qsize_id = lf;
1037 	else
1038 		rvu->rvu_dbg.nix_qsize_id = lf;
1039 
1040 qsize_write_done:
1041 	kfree(cmd_buf_tmp);
1042 	return ret ? ret : count;
1043 }
1044 
1045 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
1046 				       const char __user *buffer,
1047 				       size_t count, loff_t *ppos)
1048 {
1049 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1050 					    BLKTYPE_NPA);
1051 }
1052 
1053 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
1054 {
1055 	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
1056 }
1057 
1058 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
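/* Illustrative usage, assuming this is exposed as a "qsize" file in the
 * NPA debugfs directory (the dev_info() hint in the write handler above
 * documents the same grammar):
 *
 *	echo 0 > qsize		select NPA LF 0
 *	cat qsize		dump its aura/pool counts and ena/dis bitmaps
 */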
1059 
1060 /* Dumps given NPA Aura's context */
1061 static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
1062 {
1063 	struct npa_aura_s *aura = &rsp->aura;
1064 	struct rvu *rvu = m->private;
1065 
1066 	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
1067 
1068 	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
1069 		   aura->ena, aura->pool_caching);
1070 	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
1071 		   aura->pool_way_mask, aura->avg_con);
1072 	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
1073 		   aura->pool_drop_ena, aura->aura_drop_ena);
1074 	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
1075 		   aura->bp_ena, aura->aura_drop);
1076 	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
1077 		   aura->shift, aura->avg_level);
1078 
1079 	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
1080 		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
1081 
1082 	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
1083 		   (u64)aura->limit, aura->bp, aura->fc_ena);
1084 
1085 	if (!is_rvu_otx2(rvu))
1086 		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
1087 	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
1088 		   aura->fc_up_crossing, aura->fc_stype);
1089 	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
1090 
1091 	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
1092 
1093 	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
1094 		   aura->pool_drop, aura->update_time);
1095 	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
1096 		   aura->err_int, aura->err_int_ena);
1097 	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
1098 		   aura->thresh_int, aura->thresh_int_ena);
1099 	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
1100 		   aura->thresh_up, aura->thresh_qint_idx);
1101 	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
1102 
1103 	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
1104 	if (!is_rvu_otx2(rvu))
1105 		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
1106 }
1107 
1108 /* Dumps given NPA Pool's context */
1109 static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
1110 {
1111 	struct npa_pool_s *pool = &rsp->pool;
1112 	struct rvu *rvu = m->private;
1113 
1114 	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
1115 
1116 	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
1117 		   pool->ena, pool->nat_align);
1118 	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
1119 		   pool->stack_caching, pool->stack_way_mask);
1120 	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
1121 		   pool->buf_offset, pool->buf_size);
1122 
1123 	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
1124 		   pool->stack_max_pages, pool->stack_pages);
1125 
1126 	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
1127 
1128 	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
1129 		   pool->stack_offset, pool->shift, pool->avg_level);
1130 	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
1131 		   pool->avg_con, pool->fc_ena, pool->fc_stype);
1132 	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
1133 		   pool->fc_hyst_bits, pool->fc_up_crossing);
1134 	if (!is_rvu_otx2(rvu))
1135 		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
1136 	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
1137 
1138 	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
1139 
1140 	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
1141 
1142 	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
1143 
1144 	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
1145 		   pool->err_int, pool->err_int_ena);
1146 	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
1147 	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
1148 		   pool->thresh_int_ena, pool->thresh_up);
1149 	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
1150 		   pool->thresh_qint_idx, pool->err_qint_idx);
1151 	if (!is_rvu_otx2(rvu))
1152 		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
1153 }
1154 
1155 /* Reads aura/pool's ctx from admin queue */
1156 static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
1157 {
1158 	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
1159 	struct npa_aq_enq_req aq_req;
1160 	struct npa_aq_enq_rsp rsp;
1161 	struct rvu_pfvf *pfvf;
1162 	int aura, rc, max_id;
1163 	int npalf, id, all;
1164 	struct rvu *rvu;
1165 	u16 pcifunc;
1166 
1167 	rvu = m->private;
1168 
1169 	switch (ctype) {
1170 	case NPA_AQ_CTYPE_AURA:
1171 		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
1172 		id = rvu->rvu_dbg.npa_aura_ctx.id;
1173 		all = rvu->rvu_dbg.npa_aura_ctx.all;
1174 		break;
1175 
1176 	case NPA_AQ_CTYPE_POOL:
1177 		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
1178 		id = rvu->rvu_dbg.npa_pool_ctx.id;
1179 		all = rvu->rvu_dbg.npa_pool_ctx.all;
1180 		break;
1181 	default:
1182 		return -EINVAL;
1183 	}
1184 
1185 	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
1186 		return -EINVAL;
1187 
1188 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1189 	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
1190 		seq_puts(m, "Aura context is not initialized\n");
1191 		return -EINVAL;
1192 	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
1193 		seq_puts(m, "Pool context is not initialized\n");
1194 		return -EINVAL;
1195 	}
1196 
1197 	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
1198 	aq_req.hdr.pcifunc = pcifunc;
1199 	aq_req.ctype = ctype;
1200 	aq_req.op = NPA_AQ_INSTOP_READ;
1201 	if (ctype == NPA_AQ_CTYPE_AURA) {
1202 		max_id = pfvf->aura_ctx->qsize;
1203 		print_npa_ctx = print_npa_aura_ctx;
1204 	} else {
1205 		max_id = pfvf->pool_ctx->qsize;
1206 		print_npa_ctx = print_npa_pool_ctx;
1207 	}
1208 
1209 	if (id < 0 || id >= max_id) {
1210 		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
1211 			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
1212 			max_id - 1);
1213 		return -EINVAL;
1214 	}
1215 
1216 	if (all)
1217 		id = 0;
1218 	else
1219 		max_id = id + 1;
1220 
1221 	for (aura = id; aura < max_id; aura++) {
1222 		aq_req.aura_id = aura;
1223 
1224 		/* Skip if queue is uninitialized */
1225 		if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
1226 			continue;
1227 
1228 		seq_printf(m, "======%s : %d=======\n",
1229 			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
1230 			aq_req.aura_id);
1231 		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
1232 		if (rc) {
1233 			seq_puts(m, "Failed to read context\n");
1234 			return -EINVAL;
1235 		}
1236 		print_npa_ctx(m, &rsp);
1237 	}
1238 	return 0;
1239 }
1240 
1241 static int write_npa_ctx(struct rvu *rvu, bool all,
1242 			 int npalf, int id, int ctype)
1243 {
1244 	struct rvu_pfvf *pfvf;
1245 	int max_id = 0;
1246 	u16 pcifunc;
1247 
1248 	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
1249 		return -EINVAL;
1250 
1251 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1252 
1253 	if (ctype == NPA_AQ_CTYPE_AURA) {
1254 		if (!pfvf->aura_ctx) {
1255 			dev_warn(rvu->dev, "Aura context is not initialized\n");
1256 			return -EINVAL;
1257 		}
1258 		max_id = pfvf->aura_ctx->qsize;
1259 	} else if (ctype == NPA_AQ_CTYPE_POOL) {
1260 		if (!pfvf->pool_ctx) {
1261 			dev_warn(rvu->dev, "Pool context is not initialized\n");
1262 			return -EINVAL;
1263 		}
1264 		max_id = pfvf->pool_ctx->qsize;
1265 	}
1266 
1267 	if (id < 0 || id >= max_id) {
1268 		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
1269 			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
1270 			max_id - 1);
1271 		return -EINVAL;
1272 	}
1273 
1274 	switch (ctype) {
1275 	case NPA_AQ_CTYPE_AURA:
1276 		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
1277 		rvu->rvu_dbg.npa_aura_ctx.id = id;
1278 		rvu->rvu_dbg.npa_aura_ctx.all = all;
1279 		break;
1280 
1281 	case NPA_AQ_CTYPE_POOL:
1282 		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
1283 		rvu->rvu_dbg.npa_pool_ctx.id = id;
1284 		rvu->rvu_dbg.npa_pool_ctx.all = all;
1285 		break;
1286 	default:
1287 		return -EINVAL;
1288 	}
1289 	return 0;
1290 }
1291 
1292 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
1293 				const char __user *buffer, int *npalf,
1294 				int *id, bool *all)
1295 {
1296 	int bytes_not_copied;
1297 	char *cmd_buf_tmp;
1298 	char *subtoken;
1299 	int ret;
1300 
1301 	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
1302 	if (bytes_not_copied)
1303 		return -EFAULT;
1304 
1305 	cmd_buf[*count] = '\0';
1306 	cmd_buf_tmp = strchr(cmd_buf, '\n');
1307 
1308 	if (cmd_buf_tmp) {
1309 		*cmd_buf_tmp = '\0';
1310 		*count = cmd_buf_tmp - cmd_buf + 1;
1311 	}
1312 
1313 	subtoken = strsep(&cmd_buf, " ");
1314 	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
1315 	if (ret < 0)
1316 		return ret;
1317 	subtoken = strsep(&cmd_buf, " ");
1318 	if (subtoken && strcmp(subtoken, "all") == 0) {
1319 		*all = true;
1320 	} else {
1321 		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
1322 		if (ret < 0)
1323 			return ret;
1324 	}
1325 	if (cmd_buf)
1326 		return -EINVAL;
1327 	return ret;
1328 }
1329 
1330 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
1331 				     const char __user *buffer,
1332 				     size_t count, loff_t *ppos, int ctype)
1333 {
1334 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
1335 					"aura" : "pool";
1336 	struct seq_file *seqfp = filp->private_data;
1337 	struct rvu *rvu = seqfp->private;
1338 	int npalf, id = 0, ret;
1339 	bool all = false;
1340 
1341 	if ((*ppos != 0) || !count)
1342 		return -EINVAL;
1343 
1344 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1345 	if (!cmd_buf)
1346 		return -ENOMEM;
1347 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1348 				   &npalf, &id, &all);
1349 	if (ret < 0) {
1350 		dev_info(rvu->dev,
1351 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1352 			 ctype_string, ctype_string);
1353 		goto done;
1354 	} else {
1355 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1356 	}
1357 done:
1358 	kfree(cmd_buf);
1359 	return ret ? ret : count;
1360 }
1361 
1362 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1363 					  const char __user *buffer,
1364 					  size_t count, loff_t *ppos)
1365 {
1366 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1367 				     NPA_AQ_CTYPE_AURA);
1368 }
1369 
1370 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
1371 {
1372 	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
1373 }
1374 
1375 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1376 
1377 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1378 					  const char __user *buffer,
1379 					  size_t count, loff_t *ppos)
1380 {
1381 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1382 				     NPA_AQ_CTYPE_POOL);
1383 }
1384 
1385 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
1386 {
1387 	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
1388 }
1389 
1390 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
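/* Illustrative usage of the aura/pool context files, following the
 * "<npalf> <aura/pool number|all>" grammar parsed by
 * parse_cmd_buffer_ctx() above:
 *
 *	echo "0 all" > aura_ctx		dump every initialized aura of LF 0
 *	echo "0 5" > pool_ctx		dump only pool 5 of LF 0
 *	cat pool_ctx
 */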
1391 
1392 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1393 			    int ctype, int transaction)
1394 {
1395 	u64 req, out_req, lat, cant_alloc;
1396 	struct nix_hw *nix_hw;
1397 	struct rvu *rvu;
1398 	int port;
1399 
1400 	if (blk_addr == BLKADDR_NDC_NPA0) {
1401 		rvu = s->private;
1402 	} else {
1403 		nix_hw = s->private;
1404 		rvu = nix_hw->rvu;
1405 	}
1406 
1407 	for (port = 0; port < NDC_MAX_PORT; port++) {
1408 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1409 						(port, ctype, transaction));
1410 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1411 						(port, ctype, transaction));
1412 		out_req = rvu_read64(rvu, blk_addr,
1413 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1414 				     (port, ctype, transaction));
1415 		cant_alloc = rvu_read64(rvu, blk_addr,
1416 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1417 					(port, transaction));
1418 		seq_printf(s, "\nPort:%d\n", port);
1419 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1420 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1421 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n",
1422 			   req ? lat / req : 0);
1422 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1423 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1424 	}
1425 }
1426 
1427 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1428 {
1429 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
1430 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1431 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
1432 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1433 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1434 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1435 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1436 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1437 	return 0;
1438 }
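/* ndc_blk_cache_stats() walks all four combinations of cache mode
 * (CACHING/BYPASS) and transaction type (read/write); for each NDC port
 * the average latency is total latency cycles divided by request count.
 */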
1439 
1440 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
1441 {
1442 	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1443 }
1444 
1445 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1446 
1447 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1448 {
1449 	struct nix_hw *nix_hw;
1450 	struct rvu *rvu;
1451 	int bank, max_bank;
1452 	u64 ndc_af_const;
1453 
1454 	if (blk_addr == BLKADDR_NDC_NPA0) {
1455 		rvu = s->private;
1456 	} else {
1457 		nix_hw = s->private;
1458 		rvu = nix_hw->rvu;
1459 	}
1460 
1461 	ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
1462 	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
1463 	for (bank = 0; bank < max_bank; bank++) {
1464 		seq_printf(s, "BANK:%d\n", bank);
1465 		seq_printf(s, "\tHits:\t%lld\n",
1466 			   (u64)rvu_read64(rvu, blk_addr,
1467 			   NDC_AF_BANKX_HIT_PC(bank)));
1468 		seq_printf(s, "\tMiss:\t%lld\n",
1469 			   (u64)rvu_read64(rvu, blk_addr,
1470 			    NDC_AF_BANKX_MISS_PC(bank)));
1471 	}
1472 	return 0;
1473 }
1474 
1475 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1476 {
1477 	struct nix_hw *nix_hw = filp->private;
1478 	int blkaddr = 0;
1479 	int ndc_idx = 0;
1480 
1481 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1482 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1483 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1484 
1485 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1486 }
1487 
1488 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1489 
1490 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1491 {
1492 	struct nix_hw *nix_hw = filp->private;
1493 	int blkaddr = 0;
1494 	int ndc_idx = 0;
1495 
1496 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1497 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1498 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1499 
1500 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1501 }
1502 
1503 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1504 
1505 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
1506 					     void *unused)
1507 {
1508 	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1509 }
1510 
1511 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1512 
1513 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1514 						void *unused)
1515 {
1516 	struct nix_hw *nix_hw = filp->private;
1517 	int ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1518 	int blkaddr = 0;
1519 
1520 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1521 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1522 
1523 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1524 }
1525 
1526 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1527 
1528 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1529 						void *unused)
1530 {
1531 	struct nix_hw *nix_hw = filp->private;
1532 	int ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1533 	int blkaddr = 0;
1534 
1535 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1536 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1537 
1538 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1539 }
1540 
1541 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1542 
1543 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1544 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1545 {
1546 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1547 		   sq_ctx->ena, sq_ctx->qint_idx);
1548 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1549 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1550 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1551 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1552 
1553 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1554 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1555 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1556 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1557 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1558 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1559 
1560 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1561 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1562 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1563 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1564 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1565 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1566 
1567 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1568 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1569 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1570 		   sq_ctx->lmt_dis, sq_ctx->mnq_dis);
1571 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1572 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1573 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1574 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1575 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1576 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1577 
1578 	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1579 		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1580 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1581 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1582 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1583 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1584 		   sq_ctx->smenq_next_sqb);
1585 
1586 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1587 
1588 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1589 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1590 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1591 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1592 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1593 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1594 		   sq_ctx->vfi_lso_vlan1_ins_ena, sq_ctx->vfi_lso_vld);
1595 
1596 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1597 		   (u64)sq_ctx->scm_lso_rem);
1598 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1599 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1600 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1601 		   (u64)sq_ctx->dropped_octs);
1602 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1603 		   (u64)sq_ctx->dropped_pkts);
1604 }
1605 
1606 static void print_tm_tree(struct seq_file *m,
1607 			  struct nix_aq_enq_rsp *rsp, u64 sq)
1608 {
1609 	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
1610 	struct nix_hw *nix_hw = m->private;
1611 	struct rvu *rvu = nix_hw->rvu;
1612 	u16 p1, p2, p3, p4, schq;
1613 	int blkaddr;
1614 	u64 cfg;
1615 
1616 	blkaddr = nix_hw->blkaddr;
1617 	schq = sq_ctx->smq;
1618 
1619 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq));
1620 	p1 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK, cfg);
1621 
1622 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(p1));
1623 	p2 = FIELD_GET(NIX_AF_TL4_PARENT_MASK, cfg);
1624 
1625 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(p2));
1626 	p3 = FIELD_GET(NIX_AF_TL3_PARENT_MASK, cfg);
1627 
1628 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(p3));
1629 	p4 = FIELD_GET(NIX_AF_TL2_PARENT_MASK, cfg);
1630 	seq_printf(m,
1631 		   "SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n",
1632 		   sq, schq, p1, p2, p3, p4);
1633 }
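/* Example line emitted above (scheduler queue numbers are illustrative):
 *
 *	SQ(0) -> SMQ(4) -> TL4(3) -> TL3(2) -> TL2(1) -> TL1(0)
 *
 * Each hop is resolved by reading the NIX_AF_*X_PARENT register of the
 * current level, walking from the SQ's SMQ/MDQ up to the TL1 root.
 */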
1634 
1635 /*dumps given tm_tree registers*/
1636 static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused)
1637 {
1638 	int qidx, nixlf, rc, id, max_id = 0;
1639 	struct nix_hw *nix_hw = m->private;
1640 	struct rvu *rvu = nix_hw->rvu;
1641 	struct nix_aq_enq_req aq_req;
1642 	struct nix_aq_enq_rsp rsp;
1643 	struct rvu_pfvf *pfvf;
1644 	u16 pcifunc;
1645 
1646 	nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1647 	id = rvu->rvu_dbg.nix_tm_ctx.id;
1648 
1649 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1650 		return -EINVAL;
1651 
1652 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1653 	max_id = pfvf->sq_ctx->qsize;
1654 
1655 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1656 	aq_req.hdr.pcifunc = pcifunc;
1657 	aq_req.ctype = NIX_AQ_CTYPE_SQ;
1658 	aq_req.op = NIX_AQ_INSTOP_READ;
1659 	seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1660 	for (qidx = id; qidx < max_id; qidx++) {
1661 		aq_req.qidx = qidx;
1662 
		/* Skip SQs that are not initialized */
1664 		if (!test_bit(qidx, pfvf->sq_bmap))
1665 			continue;
1666 
1667 		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1668 
1669 		if (rc) {
1670 			seq_printf(m, "Failed to read SQ(%d) context\n",
1671 				   aq_req.qidx);
1672 			continue;
1673 		}
1674 		print_tm_tree(m, &rsp, aq_req.qidx);
1675 	}
1676 	return 0;
1677 }
1678 
1679 static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp,
1680 					 const char __user *buffer,
1681 					 size_t count, loff_t *ppos)
1682 {
1683 	struct seq_file *m = filp->private_data;
1684 	struct nix_hw *nix_hw = m->private;
1685 	struct rvu *rvu = nix_hw->rvu;
1686 	struct rvu_pfvf *pfvf;
1687 	u16 pcifunc;
1688 	u64 nixlf;
1689 	int ret;
1690 
1691 	ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
1692 	if (ret)
1693 		return ret;
1694 
1695 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1696 		return -EINVAL;
1697 
1698 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1699 	if (!pfvf->sq_ctx) {
1700 		dev_warn(rvu->dev, "SQ context is not initialized\n");
1701 		return -EINVAL;
1702 	}
1703 
1704 	rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
1705 	return count;
1706 }
1707 
1708 RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write);
1709 
1710 static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl)
1711 {
1712 	struct nix_hw *nix_hw = m->private;
1713 	struct rvu *rvu = nix_hw->rvu;
1714 	int blkaddr, link, link_level;
1715 	struct rvu_hwinfo *hw;
1716 
1717 	hw = rvu->hw;
1718 	blkaddr = nix_hw->blkaddr;
1719 	if (lvl == NIX_TXSCH_LVL_MDQ) {
1720 		seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq,
1721 			   rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)));
1722 		seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq,
1723 			   rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq)));
1724 		seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT =0x%llx\n", schq,
1725 			   rvu_read64(rvu, blkaddr,
1726 				      NIX_AF_MDQX_OUT_MD_COUNT(schq)));
1727 		seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq,
1728 			   rvu_read64(rvu, blkaddr,
1729 				      NIX_AF_MDQX_SCHEDULE(schq)));
1730 		seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq,
1731 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq)));
1732 		seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq,
1733 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq)));
1734 		seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq,
1735 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq)));
1736 		seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq,
1737 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq)));
1738 		seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq,
1739 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)));
1740 		seq_puts(m, "\n");
1741 	}
1742 
1743 	if (lvl == NIX_TXSCH_LVL_TL4) {
1744 		seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq,
1745 			   rvu_read64(rvu, blkaddr,
1746 				      NIX_AF_TL4X_SDP_LINK_CFG(schq)));
1747 		seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq,
1748 			   rvu_read64(rvu, blkaddr,
1749 				      NIX_AF_TL4X_SCHEDULE(schq)));
1750 		seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq,
1751 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq)));
1752 		seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq,
1753 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq)));
1754 		seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq,
1755 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq)));
1756 		seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq,
1757 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq)));
1758 		seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq,
1759 			   rvu_read64(rvu, blkaddr,
1760 				      NIX_AF_TL4X_TOPOLOGY(schq)));
1761 		seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq,
1762 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq)));
1763 		seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1764 			   rvu_read64(rvu, blkaddr,
1765 				      NIX_AF_TL4X_MD_DEBUG0(schq)));
1766 		seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1767 			   rvu_read64(rvu, blkaddr,
1768 				      NIX_AF_TL4X_MD_DEBUG1(schq)));
1769 		seq_puts(m, "\n");
1770 	}
1771 
1772 	if (lvl == NIX_TXSCH_LVL_TL3) {
1773 		seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq,
1774 			   rvu_read64(rvu, blkaddr,
1775 				      NIX_AF_TL3X_SCHEDULE(schq)));
1776 		seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq,
1777 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq)));
1778 		seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq,
1779 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq)));
1780 		seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq,
1781 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq)));
1782 		seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq,
1783 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq)));
1784 		seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq,
1785 			   rvu_read64(rvu, blkaddr,
1786 				      NIX_AF_TL3X_TOPOLOGY(schq)));
1787 		seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", schq,
1788 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq)));
1789 		seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1790 			   rvu_read64(rvu, blkaddr,
1791 				      NIX_AF_TL3X_MD_DEBUG0(schq)));
1792 		seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1793 			   rvu_read64(rvu, blkaddr,
1794 				      NIX_AF_TL3X_MD_DEBUG1(schq)));
1795 
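		/* Bit 0 of NIX_AF_PSE_CHANNEL_LEVEL selects whether TL3 or
		 * TL2 is the link level; per-link config and backpressure
		 * status are only meaningful at that level.
		 */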
1796 		link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1797 				& 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1798 		if (lvl == link_level) {
1799 			seq_printf(m,
1800 				   "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1801 				   schq, rvu_read64(rvu, blkaddr,
1802 				   NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1803 			for (link = 0; link < hw->cgx_links; link++)
1804 				seq_printf(m,
1805 					   "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1806 					   schq, link,
1807 					   rvu_read64(rvu, blkaddr,
1808 						      NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1809 		}
1810 		seq_puts(m, "\n");
1811 	}
1812 
1813 	if (lvl == NIX_TXSCH_LVL_TL2) {
1814 		seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq,
1815 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq)));
1816 		seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq,
1817 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq)));
1818 		seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq,
1819 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq)));
1820 		seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq,
1821 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq)));
1822 		seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq,
1823 			   rvu_read64(rvu, blkaddr,
1824 				      NIX_AF_TL2X_TOPOLOGY(schq)));
1825 		seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq,
1826 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq)));
1827 		seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1828 			   rvu_read64(rvu, blkaddr,
1829 				      NIX_AF_TL2X_MD_DEBUG0(schq)));
1830 		seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1831 			   rvu_read64(rvu, blkaddr,
1832 				      NIX_AF_TL2X_MD_DEBUG1(schq)));
1833 
1834 		link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1835 				& 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1836 		if (lvl == link_level) {
1837 			seq_printf(m,
1838 				   "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1839 				   schq, rvu_read64(rvu, blkaddr,
1840 				   NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1841 			for (link = 0; link < hw->cgx_links; link++)
1842 				seq_printf(m,
1843 					   "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1844 					   schq, link, rvu_read64(rvu, blkaddr,
1845 					   NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1846 		}
1847 		seq_puts(m, "\n");
1848 	}
1849 
1850 	if (lvl == NIX_TXSCH_LVL_TL1) {
1851 		seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n",
1852 			   schq,
1853 			   rvu_read64(rvu, blkaddr,
1854 				      NIX_AF_TX_LINKX_NORM_CREDIT(schq)));
1855 		seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq,
1856 			   rvu_read64(rvu, blkaddr,
1857 				      NIX_AF_TX_LINKX_HW_XOFF(schq)));
1858 		seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq,
1859 			   rvu_read64(rvu, blkaddr,
1860 				      NIX_AF_TL1X_SCHEDULE(schq)));
1861 		seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq,
1862 			   rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq)));
1863 		seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq,
1864 			   rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq)));
1865 		seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq,
1866 			   rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)));
1867 		seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq,
1868 			   rvu_read64(rvu, blkaddr,
1869 				      NIX_AF_TL1X_TOPOLOGY(schq)));
1870 		seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1871 			   rvu_read64(rvu, blkaddr,
1872 				      NIX_AF_TL1X_MD_DEBUG0(schq)));
1873 		seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1874 			   rvu_read64(rvu, blkaddr,
1875 				      NIX_AF_TL1X_MD_DEBUG1(schq)));
1876 		seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n",
1877 			   schq,
1878 			   rvu_read64(rvu, blkaddr,
1879 				      NIX_AF_TL1X_DROPPED_PACKETS(schq)));
1880 		seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq,
1881 			   rvu_read64(rvu, blkaddr,
1882 				      NIX_AF_TL1X_DROPPED_BYTES(schq)));
1883 		seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq,
1884 			   rvu_read64(rvu, blkaddr,
1885 				      NIX_AF_TL1X_RED_PACKETS(schq)));
1886 		seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq,
1887 			   rvu_read64(rvu, blkaddr,
1888 				      NIX_AF_TL1X_RED_BYTES(schq)));
1889 		seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq,
1890 			   rvu_read64(rvu, blkaddr,
1891 				      NIX_AF_TL1X_YELLOW_PACKETS(schq)));
1892 		seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq,
1893 			   rvu_read64(rvu, blkaddr,
1894 				      NIX_AF_TL1X_YELLOW_BYTES(schq)));
1895 		seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq,
1896 			   rvu_read64(rvu, blkaddr,
1897 				      NIX_AF_TL1X_GREEN_PACKETS(schq)));
1898 		seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq,
1899 			   rvu_read64(rvu, blkaddr,
1900 				      NIX_AF_TL1X_GREEN_BYTES(schq)));
1901 		seq_puts(m, "\n");
1902 	}
1903 }
1904 
/* Dumps the transmit scheduler topology registers for the chosen NIX LF */
1906 static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused)
1907 {
1908 	struct nix_hw *nix_hw = m->private;
1909 	struct rvu *rvu = nix_hw->rvu;
1911 	struct nix_txsch *txsch;
1912 	int nixlf, lvl, schq;
1913 	u16 pcifunc;
1914 
1915 	nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1916 
1917 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1918 		return -EINVAL;
1919 
1924 	seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1925 
1926 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1927 		txsch = &nix_hw->txsch[lvl];
1928 		for (schq = 0; schq < txsch->schq.max; schq++) {
1929 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc)
1930 				print_tm_topo(m, schq, lvl);
1931 		}
1932 	}
1933 	return 0;
1934 }
1935 
1936 static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp,
1937 					 const char __user *buffer,
1938 					 size_t count, loff_t *ppos)
1939 {
1940 	struct seq_file *m = filp->private_data;
1941 	struct nix_hw *nix_hw = m->private;
1942 	struct rvu *rvu = nix_hw->rvu;
1943 	struct rvu_pfvf *pfvf;
1944 	u16 pcifunc;
1945 	u64 nixlf;
1946 	int ret;
1947 
1948 	ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
1949 	if (ret)
1950 		return ret;
1951 
1952 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1953 		return -EINVAL;
1954 
1955 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1956 	if (!pfvf->sq_ctx) {
1957 		dev_warn(rvu->dev, "SQ context is not initialized\n");
1958 		return -EINVAL;
1959 	}
1960 
1961 	rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
1962 	return count;
1963 }
1964 
1965 RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write);
1966 
1967 /* Dumps given nix_sq's context */
1968 static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1969 {
1970 	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
1971 	struct nix_hw *nix_hw = m->private;
1972 	struct rvu *rvu = nix_hw->rvu;
1973 
1974 	if (!is_rvu_otx2(rvu)) {
1975 		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
1976 		return;
1977 	}
1978 	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
1979 		   sq_ctx->sqe_way_mask, sq_ctx->cq);
1980 	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1981 		   sq_ctx->sdp_mcast, sq_ctx->substream);
1982 	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
1983 		   sq_ctx->qint_idx, sq_ctx->ena);
1984 
1985 	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
1986 		   sq_ctx->sqb_count, sq_ctx->default_chan);
1987 	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
1988 		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
1989 	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
1990 		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
1991 
1992 	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
1993 		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
1994 	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
1995 		   sq_ctx->sq_int, sq_ctx->sqb_aura);
1996 	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
1997 
1998 	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1999 		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
2000 	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
2001 		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
2002 	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
2003 		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
2004 	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
2005 		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
2006 	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
2007 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
2008 	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
2009 		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);
2010 
2011 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
2012 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
2013 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
2014 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
2015 		   sq_ctx->smenq_next_sqb);
2016 
2017 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
2018 
2019 	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
2020 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
2021 	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
2022 		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
2023 	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
2024 		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
2025 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
2026 
2027 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
2028 		   (u64)sq_ctx->scm_lso_rem);
2029 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
2030 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
2031 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
2032 		   (u64)sq_ctx->dropped_octs);
2033 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
2034 		   (u64)sq_ctx->dropped_pkts);
2035 }
2036 
2037 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
2038 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
2039 {
2040 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
2041 		   rq_ctx->ena, rq_ctx->sso_ena);
2042 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
2043 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
2044 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
2045 		   rq_ctx->cq, rq_ctx->lenerr_dis);
2046 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
2047 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
2048 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
2049 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
2050 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
2051 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
2052 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
2053 
2054 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
2055 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
2057 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
2058 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
2059 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
2060 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
2061 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
2062 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
2063 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
2064 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
2065 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
2066 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
2067 
2068 	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
2069 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
2070 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: spb_ena \t\t\t%d\n",
		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
	seq_printf(m, "W2: lpb_sizem1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
2075 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
2076 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
2077 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
2078 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
2079 
2080 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
2081 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
2082 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
2083 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
2084 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
2085 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
		   rq_ctx->spb_aura_drop, rq_ctx->spb_aura_pass);
2088 
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_aura_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
2093 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
2094 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
2095 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
2096 
2097 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
2098 		   rq_ctx->ltag, rq_ctx->good_utag);
2099 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
2100 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
2101 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
2102 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
2103 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
2104 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
2105 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
2106 
2107 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
2108 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
2109 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
2110 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
2111 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
2112 }
2113 
2114 /* Dumps given nix_rq's context */
2115 static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
2116 {
2117 	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
2118 	struct nix_hw *nix_hw = m->private;
2119 	struct rvu *rvu = nix_hw->rvu;
2120 
2121 	if (!is_rvu_otx2(rvu)) {
2122 		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
2123 		return;
2124 	}
2125 
2126 	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
2127 		   rq_ctx->wqe_aura, rq_ctx->substream);
2128 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
2129 		   rq_ctx->cq, rq_ctx->ena_wqwd);
2130 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
2131 		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
2132 	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
2133 
2134 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
2135 		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
2136 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
2137 		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
2138 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
2139 		   rq_ctx->pb_caching, rq_ctx->sso_tt);
2140 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
2141 		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
2142 	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
2143 
2144 	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
2145 		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
2146 	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
2147 		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
2148 	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
2149 		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
2150 	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
2151 		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
2152 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
2153 
2154 	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
2155 		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
2156 	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
2157 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
2158 	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
2159 		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
2160 	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
2161 		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);
2162 
2163 	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
2164 		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
2165 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
2166 		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
2167 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
2168 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
2169 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
2170 
2171 	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
2172 		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
2173 	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
2174 		   rq_ctx->good_utag, rq_ctx->ltag);
2175 
2176 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
2177 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
2178 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
2179 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
2180 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
2181 }
2182 
2183 /* Dumps given nix_cq's context */
2184 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
2185 {
2186 	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
2187 	struct nix_hw *nix_hw = m->private;
2188 	struct rvu *rvu = nix_hw->rvu;
2189 
2190 	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
2191 
2192 	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
2193 	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
2194 		   cq_ctx->avg_con, cq_ctx->cint_idx);
2195 	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
2196 		   cq_ctx->cq_err, cq_ctx->qint_idx);
2197 	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
2198 		   cq_ctx->bpid, cq_ctx->bp_ena);
2199 
2200 	if (!is_rvu_otx2(rvu)) {
2201 		seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
2202 		seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
2203 		seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
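		/* The full lbpid spans three sub-fields; also print the
		 * combined value.
		 */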
2204 		seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
2205 			   cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
2206 			   cq_ctx->lbpid_low);
2207 		seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
2208 	}
2209 
	seq_printf(m, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2: tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3: caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
2219 	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
2220 		   cq_ctx->substream, cq_ctx->ena);
2221 	if (!is_rvu_otx2(rvu)) {
2222 		seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
2223 		seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
2224 			   cq_ctx->cpt_drop_err_en);
2225 	}
2226 	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
2227 		   cq_ctx->drop_ena, cq_ctx->drop);
2228 	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
2229 }
2230 
2231 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
2232 					 void *unused, int ctype)
2233 {
2234 	void (*print_nix_ctx)(struct seq_file *filp,
2235 			      struct nix_aq_enq_rsp *rsp) = NULL;
2236 	struct nix_hw *nix_hw = filp->private;
2237 	struct rvu *rvu = nix_hw->rvu;
2238 	struct nix_aq_enq_req aq_req;
2239 	struct nix_aq_enq_rsp rsp;
2240 	char *ctype_string = NULL;
2241 	int qidx, rc, max_id = 0;
2242 	struct rvu_pfvf *pfvf;
2243 	int nixlf, id, all;
2244 	u16 pcifunc;
2245 
2246 	switch (ctype) {
2247 	case NIX_AQ_CTYPE_CQ:
2248 		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
2249 		id = rvu->rvu_dbg.nix_cq_ctx.id;
2250 		all = rvu->rvu_dbg.nix_cq_ctx.all;
2251 		break;
2252 
2253 	case NIX_AQ_CTYPE_SQ:
2254 		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
2255 		id = rvu->rvu_dbg.nix_sq_ctx.id;
2256 		all = rvu->rvu_dbg.nix_sq_ctx.all;
2257 		break;
2258 
2259 	case NIX_AQ_CTYPE_RQ:
2260 		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
2261 		id = rvu->rvu_dbg.nix_rq_ctx.id;
2262 		all = rvu->rvu_dbg.nix_rq_ctx.all;
2263 		break;
2264 
2265 	default:
2266 		return -EINVAL;
2267 	}
2268 
2269 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
2270 		return -EINVAL;
2271 
2272 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2273 	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
2274 		seq_puts(filp, "SQ context is not initialized\n");
2275 		return -EINVAL;
2276 	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
2277 		seq_puts(filp, "RQ context is not initialized\n");
2278 		return -EINVAL;
2279 	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
2280 		seq_puts(filp, "CQ context is not initialized\n");
2281 		return -EINVAL;
2282 	}
2283 
2284 	if (ctype == NIX_AQ_CTYPE_SQ) {
2285 		max_id = pfvf->sq_ctx->qsize;
2286 		ctype_string = "sq";
2287 		print_nix_ctx = print_nix_sq_ctx;
2288 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
2289 		max_id = pfvf->rq_ctx->qsize;
2290 		ctype_string = "rq";
2291 		print_nix_ctx = print_nix_rq_ctx;
2292 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
2293 		max_id = pfvf->cq_ctx->qsize;
2294 		ctype_string = "cq";
2295 		print_nix_ctx = print_nix_cq_ctx;
2296 	}
2297 
2298 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
2299 	aq_req.hdr.pcifunc = pcifunc;
2300 	aq_req.ctype = ctype;
2301 	aq_req.op = NIX_AQ_INSTOP_READ;
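	/* Dump either all queue contexts of this LF or just the one
	 * selected via the corresponding debugfs write.
	 */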
2302 	if (all)
2303 		id = 0;
2304 	else
2305 		max_id = id + 1;
2306 	for (qidx = id; qidx < max_id; qidx++) {
2307 		aq_req.qidx = qidx;
2308 		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
2309 			   ctype_string, nixlf, aq_req.qidx);
2310 		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
2311 		if (rc) {
2312 			seq_puts(filp, "Failed to read the context\n");
2313 			return -EINVAL;
2314 		}
2315 		print_nix_ctx(filp, &rsp);
2316 	}
2317 	return 0;
2318 }
2319 
2320 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
2321 			       int id, int ctype, char *ctype_string,
2322 			       struct seq_file *m)
2323 {
2324 	struct nix_hw *nix_hw = m->private;
2325 	struct rvu_pfvf *pfvf;
2326 	int max_id = 0;
2327 	u16 pcifunc;
2328 
2329 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
2330 		return -EINVAL;
2331 
2332 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2333 
2334 	if (ctype == NIX_AQ_CTYPE_SQ) {
2335 		if (!pfvf->sq_ctx) {
2336 			dev_warn(rvu->dev, "SQ context is not initialized\n");
2337 			return -EINVAL;
2338 		}
2339 		max_id = pfvf->sq_ctx->qsize;
2340 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
2341 		if (!pfvf->rq_ctx) {
2342 			dev_warn(rvu->dev, "RQ context is not initialized\n");
2343 			return -EINVAL;
2344 		}
2345 		max_id = pfvf->rq_ctx->qsize;
2346 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
2347 		if (!pfvf->cq_ctx) {
2348 			dev_warn(rvu->dev, "CQ context is not initialized\n");
2349 			return -EINVAL;
2350 		}
2351 		max_id = pfvf->cq_ctx->qsize;
2352 	}
2353 
2354 	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx id, valid range is 0-%d\n",
			 ctype_string, max_id - 1);
2357 		return -EINVAL;
2358 	}
2359 	switch (ctype) {
2360 	case NIX_AQ_CTYPE_CQ:
2361 		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
2362 		rvu->rvu_dbg.nix_cq_ctx.id = id;
2363 		rvu->rvu_dbg.nix_cq_ctx.all = all;
2364 		break;
2365 
2366 	case NIX_AQ_CTYPE_SQ:
2367 		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
2368 		rvu->rvu_dbg.nix_sq_ctx.id = id;
2369 		rvu->rvu_dbg.nix_sq_ctx.all = all;
2370 		break;
2371 
2372 	case NIX_AQ_CTYPE_RQ:
2373 		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
2374 		rvu->rvu_dbg.nix_rq_ctx.id = id;
2375 		rvu->rvu_dbg.nix_rq_ctx.all = all;
2376 		break;
2377 	default:
2378 		return -EINVAL;
2379 	}
2380 	return 0;
2381 }
2382 
2383 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
2384 					   const char __user *buffer,
2385 					   size_t count, loff_t *ppos,
2386 					   int ctype)
2387 {
2388 	struct seq_file *m = filp->private_data;
2389 	struct nix_hw *nix_hw = m->private;
2390 	struct rvu *rvu = nix_hw->rvu;
2391 	char *cmd_buf, *ctype_string;
2392 	int nixlf, id = 0, ret;
2393 	bool all = false;
2394 
2395 	if ((*ppos != 0) || !count)
2396 		return -EINVAL;
2397 
2398 	switch (ctype) {
2399 	case NIX_AQ_CTYPE_SQ:
2400 		ctype_string = "sq";
2401 		break;
2402 	case NIX_AQ_CTYPE_RQ:
2403 		ctype_string = "rq";
2404 		break;
2405 	case NIX_AQ_CTYPE_CQ:
2406 		ctype_string = "cq";
2407 		break;
2408 	default:
2409 		return -EINVAL;
2410 	}
2411 
	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
	if (!cmd_buf)
		return -ENOMEM;
2416 
2417 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
2418 				   &nixlf, &id, &all);
	if (ret < 0) {
		dev_info(rvu->dev,
			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
			 ctype_string, ctype_string);
		goto done;
	}
	ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
				  ctype_string, m);
2428 done:
2429 	kfree(cmd_buf);
2430 	return ret ? ret : count;
2431 }
2432 
2433 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
2434 					const char __user *buffer,
2435 					size_t count, loff_t *ppos)
2436 {
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_SQ);
2439 }
2440 
2441 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
2442 {
2443 	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
2444 }
2445 
2446 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
2447 
2448 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
2449 					const char __user *buffer,
2450 					size_t count, loff_t *ppos)
2451 {
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_RQ);
}

static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
2459 }
2460 
2461 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
2462 
2463 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
2464 					const char __user *buffer,
2465 					size_t count, loff_t *ppos)
2466 {
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_CQ);
2469 }
2470 
2471 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
2472 {
2473 	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
2474 }
2475 
2476 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
2477 
2478 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
2479 				 unsigned long *bmap, char *qtype)
2480 {
2481 	char *buf;
2482 
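	/* bitmap_print_to_pagebuf() can emit up to a page of text */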
2483 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2484 	if (!buf)
2485 		return;
2486 
2487 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
2488 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
2489 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
2490 		   qtype, buf);
2491 	kfree(buf);
2492 }
2493 
2494 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
2495 {
2496 	if (!pfvf->cq_ctx)
2497 		seq_puts(filp, "cq context is not initialized\n");
2498 	else
2499 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
2500 				     "cq");
2501 
2502 	if (!pfvf->rq_ctx)
2503 		seq_puts(filp, "rq context is not initialized\n");
2504 	else
2505 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
2506 				     "rq");
2507 
2508 	if (!pfvf->sq_ctx)
2509 		seq_puts(filp, "sq context is not initialized\n");
2510 	else
2511 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
2512 				     "sq");
2513 }
2514 
2515 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
2516 				       const char __user *buffer,
2517 				       size_t count, loff_t *ppos)
2518 {
2519 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
2520 				   BLKTYPE_NIX);
2521 }
2522 
2523 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
2524 {
2525 	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
2526 }
2527 
2528 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
2529 
2530 static void print_band_prof_ctx(struct seq_file *m,
2531 				struct nix_bandprof_s *prof)
2532 {
2533 	char *str;
2534 
2535 	switch (prof->pc_mode) {
2536 	case NIX_RX_PC_MODE_VLAN:
2537 		str = "VLAN";
2538 		break;
2539 	case NIX_RX_PC_MODE_DSCP:
2540 		str = "DSCP";
2541 		break;
2542 	case NIX_RX_PC_MODE_GEN:
2543 		str = "Generic";
2544 		break;
2545 	case NIX_RX_PC_MODE_RSVD:
2546 		str = "Reserved";
2547 		break;
2548 	}
2549 	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
2550 	str = (prof->icolor == 3) ? "Color blind" :
2551 		(prof->icolor == 0) ? "Green" :
2552 		(prof->icolor == 1) ? "Yellow" : "Red";
2553 	seq_printf(m, "W0: icolor\t\t%s\n", str);
2554 	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
2555 	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
2556 	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
2557 	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
2558 	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
2559 	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
2560 	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
2561 	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
2562 
2563 	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
2564 	str = (prof->lmode == 0) ? "byte" : "packet";
2565 	seq_printf(m, "W1: lmode\t\t%s\n", str);
2566 	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
2567 	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
2568 	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
2569 	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
2570 	str = (prof->gc_action == 0) ? "PASS" :
2571 		(prof->gc_action == 1) ? "DROP" : "RED";
2572 	seq_printf(m, "W1: gc_action\t\t%s\n", str);
2573 	str = (prof->yc_action == 0) ? "PASS" :
2574 		(prof->yc_action == 1) ? "DROP" : "RED";
2575 	seq_printf(m, "W1: yc_action\t\t%s\n", str);
2576 	str = (prof->rc_action == 0) ? "PASS" :
2577 		(prof->rc_action == 1) ? "DROP" : "RED";
2578 	seq_printf(m, "W1: rc_action\t\t%s\n", str);
2579 	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
2580 	seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
2581 	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
2582 
2583 	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
2584 	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
2585 	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
2586 	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
2587 		   (u64)prof->green_pkt_pass);
2588 	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
2589 		   (u64)prof->yellow_pkt_pass);
2590 	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
2591 	seq_printf(m, "W7: green_octs_pass\t%lld\n",
2592 		   (u64)prof->green_octs_pass);
2593 	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
2594 		   (u64)prof->yellow_octs_pass);
2595 	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
2596 	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
2597 		   (u64)prof->green_pkt_drop);
2598 	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
2599 		   (u64)prof->yellow_pkt_drop);
2600 	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
2601 	seq_printf(m, "W13: green_octs_drop\t%lld\n",
2602 		   (u64)prof->green_octs_drop);
2603 	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
2604 		   (u64)prof->yellow_octs_drop);
2605 	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
2606 	seq_puts(m, "==============================\n");
2607 }
2608 
2609 static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
2610 {
2611 	struct nix_hw *nix_hw = m->private;
2612 	struct nix_cn10k_aq_enq_req aq_req;
2613 	struct nix_cn10k_aq_enq_rsp aq_rsp;
2614 	struct rvu *rvu = nix_hw->rvu;
2615 	struct nix_ipolicer *ipolicer;
2616 	int layer, prof_idx, idx, rc;
2617 	u16 pcifunc;
2618 	char *str;
2619 
2620 	/* Ingress policers do not exist on all platforms */
2621 	if (!nix_hw->ipolicer)
2622 		return 0;
2623 
2624 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2625 		if (layer == BAND_PROF_INVAL_LAYER)
2626 			continue;
2627 		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2628 			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
2629 
2630 		seq_printf(m, "\n%s bandwidth profiles\n", str);
2631 		seq_puts(m, "=======================\n");
2632 
2633 		ipolicer = &nix_hw->ipolicer[layer];
2634 
2635 		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
2636 			if (is_rsrc_free(&ipolicer->band_prof, idx))
2637 				continue;
2638 
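			/* AQ bandwidth profile index encoding: bits 13:0
			 * carry the profile number, bits 15:14 the layer.
			 */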
2639 			prof_idx = (idx & 0x3FFF) | (layer << 14);
2640 			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
2641 						 0x00, NIX_AQ_CTYPE_BANDPROF,
2642 						 prof_idx);
2643 			if (rc) {
2644 				dev_err(rvu->dev,
2645 					"%s: Failed to fetch context of %s profile %d, err %d\n",
2646 					__func__, str, idx, rc);
2647 				return 0;
2648 			}
2649 			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
2650 			pcifunc = ipolicer->pfvf_map[idx];
2651 			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2652 				seq_printf(m, "Allocated to :: PF %d\n",
2653 					   rvu_get_pf(pcifunc));
2654 			else
2655 				seq_printf(m, "Allocated to :: PF %d VF %d\n",
2656 					   rvu_get_pf(pcifunc),
2657 					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2658 			print_band_prof_ctx(m, &aq_rsp.prof);
2659 		}
2660 	}
2661 	return 0;
2662 }
2663 
2664 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
2665 
2666 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2667 {
2668 	struct nix_hw *nix_hw = m->private;
2669 	struct nix_ipolicer *ipolicer;
2670 	int layer;
2671 	char *str;
2672 
2673 	/* Ingress policers do not exist on all platforms */
2674 	if (!nix_hw->ipolicer)
2675 		return 0;
2676 
2677 	seq_puts(m, "\nBandwidth profile resource free count\n");
2678 	seq_puts(m, "=====================================\n");
2679 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2680 		if (layer == BAND_PROF_INVAL_LAYER)
2681 			continue;
2682 		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2683 			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2684 
2685 		ipolicer = &nix_hw->ipolicer[layer];
2686 		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
2687 			   ipolicer->band_prof.max,
2688 			   rvu_rsrc_free_count(&ipolicer->band_prof));
2689 	}
2690 	seq_puts(m, "=====================================\n");
2691 
2692 	return 0;
2693 }
2694 
2695 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
2696 
2697 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
2698 {
2699 	struct nix_hw *nix_hw;
2700 
2701 	if (!is_block_implemented(rvu->hw, blkaddr))
2702 		return;
2703 
2704 	if (blkaddr == BLKADDR_NIX0) {
2705 		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
2706 		nix_hw = &rvu->hw->nix[0];
2707 	} else {
2708 		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
2709 						      rvu->rvu_dbg.root);
2710 		nix_hw = &rvu->hw->nix[1];
2711 	}
2712 
2713 	debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw,
2714 			    &rvu_dbg_nix_tm_tree_fops);
2715 	debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw,
2716 			    &rvu_dbg_nix_tm_topo_fops);
2717 	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2718 			    &rvu_dbg_nix_sq_ctx_fops);
2719 	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2720 			    &rvu_dbg_nix_rq_ctx_fops);
2721 	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2722 			    &rvu_dbg_nix_cq_ctx_fops);
2723 	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2724 			    &rvu_dbg_nix_ndc_tx_cache_fops);
2725 	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2726 			    &rvu_dbg_nix_ndc_rx_cache_fops);
2727 	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2728 			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
2729 	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2730 			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
2731 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
2732 			    &rvu_dbg_nix_qsize_fops);
2733 	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2734 			    &rvu_dbg_nix_band_prof_ctx_fops);
2735 	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2736 			    &rvu_dbg_nix_band_prof_rsrc_fops);
2737 }
2738 
2739 static void rvu_dbg_npa_init(struct rvu *rvu)
2740 {
2741 	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2742 
2743 	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2744 			    &rvu_dbg_npa_qsize_fops);
2745 	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2746 			    &rvu_dbg_npa_aura_ctx_fops);
2747 	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2748 			    &rvu_dbg_npa_pool_ctx_fops);
2749 	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2750 			    &rvu_dbg_npa_ndc_cache_fops);
2751 	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2752 			    &rvu_dbg_npa_ndc_hits_miss_fops);
2753 }
2754 
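/* Helpers to print one cumulative NIX stat; each expands to the counter
 * value so callers can also accumulate it, and sets 'err' on a failed read.
 */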
2755 #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
2756 	({								\
2757 		u64 cnt;						\
2758 		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
2759 					     NIX_STATS_RX, &(cnt));	\
2760 		if (!err)						\
2761 			seq_printf(s, "%s: %llu\n", name, cnt);		\
2762 		cnt;							\
2763 	})
2764 
2765 #define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
2766 	({								\
2767 		u64 cnt;						\
2768 		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
2769 					  NIX_STATS_TX, &(cnt));	\
2770 		if (!err)						\
2771 			seq_printf(s, "%s: %llu\n", name, cnt);		\
2772 		cnt;							\
2773 	})
2774 
2775 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2776 {
2777 	struct cgx_link_user_info linfo;
2778 	struct mac_ops *mac_ops;
2779 	void *cgxd = s->private;
2780 	u64 ucast, mcast, bcast;
2781 	int stat = 0, err = 0;
2782 	u64 tx_stat, rx_stat;
2783 	struct rvu *rvu;
2784 
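	/* Look up the RVU AF device; needed to pick CGX vs RPM stat names */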
2785 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2786 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2787 	if (!rvu)
2788 		return -ENODEV;
2789 
2790 	mac_ops = get_mac_ops(cgxd);
	/* The system may not have any CGX devices at all */
2792 	if (!mac_ops)
2793 		return 0;
2794 
2795 	/* Link status */
2796 	seq_puts(s, "\n=======Link Status======\n\n");
	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
	if (err)
		seq_puts(s, "Failed to read link status\n");
	else
		/* linfo is only valid when the read succeeded */
		seq_printf(s, "\nLink is %s %d Mbps\n\n",
			   linfo.link_up ? "UP" : "DOWN", linfo.speed);
2802 
2803 	/* Rx stats */
2804 	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2805 		   mac_ops->name);
2806 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2807 	if (err)
2808 		return err;
2809 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2810 	if (err)
2811 		return err;
2812 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2813 	if (err)
2814 		return err;
2815 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2816 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2817 	if (err)
2818 		return err;
2819 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2820 	if (err)
2821 		return err;
2822 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2823 	if (err)
2824 		return err;
2825 
2826 	/* Tx stats */
2827 	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2828 		   mac_ops->name);
2829 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2830 	if (err)
2831 		return err;
2832 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2833 	if (err)
2834 		return err;
2835 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2836 	if (err)
2837 		return err;
2838 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2839 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2840 	if (err)
2841 		return err;
2842 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2843 	if (err)
2844 		return err;
2845 
2846 	/* Rx stats */
2847 	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2848 	while (stat < mac_ops->rx_stats_cnt) {
2849 		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2850 		if (err)
2851 			return err;
2852 		if (is_rvu_otx2(rvu))
2853 			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2854 				   rx_stat);
2855 		else
2856 			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2857 				   rx_stat);
2858 		stat++;
2859 	}
2860 
2861 	/* Tx stats */
2862 	stat = 0;
2863 	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2864 	while (stat < mac_ops->tx_stats_cnt) {
2865 		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2866 		if (err)
2867 			return err;
2868 
2869 		if (is_rvu_otx2(rvu))
2870 			seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2871 				   tx_stat);
2872 		else
2873 			seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2874 				   tx_stat);
2875 		stat++;
2876 	}
2877 
2878 	return err;
2879 }
2880 
2881 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2882 {
2883 	struct dentry *current_dir;
2884 	char *buf;
2885 
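	/* The parent debugfs dir is named "lmac<N>"; the id follows the
	 * last 'c' in the name.
	 */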
2886 	current_dir = filp->file->f_path.dentry->d_parent;
2887 	buf = strrchr(current_dir->d_name.name, 'c');
2888 	if (!buf)
2889 		return -EINVAL;
2890 
2891 	return kstrtoint(buf + 1, 10, lmac_id);
2892 }
2893 
2894 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
2895 {
2896 	int lmac_id, err;
2897 
2898 	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2899 	if (!err)
2900 		return cgx_print_stats(filp, lmac_id);
2901 
2902 	return err;
2903 }
2904 
2905 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2906 
2907 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2908 {
2909 	struct pci_dev *pdev = NULL;
2910 	void *cgxd = s->private;
2911 	char *bcast, *mcast;
2912 	u16 index, domain;
2913 	u8 dmac[ETH_ALEN];
2914 	struct rvu *rvu;
2915 	u64 cfg, mac;
2916 	int pf;
2917 
2918 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2919 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2920 	if (!rvu)
2921 		return -ENODEV;
2922 
2923 	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
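	/* RVU PFs are expected on PCI domain 2, at bus (pf + 1), slot 0 */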
2924 	domain = 2;
2925 
2926 	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2927 	if (!pdev)
2928 		return 0;
2929 
2930 	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2931 	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2932 	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2933 
2934 	seq_puts(s,
2935 		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
2936 	seq_printf(s, "%s  PF%d  %9s  %9s",
2937 		   dev_name(&pdev->dev), pf, bcast, mcast);
2938 	if (cfg & CGX_DMAC_CAM_ACCEPT)
2939 		seq_printf(s, "%12s\n\n", "UNICAST");
2940 	else
2941 		seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2942 
2943 	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");
2944 
	for (index = 0; index < 32; index++) {
2946 		cfg = cgx_read_dmac_entry(cgxd, index);
2947 		/* Display enabled dmac entries associated with current lmac */
2948 		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2949 		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2950 			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2951 			u64_to_ether_addr(mac, dmac);
2952 			seq_printf(s, "%7d     %pM\n", index, dmac);
2953 		}
2954 	}
2955 
2956 	pci_dev_put(pdev);
2957 	return 0;
2958 }
2959 
2960 static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
2961 {
2962 	int err, lmac_id;
2963 
2964 	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2965 	if (!err)
2966 		return cgx_print_dmac_flt(filp, lmac_id);
2967 
2968 	return err;
2969 }
2970 
2971 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2972 
2973 static void rvu_dbg_cgx_init(struct rvu *rvu)
2974 {
2975 	struct mac_ops *mac_ops;
2976 	unsigned long lmac_bmap;
2977 	int i, lmac_id;
2978 	char dname[20];
2979 	void *cgx;
2980 
2981 	if (!cgx_get_cgxcnt_max())
2982 		return;
2983 
2984 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2985 	if (!mac_ops)
2986 		return;
2987 
2988 	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2989 						   rvu->rvu_dbg.root);
2990 
2991 	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2992 		cgx = rvu_cgx_pdata(i, rvu);
2993 		if (!cgx)
2994 			continue;
2995 		lmac_bmap = cgx_get_lmac_bmap(cgx);
2996 		/* cgx debugfs dir */
2997 		sprintf(dname, "%s%d", mac_ops->name, i);
2998 		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2999 						      rvu->rvu_dbg.cgx_root);
3000 
3001 		for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
3002 			/* lmac debugfs dir */
3003 			sprintf(dname, "lmac%d", lmac_id);
3004 			rvu->rvu_dbg.lmac =
3005 				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
3006 
3007 			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
3008 					    cgx, &rvu_dbg_cgx_stat_fops);
3009 			debugfs_create_file("mac_filter", 0600,
3010 					    rvu->rvu_dbg.lmac, cgx,
3011 					    &rvu_dbg_cgx_dmac_flt_fops);
3012 		}
3013 	}
3014 }
3015 
3016 /* NPC debugfs APIs */
3017 static void rvu_print_npc_mcam_info(struct seq_file *s,
3018 				    u16 pcifunc, int blkaddr)
3019 {
3020 	struct rvu *rvu = s->private;
3021 	int entry_acnt, entry_ecnt;
3022 	int cntr_acnt, cntr_ecnt;
3023 
3024 	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
3025 					  &entry_acnt, &entry_ecnt);
3026 	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
3027 					    &cntr_acnt, &cntr_ecnt);
3028 	if (!entry_acnt && !cntr_acnt)
3029 		return;
3030 
3031 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
3032 		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
3033 			   rvu_get_pf(pcifunc));
3034 	else
3035 		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
3036 			   rvu_get_pf(pcifunc),
3037 			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
3038 
3039 	if (entry_acnt) {
3040 		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
3041 		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
3042 	}
3043 	if (cntr_acnt) {
3044 		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
3045 		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
3046 	}
3047 }
3048 
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unused)
3050 {
3051 	struct rvu *rvu = filp->private;
3052 	int pf, vf, numvfs, blkaddr;
3053 	struct npc_mcam *mcam;
3054 	u16 pcifunc, counters;
3055 	u64 cfg;
3056 
3057 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3058 	if (blkaddr < 0)
3059 		return -ENODEV;
3060 
3061 	mcam = &rvu->hw->mcam;
3062 	counters = rvu->hw->npc_counters;
3063 
3064 	seq_puts(filp, "\nNPC MCAM info:\n");
3065 	/* MCAM keywidth on receive and transmit sides */
3066 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
3067 	cfg = (cfg >> 32) & 0x07;
3068 	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
3069 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
3070 		   "224bits" : "448bits"));
3071 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
3072 	cfg = (cfg >> 32) & 0x07;
3073 	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
3074 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
3075 		   "224bits" : "448bits"));
3076 
3077 	mutex_lock(&mcam->lock);
3078 	/* MCAM entries */
3079 	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
3080 	seq_printf(filp, "\t\t Reserved \t: %d\n",
3081 		   mcam->total_entries - mcam->bmap_entries);
3082 	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
3083 
3084 	/* MCAM counters */
3085 	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
3086 	seq_printf(filp, "\t\t Reserved \t: %d\n",
3087 		   counters - mcam->counters.max);
3088 	seq_printf(filp, "\t\t Available \t: %d\n",
3089 		   rvu_rsrc_free_count(&mcam->counters));
3090 
3091 	if (mcam->bmap_entries == mcam->bmap_fcnt) {
3092 		mutex_unlock(&mcam->lock);
3093 		return 0;
3094 	}
3095 
3096 	seq_puts(filp, "\n\t\t Current allocation\n");
3097 	seq_puts(filp, "\t\t====================\n");
3098 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
3099 		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
3100 		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
3101 
3102 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3103 		numvfs = (cfg >> 12) & 0xFF;
3104 		for (vf = 0; vf < numvfs; vf++) {
3105 			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
3106 			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
3107 		}
3108 	}
3109 
3110 	mutex_unlock(&mcam->lock);
3111 	return 0;
3112 }
3113 
3114 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
3115 
3116 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
3117 					     void *unused)
3118 {
3119 	struct rvu *rvu = filp->private;
3120 	struct npc_mcam *mcam;
3121 	int blkaddr;
3122 
3123 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3124 	if (blkaddr < 0)
3125 		return -ENODEV;
3126 
3127 	mcam = &rvu->hw->mcam;
3128 
3129 	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
3130 	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
3131 		   rvu_read64(rvu, blkaddr,
3132 			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
3133 
3134 	return 0;
3135 }
3136 
3137 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
3138 
3139 #define RVU_DBG_PRINT_MPLS_TTL(pkt, mask)                                     \
3140 do {									      \
3141 	seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt));     \
3142 	seq_printf(s, "mask 0x%lx\n",                                         \
3143 		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask));               \
3144 } while (0)
3145 
3146 #define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask)                               \
3147 do {									      \
3148 	typeof(_pkt) (pkt) = (_pkt);					      \
3149 	typeof(_mask) (mask) = (_mask);                                       \
3150 	seq_printf(s, "%ld %ld %ld\n",                                        \
3151 		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt),                  \
3152 		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt),                  \
3153 		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt));                \
3154 	seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n",                           \
3155 		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask),                 \
3156 		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask),                 \
3157 		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask));               \
3158 } while (0)
3159 
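/* Print every match field set in the rule's 64-bit feature bitmap
 * along with its mask, using a field-specific format (MAC address,
 * IPv4/IPv6 address, port, MPLS LSE, etc.).
 */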
3160 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
3161 					struct rvu_npc_mcam_rule *rule)
3162 {
3163 	u8 bit;
3164 
3165 	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
3166 		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
3167 		switch (bit) {
3168 		case NPC_LXMB:
3169 			if (rule->lxmb == 1)
3170 				seq_puts(s, "\tL2M nibble is set\n");
3171 			else
3172 				seq_puts(s, "\tL2B nibble is set\n");
3173 			break;
3174 		case NPC_DMAC:
3175 			seq_printf(s, "%pM ", rule->packet.dmac);
3176 			seq_printf(s, "mask %pM\n", rule->mask.dmac);
3177 			break;
3178 		case NPC_SMAC:
3179 			seq_printf(s, "%pM ", rule->packet.smac);
3180 			seq_printf(s, "mask %pM\n", rule->mask.smac);
3181 			break;
3182 		case NPC_ETYPE:
3183 			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
3184 			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
3185 			break;
3186 		case NPC_OUTER_VID:
3187 			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
3188 			seq_printf(s, "mask 0x%x\n",
3189 				   ntohs(rule->mask.vlan_tci));
3190 			break;
3191 		case NPC_INNER_VID:
3192 			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
3193 			seq_printf(s, "mask 0x%x\n",
3194 				   ntohs(rule->mask.vlan_itci));
3195 			break;
3196 		case NPC_TOS:
3197 			seq_printf(s, "%d ", rule->packet.tos);
3198 			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
3199 			break;
3200 		case NPC_SIP_IPV4:
3201 			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
3202 			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
3203 			break;
3204 		case NPC_DIP_IPV4:
3205 			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
3206 			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
3207 			break;
3208 		case NPC_SIP_IPV6:
3209 			seq_printf(s, "%pI6 ", rule->packet.ip6src);
3210 			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
3211 			break;
3212 		case NPC_DIP_IPV6:
3213 			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
3214 			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
3215 			break;
3216 		case NPC_IPFRAG_IPV6:
3217 			seq_printf(s, "0x%x ", rule->packet.next_header);
3218 			seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
3219 			break;
3220 		case NPC_IPFRAG_IPV4:
3221 			seq_printf(s, "0x%x ", rule->packet.ip_flag);
3222 			seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
3223 			break;
3224 		case NPC_SPORT_TCP:
3225 		case NPC_SPORT_UDP:
3226 		case NPC_SPORT_SCTP:
3227 			seq_printf(s, "%d ", ntohs(rule->packet.sport));
3228 			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
3229 			break;
3230 		case NPC_DPORT_TCP:
3231 		case NPC_DPORT_UDP:
3232 		case NPC_DPORT_SCTP:
3233 			seq_printf(s, "%d ", ntohs(rule->packet.dport));
3234 			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
3235 			break;
3236 		case NPC_TCP_FLAGS:
3237 			seq_printf(s, "%d ", rule->packet.tcp_flags);
3238 			seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
3239 			break;
3240 		case NPC_IPSEC_SPI:
3241 			seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
3242 			seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
3243 			break;
3244 		case NPC_MPLS1_LBTCBOS:
3245 			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
3246 						   rule->mask.mpls_lse[0]);
3247 			break;
3248 		case NPC_MPLS1_TTL:
3249 			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
3250 					       rule->mask.mpls_lse[0]);
3251 			break;
3252 		case NPC_MPLS2_LBTCBOS:
3253 			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
3254 						   rule->mask.mpls_lse[1]);
3255 			break;
3256 		case NPC_MPLS2_TTL:
3257 			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
3258 					       rule->mask.mpls_lse[1]);
3259 			break;
3260 		case NPC_MPLS3_LBTCBOS:
3261 			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
3262 						   rule->mask.mpls_lse[2]);
3263 			break;
3264 		case NPC_MPLS3_TTL:
3265 			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
3266 					       rule->mask.mpls_lse[2]);
3267 			break;
3268 		case NPC_MPLS4_LBTCBOS:
3269 			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
3270 						   rule->mask.mpls_lse[3]);
3271 			break;
3272 		case NPC_MPLS4_TTL:
3273 			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
3274 					       rule->mask.mpls_lse[3]);
3275 			break;
3276 		case NPC_TYPE_ICMP:
3277 			seq_printf(s, "%d ", rule->packet.icmp_type);
3278 			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
3279 			break;
3280 		case NPC_CODE_ICMP:
3281 			seq_printf(s, "%d ", rule->packet.icmp_code);
3282 			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
3283 			break;
3284 		default:
3285 			seq_puts(s, "\n");
3286 			break;
3287 		}
3288 	}
3289 }
3290 
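/* Decode the rule's NIX RX or TX action opcode into a human-readable
 * string.
 */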
3291 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
3292 					 struct rvu_npc_mcam_rule *rule)
3293 {
3294 	if (is_npc_intf_tx(rule->intf)) {
3295 		switch (rule->tx_action.op) {
3296 		case NIX_TX_ACTIONOP_DROP:
3297 			seq_puts(s, "\taction: Drop\n");
3298 			break;
3299 		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
3300 			seq_puts(s, "\taction: Unicast to default channel\n");
3301 			break;
3302 		case NIX_TX_ACTIONOP_UCAST_CHAN:
3303 			seq_printf(s, "\taction: Unicast to channel %d\n",
3304 				   rule->tx_action.index);
3305 			break;
3306 		case NIX_TX_ACTIONOP_MCAST:
3307 			seq_puts(s, "\taction: Multicast\n");
3308 			break;
3309 		case NIX_TX_ACTIONOP_DROP_VIOL:
3310 			seq_puts(s, "\taction: Lockdown Violation Drop\n");
3311 			break;
3312 		default:
3313 			break;
3314 		}
3315 	} else {
3316 		switch (rule->rx_action.op) {
3317 		case NIX_RX_ACTIONOP_DROP:
3318 			seq_puts(s, "\taction: Drop\n");
3319 			break;
3320 		case NIX_RX_ACTIONOP_UCAST:
3321 			seq_printf(s, "\taction: Direct to queue %d\n",
3322 				   rule->rx_action.index);
3323 			break;
3324 		case NIX_RX_ACTIONOP_RSS:
3325 			seq_puts(s, "\taction: RSS\n");
3326 			break;
3327 		case NIX_RX_ACTIONOP_UCAST_IPSEC:
3328 			seq_puts(s, "\taction: Unicast ipsec\n");
3329 			break;
3330 		case NIX_RX_ACTIONOP_MCAST:
3331 			seq_puts(s, "\taction: Multicast\n");
3332 			break;
3333 		default:
3334 			break;
3335 		}
3336 	}
3337 }
3338 
3339 static const char *rvu_dbg_get_intf_name(int intf)
3340 {
3341 	switch (intf) {
3342 	case NIX_INTFX_RX(0):
3343 		return "NIX0_RX";
3344 	case NIX_INTFX_RX(1):
3345 		return "NIX1_RX";
3346 	case NIX_INTFX_TX(0):
3347 		return "NIX0_TX";
3348 	case NIX_INTFX_TX(1):
3349 		return "NIX1_TX";
3350 	default:
3351 		break;
3352 	}
3353 
3354 	return "unknown";
3355 }
3356 
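/* Back the "mcam_rules" debugfs file: for each software-tracked MCAM
 * rule print the owner PF/VF, direction, interface, entry index,
 * match fields, forward target and channel (RX only), action, enable
 * state and, when a counter is attached, the hit count.
 */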
3357 static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
3358 {
3359 	struct rvu_npc_mcam_rule *iter;
3360 	struct rvu *rvu = s->private;
3361 	struct npc_mcam *mcam;
3362 	int pf, vf = -1;
3363 	bool enabled;
3364 	int blkaddr;
3365 	u16 target;
3366 	u64 hits;
3367 
3368 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3369 	if (blkaddr < 0)
3370 		return 0;
3371 
3372 	mcam = &rvu->hw->mcam;
3373 
3374 	mutex_lock(&mcam->lock);
3375 	list_for_each_entry(iter, &mcam->mcam_rules, list) {
3376 		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
3377 		seq_printf(s, "\n\tInstalled by: PF%d ", pf);
3378 
3379 		if (iter->owner & RVU_PFVF_FUNC_MASK) {
3380 			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
3381 			seq_printf(s, "VF%d", vf);
3382 		}
3383 		seq_puts(s, "\n");
3384 
3385 		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
3386 						    "RX" : "TX");
3387 		seq_printf(s, "\tinterface: %s\n",
3388 			   rvu_dbg_get_intf_name(iter->intf));
3389 		seq_printf(s, "\tmcam entry: %d\n", iter->entry);
3390 
3391 		rvu_dbg_npc_mcam_show_flows(s, iter);
3392 		if (is_npc_intf_rx(iter->intf)) {
3393 			target = iter->rx_action.pf_func;
3394 			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
3395 			seq_printf(s, "\tForward to: PF%d ", pf);
3396 
3397 			if (target & RVU_PFVF_FUNC_MASK) {
3398 				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
3399 				seq_printf(s, "VF%d", vf);
3400 			}
3401 			seq_puts(s, "\n");
3402 			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
3403 			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
3404 		}
3405 
3406 		rvu_dbg_npc_mcam_show_action(s, iter);
3407 
3408 		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
3409 		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
3410 
3411 		if (!iter->has_cntr)
3412 			continue;
3413 		seq_printf(s, "\tcounter: %d\n", iter->cntr);
3414 
3415 		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
3416 		seq_printf(s, "\thits: %lld\n", hits);
3417 	}
3418 	mutex_unlock(&mcam->lock);
3419 
3420 	return 0;
3421 }
3422 
3423 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
3424 
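/* Dump the exact match tables: the hashed MEM table is printed as a
 * depth x ways grid with "nil" for empty slots, followed by the
 * overflow CAM table. Each per-way list is walked in step with the
 * row index, so the lists are expected to be sorted by index.
 */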
3425 static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
3426 {
3427 	struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
3428 	struct npc_exact_table_entry *cam_entry;
3429 	struct npc_exact_table *table;
3430 	struct rvu *rvu = s->private;
3431 	int i, j;
3433 	u8 bitmap = 0;
3434 
3435 	table = rvu->hw->table;
3436 
3437 	mutex_lock(&table->lock);
3438 
3439 	/* Check if there is at least one entry in mem table */
3440 	if (!table->mem_tbl_entry_cnt)
3441 		goto dump_cam_table;
3442 
3443 	/* Print table headers */
3444 	seq_puts(s, "\n\tExact Match MEM Table\n");
3445 	seq_puts(s, "Index\t");
3446 
3447 	for (i = 0; i < table->mem_table.ways; i++) {
3448 		mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
3449 							struct npc_exact_table_entry, list);
3450 
3451 		seq_printf(s, "Way-%d\t\t\t\t\t", i);
3452 	}
3453 
3454 	seq_puts(s, "\n");
3455 	for (i = 0; i < table->mem_table.ways; i++)
3456 		seq_puts(s, "\tChan  MAC                     \t");
3457 
3458 	seq_puts(s, "\n\n");
3459 
3460 	/* Print mem table entries */
3461 	for (i = 0; i < table->mem_table.depth; i++) {
3462 		bitmap = 0;
3463 		for (j = 0; j < table->mem_table.ways; j++) {
3464 			if (!mem_entry[j])
3465 				continue;
3466 
3467 			if (mem_entry[j]->index != i)
3468 				continue;
3469 
3470 			bitmap |= BIT(j);
3471 		}
3472 
3473 		/* No valid entries */
3474 		if (!bitmap)
3475 			continue;
3476 
3477 		seq_printf(s, "%d\t", i);
3478 		for (j = 0; j < table->mem_table.ways; j++) {
3479 			if (!(bitmap & BIT(j))) {
3480 				seq_puts(s, "nil\t\t\t\t\t");
3481 				continue;
3482 			}
3483 
3484 			seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
3485 				   mem_entry[j]->mac);
3486 			mem_entry[j] = list_next_entry(mem_entry[j], list);
3487 		}
3488 		seq_puts(s, "\n");
3489 	}
3490 
3491 dump_cam_table:
3492 
3493 	if (!table->cam_tbl_entry_cnt)
3494 		goto done;
3495 
3496 	seq_puts(s, "\n\tExact Match CAM Table\n");
3497 	seq_puts(s, "index\tchan\tMAC\n");
3498 
3499 	/* Traverse cam table entries */
3500 	list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
3501 		seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
3502 			   cam_entry->mac);
3503 	}
3504 
3505 done:
3506 	mutex_unlock(&table->lock);
3507 	return 0;
3508 }
3509 
3510 RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
3511 
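/* Summarize exact match state: feature enable flag, per-drop-rule MAC
 * filter counts and promiscuous mode status, and the MEM/CAM table
 * geometry.
 */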
3512 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
3513 {
3514 	struct npc_exact_table *table;
3515 	struct rvu *rvu = s->private;
3516 	int i;
3517 
3518 	table = rvu->hw->table;
3519 
3520 	seq_puts(s, "\n\tExact Table Info\n");
3521 	seq_printf(s, "Exact Match Feature : %s\n",
3522 		   rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disabled");
3523 	if (!rvu->hw->cap.npc_exact_match_enabled)
3524 		return 0;
3525 
3526 	seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
3527 	for (i = 0; i < table->num_drop_rules; i++)
3528 		seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
3529 
3530 	seq_puts(s, "\nMCAM Index\tPromisc Mode Status\n");
3531 	for (i = 0; i < table->num_drop_rules; i++)
3532 		seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
3533 
3534 	seq_puts(s, "\n\tMEM Table Info\n");
3535 	seq_printf(s, "Ways : %d\n", table->mem_table.ways);
3536 	seq_printf(s, "Depth : %d\n", table->mem_table.depth);
3537 	seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
3538 	seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
3539 	seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
3540 
3541 	seq_puts(s, "\n\tCAM Table Info\n");
3542 	seq_printf(s, "Depth : %d\n", table->cam_table.depth);
3543 
3544 	return 0;
3545 }
3546 
3547 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
3548 
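/* For each exact match drop rule print the owning PF/VF, its
 * match-stat hit count, the channel matched in keyword 0 and whether
 * the rule is enabled.
 */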
3549 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3550 {
3551 	struct npc_exact_table *table;
3552 	struct rvu *rvu = s->private;
3553 	struct npc_key_field *field;
3554 	u16 chan, pcifunc;
3555 	int blkaddr, i;
3556 	u64 cfg, cam1;
3557 	char *str;
3558 
3559 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

3560 	table = rvu->hw->table;
3561 
3562 	field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3563 
3564 	seq_puts(s, "\n\tExact match drop rule hit status\n");
3565 	seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3566 
3567 	for (i = 0; i < table->num_drop_rules; i++) {
3568 		pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3569 		cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3570 
3571 		/* Channel is always in keyword 0 */
3572 		cam1 = rvu_read64(rvu, blkaddr,
3573 				  NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3574 		chan = field->kw_mask[0] & cam1;
3575 
3576 		str = (cfg & 1) ? "enabled" : "disabled";
3577 
3578 		seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
3579 			   rvu_read64(rvu, blkaddr,
3580 				      NPC_AF_MATCH_STATX(table->counter_idx[i])),
3581 			   chan, str);
3582 	}
3583 
3584 	return 0;
3585 }
3586 
3587 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
3588 
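/* Create the NPC debugfs files. With debugfs mounted at its default
 * location these appear under /sys/kernel/debug/<root>/npc, e.g.
 * "cat /sys/kernel/debug/octeontx2/npc/mcam_rules" on OcteonTx2
 * silicon; the root directory name comes from rvu_get_dbg_dir_name().
 */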
3589 static void rvu_dbg_npc_init(struct rvu *rvu)
3590 {
3591 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
3592 
3593 	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
3594 			    &rvu_dbg_npc_mcam_info_fops);
3595 	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
3596 			    &rvu_dbg_npc_mcam_rules_fops);
3597 
3598 	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
3599 			    &rvu_dbg_npc_rx_miss_act_fops);
3600 
3601 	if (!rvu->hw->cap.npc_exact_match_enabled)
3602 		return;
3603 
3604 	debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
3605 			    &rvu_dbg_npc_exact_entries_fops);
3606 
3607 	debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
3608 			    &rvu_dbg_npc_exact_info_fops);
3609 
3610 	debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
3611 			    &rvu_dbg_npc_exact_drop_cnt_fops);
3613 }
3614 
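/* Shared engine-status dump: CPT_AF_CONSTANTS1 gives the SE/IE/AE
 * engine counts, and engines are numbered with SEs first, then IEs,
 * then AEs. Per-engine busy and free bits from CPT_AF_EXEX_STS are
 * gathered into 64-bit bitmaps.
 */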
3615 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
3616 {
3617 	struct cpt_ctx *ctx = filp->private;
3618 	u64 busy_sts = 0, free_sts = 0;
3619 	u32 e_min = 0, e_max = 0, e, i;
3620 	u16 max_ses, max_ies, max_aes;
3621 	struct rvu *rvu = ctx->rvu;
3622 	int blkaddr = ctx->blkaddr;
3623 	u64 reg;
3624 
3625 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3626 	max_ses = reg & 0xffff;
3627 	max_ies = (reg >> 16) & 0xffff;
3628 	max_aes = (reg >> 32) & 0xffff;
3629 
3630 	switch (eng_type) {
3631 	case CPT_AE_TYPE:
3632 		e_min = max_ses + max_ies;
3633 		e_max = max_ses + max_ies + max_aes;
3634 		break;
3635 	case CPT_SE_TYPE:
3636 		e_min = 0;
3637 		e_max = max_ses;
3638 		break;
3639 	case CPT_IE_TYPE:
3640 		e_min = max_ses;
3641 		e_max = max_ses + max_ies;
3642 		break;
3643 	default:
3644 		return -EINVAL;
3645 	}
3646 
3647 	for (e = e_min, i = 0; e < e_max; e++, i++) {
3648 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
3649 		if (reg & 0x1)
3650 			busy_sts |= 1ULL << i;
3651 
3652 		if (reg & 0x2)
3653 			free_sts |= 1ULL << i;
3654 	}
3655 	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
3656 	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
3657 
3658 	return 0;
3659 }
3660 
3661 static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
3662 {
3663 	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
3664 }
3665 
3666 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3667 
3668 static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
3669 {
3670 	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
3671 }
3672 
3673 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3674 
3675 static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
3676 {
3677 	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
3678 }
3679 
3680 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
3681 
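/* Print the group enable, active info and control registers of every
 * CPT engine.
 */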
3682 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
3683 {
3684 	struct cpt_ctx *ctx = filp->private;
3685 	u16 max_ses, max_ies, max_aes;
3686 	struct rvu *rvu = ctx->rvu;
3687 	int blkaddr = ctx->blkaddr;
3688 	u32 e_max, e;
3689 	u64 reg;
3690 
3691 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3692 	max_ses = reg & 0xffff;
3693 	max_ies = (reg >> 16) & 0xffff;
3694 	max_aes = (reg >> 32) & 0xffff;
3695 
3696 	e_max = max_ses + max_ies + max_aes;
3697 
3698 	seq_puts(filp, "===========================================\n");
3699 	for (e = 0; e < e_max; e++) {
3700 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
3701 		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
3702 			   reg & 0xff);
3703 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
3704 		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
3705 			   reg);
3706 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
3707 		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
3708 			   reg);
3709 		seq_puts(filp, "===========================================\n");
3710 	}
3711 	return 0;
3712 }
3713 
3714 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
3715 
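/* Print the CTL, CTL2, PTR_CTL and AF config registers of every CPT
 * LF in the block.
 */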
3716 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
3717 {
3718 	struct cpt_ctx *ctx = filp->private;
3719 	int blkaddr = ctx->blkaddr;
3720 	struct rvu *rvu = ctx->rvu;
3721 	struct rvu_block *block;
3722 	struct rvu_hwinfo *hw;
3723 	u64 reg;
3724 	u32 lf;
3725 
3726 	hw = rvu->hw;
3727 	block = &hw->block[blkaddr];
3728 	if (!block->lf.bmap)
3729 		return -ENODEV;
3730 
3731 	seq_puts(filp, "===========================================\n");
3732 	for (lf = 0; lf < block->lf.max; lf++) {
3733 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
3734 		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
3735 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
3736 		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
3737 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
3738 		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
3739 		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
3740 				(lf << block->lfshift));
3741 		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
3742 		seq_puts(filp, "===========================================\n");
3743 	}
3744 	return 0;
3745 }
3746 
3747 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
3748 
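/* Print the CPT fault, poison, RVU/RAS interrupt and execution error
 * info registers.
 */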
3749 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
3750 {
3751 	struct cpt_ctx *ctx = filp->private;
3752 	struct rvu *rvu = ctx->rvu;
3753 	int blkaddr = ctx->blkaddr;
3754 	u64 reg0, reg1;
3755 
3756 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
3757 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
3758 	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
3759 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
3760 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
3761 	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
3762 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
3763 	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
3764 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
3765 	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
3766 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
3767 	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
3768 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
3769 	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);
3770 
3771 	return 0;
3772 }
3773 
3774 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
3775 
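/* Print the CPT performance counters: instruction requests and
 * latency, NCB read requests and latency, UC-fill reads, active
 * cycles and clock count.
 */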
3776 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
3777 {
3778 	struct cpt_ctx *ctx = filp->private;
3779 	struct rvu *rvu = ctx->rvu;
3780 	int blkaddr = ctx->blkaddr;
3781 	u64 reg;
3782 
3783 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
3784 	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
3785 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
3786 	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
3787 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
3788 	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
3789 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
3790 	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
3791 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
3792 	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
3793 	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
3794 	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
3795 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
3796 	seq_printf(filp, "CPT clock count pc         %llu\n", reg);
3797 
3798 	return 0;
3799 }
3800 
3801 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
3802 
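/* Create the per-block CPT debugfs directory ("cpt" for CPT0, "cpt1"
 * for CPT1) and populate it with the engine, LF, error and counter
 * files.
 */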
3803 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
3804 {
3805 	struct cpt_ctx *ctx;
3806 
3807 	if (!is_block_implemented(rvu->hw, blkaddr))
3808 		return;
3809 
3810 	if (blkaddr == BLKADDR_CPT0) {
3811 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
3812 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
3813 		ctx->blkaddr = BLKADDR_CPT0;
3814 		ctx->rvu = rvu;
3815 	} else {
3816 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
3817 						      rvu->rvu_dbg.root);
3818 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
3819 		ctx->blkaddr = BLKADDR_CPT1;
3820 		ctx->rvu = rvu;
3821 	}
3822 
3823 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3824 			    &rvu_dbg_cpt_pc_fops);
3825 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3826 			    &rvu_dbg_cpt_ae_sts_fops);
3827 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3828 			    &rvu_dbg_cpt_se_sts_fops);
3829 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3830 			    &rvu_dbg_cpt_ie_sts_fops);
3831 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3832 			    &rvu_dbg_cpt_engines_info_fops);
3833 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3834 			    &rvu_dbg_cpt_lfs_info_fops);
3835 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3836 			    &rvu_dbg_cpt_err_info_fops);
3837 }
3838 
3839 static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
3840 {
3841 	if (is_rvu_otx2(rvu))
3842 		return "octeontx2";
3843 
3844 	return "cn10k";
3845 }
3846 
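/* Driver-wide debugfs entry point: create the root directory
 * ("octeontx2" or "cn10k" depending on the silicon), the common
 * files, and the per-block subdirectories set up above.
 */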
3847 void rvu_dbg_init(struct rvu *rvu)
3848 {
3849 	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
3850 
3851 	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
3852 			    &rvu_dbg_rsrc_status_fops);
3853 
3854 	if (!is_rvu_otx2(rvu))
3855 		debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
3856 				    rvu, &rvu_dbg_lmtst_map_table_fops);
3857 
3858 	if (!cgx_get_cgxcnt_max())
3859 		goto create;
3860 
3861 	if (is_rvu_otx2(rvu))
3862 		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
3863 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3864 	else
3865 		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
3866 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3867 
3868 create:
3869 	rvu_dbg_npa_init(rvu);
3870 	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
3872 	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
3873 	rvu_dbg_cgx_init(rvu);
3874 	rvu_dbg_npc_init(rvu);
3875 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
3876 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
3877 	rvu_dbg_mcs_init(rvu);
3878 }
3879 
3880 void rvu_dbg_exit(struct rvu *rvu)
3881 {
3882 	debugfs_remove_recursive(rvu->rvu_dbg.root);
3883 }
3884 
3885 #endif /* CONFIG_DEBUG_FS */
3886