xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7 
8 #ifdef CONFIG_DEBUG_FS
9 
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 #include "rvu_npc_hash.h"
22 #include "mcs.h"
23 
24 #include "cn20k/debugfs.h"
25 
26 #define DEBUGFS_DIR_NAME "octeontx2"
27 
/* Generic statistic slot indices. The CGX RX and TX stat string tables
 * below are both indexed with these, so one enum covers both directions
 * even though the two tables use different subsets of the slots.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
49 
/* Human-readable labels for CGX RX hardware counters, printed one per
 * counter by the CGX stats debugfs dump. Index = counter number.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
65 
/* Human-readable labels for CGX TX hardware counters. Index = counter
 * number as read from the TX stat registers.
 */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
86 
87 static char *rpm_rx_stats_fields[] = {
88 	"Octets of received packets",
89 	"Octets of received packets with out error",
90 	"Received packets with alignment errors",
91 	"Control/PAUSE packets received",
92 	"Packets received with Frame too long Errors",
93 	"Packets received with a1nrange length Errors",
94 	"Received packets",
95 	"Packets received with FrameCheckSequenceErrors",
96 	"Packets received with VLAN header",
97 	"Error packets",
98 	"Packets received with unicast DMAC",
99 	"Packets received with multicast DMAC",
100 	"Packets received with broadcast DMAC",
101 	"Dropped packets",
102 	"Total frames received on interface",
103 	"Packets received with an octet count < 64",
104 	"Packets received with an octet count == 64",
105 	"Packets received with an octet count of 65-127",
106 	"Packets received with an octet count of 128-255",
107 	"Packets received with an octet count of 256-511",
108 	"Packets received with an octet count of 512-1023",
109 	"Packets received with an octet count of 1024-1518",
110 	"Packets received with an octet count of > 1518",
111 	"Oversized Packets",
112 	"Jabber Packets",
113 	"Fragmented Packets",
114 	"CBFC(class based flow control) pause frames received for class 0",
115 	"CBFC pause frames received for class 1",
116 	"CBFC pause frames received for class 2",
117 	"CBFC pause frames received for class 3",
118 	"CBFC pause frames received for class 4",
119 	"CBFC pause frames received for class 5",
120 	"CBFC pause frames received for class 6",
121 	"CBFC pause frames received for class 7",
122 	"CBFC pause frames received for class 8",
123 	"CBFC pause frames received for class 9",
124 	"CBFC pause frames received for class 10",
125 	"CBFC pause frames received for class 11",
126 	"CBFC pause frames received for class 12",
127 	"CBFC pause frames received for class 13",
128 	"CBFC pause frames received for class 14",
129 	"CBFC pause frames received for class 15",
130 	"MAC control packets received",
131 };
132 
/* Human-readable labels for RPM TX hardware counters, printed in
 * register order by the RPM stats debugfs dump.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
169 
/* CPT engine types: AE/SE/IE presumably stand for asymmetric, symmetric
 * and IPsec engines (NOTE(review): confirm against CPT HW docs).
 * Values start at 1; 0 is not a valid engine type.
 */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
175 
/* When a debugfs file has no read or write handler, callers pass the
 * literal token NULL to the FOPS macros below; '##' token pasting then
 * expands to rvu_dbg_NULL / rvu_dbg_open_NULL, i.e. a NULL fops slot.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Declare a seq_file based file_operations named rvu_dbg_<name>_fops,
 * along with its single_open() helper wired to rvu_dbg_<read_op>.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Declare raw (non seq_file) file_operations for handlers that do their
 * own buffering and copy_to_user().
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
200 
201 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
202 
203 static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
204 {
205 	struct mcs *mcs = filp->private;
206 	struct mcs_port_stats stats;
207 	int lmac;
208 
209 	seq_puts(filp, "\n port stats\n");
210 	mutex_lock(&mcs->stats_lock);
211 	for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
212 		mcs_get_port_stats(mcs, &stats, lmac, dir);
213 		seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
214 		seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
215 
216 		if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
217 			seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
218 				   stats.preempt_err_cnt);
219 		if (dir == MCS_TX)
220 			seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
221 				   stats.sectag_insert_err_cnt);
222 	}
223 	mutex_unlock(&mcs->stats_lock);
224 	return 0;
225 }
226 
/* debugfs read handler: RX-direction wrapper over the common port dump. */
static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
233 
/* debugfs read handler: TX-direction wrapper over the common port dump. */
static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
240 
241 static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
242 {
243 	struct mcs *mcs = filp->private;
244 	struct mcs_sa_stats stats;
245 	struct rsrc_bmap *map;
246 	int sa_id;
247 
248 	if (dir == MCS_TX) {
249 		map = &mcs->tx.sa;
250 		mutex_lock(&mcs->stats_lock);
251 		for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
252 			seq_puts(filp, "\n TX SA stats\n");
253 			mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
254 			seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
255 				   stats.pkt_encrypt_cnt);
256 
257 			seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
258 				   stats.pkt_protected_cnt);
259 		}
260 		mutex_unlock(&mcs->stats_lock);
261 		return 0;
262 	}
263 
264 	/* RX stats */
265 	map = &mcs->rx.sa;
266 	mutex_lock(&mcs->stats_lock);
267 	for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
268 		seq_puts(filp, "\n RX SA stats\n");
269 		mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
270 		seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
271 		seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
272 		seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
273 		seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
274 		seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
275 	}
276 	mutex_unlock(&mcs->stats_lock);
277 	return 0;
278 }
279 
/* debugfs read handler: RX-direction wrapper over the common SA dump. */
static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
286 
/* debugfs read handler: TX-direction wrapper over the common SA dump. */
static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
293 
294 static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
295 {
296 	struct mcs *mcs = filp->private;
297 	struct mcs_sc_stats stats;
298 	struct rsrc_bmap *map;
299 	int sc_id;
300 
301 	map = &mcs->tx.sc;
302 	seq_puts(filp, "\n SC stats\n");
303 
304 	mutex_lock(&mcs->stats_lock);
305 	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
306 		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
307 		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
308 		seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
309 		seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
310 
311 		if (mcs->hw->mcs_blks == 1) {
312 			seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
313 				   stats.octet_encrypt_cnt);
314 			seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
315 				   stats.octet_protected_cnt);
316 		}
317 	}
318 	mutex_unlock(&mcs->stats_lock);
319 	return 0;
320 }
321 
322 RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
323 
/* Dump per-SC (secure channel) RX counters. Which counters exist
 * depends on silicon: delay/ok counts on multi-block parts, octet
 * counts on single-block parts.
 */
static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->rx.sc;
	seq_puts(filp, "\n SC stats\n");

	/* Hold stats_lock across the whole walk so per-SC reads are
	 * consistent with each other.
	 */
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
		seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
		seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);

		if (mcs->hw->mcs_blks > 1) {
			seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
			seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
		}
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
				   stats.octet_decrypt_cnt);
			seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
				   stats.octet_validate_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
360 
361 static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
362 {
363 	struct mcs *mcs = filp->private;
364 	struct mcs_flowid_stats stats;
365 	struct rsrc_bmap *map;
366 	int flow_id;
367 
368 	seq_puts(filp, "\n Flowid stats\n");
369 
370 	if (dir == MCS_RX)
371 		map = &mcs->rx.flow_ids;
372 	else
373 		map = &mcs->tx.flow_ids;
374 
375 	mutex_lock(&mcs->stats_lock);
376 	for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
377 		mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
378 		seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
379 	}
380 	mutex_unlock(&mcs->stats_lock);
381 	return 0;
382 }
383 
/* debugfs read handler: TX-direction wrapper over the flow-id dump. */
static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
390 
/* debugfs read handler: RX-direction wrapper over the flow-id dump. */
static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
397 
398 static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
399 {
400 	struct mcs *mcs = filp->private;
401 	struct mcs_secy_stats stats;
402 	struct rsrc_bmap *map;
403 	int secy_id;
404 
405 	map = &mcs->tx.secy;
406 	seq_puts(filp, "\n MCS TX secy stats\n");
407 
408 	mutex_lock(&mcs->stats_lock);
409 	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
410 		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
411 		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
412 		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
413 			   stats.ctl_pkt_bcast_cnt);
414 		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
415 			   stats.ctl_pkt_mcast_cnt);
416 		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
417 			   stats.ctl_pkt_ucast_cnt);
418 		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
419 		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
420 			   stats.unctl_pkt_bcast_cnt);
421 		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
422 			   stats.unctl_pkt_mcast_cnt);
423 		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
424 			   stats.unctl_pkt_ucast_cnt);
425 		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
426 		seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
427 			   stats.octet_encrypted_cnt);
428 		seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
429 			   stats.octet_protected_cnt);
430 		seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
431 			   stats.pkt_noactivesa_cnt);
432 		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
433 		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
434 	}
435 	mutex_unlock(&mcs->stats_lock);
436 	return 0;
437 }
438 
439 RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
440 
441 static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
442 {
443 	struct mcs *mcs = filp->private;
444 	struct mcs_secy_stats stats;
445 	struct rsrc_bmap *map;
446 	int secy_id;
447 
448 	map = &mcs->rx.secy;
449 	seq_puts(filp, "\n MCS secy stats\n");
450 
451 	mutex_lock(&mcs->stats_lock);
452 	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
453 		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
454 		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
455 		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
456 			   stats.ctl_pkt_bcast_cnt);
457 		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
458 			   stats.ctl_pkt_mcast_cnt);
459 		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
460 			   stats.ctl_pkt_ucast_cnt);
461 		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
462 		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
463 			   stats.unctl_pkt_bcast_cnt);
464 		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
465 			   stats.unctl_pkt_mcast_cnt);
466 		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
467 			   stats.unctl_pkt_ucast_cnt);
468 		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
469 		seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
470 			   stats.octet_decrypted_cnt);
471 		seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
472 			   stats.octet_validated_cnt);
473 		seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
474 			   stats.pkt_port_disabled_cnt);
475 		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
476 		seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
477 			   stats.pkt_nosa_cnt);
478 		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
479 			   stats.pkt_nosaerror_cnt);
480 		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
481 			   stats.pkt_tagged_ctl_cnt);
482 		seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
483 		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
484 		if (mcs->hw->mcs_blks > 1)
485 			seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
486 				   stats.pkt_notag_cnt);
487 	}
488 	mutex_unlock(&mcs->stats_lock);
489 	return 0;
490 }
491 
492 RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
493 
494 static void rvu_dbg_mcs_init(struct rvu *rvu)
495 {
496 	struct mcs *mcs;
497 	char dname[10];
498 	int i;
499 
500 	if (!rvu->mcs_blk_cnt)
501 		return;
502 
503 	rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
504 
505 	for (i = 0; i < rvu->mcs_blk_cnt; i++) {
506 		mcs = mcs_get_pdata(i);
507 
508 		sprintf(dname, "mcs%d", i);
509 		rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
510 						      rvu->rvu_dbg.mcs_root);
511 
512 		rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
513 
514 		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
515 				    &rvu_dbg_mcs_rx_flowid_stats_fops);
516 
517 		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
518 				    &rvu_dbg_mcs_rx_secy_stats_fops);
519 
520 		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
521 				    &rvu_dbg_mcs_rx_sc_stats_fops);
522 
523 		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
524 				    &rvu_dbg_mcs_rx_sa_stats_fops);
525 
526 		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
527 				    &rvu_dbg_mcs_rx_port_stats_fops);
528 
529 		rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
530 
531 		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
532 				    &rvu_dbg_mcs_tx_flowid_stats_fops);
533 
534 		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
535 				    &rvu_dbg_mcs_tx_secy_stats_fops);
536 
537 		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
538 				    &rvu_dbg_mcs_tx_sc_stats_fops);
539 
540 		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
541 				    &rvu_dbg_mcs_tx_sa_stats_fops);
542 
543 		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
544 				    &rvu_dbg_mcs_tx_port_stats_fops);
545 	}
546 }
547 
548 #define LMT_MAPTBL_ENTRY_SIZE 16
549 /* Dump LMTST map table */
550 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
551 					       char __user *buffer,
552 					       size_t count, loff_t *ppos)
553 {
554 	struct rvu *rvu = filp->private_data;
555 	u64 lmt_addr, val, tbl_base;
556 	int pf, vf, num_vfs, hw_vfs;
557 	void __iomem *lmt_map_base;
558 	int apr_pfs, apr_vfs;
559 	int buf_size = 10240;
560 	size_t off = 0;
561 	int index = 0;
562 	char *buf;
563 	int ret;
564 
565 	/* don't allow partial reads */
566 	if (*ppos != 0)
567 		return 0;
568 
569 	buf = kzalloc(buf_size, GFP_KERNEL);
570 	if (!buf)
571 		return -ENOMEM;
572 
573 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
574 	val  = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
575 	apr_vfs = 1 << (val & 0xF);
576 	apr_pfs = 1 << ((val >> 4) & 0x7);
577 
578 	lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
579 				  LMT_MAPTBL_ENTRY_SIZE);
580 	if (!lmt_map_base) {
581 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
582 		kfree(buf);
583 		return false;
584 	}
585 
586 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
587 			  "\n\t\t\t\t\tLmtst Map Table Entries");
588 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
589 			  "\n\t\t\t\t\t=======================");
590 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
591 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
592 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
593 			  "Lmtline Base (word 0)\t\t");
594 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
595 			  "Lmt Map Entry (word 1)");
596 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
597 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
598 		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
599 				    pf);
600 
601 		index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
602 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
603 				 (tbl_base + index));
604 		lmt_addr = readq(lmt_map_base + index);
605 		off += scnprintf(&buf[off], buf_size - 1 - off,
606 				 " 0x%016llx\t\t", lmt_addr);
607 		index += 8;
608 		val = readq(lmt_map_base + index);
609 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
610 				 val);
611 		/* Reading num of VFs per PF */
612 		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
613 		for (vf = 0; vf < num_vfs; vf++) {
614 			index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
615 				((vf + 1)  * LMT_MAPTBL_ENTRY_SIZE);
616 			off += scnprintf(&buf[off], buf_size - 1 - off,
617 					    "PF%d:VF%d  \t\t", pf, vf);
618 			off += scnprintf(&buf[off], buf_size - 1 - off,
619 					 " 0x%llx\t\t", (tbl_base + index));
620 			lmt_addr = readq(lmt_map_base + index);
621 			off += scnprintf(&buf[off], buf_size - 1 - off,
622 					 " 0x%016llx\t\t", lmt_addr);
623 			index += 8;
624 			val = readq(lmt_map_base + index);
625 			off += scnprintf(&buf[off], buf_size - 1 - off,
626 					 " 0x%016llx\n", val);
627 		}
628 	}
629 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
630 
631 	ret = min(off, count);
632 	if (copy_to_user(buffer, buf, ret))
633 		ret = -EFAULT;
634 	kfree(buf);
635 
636 	iounmap(lmt_map_base);
637 	if (ret < 0)
638 		return ret;
639 
640 	*ppos = ret;
641 	return ret;
642 }
643 
644 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
645 
646 static void get_lf_str_list(const struct rvu_block *block, int pcifunc,
647 			    char *lfs)
648 {
649 	int lf = 0, seq = 0, len = 0, prev_lf = block->lf.max;
650 
651 	for_each_set_bit(lf, block->lf.bmap, block->lf.max) {
652 		if (lf >= block->lf.max)
653 			break;
654 
655 		if (block->fn_map[lf] != pcifunc)
656 			continue;
657 
658 		if (lf == prev_lf + 1) {
659 			prev_lf = lf;
660 			seq = 1;
661 			continue;
662 		}
663 
664 		if (seq)
665 			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
666 		else
667 			len += (len ? sprintf(lfs + len, ",%d", lf) :
668 				      sprintf(lfs + len, "%d", lf));
669 
670 		prev_lf = lf;
671 		seq = 0;
672 	}
673 
674 	if (seq)
675 		len += sprintf(lfs + len, "-%d", prev_lf);
676 
677 	lfs[len] = '\0';
678 }
679 
680 static int get_max_column_width(struct rvu *rvu)
681 {
682 	int index, pf, vf, lf_str_size = 12, buf_size = 256;
683 	struct rvu_block block;
684 	u16 pcifunc;
685 	char *buf;
686 
687 	buf = kzalloc(buf_size, GFP_KERNEL);
688 	if (!buf)
689 		return -ENOMEM;
690 
691 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
692 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
693 			pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
694 			if (!pcifunc)
695 				continue;
696 
697 			for (index = 0; index < BLK_COUNT; index++) {
698 				block = rvu->hw->block[index];
699 				if (!strlen(block.name))
700 					continue;
701 
702 				get_lf_str_list(&block, pcifunc, buf);
703 				if (lf_str_size <= strlen(buf))
704 					lf_str_size = strlen(buf) + 1;
705 			}
706 		}
707 	}
708 
709 	kfree(buf);
710 	return lf_str_size;
711 }
712 
/* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
					  char __user *buffer,
					  size_t count, loff_t *ppos)
{
	int index, off = 0, flag = 0, len = 0, i = 0;
	struct rvu *rvu = filp->private_data;
	int bytes_not_copied = 0;
	struct rvu_block block;
	int pf, vf, pcifunc;
	int buf_size = 2048;
	int lf_str_size;
	char *lfs;
	char *buf;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Get the maximum width of a column */
	lf_str_size = get_max_column_width(rvu);

	lfs = kzalloc(lf_str_size, GFP_KERNEL);
	if (!lfs) {
		kfree(buf);
		return -ENOMEM;
	}
	/* Header row: "pcifunc" column followed by one column per
	 * implemented RVU block.
	 */
	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
			  "pcifunc");
	for (index = 0; index < BLK_COUNT; index++)
		if (strlen(rvu->hw->block[index].name)) {
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 "%-*s", lf_str_size,
					 rvu->hw->block[index].name);
		}

	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	/* NOTE(review): the destination 'buffer + (i * off)' assumes every
	 * row copied so far had the same length 'off' — looks fragile;
	 * verify against intended userspace layout.
	 */
	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
	if (bytes_not_copied)
		goto out;

	i++;
	*ppos += off;
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		/* vf == 0 is the PF row itself; VF n is row vf == n + 1 */
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			off = 0;
			flag = 0;
			pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
			if (!pcifunc)
				continue;

			if (vf) {
				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			} else {
				sprintf(lfs, "PF%d", pf);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			}

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;
				len = 0;
				lfs[len] = '\0';
				get_lf_str_list(&block, pcifunc, lfs);
				/* Only emit the row if at least one block
				 * has LFs attached to this pcifunc.
				 */
				if (strlen(lfs))
					flag = 1;

				off += scnprintf(&buf[off], buf_size - 1 - off,
						 "%-*s", lf_str_size, lfs);
			}
			if (flag) {
				off +=	scnprintf(&buf[off],
						  buf_size - 1 - off, "\n");
				bytes_not_copied = copy_to_user(buffer +
								(i * off),
								buf, off);
				if (bytes_not_copied)
					goto out;

				i++;
				*ppos += off;
			}
		}
	}

out:
	kfree(lfs);
	kfree(buf);
	if (bytes_not_copied)
		return -EFAULT;

	return *ppos;
}

RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
818 
819 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
820 {
821 	char cgx[10], lmac[10], chan[10];
822 	struct rvu *rvu = filp->private;
823 	struct pci_dev *pdev = NULL;
824 	struct mac_ops *mac_ops;
825 	struct rvu_pfvf *pfvf;
826 	int pf, domain, blkid;
827 	u8 cgx_id, lmac_id;
828 	u16 pcifunc;
829 
830 	domain = 2;
831 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
832 	/* There can be no CGX devices at all */
833 	if (!mac_ops)
834 		return 0;
835 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\tCHAN\n",
836 		   mac_ops->name);
837 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
838 		if (!is_pf_cgxmapped(rvu, pf))
839 			continue;
840 
841 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
842 		if (!pdev)
843 			continue;
844 
845 		cgx[0] = 0;
846 		lmac[0] = 0;
847 		pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
848 		pfvf = rvu_get_pfvf(rvu, pcifunc);
849 
850 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
851 			blkid = 0;
852 		else
853 			blkid = 1;
854 
855 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
856 				    &lmac_id);
857 		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
858 		sprintf(lmac, "LMAC%d", lmac_id);
859 		sprintf(chan, "%d",
860 			rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0));
861 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\t%s\n",
862 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac,
863 			   chan);
864 
865 		pci_dev_put(pdev);
866 	}
867 	return 0;
868 }
869 
870 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
871 
/* Dump firmware-provided data: PTP clock info, SDP channel info, and
 * the per-PF / per-VF MAC address tables (zero entries are skipped).
 * Returns -EAGAIN until firmware data has been populated.
 */
static int rvu_dbg_rvu_fwdata_display(struct seq_file *s, void *unused)
{
	struct rvu *rvu = s->private;
	struct rvu_fwdata *fwdata;
	u8 mac[ETH_ALEN];
	int count = 0, i;

	if (!rvu->fwdata)
		return -EAGAIN;

	fwdata = rvu->fwdata;
	seq_puts(s, "\nRVU Firmware Data:\n");
	seq_puts(s, "\n\t\tPTP INFORMATION\n");
	seq_puts(s, "\t\t===============\n");
	seq_printf(s, "\t\texternal clockrate \t :%x\n",
		   fwdata->ptp_ext_clk_rate);
	seq_printf(s, "\t\texternal timestamp \t :%x\n",
		   fwdata->ptp_ext_tstamp);
	seq_puts(s, "\n");

	seq_puts(s, "\n\t\tSDP CHANNEL INFORMATION\n");
	seq_puts(s, "\t\t=======================\n");
	seq_printf(s, "\t\tValid \t\t\t :%x\n", fwdata->channel_data.valid);
	seq_printf(s, "\t\tNode ID \t\t :%x\n",
		   fwdata->channel_data.info.node_id);
	seq_printf(s, "\t\tNumber of VFs  \t\t :%x\n",
		   fwdata->channel_data.info.max_vfs);
	seq_printf(s, "\t\tNumber of PF-Rings \t :%x\n",
		   fwdata->channel_data.info.num_pf_rings);
	seq_printf(s, "\t\tPF SRN \t\t\t :%x\n",
		   fwdata->channel_data.info.pf_srn);
	seq_puts(s, "\n");

	seq_puts(s, "\n\t\tPF-INDEX  MACADDRESS\n");
	seq_puts(s, "\t\t====================\n");
	for (i = 0; i < PF_MACNUM_MAX; i++) {
		/* MACs are stored packed in u64s; only print real ones */
		u64_to_ether_addr(fwdata->pf_macs[i], mac);
		if (!is_zero_ether_addr(mac)) {
			seq_printf(s, "\t\t  %d       %pM\n", i, mac);
			count++;
		}
	}

	if (!count)
		seq_puts(s, "\t\tNo valid address found\n");

	seq_puts(s, "\n\t\tVF-INDEX  MACADDRESS\n");
	seq_puts(s, "\t\t====================\n");
	count = 0;
	for (i = 0; i < VF_MACNUM_MAX; i++) {
		u64_to_ether_addr(fwdata->vf_macs[i], mac);
		if (!is_zero_ether_addr(mac)) {
			seq_printf(s, "\t\t  %d       %pM\n", i, mac);
			count++;
		}
	}

	if (!count)
		seq_puts(s, "\t\tNo valid address found\n");

	return 0;
}

RVU_DEBUG_SEQ_FOPS(rvu_fwdata, rvu_fwdata_display, NULL);
936 
937 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
938 				u16 *pcifunc)
939 {
940 	struct rvu_block *block;
941 	struct rvu_hwinfo *hw;
942 
943 	hw = rvu->hw;
944 	block = &hw->block[blkaddr];
945 
946 	if (lf < 0 || lf >= block->lf.max) {
947 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
948 			 block->lf.max - 1);
949 		return false;
950 	}
951 
952 	*pcifunc = block->fn_map[lf];
953 	if (!*pcifunc) {
954 		dev_warn(rvu->dev,
955 			 "This LF is not attached to any RVU PFFUNC\n");
956 		return false;
957 	}
958 	return true;
959 }
960 
961 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
962 {
963 	char *buf;
964 
965 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
966 	if (!buf)
967 		return;
968 
969 	if (!pfvf->aura_ctx) {
970 		seq_puts(m, "Aura context is not initialized\n");
971 	} else {
972 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
973 					pfvf->aura_ctx->qsize);
974 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
975 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
976 	}
977 
978 	if (!pfvf->pool_ctx) {
979 		seq_puts(m, "Pool context is not initialized\n");
980 	} else {
981 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
982 					pfvf->pool_ctx->qsize);
983 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
984 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
985 	}
986 	kfree(buf);
987 }
988 
989 /* The 'qsize' entry dumps current Aura/Pool context Qsize
990  * and each context's current enable/disable status in a bitmap.
991  */
992 static int rvu_dbg_qsize_display(struct seq_file *s, void *unsused,
993 				 int blktype)
994 {
995 	void (*print_qsize)(struct seq_file *s,
996 			    struct rvu_pfvf *pfvf) = NULL;
997 	struct rvu_pfvf *pfvf;
998 	struct rvu *rvu;
999 	int qsize_id;
1000 	u16 pcifunc;
1001 	int blkaddr;
1002 
1003 	rvu = s->private;
1004 	switch (blktype) {
1005 	case BLKTYPE_NPA:
1006 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
1007 		print_qsize = print_npa_qsize;
1008 		break;
1009 
1010 	case BLKTYPE_NIX:
1011 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
1012 		print_qsize = print_nix_qsize;
1013 		break;
1014 
1015 	default:
1016 		return -EINVAL;
1017 	}
1018 
1019 	if (blktype == BLKTYPE_NPA)
1020 		blkaddr = BLKADDR_NPA;
1021 	else
1022 		blkaddr = debugfs_get_aux_num(s->file);
1023 
1024 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
1025 		return -EINVAL;
1026 
1027 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1028 	print_qsize(s, pfvf);
1029 
1030 	return 0;
1031 }
1032 
1033 static ssize_t rvu_dbg_qsize_write(struct file *file,
1034 				   const char __user *buffer, size_t count,
1035 				   loff_t *ppos, int blktype)
1036 {
1037 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
1038 	struct seq_file *seqfile = file->private_data;
1039 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
1040 	struct rvu *rvu = seqfile->private;
1041 	int blkaddr;
1042 	u16 pcifunc;
1043 	int ret, lf;
1044 
1045 	cmd_buf = memdup_user_nul(buffer, count);
1046 	if (IS_ERR(cmd_buf))
1047 		return -ENOMEM;
1048 
1049 	cmd_buf_tmp = strchr(cmd_buf, '\n');
1050 	if (cmd_buf_tmp) {
1051 		*cmd_buf_tmp = '\0';
1052 		count = cmd_buf_tmp - cmd_buf + 1;
1053 	}
1054 
1055 	cmd_buf_tmp = cmd_buf;
1056 	subtoken = strsep(&cmd_buf, " ");
1057 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
1058 	if (cmd_buf)
1059 		ret = -EINVAL;
1060 
1061 	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
1062 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
1063 		goto qsize_write_done;
1064 	}
1065 
1066 	if (blktype == BLKTYPE_NPA)
1067 		blkaddr = BLKADDR_NPA;
1068 	else
1069 		blkaddr = debugfs_get_aux_num(file);
1070 
1071 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
1072 		ret = -EINVAL;
1073 		goto qsize_write_done;
1074 	}
1075 	if (blktype  == BLKTYPE_NPA)
1076 		rvu->rvu_dbg.npa_qsize_id = lf;
1077 	else
1078 		rvu->rvu_dbg.nix_qsize_id = lf;
1079 
1080 qsize_write_done:
1081 	kfree(cmd_buf_tmp);
1082 	return ret ? ret : count;
1083 }
1084 
1085 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
1086 				       const char __user *buffer,
1087 				       size_t count, loff_t *ppos)
1088 {
1089 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1090 					    BLKTYPE_NPA);
1091 }
1092 
1093 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
1094 {
1095 	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
1096 }
1097 
1098 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
1099 
/* Dumps the given NPA Aura's hardware context words (W0-W6) to the
 * seq_file.  On cn20k silicon the context layout differs, so dumping is
 * delegated to the cn20k-specific helper.  The fc_be and fc_msh_dst
 * fields exist only on post-OcteonTx2 silicon and are skipped otherwise.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	/* cn20k uses a different AQ response/context layout */
	if (is_cn20k(rvu->pdev)) {
		print_npa_cn20k_aura_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp);
		return;
	}

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be exists only on silicon newer than OcteonTx2 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst exists only on silicon newer than OcteonTx2 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
1152 
/* Dumps the given NPA Pool's hardware context words (W0-W8) to the
 * seq_file.  On cn20k silicon the context layout differs, so dumping is
 * delegated to the cn20k-specific helper.  The fc_be and fc_msh_dst
 * fields exist only on post-OcteonTx2 silicon and are skipped otherwise.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	/* cn20k uses a different AQ response/context layout */
	if (is_cn20k(rvu->pdev)) {
		print_npa_cn20k_pool_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp);
		return;
	}

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be exists only on silicon newer than OcteonTx2 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst exists only on silicon newer than OcteonTx2 */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
1204 
/* Reads aura/pool's ctx from admin queue.
 *
 * The LF / context id / "all" selection was recorded earlier by the
 * corresponding debugfs write handler (write_npa_ctx).  Each selected
 * context is fetched from hardware via an NPA AQ READ instruction and
 * pretty-printed with print_npa_aura_ctx()/print_npa_pool_ctx().
 * Returns 0 on success or -EINVAL on any validation/AQ failure.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Pick up the selection saved by the last debugfs write */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build a reusable AQ READ request; only aura_id varies per loop */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps every context; otherwise restrict range to one id */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;

		/* Skip if queue is uninitialized */
		if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
			continue;

		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
1290 
1291 static int write_npa_ctx(struct rvu *rvu, bool all,
1292 			 int npalf, int id, int ctype)
1293 {
1294 	struct rvu_pfvf *pfvf;
1295 	int max_id = 0;
1296 	u16 pcifunc;
1297 
1298 	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
1299 		return -EINVAL;
1300 
1301 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1302 
1303 	if (ctype == NPA_AQ_CTYPE_AURA) {
1304 		if (!pfvf->aura_ctx) {
1305 			dev_warn(rvu->dev, "Aura context is not initialized\n");
1306 			return -EINVAL;
1307 		}
1308 		max_id = pfvf->aura_ctx->qsize;
1309 	} else if (ctype == NPA_AQ_CTYPE_POOL) {
1310 		if (!pfvf->pool_ctx) {
1311 			dev_warn(rvu->dev, "Pool context is not initialized\n");
1312 			return -EINVAL;
1313 		}
1314 		max_id = pfvf->pool_ctx->qsize;
1315 	}
1316 
1317 	if (id < 0 || id >= max_id) {
1318 		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
1319 			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
1320 			max_id - 1);
1321 		return -EINVAL;
1322 	}
1323 
1324 	switch (ctype) {
1325 	case NPA_AQ_CTYPE_AURA:
1326 		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
1327 		rvu->rvu_dbg.npa_aura_ctx.id = id;
1328 		rvu->rvu_dbg.npa_aura_ctx.all = all;
1329 		break;
1330 
1331 	case NPA_AQ_CTYPE_POOL:
1332 		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
1333 		rvu->rvu_dbg.npa_pool_ctx.id = id;
1334 		rvu->rvu_dbg.npa_pool_ctx.all = all;
1335 		break;
1336 	default:
1337 		return -EINVAL;
1338 	}
1339 	return 0;
1340 }
1341 
1342 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
1343 				const char __user *buffer, int *npalf,
1344 				int *id, bool *all)
1345 {
1346 	int bytes_not_copied;
1347 	char *cmd_buf_tmp;
1348 	char *subtoken;
1349 	int ret;
1350 
1351 	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
1352 	if (bytes_not_copied)
1353 		return -EFAULT;
1354 
1355 	cmd_buf[*count] = '\0';
1356 	cmd_buf_tmp = strchr(cmd_buf, '\n');
1357 
1358 	if (cmd_buf_tmp) {
1359 		*cmd_buf_tmp = '\0';
1360 		*count = cmd_buf_tmp - cmd_buf + 1;
1361 	}
1362 
1363 	subtoken = strsep(&cmd_buf, " ");
1364 	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
1365 	if (ret < 0)
1366 		return ret;
1367 	subtoken = strsep(&cmd_buf, " ");
1368 	if (subtoken && strcmp(subtoken, "all") == 0) {
1369 		*all = true;
1370 	} else {
1371 		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
1372 		if (ret < 0)
1373 			return ret;
1374 	}
1375 	if (cmd_buf)
1376 		return -EINVAL;
1377 	return ret;
1378 }
1379 
1380 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
1381 				     const char __user *buffer,
1382 				     size_t count, loff_t *ppos, int ctype)
1383 {
1384 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
1385 					"aura" : "pool";
1386 	struct seq_file *seqfp = filp->private_data;
1387 	struct rvu *rvu = seqfp->private;
1388 	int npalf, id = 0, ret;
1389 	bool all = false;
1390 
1391 	if ((*ppos != 0) || !count)
1392 		return -EINVAL;
1393 
1394 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1395 	if (!cmd_buf)
1396 		return count;
1397 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1398 				   &npalf, &id, &all);
1399 	if (ret < 0) {
1400 		dev_info(rvu->dev,
1401 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1402 			 ctype_string, ctype_string);
1403 		goto done;
1404 	} else {
1405 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1406 	}
1407 done:
1408 	kfree(cmd_buf);
1409 	return ret ? ret : count;
1410 }
1411 
1412 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1413 					  const char __user *buffer,
1414 					  size_t count, loff_t *ppos)
1415 {
1416 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1417 				     NPA_AQ_CTYPE_AURA);
1418 }
1419 
1420 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
1421 {
1422 	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
1423 }
1424 
1425 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1426 
1427 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1428 					  const char __user *buffer,
1429 					  size_t count, loff_t *ppos)
1430 {
1431 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1432 				     NPA_AQ_CTYPE_POOL);
1433 }
1434 
1435 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
1436 {
1437 	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
1438 }
1439 
1440 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1441 
1442 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1443 			    int ctype, int transaction)
1444 {
1445 	u64 req, out_req, lat, cant_alloc;
1446 	struct nix_hw *nix_hw;
1447 	struct rvu *rvu;
1448 	int port;
1449 
1450 	if (blk_addr == BLKADDR_NDC_NPA0) {
1451 		rvu = s->private;
1452 	} else {
1453 		nix_hw = s->private;
1454 		rvu = nix_hw->rvu;
1455 	}
1456 
1457 	for (port = 0; port < NDC_MAX_PORT; port++) {
1458 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1459 						(port, ctype, transaction));
1460 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1461 						(port, ctype, transaction));
1462 		out_req = rvu_read64(rvu, blk_addr,
1463 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1464 				     (port, ctype, transaction));
1465 		cant_alloc = rvu_read64(rvu, blk_addr,
1466 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1467 					(port, transaction));
1468 		seq_printf(s, "\nPort:%d\n", port);
1469 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1470 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1471 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1472 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1473 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1474 	}
1475 }
1476 
1477 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1478 {
1479 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
1480 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1481 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
1482 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1483 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1484 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1485 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1486 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1487 	return 0;
1488 }
1489 
1490 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
1491 {
1492 	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1493 }
1494 
1495 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1496 
1497 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1498 {
1499 	struct nix_hw *nix_hw;
1500 	struct rvu *rvu;
1501 	int bank, max_bank;
1502 	u64 ndc_af_const;
1503 
1504 	if (blk_addr == BLKADDR_NDC_NPA0) {
1505 		rvu = s->private;
1506 	} else {
1507 		nix_hw = s->private;
1508 		rvu = nix_hw->rvu;
1509 	}
1510 
1511 	ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
1512 	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
1513 	for (bank = 0; bank < max_bank; bank++) {
1514 		seq_printf(s, "BANK:%d\n", bank);
1515 		seq_printf(s, "\tHits:\t%lld\n",
1516 			   (u64)rvu_read64(rvu, blk_addr,
1517 			   NDC_AF_BANKX_HIT_PC(bank)));
1518 		seq_printf(s, "\tMiss:\t%lld\n",
1519 			   (u64)rvu_read64(rvu, blk_addr,
1520 			    NDC_AF_BANKX_MISS_PC(bank)));
1521 	}
1522 	return 0;
1523 }
1524 
1525 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1526 {
1527 	struct nix_hw *nix_hw = filp->private;
1528 	int blkaddr = 0;
1529 	int ndc_idx = 0;
1530 
1531 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1532 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1533 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1534 
1535 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1536 }
1537 
1538 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1539 
1540 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1541 {
1542 	struct nix_hw *nix_hw = filp->private;
1543 	int blkaddr = 0;
1544 	int ndc_idx = 0;
1545 
1546 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1547 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1548 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1549 
1550 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1551 }
1552 
1553 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1554 
1555 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
1556 					     void *unused)
1557 {
1558 	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1559 }
1560 
1561 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1562 
1563 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1564 						void *unused)
1565 {
1566 	struct nix_hw *nix_hw = filp->private;
1567 	int ndc_idx = NPA0_U;
1568 	int blkaddr = 0;
1569 
1570 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1571 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1572 
1573 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1574 }
1575 
1576 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1577 
1578 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1579 						void *unused)
1580 {
1581 	struct nix_hw *nix_hw = filp->private;
1582 	int ndc_idx = NPA0_U;
1583 	int blkaddr = 0;
1584 
1585 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1586 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1587 
1588 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1589 }
1590 
1591 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1592 
1593 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1594 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1595 {
1596 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1597 		   sq_ctx->ena, sq_ctx->qint_idx);
1598 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1599 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1600 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1601 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1602 
1603 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1604 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1605 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1606 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1607 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1608 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1609 
1610 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1611 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1612 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1613 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1614 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1615 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1616 
1617 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1618 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1619 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1620 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1621 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1622 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1623 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1624 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1625 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1626 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1627 
1628 	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1629 		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1630 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1631 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1632 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1633 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1634 		   sq_ctx->smenq_next_sqb);
1635 
1636 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1637 
1638 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1639 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1640 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1641 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1642 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1643 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1644 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1645 
1646 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1647 		   (u64)sq_ctx->scm_lso_rem);
1648 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1649 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1650 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1651 		   (u64)sq_ctx->dropped_octs);
1652 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1653 		   (u64)sq_ctx->dropped_pkts);
1654 }
1655 
/* Print the transmit-scheduler path of one SQ by walking the PARENT
 * registers level by level: SQ -> SMQ(MDQ) -> TL4 -> TL3 -> TL2 -> TL1.
 * Disabled SQs are silently skipped.
 */
static void print_tm_tree(struct seq_file *m,
			  struct nix_aq_enq_rsp *rsp, u64 sq)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;
	u16 p1, p2, p3, p4, schq;
	int blkaddr;
	u64 cfg;

	/* Nothing to show for a disabled SQ */
	if (!sq_ctx->ena)
		return;

	blkaddr = nix_hw->blkaddr;
	/* The SQ context holds the SMQ (MDQ-level queue) it feeds into */
	schq = sq_ctx->smq;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq));
	p1 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK, cfg);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(p1));
	p2 = FIELD_GET(NIX_AF_TL4_PARENT_MASK, cfg);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(p2));
	p3 = FIELD_GET(NIX_AF_TL3_PARENT_MASK, cfg);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(p3));
	p4 = FIELD_GET(NIX_AF_TL2_PARENT_MASK, cfg);
	seq_printf(m,
		   "SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n",
		   sq, schq, p1, p2, p3, p4);
}
1687 
1688 /*dumps given tm_tree registers*/
1689 static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused)
1690 {
1691 	int qidx, nixlf, rc, id, max_id = 0;
1692 	struct nix_hw *nix_hw = m->private;
1693 	struct rvu *rvu = nix_hw->rvu;
1694 	struct nix_aq_enq_req aq_req;
1695 	struct nix_aq_enq_rsp rsp;
1696 	struct rvu_pfvf *pfvf;
1697 	u16 pcifunc;
1698 
1699 	nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1700 	id = rvu->rvu_dbg.nix_tm_ctx.id;
1701 
1702 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1703 		return -EINVAL;
1704 
1705 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1706 	max_id = pfvf->sq_ctx->qsize;
1707 
1708 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1709 	aq_req.hdr.pcifunc = pcifunc;
1710 	aq_req.ctype = NIX_AQ_CTYPE_SQ;
1711 	aq_req.op = NIX_AQ_INSTOP_READ;
1712 	seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1713 	for (qidx = id; qidx < max_id; qidx++) {
1714 		aq_req.qidx = qidx;
1715 
1716 		/* Skip SQ's if not initialized */
1717 		if (!test_bit(qidx, pfvf->sq_bmap))
1718 			continue;
1719 
1720 		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1721 
1722 		if (rc) {
1723 			seq_printf(m, "Failed to read SQ(%d) context\n",
1724 				   aq_req.qidx);
1725 			continue;
1726 		}
1727 		print_tm_tree(m, &rsp, aq_req.qidx);
1728 	}
1729 	return 0;
1730 }
1731 
1732 static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp,
1733 					 const char __user *buffer,
1734 					 size_t count, loff_t *ppos)
1735 {
1736 	struct seq_file *m = filp->private_data;
1737 	struct nix_hw *nix_hw = m->private;
1738 	struct rvu *rvu = nix_hw->rvu;
1739 	struct rvu_pfvf *pfvf;
1740 	u16 pcifunc;
1741 	u64 nixlf;
1742 	int ret;
1743 
1744 	ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
1745 	if (ret)
1746 		return ret;
1747 
1748 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1749 		return -EINVAL;
1750 
1751 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1752 	if (!pfvf->sq_ctx) {
1753 		dev_warn(rvu->dev, "SQ context is not initialized\n");
1754 		return -EINVAL;
1755 	}
1756 
1757 	rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
1758 	return count;
1759 }
1760 
1761 RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write);
1762 
1763 static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl)
1764 {
1765 	struct nix_hw *nix_hw = m->private;
1766 	struct rvu *rvu = nix_hw->rvu;
1767 	int blkaddr, link, link_level;
1768 	struct rvu_hwinfo *hw;
1769 
1770 	hw = rvu->hw;
1771 	blkaddr = nix_hw->blkaddr;
1772 	if (lvl == NIX_TXSCH_LVL_MDQ) {
1773 		seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq,
1774 			   rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)));
1775 		seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq,
1776 			   rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq)));
1777 		seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT =0x%llx\n", schq,
1778 			   rvu_read64(rvu, blkaddr,
1779 				      NIX_AF_MDQX_OUT_MD_COUNT(schq)));
1780 		seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq,
1781 			   rvu_read64(rvu, blkaddr,
1782 				      NIX_AF_MDQX_SCHEDULE(schq)));
1783 		seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq,
1784 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq)));
1785 		seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq,
1786 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq)));
1787 		seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq,
1788 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq)));
1789 		seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq,
1790 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq)));
1791 		seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq,
1792 			   rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)));
1793 		seq_puts(m, "\n");
1794 	}
1795 
1796 	if (lvl == NIX_TXSCH_LVL_TL4) {
1797 		seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq,
1798 			   rvu_read64(rvu, blkaddr,
1799 				      NIX_AF_TL4X_SDP_LINK_CFG(schq)));
1800 		seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq,
1801 			   rvu_read64(rvu, blkaddr,
1802 				      NIX_AF_TL4X_SCHEDULE(schq)));
1803 		seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq,
1804 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq)));
1805 		seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq,
1806 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq)));
1807 		seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq,
1808 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq)));
1809 		seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq,
1810 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq)));
1811 		seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq,
1812 			   rvu_read64(rvu, blkaddr,
1813 				      NIX_AF_TL4X_TOPOLOGY(schq)));
1814 		seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq,
1815 			   rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq)));
1816 		seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1817 			   rvu_read64(rvu, blkaddr,
1818 				      NIX_AF_TL4X_MD_DEBUG0(schq)));
1819 		seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1820 			   rvu_read64(rvu, blkaddr,
1821 				      NIX_AF_TL4X_MD_DEBUG1(schq)));
1822 		seq_puts(m, "\n");
1823 	}
1824 
1825 	if (lvl == NIX_TXSCH_LVL_TL3) {
1826 		seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq,
1827 			   rvu_read64(rvu, blkaddr,
1828 				      NIX_AF_TL3X_SCHEDULE(schq)));
1829 		seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq,
1830 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq)));
1831 		seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq,
1832 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq)));
1833 		seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq,
1834 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq)));
1835 		seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq,
1836 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq)));
1837 		seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq,
1838 			   rvu_read64(rvu, blkaddr,
1839 				      NIX_AF_TL3X_TOPOLOGY(schq)));
1840 		seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", schq,
1841 			   rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq)));
1842 		seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1843 			   rvu_read64(rvu, blkaddr,
1844 				      NIX_AF_TL3X_MD_DEBUG0(schq)));
1845 		seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1846 			   rvu_read64(rvu, blkaddr,
1847 				      NIX_AF_TL3X_MD_DEBUG1(schq)));
1848 
1849 		link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1850 				& 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1851 		if (lvl == link_level) {
1852 			seq_printf(m,
1853 				   "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1854 				   schq, rvu_read64(rvu, blkaddr,
1855 				   NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1856 			for (link = 0; link < hw->cgx_links; link++)
1857 				seq_printf(m,
1858 					   "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1859 					   schq, link,
1860 					   rvu_read64(rvu, blkaddr,
1861 						      NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1862 		}
1863 		seq_puts(m, "\n");
1864 	}
1865 
1866 	if (lvl == NIX_TXSCH_LVL_TL2) {
1867 		seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq,
1868 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq)));
1869 		seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq,
1870 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq)));
1871 		seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq,
1872 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq)));
1873 		seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq,
1874 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq)));
1875 		seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq,
1876 			   rvu_read64(rvu, blkaddr,
1877 				      NIX_AF_TL2X_TOPOLOGY(schq)));
1878 		seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq,
1879 			   rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq)));
1880 		seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1881 			   rvu_read64(rvu, blkaddr,
1882 				      NIX_AF_TL2X_MD_DEBUG0(schq)));
1883 		seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1884 			   rvu_read64(rvu, blkaddr,
1885 				      NIX_AF_TL2X_MD_DEBUG1(schq)));
1886 
1887 		link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1888 				& 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1889 		if (lvl == link_level) {
1890 			seq_printf(m,
1891 				   "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1892 				   schq, rvu_read64(rvu, blkaddr,
1893 				   NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1894 			for (link = 0; link < hw->cgx_links; link++)
1895 				seq_printf(m,
1896 					   "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1897 					   schq, link, rvu_read64(rvu, blkaddr,
1898 					   NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1899 		}
1900 		seq_puts(m, "\n");
1901 	}
1902 
1903 	if (lvl == NIX_TXSCH_LVL_TL1) {
1904 		seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n",
1905 			   schq,
1906 			   rvu_read64(rvu, blkaddr,
1907 				      NIX_AF_TX_LINKX_NORM_CREDIT(schq)));
1908 		seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq,
1909 			   rvu_read64(rvu, blkaddr,
1910 				      NIX_AF_TX_LINKX_HW_XOFF(schq)));
1911 		seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq,
1912 			   rvu_read64(rvu, blkaddr,
1913 				      NIX_AF_TL1X_SCHEDULE(schq)));
1914 		seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq,
1915 			   rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq)));
1916 		seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq,
1917 			   rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq)));
1918 		seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq,
1919 			   rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)));
1920 		seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq,
1921 			   rvu_read64(rvu, blkaddr,
1922 				      NIX_AF_TL1X_TOPOLOGY(schq)));
1923 		seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1924 			   rvu_read64(rvu, blkaddr,
1925 				      NIX_AF_TL1X_MD_DEBUG0(schq)));
1926 		seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1927 			   rvu_read64(rvu, blkaddr,
1928 				      NIX_AF_TL1X_MD_DEBUG1(schq)));
1929 		seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n",
1930 			   schq,
1931 			   rvu_read64(rvu, blkaddr,
1932 				      NIX_AF_TL1X_DROPPED_PACKETS(schq)));
1933 		seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq,
1934 			   rvu_read64(rvu, blkaddr,
1935 				      NIX_AF_TL1X_DROPPED_BYTES(schq)));
1936 		seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq,
1937 			   rvu_read64(rvu, blkaddr,
1938 				      NIX_AF_TL1X_RED_PACKETS(schq)));
1939 		seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq,
1940 			   rvu_read64(rvu, blkaddr,
1941 				      NIX_AF_TL1X_RED_BYTES(schq)));
1942 		seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq,
1943 			   rvu_read64(rvu, blkaddr,
1944 				      NIX_AF_TL1X_YELLOW_PACKETS(schq)));
1945 		seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq,
1946 			   rvu_read64(rvu, blkaddr,
1947 				      NIX_AF_TL1X_YELLOW_BYTES(schq)));
1948 		seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq,
1949 			   rvu_read64(rvu, blkaddr,
1950 				      NIX_AF_TL1X_GREEN_PACKETS(schq)));
1951 		seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq,
1952 			   rvu_read64(rvu, blkaddr,
1953 				      NIX_AF_TL1X_GREEN_BYTES(schq)));
1954 		seq_puts(m, "\n");
1955 	}
1956 }
1957 
1958 /*dumps given tm_topo registers*/
1959 static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused)
1960 {
1961 	struct nix_hw *nix_hw = m->private;
1962 	struct rvu *rvu = nix_hw->rvu;
1963 	struct nix_aq_enq_req aq_req;
1964 	struct nix_txsch *txsch;
1965 	int nixlf, lvl, schq;
1966 	u16 pcifunc;
1967 
1968 	nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1969 
1970 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1971 		return -EINVAL;
1972 
1973 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1974 	aq_req.hdr.pcifunc = pcifunc;
1975 	aq_req.ctype = NIX_AQ_CTYPE_SQ;
1976 	aq_req.op = NIX_AQ_INSTOP_READ;
1977 	seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1978 
1979 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1980 		txsch = &nix_hw->txsch[lvl];
1981 		for (schq = 0; schq < txsch->schq.max; schq++) {
1982 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc)
1983 				print_tm_topo(m, schq, lvl);
1984 		}
1985 	}
1986 	return 0;
1987 }
1988 
1989 static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp,
1990 					 const char __user *buffer,
1991 					 size_t count, loff_t *ppos)
1992 {
1993 	struct seq_file *m = filp->private_data;
1994 	struct nix_hw *nix_hw = m->private;
1995 	struct rvu *rvu = nix_hw->rvu;
1996 	struct rvu_pfvf *pfvf;
1997 	u16 pcifunc;
1998 	u64 nixlf;
1999 	int ret;
2000 
2001 	ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
2002 	if (ret)
2003 		return ret;
2004 
2005 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
2006 		return -EINVAL;
2007 
2008 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2009 	if (!pfvf->sq_ctx) {
2010 		dev_warn(rvu->dev, "SQ context is not initialized\n");
2011 		return -EINVAL;
2012 	}
2013 
2014 	rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
2015 	return count;
2016 }
2017 
/* debugfs "nix_tm_topo": read dumps TM topology, write selects the NIX LF */
RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write);
2019 
/* Dump the given NIX SQ's hardware context, word (W0..W15) by word.
 * CN20K and CN10K silicon have their own SQ context layouts and are
 * handed off to their dedicated pretty-printers.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	if (is_cn20k(rvu->pdev)) {
		print_nix_cn20k_sq_ctx(m, (struct nix_cn20k_sq_ctx_s *)sq_ctx);
		return;
	}

	/* Not OcteonTx2 (and not CN20K, handled above) => CN10K layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}

	/* OcteonTx2 (CN9xxx) SQ context layout follows */
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4..W8: SQB chain pointers */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	/* W9: LSO (large send offload) state */
	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* W10..W15: statistics counters */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
2095 
2096 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
2097 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
2098 {
2099 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
2100 		   rq_ctx->ena, rq_ctx->sso_ena);
2101 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
2102 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
2103 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
2104 		   rq_ctx->cq, rq_ctx->lenerr_dis);
2105 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
2106 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
2107 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
2108 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
2109 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
2110 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
2111 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
2112 
2113 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
2114 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
2115 	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
2116 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
2117 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
2118 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
2119 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
2120 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
2121 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
2122 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
2123 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
2124 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
2125 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
2126 
2127 	seq_printf(m, "W2: band_prof_id \t\t%d\n",
2128 		   (u16)rq_ctx->band_prof_id_h << 10 | rq_ctx->band_prof_id);
2129 
2130 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
2131 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
2132 	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
2133 		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
2134 	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
2135 		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
2136 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
2137 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
2138 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
2139 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
2140 
2141 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
2142 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
2143 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
2144 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
2145 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
2146 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
2147 	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
2148 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
2149 
2150 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
2151 		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
2152 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
2153 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
2154 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
2155 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
2156 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
2157 
2158 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
2159 		   rq_ctx->ltag, rq_ctx->good_utag);
2160 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
2161 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
2162 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
2163 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
2164 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
2165 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
2166 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
2167 
2168 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
2169 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
2170 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
2171 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
2172 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
2173 }
2174 
/* Dump the given (OcteonTx2) NIX RQ's hardware context, word (W0..W10)
 * by word; CN10K silicon is handed off to its dedicated pretty-printer.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6..W10: statistics counters */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
2243 
/* Dump the given NIX CQ's hardware context, word (W0..W3) by word.
 * CN20K has its own CQ context layout and is handed off to its dedicated
 * pretty-printer; CN10K-only fields are printed on non-OcteonTx2 silicon.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	if (is_cn20k(rvu->pdev)) {
		print_nix_cn20k_cq_ctx(m, (struct nix_cn20k_aq_enq_rsp *)rsp);
		return;
	}

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	/* CN10K-only local backpressure-id fields */
	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
		seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
		seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
		/* Recombine the three lbpid sub-fields for convenience */
		seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
			   cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
			   cq_ctx->lbpid_low);
		seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
	}

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);

	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	/* CN10K-only low-backpressure / CPT fields */
	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
		seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
			   cq_ctx->cpt_drop_err_en);
	}
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
2297 
/* Read handler shared by the nix_{sq,rq,cq}_ctx debugfs files: dumps one
 * queue context (or all of them) of the given type for the NIX LF that
 * was previously selected by the matching write handler.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Fetch the LF/queue-id/all selection cached by the write handler */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick the per-type pretty-printer and the valid queue-id bound */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	/* Read each requested context from hardware via the NIX admin queue */
	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
2386 
2387 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
2388 			       int id, int ctype, char *ctype_string,
2389 			       struct seq_file *m)
2390 {
2391 	struct nix_hw *nix_hw = m->private;
2392 	struct rvu_pfvf *pfvf;
2393 	int max_id = 0;
2394 	u16 pcifunc;
2395 
2396 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
2397 		return -EINVAL;
2398 
2399 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2400 
2401 	if (ctype == NIX_AQ_CTYPE_SQ) {
2402 		if (!pfvf->sq_ctx) {
2403 			dev_warn(rvu->dev, "SQ context is not initialized\n");
2404 			return -EINVAL;
2405 		}
2406 		max_id = pfvf->sq_ctx->qsize;
2407 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
2408 		if (!pfvf->rq_ctx) {
2409 			dev_warn(rvu->dev, "RQ context is not initialized\n");
2410 			return -EINVAL;
2411 		}
2412 		max_id = pfvf->rq_ctx->qsize;
2413 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
2414 		if (!pfvf->cq_ctx) {
2415 			dev_warn(rvu->dev, "CQ context is not initialized\n");
2416 			return -EINVAL;
2417 		}
2418 		max_id = pfvf->cq_ctx->qsize;
2419 	}
2420 
2421 	if (id < 0 || id >= max_id) {
2422 		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
2423 			 ctype_string, max_id - 1);
2424 		return -EINVAL;
2425 	}
2426 	switch (ctype) {
2427 	case NIX_AQ_CTYPE_CQ:
2428 		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
2429 		rvu->rvu_dbg.nix_cq_ctx.id = id;
2430 		rvu->rvu_dbg.nix_cq_ctx.all = all;
2431 		break;
2432 
2433 	case NIX_AQ_CTYPE_SQ:
2434 		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
2435 		rvu->rvu_dbg.nix_sq_ctx.id = id;
2436 		rvu->rvu_dbg.nix_sq_ctx.all = all;
2437 		break;
2438 
2439 	case NIX_AQ_CTYPE_RQ:
2440 		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
2441 		rvu->rvu_dbg.nix_rq_ctx.id = id;
2442 		rvu->rvu_dbg.nix_rq_ctx.all = all;
2443 		break;
2444 	default:
2445 		return -EINVAL;
2446 	}
2447 	return 0;
2448 }
2449 
2450 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
2451 					   const char __user *buffer,
2452 					   size_t count, loff_t *ppos,
2453 					   int ctype)
2454 {
2455 	struct seq_file *m = filp->private_data;
2456 	struct nix_hw *nix_hw = m->private;
2457 	struct rvu *rvu = nix_hw->rvu;
2458 	char *cmd_buf, *ctype_string;
2459 	int nixlf, id = 0, ret;
2460 	bool all = false;
2461 
2462 	if ((*ppos != 0) || !count)
2463 		return -EINVAL;
2464 
2465 	switch (ctype) {
2466 	case NIX_AQ_CTYPE_SQ:
2467 		ctype_string = "sq";
2468 		break;
2469 	case NIX_AQ_CTYPE_RQ:
2470 		ctype_string = "rq";
2471 		break;
2472 	case NIX_AQ_CTYPE_CQ:
2473 		ctype_string = "cq";
2474 		break;
2475 	default:
2476 		return -EINVAL;
2477 	}
2478 
2479 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
2480 
2481 	if (!cmd_buf)
2482 		return count;
2483 
2484 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
2485 				   &nixlf, &id, &all);
2486 	if (ret < 0) {
2487 		dev_info(rvu->dev,
2488 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
2489 			 ctype_string, ctype_string);
2490 		goto done;
2491 	} else {
2492 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
2493 					  ctype_string, m);
2494 	}
2495 done:
2496 	kfree(cmd_buf);
2497 	return ret ? ret : count;
2498 }
2499 
/* debugfs write: select the NIX LF and SQ id(s) to dump via nix_sq_ctx */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_SQ);
}
2507 
/* debugfs read: dump the selected SQ context(s) */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}
2512 
/* debugfs "nix_sq_ctx": read dumps SQ context(s), write selects LF/queue */
RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
2514 
/* debugfs write: select the NIX LF and RQ id(s) to dump via nix_rq_ctx */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_RQ);
}
2522 
/* debugfs read: dump the selected RQ context(s) */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
}
2527 
/* debugfs "nix_rq_ctx": read dumps RQ context(s), write selects LF/queue */
RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
2529 
/* debugfs write: select the NIX LF and CQ id(s) to dump via nix_cq_ctx */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_CQ);
}
2537 
/* debugfs read: dump the selected CQ context(s) */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}
2542 
/* debugfs "nix_cq_ctx": read dumps CQ context(s), write selects LF/queue */
RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
2544 
2545 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
2546 				 unsigned long *bmap, char *qtype)
2547 {
2548 	char *buf;
2549 
2550 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2551 	if (!buf)
2552 		return;
2553 
2554 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
2555 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
2556 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
2557 		   qtype, buf);
2558 	kfree(buf);
2559 }
2560 
2561 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
2562 {
2563 	if (!pfvf->cq_ctx)
2564 		seq_puts(filp, "cq context is not initialized\n");
2565 	else
2566 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
2567 				     "cq");
2568 
2569 	if (!pfvf->rq_ctx)
2570 		seq_puts(filp, "rq context is not initialized\n");
2571 	else
2572 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
2573 				     "rq");
2574 
2575 	if (!pfvf->sq_ctx)
2576 		seq_puts(filp, "sq context is not initialized\n");
2577 	else
2578 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
2579 				     "sq");
2580 }
2581 
/* debugfs write: select the NIX LF whose queue sizes will be dumped */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}
2589 
/* debugfs read: dump queue sizes for the selected NIX LF */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}
2594 
/* debugfs "nix_qsize": read dumps queue sizes, write selects the NIX LF */
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
2596 
/* Dump a NIX bandwidth-profile (policer) context, word (W0..W15) by word,
 * decoding the mode/color/action bit-fields into human-readable strings.
 */
static void print_band_prof_ctx(struct seq_file *m,
				struct nix_bandprof_s *prof)
{
	char *str;

	/* NOTE(review): assumes pc_mode only ever holds the four enumerated
	 * values; 'str' would be used uninitialized otherwise — confirm the
	 * field width in the context structure.
	 */
	switch (prof->pc_mode) {
	case NIX_RX_PC_MODE_VLAN:
		str = "VLAN";
		break;
	case NIX_RX_PC_MODE_DSCP:
		str = "DSCP";
		break;
	case NIX_RX_PC_MODE_GEN:
		str = "Generic";
		break;
	case NIX_RX_PC_MODE_RSVD:
		str = "Reserved";
		break;
	}
	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
	str = (prof->icolor == 3) ? "Color blind" :
		(prof->icolor == 0) ? "Green" :
		(prof->icolor == 1) ? "Yellow" : "Red";
	seq_printf(m, "W0: icolor\t\t%s\n", str);
	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);

	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
	str = (prof->lmode == 0) ? "byte" : "packet";
	seq_printf(m, "W1: lmode\t\t%s\n", str);
	/* 'l_sellect' spelling follows the hardware context structure field */
	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
	str = (prof->gc_action == 0) ? "PASS" :
		(prof->gc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: gc_action\t\t%s\n", str);
	str = (prof->yc_action == 0) ? "PASS" :
		(prof->yc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: yc_action\t\t%s\n", str);
	str = (prof->rc_action == 0) ? "PASS" :
		(prof->rc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: rc_action\t\t%s\n", str);
	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);

	/* band_prof_id is split across two fields; recombine for display */
	seq_printf(m, "W1: band_prof_id\t%d\n",
		   (u16)prof->band_prof_id_h << 7 | prof->band_prof_id);

	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);

	/* W2..W15: timestamp, accumulators and per-color statistics */
	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
		   (u64)prof->green_pkt_pass);
	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
		   (u64)prof->yellow_pkt_pass);
	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
	seq_printf(m, "W7: green_octs_pass\t%lld\n",
		   (u64)prof->green_octs_pass);
	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
		   (u64)prof->yellow_octs_pass);
	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
		   (u64)prof->green_pkt_drop);
	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
		   (u64)prof->yellow_pkt_drop);
	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
	seq_printf(m, "W13: green_octs_drop\t%lld\n",
		   (u64)prof->green_octs_drop);
	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
		   (u64)prof->yellow_octs_drop);
	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
	seq_puts(m, "==============================\n");
}
2678 
/* debugfs show handler: walk every allocated bandwidth profile in each
 * policer layer (leaf/mid/top), fetch its context through the NIX AQ
 * and dump it via print_band_prof_ctx().
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Only dump profiles that are actually allocated */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* AQ profile index: low 14 bits select the profile,
			 * bits above that select the layer.
			 */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				/* Return 0 so the output gathered so far is
				 * still shown to the reader.
				 */
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(rvu->pdev, pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(rvu->pdev, pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
2735 
2736 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2737 {
2738 	struct nix_hw *nix_hw = m->private;
2739 	struct nix_ipolicer *ipolicer;
2740 	int layer;
2741 	char *str;
2742 
2743 	/* Ingress policers do not exist on all platforms */
2744 	if (!nix_hw->ipolicer)
2745 		return 0;
2746 
2747 	seq_puts(m, "\nBandwidth profile resource free count\n");
2748 	seq_puts(m, "=====================================\n");
2749 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2750 		if (layer == BAND_PROF_INVAL_LAYER)
2751 			continue;
2752 		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2753 			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2754 
2755 		ipolicer = &nix_hw->ipolicer[layer];
2756 		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
2757 			   ipolicer->band_prof.max,
2758 			   rvu_rsrc_free_count(&ipolicer->band_prof));
2759 	}
2760 	seq_puts(m, "=====================================\n");
2761 
2762 	return 0;
2763 }
2764 
2765 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
2766 
2767 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
2768 {
2769 	struct nix_hw *nix_hw;
2770 
2771 	if (!is_block_implemented(rvu->hw, blkaddr))
2772 		return;
2773 
2774 	if (blkaddr == BLKADDR_NIX0) {
2775 		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
2776 		nix_hw = &rvu->hw->nix[0];
2777 	} else {
2778 		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
2779 						      rvu->rvu_dbg.root);
2780 		nix_hw = &rvu->hw->nix[1];
2781 	}
2782 
2783 	debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw,
2784 			    &rvu_dbg_nix_tm_tree_fops);
2785 	debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw,
2786 			    &rvu_dbg_nix_tm_topo_fops);
2787 	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2788 			    &rvu_dbg_nix_sq_ctx_fops);
2789 	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2790 			    &rvu_dbg_nix_rq_ctx_fops);
2791 	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2792 			    &rvu_dbg_nix_cq_ctx_fops);
2793 	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2794 			    &rvu_dbg_nix_ndc_tx_cache_fops);
2795 	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2796 			    &rvu_dbg_nix_ndc_rx_cache_fops);
2797 	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2798 			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
2799 	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2800 			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
2801 	debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu,
2802 			    blkaddr, &rvu_dbg_nix_qsize_fops);
2803 	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2804 			    &rvu_dbg_nix_band_prof_ctx_fops);
2805 	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2806 			    &rvu_dbg_nix_band_prof_rsrc_fops);
2807 }
2808 
/* Create the NPA debugfs directory and its qsize/context/NDC files. */
static void rvu_dbg_npa_init(struct rvu *rvu)
{
	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);

	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_qsize_fops);
	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_aura_ctx_fops);
	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_pool_ctx_fops);

	if (is_cn20k(rvu->pdev)) /* NDC not applicable for cn20k */
		return;
	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_ndc_cache_fops);
	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_ndc_hits_miss_fops);
}
2827 
/* Print one cumulative NIX RX stat for the PFs mapped to a LMAC.
 * Statement expression yielding the counter value; relies on 'rvu',
 * 'cgxd', 'lmac_id', 'err' and the seq_file 's' being in scope at the
 * expansion site and updates 'err' as a side effect.
 * NOTE(review): 'cnt' is yielded uninitialized when the read fails;
 * callers must check 'err' before trusting the value.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})

/* TX flavour of the above; reads NIX_STATS_TX counters instead. */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
2847 
2848 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2849 {
2850 	struct cgx_link_user_info linfo;
2851 	struct mac_ops *mac_ops;
2852 	void *cgxd = s->private;
2853 	u64 ucast, mcast, bcast;
2854 	int stat = 0, err = 0;
2855 	u64 tx_stat, rx_stat;
2856 	struct rvu *rvu;
2857 
2858 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2859 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2860 	if (!rvu)
2861 		return -ENODEV;
2862 
2863 	mac_ops = get_mac_ops(cgxd);
2864 	/* There can be no CGX devices at all */
2865 	if (!mac_ops)
2866 		return 0;
2867 
2868 	/* Link status */
2869 	seq_puts(s, "\n=======Link Status======\n\n");
2870 	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2871 	if (err)
2872 		seq_puts(s, "Failed to read link status\n");
2873 	seq_printf(s, "\nLink is %s %d Mbps\n\n",
2874 		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
2875 
2876 	/* Rx stats */
2877 	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2878 		   mac_ops->name);
2879 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2880 	if (err)
2881 		return err;
2882 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2883 	if (err)
2884 		return err;
2885 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2886 	if (err)
2887 		return err;
2888 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2889 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2890 	if (err)
2891 		return err;
2892 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2893 	if (err)
2894 		return err;
2895 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2896 	if (err)
2897 		return err;
2898 
2899 	/* Tx stats */
2900 	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2901 		   mac_ops->name);
2902 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2903 	if (err)
2904 		return err;
2905 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2906 	if (err)
2907 		return err;
2908 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2909 	if (err)
2910 		return err;
2911 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2912 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2913 	if (err)
2914 		return err;
2915 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2916 	if (err)
2917 		return err;
2918 
2919 	/* Rx stats */
2920 	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2921 	while (stat < mac_ops->rx_stats_cnt) {
2922 		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2923 		if (err)
2924 			return err;
2925 		if (is_rvu_otx2(rvu))
2926 			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2927 				   rx_stat);
2928 		else
2929 			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2930 				   rx_stat);
2931 		stat++;
2932 	}
2933 
2934 	/* Tx stats */
2935 	stat = 0;
2936 	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2937 	while (stat < mac_ops->tx_stats_cnt) {
2938 		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2939 		if (err)
2940 			return err;
2941 
2942 		if (is_rvu_otx2(rvu))
2943 			seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2944 				   tx_stat);
2945 		else
2946 			seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2947 				   tx_stat);
2948 		stat++;
2949 	}
2950 
2951 	return err;
2952 }
2953 
/* Retrieve the LMAC id stored as the debugfs aux number when the file
 * was created (see rvu_dbg_cgx_init()).
 */
static int rvu_dbg_derive_lmacid(struct seq_file *s)
{
	return debugfs_get_aux_num(s->file);
}
2958 
/* debugfs show handler: dump stats for the LMAC this file represents */
static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused)
{
	return cgx_print_stats(s, rvu_dbg_derive_lmacid(s));
}

RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2965 
/* Dump the DMAC filter mode and all enabled DMAC CAM entries belonging
 * to the given LMAC.
 */
static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
{
	struct pci_dev *pdev = NULL;
	void *cgxd = s->private;
	char *bcast, *mcast;
	u16 index, domain;
	u8 dmac[ETH_ALEN];
	struct rvu *rvu;
	u64 cfg, mac;
	int pf;

	/* NOTE(review): pci_get_device() may return NULL, which would be
	 * dereferenced by pci_get_drvdata(); it also takes a device
	 * reference that is never released here - verify.
	 */
	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	/* PCI domain holding the RVU PF devices - presumably fixed at 2
	 * on these platforms; TODO confirm.
	 */
	domain = 2;

	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
	if (!pdev)
		return 0;

	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";

	seq_puts(s,
		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
	seq_printf(s, "%s  PF%d  %9s  %9s",
		   dev_name(&pdev->dev), pf, bcast, mcast);
	if (cfg & CGX_DMAC_CAM_ACCEPT)
		seq_printf(s, "%12s\n\n", "UNICAST");
	else
		seq_printf(s, "%16s\n\n", "PROMISCUOUS");

	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");

	/* Walk all 32 DMAC CAM entries */
	for (index = 0 ; index < 32 ; index++) {
		cfg = cgx_read_dmac_entry(cgxd, index);
		/* Display enabled dmac entries associated with current lmac */
		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
			u64_to_ether_addr(mac, dmac);
			seq_printf(s, "%7d     %pM\n", index, dmac);
		}
	}

	pci_dev_put(pdev);
	return 0;
}
3018 
/* debugfs show handler: dump DMAC filters for this file's LMAC */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused)
{
	return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s));
}

RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
3025 
/* Dump the firmware-shared data (link capabilities, LMAC config, SFP
 * EEPROM contents and PHY info) for one LMAC.
 *
 * Returns -EAGAIN until the firmware data region has been mapped.
 */
static int cgx_print_fwdata(struct seq_file *s, int lmac_id)
{
	struct cgx_lmac_fwdata_s *fwdata;
	void *cgxd = s->private;
	struct phy_s *phy;
	struct rvu *rvu;
	int cgx_id, i;

	/* NOTE(review): pci_get_device() may return NULL, which would be
	 * dereferenced by pci_get_drvdata(); it also takes a device
	 * reference that is never released here - verify.
	 */
	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	if (!rvu->fwdata)
		return -EAGAIN;

	cgx_id = cgx_get_cgxid(cgxd);

	/* USX-capable silicon keeps firmware data in a different table */
	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
		fwdata =  &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
	else
		fwdata =  &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id];

	seq_puts(s, "\nFIRMWARE SHARED:\n");
	seq_puts(s, "\t\tSUPPORTED LINK INFORMATION\t\t\n");
	seq_puts(s, "\t\t==========================\n");
	seq_printf(s, "\t\t Link modes \t\t :%llx\n",
		   fwdata->supported_link_modes);
	seq_printf(s, "\t\t Autoneg \t\t :%llx\n", fwdata->supported_an);
	seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->supported_fec);
	seq_puts(s, "\n");

	seq_puts(s, "\t\tADVERTISED LINK INFORMATION\t\t\n");
	seq_puts(s, "\t\t==========================\n");
	seq_printf(s, "\t\t Link modes \t\t :%llx\n",
		   (u64)fwdata->advertised_link_modes);
	seq_printf(s, "\t\t Autoneg \t\t :%x\n", fwdata->advertised_an);
	seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->advertised_fec);
	seq_puts(s, "\n");

	seq_puts(s, "\t\tLMAC CONFIG\t\t\n");
	seq_puts(s, "\t\t============\n");
	seq_printf(s, "\t\t rw_valid  \t\t :%x\n",  fwdata->rw_valid);
	seq_printf(s, "\t\t lmac_type \t\t :%x\n", fwdata->lmac_type);
	seq_printf(s, "\t\t portm_idx \t\t :%x\n", fwdata->portm_idx);
	seq_printf(s, "\t\t mgmt_port \t\t :%x\n", fwdata->mgmt_port);
	seq_printf(s, "\t\t Link modes own \t :%llx\n",
		   (u64)fwdata->advertised_link_modes_own);
	seq_puts(s, "\n");

	/* Raw SFP EEPROM bytes, 16 per line */
	seq_puts(s, "\n\t\tEEPROM DATA\n");
	seq_puts(s, "\t\t===========\n");
	seq_printf(s, "\t\t sff_id \t\t :%x\n", fwdata->sfp_eeprom.sff_id);
	seq_puts(s, "\t\t data \t\t\t :\n");
	seq_puts(s, "\t\t");
	for (i = 0; i < SFP_EEPROM_SIZE; i++) {
		seq_printf(s, "%x", fwdata->sfp_eeprom.buf[i]);
		if ((i + 1) % 16 == 0) {
			seq_puts(s, "\n");
			seq_puts(s, "\t\t");
		}
	}
	seq_puts(s, "\n");

	phy = &fwdata->phy;
	seq_puts(s, "\n\t\tPHY INFORMATION\n");
	seq_puts(s, "\t\t===============\n");
	seq_printf(s, "\t\t Mod type configurable \t\t :%x\n",
		   phy->misc.can_change_mod_type);
	seq_printf(s, "\t\t Mod type \t\t\t :%x\n", phy->misc.mod_type);
	seq_printf(s, "\t\t Support FEC \t\t\t :%x\n", phy->misc.has_fec_stats);
	seq_printf(s, "\t\t RSFEC corrected words \t\t :%x\n",
		   phy->fec_stats.rsfec_corr_cws);
	seq_printf(s, "\t\t RSFEC uncorrected words \t :%x\n",
		   phy->fec_stats.rsfec_uncorr_cws);
	seq_printf(s, "\t\t BRFEC corrected words \t\t :%x\n",
		   phy->fec_stats.brfec_corr_blks);
	seq_printf(s, "\t\t BRFEC uncorrected words \t :%x\n",
		   phy->fec_stats.brfec_uncorr_blks);
	seq_puts(s, "\n");

	return 0;
}
3109 
/* debugfs show handler: dump firmware data for this file's LMAC */
static int rvu_dbg_cgx_fwdata_display(struct seq_file *s, void *unused)
{
	return cgx_print_fwdata(s, rvu_dbg_derive_lmacid(s));
}

RVU_DEBUG_SEQ_FOPS(cgx_fwdata, cgx_fwdata_display, NULL);
3116 
3117 static void rvu_dbg_cgx_init(struct rvu *rvu)
3118 {
3119 	struct mac_ops *mac_ops;
3120 	unsigned long lmac_bmap;
3121 	int i, lmac_id;
3122 	char dname[20];
3123 	void *cgx;
3124 
3125 	if (!cgx_get_cgxcnt_max())
3126 		return;
3127 
3128 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
3129 	if (!mac_ops)
3130 		return;
3131 
3132 	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
3133 						   rvu->rvu_dbg.root);
3134 
3135 	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
3136 		cgx = rvu_cgx_pdata(i, rvu);
3137 		if (!cgx)
3138 			continue;
3139 		lmac_bmap = cgx_get_lmac_bmap(cgx);
3140 		/* cgx debugfs dir */
3141 		sprintf(dname, "%s%d", mac_ops->name, i);
3142 		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
3143 						      rvu->rvu_dbg.cgx_root);
3144 
3145 		for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
3146 			/* lmac debugfs dir */
3147 			sprintf(dname, "lmac%d", lmac_id);
3148 			rvu->rvu_dbg.lmac =
3149 				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
3150 
3151 			debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac,
3152 					    cgx, lmac_id, &rvu_dbg_cgx_stat_fops);
3153 			debugfs_create_file_aux_num("mac_filter", 0600,
3154 					    rvu->rvu_dbg.lmac, cgx, lmac_id,
3155 					    &rvu_dbg_cgx_dmac_flt_fops);
3156 			debugfs_create_file("fwdata", 0600,
3157 					    rvu->rvu_dbg.lmac, cgx,
3158 					    &rvu_dbg_cgx_fwdata_fops);
3159 		}
3160 	}
3161 }
3162 
3163 /* NPC debugfs APIs */
3164 static void rvu_print_npc_mcam_info(struct seq_file *s,
3165 				    u16 pcifunc, int blkaddr)
3166 {
3167 	struct rvu *rvu = s->private;
3168 	int entry_acnt, entry_ecnt;
3169 	int cntr_acnt, cntr_ecnt;
3170 
3171 	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
3172 					  &entry_acnt, &entry_ecnt);
3173 	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
3174 					    &cntr_acnt, &cntr_ecnt);
3175 	if (!entry_acnt && !cntr_acnt)
3176 		return;
3177 
3178 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
3179 		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
3180 			   rvu_get_pf(rvu->pdev, pcifunc));
3181 	else
3182 		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
3183 			   rvu_get_pf(rvu->pdev, pcifunc),
3184 			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
3185 
3186 	if (entry_acnt) {
3187 		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
3188 		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
3189 	}
3190 	if (cntr_acnt) {
3191 		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
3192 		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
3193 	}
3194 }
3195 
3196 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
3197 {
3198 	struct rvu *rvu = filp->private;
3199 	int pf, vf, numvfs, blkaddr;
3200 	struct npc_mcam *mcam;
3201 	u16 pcifunc, counters;
3202 	u64 cfg;
3203 
3204 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3205 	if (blkaddr < 0)
3206 		return -ENODEV;
3207 
3208 	mcam = &rvu->hw->mcam;
3209 	counters = rvu->hw->npc_counters;
3210 
3211 	seq_puts(filp, "\nNPC MCAM info:\n");
3212 	/* MCAM keywidth on receive and transmit sides */
3213 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
3214 	cfg = (cfg >> 32) & 0x07;
3215 	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
3216 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
3217 		   "224bits" : "448bits"));
3218 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
3219 	cfg = (cfg >> 32) & 0x07;
3220 	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
3221 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
3222 		   "224bits" : "448bits"));
3223 
3224 	mutex_lock(&mcam->lock);
3225 	/* MCAM entries */
3226 	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
3227 	seq_printf(filp, "\t\t Reserved \t: %d\n",
3228 		   mcam->total_entries - mcam->bmap_entries);
3229 	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
3230 
3231 	/* MCAM counters */
3232 	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
3233 	seq_printf(filp, "\t\t Reserved \t: %d\n",
3234 		   counters - mcam->counters.max);
3235 	seq_printf(filp, "\t\t Available \t: %d\n",
3236 		   rvu_rsrc_free_count(&mcam->counters));
3237 
3238 	if (mcam->bmap_entries == mcam->bmap_fcnt) {
3239 		mutex_unlock(&mcam->lock);
3240 		return 0;
3241 	}
3242 
3243 	seq_puts(filp, "\n\t\t Current allocation\n");
3244 	seq_puts(filp, "\t\t====================\n");
3245 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
3246 		pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
3247 		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
3248 
3249 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3250 		numvfs = (cfg >> 12) & 0xFF;
3251 		for (vf = 0; vf < numvfs; vf++) {
3252 			pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
3253 			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
3254 		}
3255 	}
3256 
3257 	mutex_unlock(&mcam->lock);
3258 	return 0;
3259 }
3260 
3261 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
3262 
/* Show the hit count of the NPC MCAM RX miss action counter. */
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
					     void *unused)
{
	struct rvu *rvu = filp->private;
	struct npc_mcam *mcam;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;

	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
		   rvu_read64(rvu, blkaddr,
			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
3285 
/* Print an MPLS LSE TTL field value and its mask.  Relies on the
 * seq_file 's' being in scope at the expansion site.  (The stray
 * trailing '\' after "while (0)" that continued the macro onto the
 * following blank line has been removed.)
 */
#define RVU_DBG_PRINT_MPLS_TTL(pkt, mask)                                     \
do {									      \
	seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt));     \
	seq_printf(s, "mask 0x%lx\n",                                         \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask));               \
} while (0)

/* Print MPLS LSE label/TC/BOS field values and masks.  Arguments are
 * copied to locals so each is evaluated exactly once.
 */
#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask)                               \
do {									      \
	typeof(_pkt) (pkt) = (_pkt);					      \
	typeof(_mask) (mask) = (_mask);                                       \
	seq_printf(s, "%ld %ld %ld\n",                                        \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt),                  \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt),                  \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt));                \
	seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n",                           \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask),                 \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask),                 \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask));               \
} while (0)
3306 
/* Pretty print every match field present in an installed MCAM rule
 * along with its mask.  Field presence is tracked in the 64-bit
 * rule->features bitmap, one bit per NPC_* field id.  L3/L4 header
 * fields are stored in network byte order, hence the ntohs()/ntohl()
 * conversions.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_LXMB:
			if (rule->lxmb == 1)
				seq_puts(s, "\tL2M nibble is set\n");
			else
				seq_puts(s, "\tL2B nibble is set\n");
			break;
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_INNER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_itci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		case NPC_IPFRAG_IPV6:
			seq_printf(s, "0x%x ", rule->packet.next_header);
			seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
			break;
		case NPC_IPFRAG_IPV4:
			seq_printf(s, "0x%x ", rule->packet.ip_flag);
			seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
			break;
		/* TCP/UDP/SCTP share the same sport/dport storage */
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		case NPC_TCP_FLAGS:
			seq_printf(s, "%d ", rule->packet.tcp_flags);
			seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
			break;
		case NPC_IPSEC_SPI:
			seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
			seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
			break;
		case NPC_MPLS1_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
						   rule->mask.mpls_lse[0]);
			break;
		case NPC_MPLS1_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
					       rule->mask.mpls_lse[0]);
			break;
		case NPC_MPLS2_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
						   rule->mask.mpls_lse[1]);
			break;
		case NPC_MPLS2_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
					       rule->mask.mpls_lse[1]);
			break;
		case NPC_MPLS3_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
						   rule->mask.mpls_lse[2]);
			break;
		case NPC_MPLS3_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
					       rule->mask.mpls_lse[2]);
			break;
		case NPC_MPLS4_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
						   rule->mask.mpls_lse[3]);
			break;
		case NPC_MPLS4_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
					       rule->mask.mpls_lse[3]);
			break;
		case NPC_TYPE_ICMP:
			seq_printf(s, "%d ", rule->packet.icmp_type);
			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
			break;
		case NPC_CODE_ICMP:
			seq_printf(s, "%d ", rule->packet.icmp_code);
			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
			break;
		default:
			/* Field has no value/mask worth printing */
			seq_puts(s, "\n");
			break;
		}
	}
}
3437 
/* Print an MCAM rule's action in human readable form, decoding either
 * the TX or RX action op depending on the rule's interface.  Unknown
 * ops print nothing.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (is_npc_intf_tx(rule->intf)) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			break;
		}
	}
}
3485 
/* Map an NPC interface id to a human readable name. */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
3503 
/* Show every software-tracked MCAM rule: owner PF/VF, direction,
 * interface, match fields, action, enable state and (if a counter is
 * attached) its hit count.  The rule list is walked under mcam->lock.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = rvu_get_pf(rvu->pdev, iter->owner);
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		/* Non-zero FUNC bits identify a VF of this PF */
		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules carry a forwarding target and channel match */
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = rvu_get_pf(rvu->pdev, target);
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
3571 
/* debugfs 'exact_entries' dump: print the exact-match MEM table as one
 * row per depth index with a column per way, then the overflow CAM
 * table.  Both tables are read under table->lock.
 */
static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
{
	/* mem_entry[w] tracks the next not-yet-printed entry of way w;
	 * each per-way list is kept sorted by depth index.
	 */
	struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
	struct npc_exact_table_entry *cam_entry;
	struct npc_exact_table *table;
	struct rvu *rvu = s->private;
	int i, j;

	u8 bitmap = 0;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Check if there is at least one entry in mem table */
	if (!table->mem_tbl_entry_cnt)
		goto dump_cam_table;

	/* Print table headers */
	seq_puts(s, "\n\tExact Match MEM Table\n");
	seq_puts(s, "Index\t");

	for (i = 0; i < table->mem_table.ways; i++) {
		mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
							struct npc_exact_table_entry, list);

		seq_printf(s, "Way-%d\t\t\t\t\t", i);
	}

	seq_puts(s, "\n");
	for (i = 0; i < table->mem_table.ways; i++)
		seq_puts(s, "\tChan  MAC                     \t");

	seq_puts(s, "\n\n");

	/* Print mem table entries */
	for (i = 0; i < table->mem_table.depth; i++) {
		/* bitmap gets a bit per way that holds an entry at depth i */
		bitmap = 0;
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!mem_entry[j])
				continue;

			if (mem_entry[j]->index != i)
				continue;

			bitmap |= BIT(j);
		}

		/* No valid entries */
		if (!bitmap)
			continue;

		seq_printf(s, "%d\t", i);
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!(bitmap & BIT(j))) {
				seq_puts(s, "nil\t\t\t\t\t");
				continue;
			}

			seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
				   mem_entry[j]->mac);
			/* Advance this way's cursor past the printed entry */
			mem_entry[j] = list_next_entry(mem_entry[j], list);
		}
		seq_puts(s, "\n");
	}

dump_cam_table:

	if (!table->cam_tbl_entry_cnt)
		goto done;

	seq_puts(s, "\n\tExact Match CAM Table\n");
	seq_puts(s, "index\tchan\tMAC\n");

	/* Traverse cam table entries */
	list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
		seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
			   cam_entry->mac);
	}

done:
	mutex_unlock(&table->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
3658 
3659 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
3660 {
3661 	struct npc_exact_table *table;
3662 	struct rvu *rvu = s->private;
3663 	int i;
3664 
3665 	table = rvu->hw->table;
3666 
3667 	seq_puts(s, "\n\tExact Table Info\n");
3668 	seq_printf(s, "Exact Match Feature : %s\n",
3669 		   rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable");
3670 	if (!rvu->hw->cap.npc_exact_match_enabled)
3671 		return 0;
3672 
3673 	seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
3674 	for (i = 0; i < table->num_drop_rules; i++)
3675 		seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
3676 
3677 	seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
3678 	for (i = 0; i < table->num_drop_rules; i++)
3679 		seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
3680 
3681 	seq_puts(s, "\n\tMEM Table Info\n");
3682 	seq_printf(s, "Ways : %d\n", table->mem_table.ways);
3683 	seq_printf(s, "Depth : %d\n", table->mem_table.depth);
3684 	seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
3685 	seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
3686 	seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
3687 
3688 	seq_puts(s, "\n\tCAM Table Info\n");
3689 	seq_printf(s, "Depth : %d\n", table->cam_table.depth);
3690 
3691 	return 0;
3692 }
3693 
3694 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
3695 
3696 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3697 {
3698 	struct npc_exact_table *table;
3699 	struct rvu *rvu = s->private;
3700 	struct npc_key_field *field;
3701 	u16 chan, pcifunc;
3702 	int blkaddr, i;
3703 	u64 cfg, cam1;
3704 	char *str;
3705 
3706 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3707 	table = rvu->hw->table;
3708 
3709 	field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3710 
3711 	seq_puts(s, "\n\t Exact Hit on drop status\n");
3712 	seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3713 
3714 	for (i = 0; i < table->num_drop_rules; i++) {
3715 		pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3716 		cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3717 
3718 		/* channel will be always in keyword 0 */
3719 		cam1 = rvu_read64(rvu, blkaddr,
3720 				  NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3721 		chan = field->kw_mask[0] & cam1;
3722 
3723 		str = (cfg & 1) ? "enabled" : "disabled";
3724 
3725 		seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
3726 			   rvu_read64(rvu, blkaddr,
3727 				      NPC_AF_MATCH_STATX(table->counter_idx[i])),
3728 			   chan, str);
3729 	}
3730 
3731 	return 0;
3732 }
3733 
3734 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
3735 
3736 static void rvu_dbg_npc_init(struct rvu *rvu)
3737 {
3738 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
3739 
3740 	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
3741 			    &rvu_dbg_npc_mcam_info_fops);
3742 	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
3743 			    &rvu_dbg_npc_mcam_rules_fops);
3744 
3745 	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
3746 			    &rvu_dbg_npc_rx_miss_act_fops);
3747 
3748 	if (!rvu->hw->cap.npc_exact_match_enabled)
3749 		return;
3750 
3751 	debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
3752 			    &rvu_dbg_npc_exact_entries_fops);
3753 
3754 	debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
3755 			    &rvu_dbg_npc_exact_info_fops);
3756 
3757 	debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
3758 			    &rvu_dbg_npc_exact_drop_cnt_fops);
3759 
3760 }
3761 
3762 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
3763 {
3764 	struct cpt_ctx *ctx = filp->private;
3765 	u64 busy_sts = 0, free_sts = 0;
3766 	u32 e_min = 0, e_max = 0, e, i;
3767 	u16 max_ses, max_ies, max_aes;
3768 	struct rvu *rvu = ctx->rvu;
3769 	int blkaddr = ctx->blkaddr;
3770 	u64 reg;
3771 
3772 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3773 	max_ses = reg & 0xffff;
3774 	max_ies = (reg >> 16) & 0xffff;
3775 	max_aes = (reg >> 32) & 0xffff;
3776 
3777 	switch (eng_type) {
3778 	case CPT_AE_TYPE:
3779 		e_min = max_ses + max_ies;
3780 		e_max = max_ses + max_ies + max_aes;
3781 		break;
3782 	case CPT_SE_TYPE:
3783 		e_min = 0;
3784 		e_max = max_ses;
3785 		break;
3786 	case CPT_IE_TYPE:
3787 		e_min = max_ses;
3788 		e_max = max_ses + max_ies;
3789 		break;
3790 	default:
3791 		return -EINVAL;
3792 	}
3793 
3794 	for (e = e_min, i = 0; e < e_max; e++, i++) {
3795 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
3796 		if (reg & 0x1)
3797 			busy_sts |= 1ULL << i;
3798 
3799 		if (reg & 0x2)
3800 			free_sts |= 1ULL << i;
3801 	}
3802 	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
3803 	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
3804 
3805 	return 0;
3806 }
3807 
/* debugfs 'cpt_ae_sts': free/busy status of the AE engines. */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3814 
/* debugfs 'cpt_se_sts': free/busy status of the SE engines. */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3821 
/* debugfs 'cpt_ie_sts': free/busy status of the IE engines. */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
3828 
3829 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
3830 {
3831 	struct cpt_ctx *ctx = filp->private;
3832 	u16 max_ses, max_ies, max_aes;
3833 	struct rvu *rvu = ctx->rvu;
3834 	int blkaddr = ctx->blkaddr;
3835 	u32 e_max, e;
3836 	u64 reg;
3837 
3838 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3839 	max_ses = reg & 0xffff;
3840 	max_ies = (reg >> 16) & 0xffff;
3841 	max_aes = (reg >> 32) & 0xffff;
3842 
3843 	e_max = max_ses + max_ies + max_aes;
3844 
3845 	seq_puts(filp, "===========================================\n");
3846 	for (e = 0; e < e_max; e++) {
3847 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
3848 		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
3849 			   reg & 0xff);
3850 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
3851 		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
3852 			   reg);
3853 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
3854 		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
3855 			   reg);
3856 		seq_puts(filp, "===========================================\n");
3857 	}
3858 	return 0;
3859 }
3860 
3861 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
3862 
3863 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
3864 {
3865 	struct cpt_ctx *ctx = filp->private;
3866 	int blkaddr = ctx->blkaddr;
3867 	struct rvu *rvu = ctx->rvu;
3868 	struct rvu_block *block;
3869 	struct rvu_hwinfo *hw;
3870 	u64 reg;
3871 	u32 lf;
3872 
3873 	hw = rvu->hw;
3874 	block = &hw->block[blkaddr];
3875 	if (!block->lf.bmap)
3876 		return -ENODEV;
3877 
3878 	seq_puts(filp, "===========================================\n");
3879 	for (lf = 0; lf < block->lf.max; lf++) {
3880 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
3881 		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
3882 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
3883 		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
3884 		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
3885 		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
3886 		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
3887 				(lf << block->lfshift));
3888 		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
3889 		seq_puts(filp, "===========================================\n");
3890 	}
3891 	return 0;
3892 }
3893 
3894 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
3895 
3896 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
3897 {
3898 	struct cpt_ctx *ctx = filp->private;
3899 	struct rvu *rvu = ctx->rvu;
3900 	int blkaddr = ctx->blkaddr;
3901 	u64 reg0, reg1;
3902 
3903 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
3904 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
3905 	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
3906 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
3907 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
3908 	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
3909 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
3910 	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
3911 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
3912 	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
3913 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
3914 	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
3915 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
3916 	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);
3917 
3918 	return 0;
3919 }
3920 
3921 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
3922 
3923 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
3924 {
3925 	struct cpt_ctx *ctx = filp->private;
3926 	struct rvu *rvu = ctx->rvu;
3927 	int blkaddr = ctx->blkaddr;
3928 	u64 reg;
3929 
3930 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
3931 	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
3932 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
3933 	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
3934 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
3935 	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
3936 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
3937 	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
3938 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
3939 	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
3940 	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
3941 	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
3942 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
3943 	seq_printf(filp, "CPT clock count pc         %llu\n", reg);
3944 
3945 	return 0;
3946 }
3947 
3948 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
3949 
3950 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
3951 {
3952 	struct cpt_ctx *ctx;
3953 
3954 	if (!is_block_implemented(rvu->hw, blkaddr))
3955 		return;
3956 
3957 	if (blkaddr == BLKADDR_CPT0) {
3958 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
3959 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
3960 		ctx->blkaddr = BLKADDR_CPT0;
3961 		ctx->rvu = rvu;
3962 	} else {
3963 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
3964 						      rvu->rvu_dbg.root);
3965 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
3966 		ctx->blkaddr = BLKADDR_CPT1;
3967 		ctx->rvu = rvu;
3968 	}
3969 
3970 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3971 			    &rvu_dbg_cpt_pc_fops);
3972 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3973 			    &rvu_dbg_cpt_ae_sts_fops);
3974 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3975 			    &rvu_dbg_cpt_se_sts_fops);
3976 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3977 			    &rvu_dbg_cpt_ie_sts_fops);
3978 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3979 			    &rvu_dbg_cpt_engines_info_fops);
3980 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3981 			    &rvu_dbg_cpt_lfs_info_fops);
3982 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3983 			    &rvu_dbg_cpt_err_info_fops);
3984 }
3985 
3986 static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
3987 {
3988 	if (is_cn20k(rvu->pdev))
3989 		return "cn20k";
3990 
3991 	if (!is_rvu_otx2(rvu))
3992 		return "cn10k";
3993 	else
3994 		return "octeontx2";
3995 }
3996 
3997 void rvu_dbg_init(struct rvu *rvu)
3998 {
3999 	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
4000 
4001 	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
4002 			    &rvu_dbg_rsrc_status_fops);
4003 
4004 	if (!is_rvu_otx2(rvu))
4005 		debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
4006 				    rvu, &rvu_dbg_lmtst_map_table_fops);
4007 
4008 	debugfs_create_file("rvu_fwdata", 0444, rvu->rvu_dbg.root, rvu,
4009 			    &rvu_dbg_rvu_fwdata_fops);
4010 
4011 	if (!cgx_get_cgxcnt_max())
4012 		goto create;
4013 
4014 	if (is_rvu_otx2(rvu))
4015 		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
4016 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
4017 	else
4018 		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
4019 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
4020 
4021 create:
4022 	rvu_dbg_npa_init(rvu);
4023 	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
4024 
4025 	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
4026 	rvu_dbg_cgx_init(rvu);
4027 	rvu_dbg_npc_init(rvu);
4028 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
4029 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
4030 	rvu_dbg_mcs_init(rvu);
4031 }
4032 
/* Tear down the entire debugfs hierarchy created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
4037 
4038 #endif /* CONFIG_DEBUG_FS */
4039