xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c (revision 249ebf3f65f8530beb2cbfb91bff1d83ba88d23c)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7 
8 #ifdef CONFIG_DEBUG_FS
9 
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 #include "rvu_npc_hash.h"
22 #include "mcs.h"
23 
24 #define DEBUGFS_DIR_NAME "octeontx2"
25 
/* Indices into the cgx_rx/tx_stats_fields description tables below.
 * The order mirrors the hardware statistics register numbering.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
47 
/* NIX TX stats — per-LF transmit statistics register indices */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,
};
57 
/* NIX RX stats — per-LF receive statistics register indices */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,
};
74 
/* Human-readable names for CGX RX statistics, indexed by CGX_STATn */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
90 
/* Human-readable names for CGX TX statistics, indexed by CGX_STATn */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
111 
/* Human-readable names for RPM RX statistics, in hardware counter order */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets with out error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	/* NOTE(review): "a1nrange" looks like a typo for "inrange" — this is
	 * user-visible debugfs output, left untouched here; confirm upstream.
	 */
	"Packets received with a1nrange length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
157 
/* Human-readable names for RPM TX statistics, in hardware counter order */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
194 
/* CPT engine types: AE = asymmetric, SE = symmetric, IE = IPsec */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
200 
/* Token-paste targets so callers may pass NULL as a read/write op:
 * RVU_DEBUG_SEQ_FOPS(name, read, NULL) expands .write to rvu_dbg_NULL,
 * i.e. NULL.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Declare a seq_file based debugfs node: generates rvu_dbg_open_<name>()
 * and a file_operations wired to rvu_dbg_<read_op> / rvu_dbg_<write_op>.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Raw (non seq_file) debugfs node for handlers that do their own
 * buffering and copy_to_user().
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}

static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
227 
228 static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
229 {
230 	struct mcs *mcs = filp->private;
231 	struct mcs_port_stats stats;
232 	int lmac;
233 
234 	seq_puts(filp, "\n port stats\n");
235 	mutex_lock(&mcs->stats_lock);
236 	for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
237 		mcs_get_port_stats(mcs, &stats, lmac, dir);
238 		seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
239 		seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
240 
241 		if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
242 			seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
243 				   stats.preempt_err_cnt);
244 		if (dir == MCS_TX)
245 			seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
246 				   stats.sectag_insert_err_cnt);
247 	}
248 	mutex_unlock(&mcs->stats_lock);
249 	return 0;
250 }
251 
/* RX-direction wrapper for the shared port-stats dumper */
static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
258 
/* TX-direction wrapper for the shared port-stats dumper */
static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
265 
/* Dump per-SA (secure association) statistics for direction @dir.
 * TX SAs report encrypt/protect packet counts; RX SAs report the
 * validation outcome counters.  Only SAs allocated in the resource
 * bitmap are printed.
 */
static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_sa_stats stats;
	struct rsrc_bmap *map;
	int sa_id;

	if (dir == MCS_TX) {
		map = &mcs->tx.sa;
		mutex_lock(&mcs->stats_lock);
		for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
			/* NOTE(review): the section header is re-printed for
			 * every SA; looks like it was meant to be printed once
			 * before the loop — confirm intended output format.
			 */
			seq_puts(filp, "\n TX SA stats\n");
			mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
			seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
				   stats.pkt_encrypt_cnt);

			seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
				   stats.pkt_protected_cnt);
		}
		mutex_unlock(&mcs->stats_lock);
		return 0;
	}

	/* RX stats */
	map = &mcs->rx.sa;
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
		seq_puts(filp, "\n RX SA stats\n");
		mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
		seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
		seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
		seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}
304 
/* RX-direction wrapper for the shared SA-stats dumper */
static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
311 
/* TX-direction wrapper for the shared SA-stats dumper */
static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
318 
/* Dump per-SC (secure channel) TX statistics for every allocated TX SC. */
static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->tx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
		seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);

		/* Octet counters only printed when a single MCS block exists */
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
				   stats.octet_encrypt_cnt);
			seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
				   stats.octet_protected_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
348 
/* Dump per-SC (secure channel) RX statistics for every allocated RX SC. */
static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->rx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
		seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
		seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);

		/* Delay/ok counters exist only with multiple MCS blocks;
		 * octet counters only with a single block.
		 */
		if (mcs->hw->mcs_blks > 1) {
			seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
			seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
		}
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
				   stats.octet_decrypt_cnt);
			seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
				   stats.octet_validate_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
385 
386 static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
387 {
388 	struct mcs *mcs = filp->private;
389 	struct mcs_flowid_stats stats;
390 	struct rsrc_bmap *map;
391 	int flow_id;
392 
393 	seq_puts(filp, "\n Flowid stats\n");
394 
395 	if (dir == MCS_RX)
396 		map = &mcs->rx.flow_ids;
397 	else
398 		map = &mcs->tx.flow_ids;
399 
400 	mutex_lock(&mcs->stats_lock);
401 	for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
402 		mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
403 		seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
404 	}
405 	mutex_unlock(&mcs->stats_lock);
406 	return 0;
407 }
408 
/* TX-direction wrapper for the shared flow-id stats dumper */
static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
415 
/* RX-direction wrapper for the shared flow-id stats dumper */
static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
422 
/* Dump per-SecY TX statistics for every allocated TX SecY. */
static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->tx.secy;
	seq_puts(filp, "\n MCS TX secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
			   stats.octet_encrypted_cnt);
		seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
			   stats.octet_protected_cnt);
		/* NOTE(review): label reads "Pkts on active sa" but the
		 * counter is pkt_noactivesa_cnt — label may be missing a
		 * "no"; user-visible string left untouched, confirm upstream.
		 */
		seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
			   stats.pkt_noactivesa_cnt);
		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
465 
/* Dump per-SecY RX statistics for every allocated RX SecY. */
static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->rx.secy;
	seq_puts(filp, "\n MCS secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
			   stats.octet_decrypted_cnt);
		seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
			   stats.octet_validated_cnt);
		seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
			   stats.pkt_port_disabled_cnt);
		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
		seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
			   stats.pkt_nosa_cnt);
		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
			   stats.pkt_nosaerror_cnt);
		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
			   stats.pkt_tagged_ctl_cnt);
		seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
		/* notag counter only exists when more than one MCS block is present */
		if (mcs->hw->mcs_blks > 1)
			seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
				   stats.pkt_notag_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
518 
/* Create the MCS debugfs hierarchy:
 *   <root>/mcs/mcs<N>/{rx_stats,tx_stats}/{flowid,secy,sc,sa,port}
 * one mcs<N> directory per MCS block.  No-op when no MCS blocks exist.
 */
static void rvu_dbg_mcs_init(struct rvu *rvu)
{
	struct mcs *mcs;
	char dname[10];
	int i;

	if (!rvu->mcs_blk_cnt)
		return;

	rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);

	for (i = 0; i < rvu->mcs_blk_cnt; i++) {
		mcs = mcs_get_pdata(i);

		sprintf(dname, "mcs%d", i);
		rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
						      rvu->rvu_dbg.mcs_root);

		rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);

		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_flowid_stats_fops);

		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_secy_stats_fops);

		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_sc_stats_fops);

		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_sa_stats_fops);

		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_port_stats_fops);

		rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);

		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_flowid_stats_fops);

		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_secy_stats_fops);

		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_sc_stats_fops);

		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_sa_stats_fops);

		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_port_stats_fops);
	}
}
572 
573 #define LMT_MAPTBL_ENTRY_SIZE 16
574 /* Dump LMTST map table */
575 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
576 					       char __user *buffer,
577 					       size_t count, loff_t *ppos)
578 {
579 	struct rvu *rvu = filp->private_data;
580 	u64 lmt_addr, val, tbl_base;
581 	int pf, vf, num_vfs, hw_vfs;
582 	void __iomem *lmt_map_base;
583 	int buf_size = 10240;
584 	size_t off = 0;
585 	int index = 0;
586 	char *buf;
587 	int ret;
588 
589 	/* don't allow partial reads */
590 	if (*ppos != 0)
591 		return 0;
592 
593 	buf = kzalloc(buf_size, GFP_KERNEL);
594 	if (!buf)
595 		return -ENOMEM;
596 
597 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
598 
599 	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
600 	if (!lmt_map_base) {
601 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
602 		kfree(buf);
603 		return false;
604 	}
605 
606 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
607 			  "\n\t\t\t\t\tLmtst Map Table Entries");
608 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
609 			  "\n\t\t\t\t\t=======================");
610 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
611 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
612 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
613 			  "Lmtline Base (word 0)\t\t");
614 	off +=	scnprintf(&buf[off], buf_size - 1 - off,
615 			  "Lmt Map Entry (word 1)");
616 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
617 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
618 		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
619 				    pf);
620 
621 		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
622 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
623 				 (tbl_base + index));
624 		lmt_addr = readq(lmt_map_base + index);
625 		off += scnprintf(&buf[off], buf_size - 1 - off,
626 				 " 0x%016llx\t\t", lmt_addr);
627 		index += 8;
628 		val = readq(lmt_map_base + index);
629 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
630 				 val);
631 		/* Reading num of VFs per PF */
632 		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
633 		for (vf = 0; vf < num_vfs; vf++) {
634 			index = (pf * rvu->hw->total_vfs * 16) +
635 				((vf + 1)  * LMT_MAPTBL_ENTRY_SIZE);
636 			off += scnprintf(&buf[off], buf_size - 1 - off,
637 					    "PF%d:VF%d  \t\t", pf, vf);
638 			off += scnprintf(&buf[off], buf_size - 1 - off,
639 					 " 0x%llx\t\t", (tbl_base + index));
640 			lmt_addr = readq(lmt_map_base + index);
641 			off += scnprintf(&buf[off], buf_size - 1 - off,
642 					 " 0x%016llx\t\t", lmt_addr);
643 			index += 8;
644 			val = readq(lmt_map_base + index);
645 			off += scnprintf(&buf[off], buf_size - 1 - off,
646 					 " 0x%016llx\n", val);
647 		}
648 	}
649 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
650 
651 	ret = min(off, count);
652 	if (copy_to_user(buffer, buf, ret))
653 		ret = -EFAULT;
654 	kfree(buf);
655 
656 	iounmap(lmt_map_base);
657 	if (ret < 0)
658 		return ret;
659 
660 	*ppos = ret;
661 	return ret;
662 }
663 
664 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
665 
/* Render the list of @block LFs attached to @pcifunc into @lfs as a
 * comma-separated string, collapsing consecutive runs (e.g. "0-3,7,9-10").
 * NOTE(review): no buffer size is passed — callers must size @lfs to hold
 * the worst case (see get_max_column_width()); confirm all call sites.
 */
static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	/* prev_lf starts out of range so the first match never extends a run */
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		if (block.fn_map[lf] != pcifunc)
			continue;

		/* consecutive LF: extend the current run, flush it later */
		if (lf == prev_lf + 1) {
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			/* close the open run, then start a new entry */
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* a run still open at the end needs its upper bound appended */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
699 
/* Compute the widest LF-list string any pcifunc produces for any block,
 * so the rsrc_attach_status table can use fixed-width columns.
 * Returns the column width (minimum 12) or -ENOMEM.
 * NOTE(review): the 256-byte scratch buffer is not passed to
 * get_lf_str_list(), which writes unbounded — confirm 256 covers the
 * worst-case LF list.
 */
static int get_max_column_width(struct rvu *rvu)
{
	int index, pf, vf, lf_str_size = 12, buf_size = 256;
	struct rvu_block block;
	u16 pcifunc;
	char *buf;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		/* vf == 0 is the PF itself; VF numbers in pcifunc are 1-based */
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;

				get_lf_str_list(block, pcifunc, buf);
				if (lf_str_size <= strlen(buf))
					lf_str_size = strlen(buf) + 1;
			}
		}
	}

	kfree(buf);
	return lf_str_size;
}
732 
733 /* Dumps current provisioning status of all RVU block LFs */
734 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
735 					  char __user *buffer,
736 					  size_t count, loff_t *ppos)
737 {
738 	int index, off = 0, flag = 0, len = 0, i = 0;
739 	struct rvu *rvu = filp->private_data;
740 	int bytes_not_copied = 0;
741 	struct rvu_block block;
742 	int pf, vf, pcifunc;
743 	int buf_size = 2048;
744 	int lf_str_size;
745 	char *lfs;
746 	char *buf;
747 
748 	/* don't allow partial reads */
749 	if (*ppos != 0)
750 		return 0;
751 
752 	buf = kzalloc(buf_size, GFP_KERNEL);
753 	if (!buf)
754 		return -ENOMEM;
755 
756 	/* Get the maximum width of a column */
757 	lf_str_size = get_max_column_width(rvu);
758 
759 	lfs = kzalloc(lf_str_size, GFP_KERNEL);
760 	if (!lfs) {
761 		kfree(buf);
762 		return -ENOMEM;
763 	}
764 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
765 			  "pcifunc");
766 	for (index = 0; index < BLK_COUNT; index++)
767 		if (strlen(rvu->hw->block[index].name)) {
768 			off += scnprintf(&buf[off], buf_size - 1 - off,
769 					 "%-*s", lf_str_size,
770 					 rvu->hw->block[index].name);
771 		}
772 
773 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
774 	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
775 	if (bytes_not_copied)
776 		goto out;
777 
778 	i++;
779 	*ppos += off;
780 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
781 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
782 			off = 0;
783 			flag = 0;
784 			pcifunc = pf << 10 | vf;
785 			if (!pcifunc)
786 				continue;
787 
788 			if (vf) {
789 				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
790 				off = scnprintf(&buf[off],
791 						buf_size - 1 - off,
792 						"%-*s", lf_str_size, lfs);
793 			} else {
794 				sprintf(lfs, "PF%d", pf);
795 				off = scnprintf(&buf[off],
796 						buf_size - 1 - off,
797 						"%-*s", lf_str_size, lfs);
798 			}
799 
800 			for (index = 0; index < BLK_COUNT; index++) {
801 				block = rvu->hw->block[index];
802 				if (!strlen(block.name))
803 					continue;
804 				len = 0;
805 				lfs[len] = '\0';
806 				get_lf_str_list(block, pcifunc, lfs);
807 				if (strlen(lfs))
808 					flag = 1;
809 
810 				off += scnprintf(&buf[off], buf_size - 1 - off,
811 						 "%-*s", lf_str_size, lfs);
812 			}
813 			if (flag) {
814 				off +=	scnprintf(&buf[off],
815 						  buf_size - 1 - off, "\n");
816 				bytes_not_copied = copy_to_user(buffer +
817 								(i * off),
818 								buf, off);
819 				if (bytes_not_copied)
820 					goto out;
821 
822 				i++;
823 				*ppos += off;
824 			}
825 		}
826 	}
827 
828 out:
829 	kfree(lfs);
830 	kfree(buf);
831 	if (bytes_not_copied)
832 		return -EFAULT;
833 
834 	return *ppos;
835 }
836 
837 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
838 
839 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
840 {
841 	struct rvu *rvu = filp->private;
842 	struct pci_dev *pdev = NULL;
843 	struct mac_ops *mac_ops;
844 	char cgx[10], lmac[10];
845 	struct rvu_pfvf *pfvf;
846 	int pf, domain, blkid;
847 	u8 cgx_id, lmac_id;
848 	u16 pcifunc;
849 
850 	domain = 2;
851 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
852 	/* There can be no CGX devices at all */
853 	if (!mac_ops)
854 		return 0;
855 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
856 		   mac_ops->name);
857 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
858 		if (!is_pf_cgxmapped(rvu, pf))
859 			continue;
860 
861 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
862 		if (!pdev)
863 			continue;
864 
865 		cgx[0] = 0;
866 		lmac[0] = 0;
867 		pcifunc = pf << 10;
868 		pfvf = rvu_get_pfvf(rvu, pcifunc);
869 
870 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
871 			blkid = 0;
872 		else
873 			blkid = 1;
874 
875 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
876 				    &lmac_id);
877 		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
878 		sprintf(lmac, "LMAC%d", lmac_id);
879 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
880 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
881 
882 		pci_dev_put(pdev);
883 	}
884 	return 0;
885 }
886 
887 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
888 
889 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
890 				u16 *pcifunc)
891 {
892 	struct rvu_block *block;
893 	struct rvu_hwinfo *hw;
894 
895 	hw = rvu->hw;
896 	block = &hw->block[blkaddr];
897 
898 	if (lf < 0 || lf >= block->lf.max) {
899 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
900 			 block->lf.max - 1);
901 		return false;
902 	}
903 
904 	*pcifunc = block->fn_map[lf];
905 	if (!*pcifunc) {
906 		dev_warn(rvu->dev,
907 			 "This LF is not attached to any RVU PFFUNC\n");
908 		return false;
909 	}
910 	return true;
911 }
912 
913 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
914 {
915 	char *buf;
916 
917 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
918 	if (!buf)
919 		return;
920 
921 	if (!pfvf->aura_ctx) {
922 		seq_puts(m, "Aura context is not initialized\n");
923 	} else {
924 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
925 					pfvf->aura_ctx->qsize);
926 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
927 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
928 	}
929 
930 	if (!pfvf->pool_ctx) {
931 		seq_puts(m, "Pool context is not initialized\n");
932 	} else {
933 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
934 					pfvf->pool_ctx->qsize);
935 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
936 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
937 	}
938 	kfree(buf);
939 }
940 
941 /* The 'qsize' entry dumps current Aura/Pool context Qsize
942  * and each context's current enable/disable status in a bitmap.
943  */
944 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
945 				 int blktype)
946 {
947 	void (*print_qsize)(struct seq_file *filp,
948 			    struct rvu_pfvf *pfvf) = NULL;
949 	struct dentry *current_dir;
950 	struct rvu_pfvf *pfvf;
951 	struct rvu *rvu;
952 	int qsize_id;
953 	u16 pcifunc;
954 	int blkaddr;
955 
956 	rvu = filp->private;
957 	switch (blktype) {
958 	case BLKTYPE_NPA:
959 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
960 		print_qsize = print_npa_qsize;
961 		break;
962 
963 	case BLKTYPE_NIX:
964 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
965 		print_qsize = print_nix_qsize;
966 		break;
967 
968 	default:
969 		return -EINVAL;
970 	}
971 
972 	if (blktype == BLKTYPE_NPA) {
973 		blkaddr = BLKADDR_NPA;
974 	} else {
975 		current_dir = filp->file->f_path.dentry->d_parent;
976 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
977 				   BLKADDR_NIX1 : BLKADDR_NIX0);
978 	}
979 
980 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
981 		return -EINVAL;
982 
983 	pfvf = rvu_get_pfvf(rvu, pcifunc);
984 	print_qsize(filp, pfvf);
985 
986 	return 0;
987 }
988 
989 static ssize_t rvu_dbg_qsize_write(struct file *filp,
990 				   const char __user *buffer, size_t count,
991 				   loff_t *ppos, int blktype)
992 {
993 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
994 	struct seq_file *seqfile = filp->private_data;
995 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
996 	struct rvu *rvu = seqfile->private;
997 	struct dentry *current_dir;
998 	int blkaddr;
999 	u16 pcifunc;
1000 	int ret, lf;
1001 
1002 	cmd_buf = memdup_user_nul(buffer, count);
1003 	if (IS_ERR(cmd_buf))
1004 		return -ENOMEM;
1005 
1006 	cmd_buf_tmp = strchr(cmd_buf, '\n');
1007 	if (cmd_buf_tmp) {
1008 		*cmd_buf_tmp = '\0';
1009 		count = cmd_buf_tmp - cmd_buf + 1;
1010 	}
1011 
1012 	cmd_buf_tmp = cmd_buf;
1013 	subtoken = strsep(&cmd_buf, " ");
1014 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
1015 	if (cmd_buf)
1016 		ret = -EINVAL;
1017 
1018 	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
1019 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
1020 		goto qsize_write_done;
1021 	}
1022 
1023 	if (blktype == BLKTYPE_NPA) {
1024 		blkaddr = BLKADDR_NPA;
1025 	} else {
1026 		current_dir = filp->f_path.dentry->d_parent;
1027 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
1028 				   BLKADDR_NIX1 : BLKADDR_NIX0);
1029 	}
1030 
1031 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
1032 		ret = -EINVAL;
1033 		goto qsize_write_done;
1034 	}
1035 	if (blktype  == BLKTYPE_NPA)
1036 		rvu->rvu_dbg.npa_qsize_id = lf;
1037 	else
1038 		rvu->rvu_dbg.nix_qsize_id = lf;
1039 
1040 qsize_write_done:
1041 	kfree(cmd_buf_tmp);
1042 	return ret ? ret : count;
1043 }
1044 
1045 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
1046 				       const char __user *buffer,
1047 				       size_t count, loff_t *ppos)
1048 {
1049 	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1050 					    BLKTYPE_NPA);
1051 }
1052 
/* debugfs read handler: dump Aura/Pool qsize info for the selected NPA LF */
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}
1057 
/* debugfs seq-file ops for "qsize": npa_qsize_display / npa_qsize_write */
RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
1059 
/* Dumps given NPA Aura's context word-by-word (W0..W6) to the seq_file.
 * m->private carries the struct rvu pointer for this debugfs entry.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be is printed only on non-OTx2 (CN10K-family) silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst is printed only on non-OTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
1107 
/* Dumps given NPA Pool's context word-by-word (W0..W8) to the seq_file.
 * m->private carries the struct rvu pointer for this debugfs entry.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be is printed only on non-OTx2 (CN10K-family) silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst is printed only on non-OTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
1154 
/* Reads aura/pool's ctx from admin queue
 *
 * Dumps the aura or pool context (selected by @ctype) previously chosen
 * via a write to the corresponding debugfs entry.  Either a single
 * context id or all of them ('all') may have been requested; each one
 * is fetched from hardware through an NPA AQ READ instruction and
 * pretty-printed.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Retrieve the LF/id/all selection stored by the write handler */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build the AQ READ request once; only aura_id varies in the loop */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* 'all' walks [0, qsize); otherwise dump the single requested id */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;

		/* Skip if queue is uninitialized
		 * NOTE(review): only pools are checked against their bitmap
		 * here; auras are dumped unconditionally — confirm intended.
		 */
		if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
			continue;

		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
1240 
1241 static int write_npa_ctx(struct rvu *rvu, bool all,
1242 			 int npalf, int id, int ctype)
1243 {
1244 	struct rvu_pfvf *pfvf;
1245 	int max_id = 0;
1246 	u16 pcifunc;
1247 
1248 	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
1249 		return -EINVAL;
1250 
1251 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1252 
1253 	if (ctype == NPA_AQ_CTYPE_AURA) {
1254 		if (!pfvf->aura_ctx) {
1255 			dev_warn(rvu->dev, "Aura context is not initialized\n");
1256 			return -EINVAL;
1257 		}
1258 		max_id = pfvf->aura_ctx->qsize;
1259 	} else if (ctype == NPA_AQ_CTYPE_POOL) {
1260 		if (!pfvf->pool_ctx) {
1261 			dev_warn(rvu->dev, "Pool context is not initialized\n");
1262 			return -EINVAL;
1263 		}
1264 		max_id = pfvf->pool_ctx->qsize;
1265 	}
1266 
1267 	if (id < 0 || id >= max_id) {
1268 		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
1269 			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
1270 			max_id - 1);
1271 		return -EINVAL;
1272 	}
1273 
1274 	switch (ctype) {
1275 	case NPA_AQ_CTYPE_AURA:
1276 		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
1277 		rvu->rvu_dbg.npa_aura_ctx.id = id;
1278 		rvu->rvu_dbg.npa_aura_ctx.all = all;
1279 		break;
1280 
1281 	case NPA_AQ_CTYPE_POOL:
1282 		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
1283 		rvu->rvu_dbg.npa_pool_ctx.id = id;
1284 		rvu->rvu_dbg.npa_pool_ctx.all = all;
1285 		break;
1286 	default:
1287 		return -EINVAL;
1288 	}
1289 	return 0;
1290 }
1291 
1292 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
1293 				const char __user *buffer, int *npalf,
1294 				int *id, bool *all)
1295 {
1296 	int bytes_not_copied;
1297 	char *cmd_buf_tmp;
1298 	char *subtoken;
1299 	int ret;
1300 
1301 	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
1302 	if (bytes_not_copied)
1303 		return -EFAULT;
1304 
1305 	cmd_buf[*count] = '\0';
1306 	cmd_buf_tmp = strchr(cmd_buf, '\n');
1307 
1308 	if (cmd_buf_tmp) {
1309 		*cmd_buf_tmp = '\0';
1310 		*count = cmd_buf_tmp - cmd_buf + 1;
1311 	}
1312 
1313 	subtoken = strsep(&cmd_buf, " ");
1314 	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
1315 	if (ret < 0)
1316 		return ret;
1317 	subtoken = strsep(&cmd_buf, " ");
1318 	if (subtoken && strcmp(subtoken, "all") == 0) {
1319 		*all = true;
1320 	} else {
1321 		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
1322 		if (ret < 0)
1323 			return ret;
1324 	}
1325 	if (cmd_buf)
1326 		return -EINVAL;
1327 	return ret;
1328 }
1329 
1330 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
1331 				     const char __user *buffer,
1332 				     size_t count, loff_t *ppos, int ctype)
1333 {
1334 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
1335 					"aura" : "pool";
1336 	struct seq_file *seqfp = filp->private_data;
1337 	struct rvu *rvu = seqfp->private;
1338 	int npalf, id = 0, ret;
1339 	bool all = false;
1340 
1341 	if ((*ppos != 0) || !count)
1342 		return -EINVAL;
1343 
1344 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1345 	if (!cmd_buf)
1346 		return count;
1347 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1348 				   &npalf, &id, &all);
1349 	if (ret < 0) {
1350 		dev_info(rvu->dev,
1351 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1352 			 ctype_string, ctype_string);
1353 		goto done;
1354 	} else {
1355 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1356 	}
1357 done:
1358 	kfree(cmd_buf);
1359 	return ret ? ret : count;
1360 }
1361 
1362 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1363 					  const char __user *buffer,
1364 					  size_t count, loff_t *ppos)
1365 {
1366 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1367 				     NPA_AQ_CTYPE_AURA);
1368 }
1369 
/* debugfs read handler: dump the selected aura context(s) */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}
1374 
/* debugfs seq-file ops for "aura_ctx": display + write handlers above */
RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1376 
1377 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1378 					  const char __user *buffer,
1379 					  size_t count, loff_t *ppos)
1380 {
1381 	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1382 				     NPA_AQ_CTYPE_POOL);
1383 }
1384 
/* debugfs read handler: dump the selected pool context(s) */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}
1389 
/* debugfs seq-file ops for "pool_ctx": display + write handlers above */
RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1391 
1392 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1393 			    int ctype, int transaction)
1394 {
1395 	u64 req, out_req, lat, cant_alloc;
1396 	struct nix_hw *nix_hw;
1397 	struct rvu *rvu;
1398 	int port;
1399 
1400 	if (blk_addr == BLKADDR_NDC_NPA0) {
1401 		rvu = s->private;
1402 	} else {
1403 		nix_hw = s->private;
1404 		rvu = nix_hw->rvu;
1405 	}
1406 
1407 	for (port = 0; port < NDC_MAX_PORT; port++) {
1408 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1409 						(port, ctype, transaction));
1410 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1411 						(port, ctype, transaction));
1412 		out_req = rvu_read64(rvu, blk_addr,
1413 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1414 				     (port, ctype, transaction));
1415 		cant_alloc = rvu_read64(rvu, blk_addr,
1416 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1417 					(port, transaction));
1418 		seq_printf(s, "\nPort:%d\n", port);
1419 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1420 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1421 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1422 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1423 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1424 	}
1425 }
1426 
1427 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1428 {
1429 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
1430 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1431 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
1432 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1433 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1434 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1435 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1436 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1437 	return 0;
1438 }
1439 
/* debugfs read handler: dump cache stats for the NPA NDC block */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}
1444 
/* debugfs seq-file ops for "ndc_cache": display handler only, no write */
RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1446 
1447 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1448 {
1449 	struct nix_hw *nix_hw;
1450 	struct rvu *rvu;
1451 	int bank, max_bank;
1452 	u64 ndc_af_const;
1453 
1454 	if (blk_addr == BLKADDR_NDC_NPA0) {
1455 		rvu = s->private;
1456 	} else {
1457 		nix_hw = s->private;
1458 		rvu = nix_hw->rvu;
1459 	}
1460 
1461 	ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
1462 	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
1463 	for (bank = 0; bank < max_bank; bank++) {
1464 		seq_printf(s, "BANK:%d\n", bank);
1465 		seq_printf(s, "\tHits:\t%lld\n",
1466 			   (u64)rvu_read64(rvu, blk_addr,
1467 			   NDC_AF_BANKX_HIT_PC(bank)));
1468 		seq_printf(s, "\tMiss:\t%lld\n",
1469 			   (u64)rvu_read64(rvu, blk_addr,
1470 			    NDC_AF_BANKX_MISS_PC(bank)));
1471 	}
1472 	return 0;
1473 }
1474 
1475 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1476 {
1477 	struct nix_hw *nix_hw = filp->private;
1478 	int blkaddr = 0;
1479 	int ndc_idx = 0;
1480 
1481 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1482 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1483 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1484 
1485 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1486 }
1487 
/* debugfs seq-file ops for NIX RX "ndc_cache": display only, no write */
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1489 
1490 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1491 {
1492 	struct nix_hw *nix_hw = filp->private;
1493 	int blkaddr = 0;
1494 	int ndc_idx = 0;
1495 
1496 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1497 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1498 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1499 
1500 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1501 }
1502 
/* debugfs seq-file ops for NIX TX "ndc_cache": display only, no write */
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1504 
/* debugfs read handler: bank hit/miss stats for the NPA NDC block */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}
1510 
/* debugfs seq-file ops for NPA "ndc_hits_miss": display only, no write */
RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1512 
1513 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1514 						void *unused)
1515 {
1516 	struct nix_hw *nix_hw = filp->private;
1517 	int ndc_idx = NPA0_U;
1518 	int blkaddr = 0;
1519 
1520 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1521 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1522 
1523 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1524 }
1525 
/* debugfs seq-file ops for NIX RX "ndc_hits_miss": display only, no write */
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1527 
1528 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1529 						void *unused)
1530 {
1531 	struct nix_hw *nix_hw = filp->private;
1532 	int ndc_idx = NPA0_U;
1533 	int blkaddr = 0;
1534 
1535 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1536 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1537 
1538 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1539 }
1540 
/* debugfs seq-file ops for NIX TX "ndc_hits_miss": display only, no write */
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1542 
1543 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1544 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1545 {
1546 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1547 		   sq_ctx->ena, sq_ctx->qint_idx);
1548 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1549 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1550 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1551 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1552 
1553 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1554 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1555 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1556 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1557 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1558 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1559 
1560 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1561 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1562 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1563 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1564 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1565 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1566 
1567 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1568 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1569 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1570 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1571 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1572 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1573 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1574 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1575 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1576 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1577 
1578 	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1579 		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1580 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1581 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1582 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1583 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1584 		   sq_ctx->smenq_next_sqb);
1585 
1586 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1587 
1588 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1589 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1590 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1591 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1592 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1593 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1594 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1595 
1596 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1597 		   (u64)sq_ctx->scm_lso_rem);
1598 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1599 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1600 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1601 		   (u64)sq_ctx->dropped_octs);
1602 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1603 		   (u64)sq_ctx->dropped_pkts);
1604 }
1605 
/* Dumps given nix_sq's context
 *
 * On non-OTx2 (CN10K-family) silicon the SQ context has a different
 * layout, so the dump is delegated to print_nix_cn10k_sq_ctx();
 * otherwise the OTx2 layout (W0..W15) is printed word-by-word.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	if (!is_rvu_otx2(rvu)) {
		/* Same AQ response, reinterpreted with the CN10K layout */
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
1675 
1676 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1677 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
1678 {
1679 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1680 		   rq_ctx->ena, rq_ctx->sso_ena);
1681 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1682 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1683 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1684 		   rq_ctx->cq, rq_ctx->lenerr_dis);
1685 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1686 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1687 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1688 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1689 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1690 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1691 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1692 
1693 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1694 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
1695 	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1696 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1697 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
1698 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1699 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
1700 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1701 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1702 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1703 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1704 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1705 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1706 
1707 	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1708 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1709 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1710 	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1711 		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
1712 	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1713 		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1714 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1715 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1716 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1717 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1718 
1719 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1720 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1721 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1722 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1723 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1724 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1725 	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1726 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1727 
1728 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1729 		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1730 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1731 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1732 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1733 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
1734 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1735 
1736 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1737 		   rq_ctx->ltag, rq_ctx->good_utag);
1738 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1739 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
1740 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1741 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1742 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1743 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1744 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1745 
1746 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1747 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1748 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1749 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1750 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1751 }
1752 
/* Dumps the given nix_rq's RQ (receive queue) context to the seq file,
 * word by word (W0-W10) as laid out by the hardware.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K silicon uses a different RQ context layout; reinterpret
	 * the response buffer and dump via the CN10K-specific helper.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	/* W0: queue enable and aura/CQ/SSO/IPSEC bindings */
	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	/* W1: drop/caching policy and buffer aura selection */
	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	/* W2: header split/skip configuration and buffer sizes */
	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	/* W3: SPB/WQE/XQE pass and drop thresholds */
	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	/* W4: queue interrupt state and LPB pass/drop thresholds */
	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	/* W5: flow tagging configuration */
	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6-W10: byte/packet statistics */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1821 
/* Dumps the given nix_cq's CQ (completion queue) context to the seq file */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* W0: CQ ring base address */
	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	/* W1: write pointer, interrupt indices and backpressure config */
	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	/* Local backpressure id fields exist only on non-OTx2 (CN10K+) */
	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
		seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
		seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
		/* Recombine the three split lbpid fields for convenience */
		seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
			   cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
			   cq_ctx->lbpid_low);
		seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
	}

	/* W2: ring occupancy and averaging state */
	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	/* W3: error interrupt state, sizing and drop/backpressure levels */
	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
		seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
			   cq_ctx->cpt_drop_err_en);
	}
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
1869 
1870 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
1871 					 void *unused, int ctype)
1872 {
1873 	void (*print_nix_ctx)(struct seq_file *filp,
1874 			      struct nix_aq_enq_rsp *rsp) = NULL;
1875 	struct nix_hw *nix_hw = filp->private;
1876 	struct rvu *rvu = nix_hw->rvu;
1877 	struct nix_aq_enq_req aq_req;
1878 	struct nix_aq_enq_rsp rsp;
1879 	char *ctype_string = NULL;
1880 	int qidx, rc, max_id = 0;
1881 	struct rvu_pfvf *pfvf;
1882 	int nixlf, id, all;
1883 	u16 pcifunc;
1884 
1885 	switch (ctype) {
1886 	case NIX_AQ_CTYPE_CQ:
1887 		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
1888 		id = rvu->rvu_dbg.nix_cq_ctx.id;
1889 		all = rvu->rvu_dbg.nix_cq_ctx.all;
1890 		break;
1891 
1892 	case NIX_AQ_CTYPE_SQ:
1893 		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
1894 		id = rvu->rvu_dbg.nix_sq_ctx.id;
1895 		all = rvu->rvu_dbg.nix_sq_ctx.all;
1896 		break;
1897 
1898 	case NIX_AQ_CTYPE_RQ:
1899 		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
1900 		id = rvu->rvu_dbg.nix_rq_ctx.id;
1901 		all = rvu->rvu_dbg.nix_rq_ctx.all;
1902 		break;
1903 
1904 	default:
1905 		return -EINVAL;
1906 	}
1907 
1908 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1909 		return -EINVAL;
1910 
1911 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1912 	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
1913 		seq_puts(filp, "SQ context is not initialized\n");
1914 		return -EINVAL;
1915 	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
1916 		seq_puts(filp, "RQ context is not initialized\n");
1917 		return -EINVAL;
1918 	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
1919 		seq_puts(filp, "CQ context is not initialized\n");
1920 		return -EINVAL;
1921 	}
1922 
1923 	if (ctype == NIX_AQ_CTYPE_SQ) {
1924 		max_id = pfvf->sq_ctx->qsize;
1925 		ctype_string = "sq";
1926 		print_nix_ctx = print_nix_sq_ctx;
1927 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
1928 		max_id = pfvf->rq_ctx->qsize;
1929 		ctype_string = "rq";
1930 		print_nix_ctx = print_nix_rq_ctx;
1931 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
1932 		max_id = pfvf->cq_ctx->qsize;
1933 		ctype_string = "cq";
1934 		print_nix_ctx = print_nix_cq_ctx;
1935 	}
1936 
1937 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1938 	aq_req.hdr.pcifunc = pcifunc;
1939 	aq_req.ctype = ctype;
1940 	aq_req.op = NIX_AQ_INSTOP_READ;
1941 	if (all)
1942 		id = 0;
1943 	else
1944 		max_id = id + 1;
1945 	for (qidx = id; qidx < max_id; qidx++) {
1946 		aq_req.qidx = qidx;
1947 		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
1948 			   ctype_string, nixlf, aq_req.qidx);
1949 		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1950 		if (rc) {
1951 			seq_puts(filp, "Failed to read the context\n");
1952 			return -EINVAL;
1953 		}
1954 		print_nix_ctx(filp, &rsp);
1955 	}
1956 	return 0;
1957 }
1958 
1959 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1960 			       int id, int ctype, char *ctype_string,
1961 			       struct seq_file *m)
1962 {
1963 	struct nix_hw *nix_hw = m->private;
1964 	struct rvu_pfvf *pfvf;
1965 	int max_id = 0;
1966 	u16 pcifunc;
1967 
1968 	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1969 		return -EINVAL;
1970 
1971 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1972 
1973 	if (ctype == NIX_AQ_CTYPE_SQ) {
1974 		if (!pfvf->sq_ctx) {
1975 			dev_warn(rvu->dev, "SQ context is not initialized\n");
1976 			return -EINVAL;
1977 		}
1978 		max_id = pfvf->sq_ctx->qsize;
1979 	} else if (ctype == NIX_AQ_CTYPE_RQ) {
1980 		if (!pfvf->rq_ctx) {
1981 			dev_warn(rvu->dev, "RQ context is not initialized\n");
1982 			return -EINVAL;
1983 		}
1984 		max_id = pfvf->rq_ctx->qsize;
1985 	} else if (ctype == NIX_AQ_CTYPE_CQ) {
1986 		if (!pfvf->cq_ctx) {
1987 			dev_warn(rvu->dev, "CQ context is not initialized\n");
1988 			return -EINVAL;
1989 		}
1990 		max_id = pfvf->cq_ctx->qsize;
1991 	}
1992 
1993 	if (id < 0 || id >= max_id) {
1994 		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1995 			 ctype_string, max_id - 1);
1996 		return -EINVAL;
1997 	}
1998 	switch (ctype) {
1999 	case NIX_AQ_CTYPE_CQ:
2000 		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
2001 		rvu->rvu_dbg.nix_cq_ctx.id = id;
2002 		rvu->rvu_dbg.nix_cq_ctx.all = all;
2003 		break;
2004 
2005 	case NIX_AQ_CTYPE_SQ:
2006 		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
2007 		rvu->rvu_dbg.nix_sq_ctx.id = id;
2008 		rvu->rvu_dbg.nix_sq_ctx.all = all;
2009 		break;
2010 
2011 	case NIX_AQ_CTYPE_RQ:
2012 		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
2013 		rvu->rvu_dbg.nix_rq_ctx.id = id;
2014 		rvu->rvu_dbg.nix_rq_ctx.all = all;
2015 		break;
2016 	default:
2017 		return -EINVAL;
2018 	}
2019 	return 0;
2020 }
2021 
2022 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
2023 					   const char __user *buffer,
2024 					   size_t count, loff_t *ppos,
2025 					   int ctype)
2026 {
2027 	struct seq_file *m = filp->private_data;
2028 	struct nix_hw *nix_hw = m->private;
2029 	struct rvu *rvu = nix_hw->rvu;
2030 	char *cmd_buf, *ctype_string;
2031 	int nixlf, id = 0, ret;
2032 	bool all = false;
2033 
2034 	if ((*ppos != 0) || !count)
2035 		return -EINVAL;
2036 
2037 	switch (ctype) {
2038 	case NIX_AQ_CTYPE_SQ:
2039 		ctype_string = "sq";
2040 		break;
2041 	case NIX_AQ_CTYPE_RQ:
2042 		ctype_string = "rq";
2043 		break;
2044 	case NIX_AQ_CTYPE_CQ:
2045 		ctype_string = "cq";
2046 		break;
2047 	default:
2048 		return -EINVAL;
2049 	}
2050 
2051 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
2052 
2053 	if (!cmd_buf)
2054 		return count;
2055 
2056 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
2057 				   &nixlf, &id, &all);
2058 	if (ret < 0) {
2059 		dev_info(rvu->dev,
2060 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
2061 			 ctype_string, ctype_string);
2062 		goto done;
2063 	} else {
2064 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
2065 					  ctype_string, m);
2066 	}
2067 done:
2068 	kfree(cmd_buf);
2069 	return ret ? ret : count;
2070 }
2071 
/* debugfs "sq_ctx" write: select the NIX LF / SQ(s) to dump */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_SQ);
}

/* debugfs "sq_ctx" read: dump the previously selected SQ context(s) */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}

RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
2086 
/* debugfs "rq_ctx" write: select the NIX LF / RQ(s) to dump */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_RQ);
}

/* debugfs "rq_ctx" read: dump the previously selected RQ context(s) */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
}

RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
2101 
/* debugfs "cq_ctx" write: select the NIX LF / CQ(s) to dump */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_CQ);
}

/* debugfs "cq_ctx" read: dump the previously selected CQ context(s) */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}

RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
2116 
2117 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
2118 				 unsigned long *bmap, char *qtype)
2119 {
2120 	char *buf;
2121 
2122 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2123 	if (!buf)
2124 		return;
2125 
2126 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
2127 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
2128 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
2129 		   qtype, buf);
2130 	kfree(buf);
2131 }
2132 
2133 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
2134 {
2135 	if (!pfvf->cq_ctx)
2136 		seq_puts(filp, "cq context is not initialized\n");
2137 	else
2138 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
2139 				     "cq");
2140 
2141 	if (!pfvf->rq_ctx)
2142 		seq_puts(filp, "rq context is not initialized\n");
2143 	else
2144 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
2145 				     "rq");
2146 
2147 	if (!pfvf->sq_ctx)
2148 		seq_puts(filp, "sq context is not initialized\n");
2149 	else
2150 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
2151 				     "sq");
2152 }
2153 
/* debugfs "qsize" (NIX) write: select the NIX LF whose qsizes to show */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}

/* debugfs "qsize" (NIX) read: show queue sizes of the selected LF */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}

RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
2168 
2169 static void print_band_prof_ctx(struct seq_file *m,
2170 				struct nix_bandprof_s *prof)
2171 {
2172 	char *str;
2173 
2174 	switch (prof->pc_mode) {
2175 	case NIX_RX_PC_MODE_VLAN:
2176 		str = "VLAN";
2177 		break;
2178 	case NIX_RX_PC_MODE_DSCP:
2179 		str = "DSCP";
2180 		break;
2181 	case NIX_RX_PC_MODE_GEN:
2182 		str = "Generic";
2183 		break;
2184 	case NIX_RX_PC_MODE_RSVD:
2185 		str = "Reserved";
2186 		break;
2187 	}
2188 	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
2189 	str = (prof->icolor == 3) ? "Color blind" :
2190 		(prof->icolor == 0) ? "Green" :
2191 		(prof->icolor == 1) ? "Yellow" : "Red";
2192 	seq_printf(m, "W0: icolor\t\t%s\n", str);
2193 	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
2194 	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
2195 	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
2196 	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
2197 	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
2198 	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
2199 	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
2200 	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
2201 
2202 	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
2203 	str = (prof->lmode == 0) ? "byte" : "packet";
2204 	seq_printf(m, "W1: lmode\t\t%s\n", str);
2205 	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
2206 	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
2207 	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
2208 	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
2209 	str = (prof->gc_action == 0) ? "PASS" :
2210 		(prof->gc_action == 1) ? "DROP" : "RED";
2211 	seq_printf(m, "W1: gc_action\t\t%s\n", str);
2212 	str = (prof->yc_action == 0) ? "PASS" :
2213 		(prof->yc_action == 1) ? "DROP" : "RED";
2214 	seq_printf(m, "W1: yc_action\t\t%s\n", str);
2215 	str = (prof->rc_action == 0) ? "PASS" :
2216 		(prof->rc_action == 1) ? "DROP" : "RED";
2217 	seq_printf(m, "W1: rc_action\t\t%s\n", str);
2218 	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
2219 	seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
2220 	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
2221 
2222 	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
2223 	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
2224 	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
2225 	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
2226 		   (u64)prof->green_pkt_pass);
2227 	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
2228 		   (u64)prof->yellow_pkt_pass);
2229 	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
2230 	seq_printf(m, "W7: green_octs_pass\t%lld\n",
2231 		   (u64)prof->green_octs_pass);
2232 	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
2233 		   (u64)prof->yellow_octs_pass);
2234 	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
2235 	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
2236 		   (u64)prof->green_pkt_drop);
2237 	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
2238 		   (u64)prof->yellow_pkt_drop);
2239 	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
2240 	seq_printf(m, "W13: green_octs_drop\t%lld\n",
2241 		   (u64)prof->green_octs_drop);
2242 	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
2243 		   (u64)prof->yellow_octs_drop);
2244 	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
2245 	seq_puts(m, "==============================\n");
2246 }
2247 
/* debugfs "ingress_policer_ctx": dump the context of every allocated
 * bandwidth profile on each policer layer (leaf/mid/top).
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Only allocated profiles are dumped */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* AQ index: profile id in the low 14 bits, layer
			 * encoded above them.
			 */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				/* Stop but return 0 so the partial dump is
				 * still delivered to the reader.
				 */
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
2304 
2305 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2306 {
2307 	struct nix_hw *nix_hw = m->private;
2308 	struct nix_ipolicer *ipolicer;
2309 	int layer;
2310 	char *str;
2311 
2312 	/* Ingress policers do not exist on all platforms */
2313 	if (!nix_hw->ipolicer)
2314 		return 0;
2315 
2316 	seq_puts(m, "\nBandwidth profile resource free count\n");
2317 	seq_puts(m, "=====================================\n");
2318 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2319 		if (layer == BAND_PROF_INVAL_LAYER)
2320 			continue;
2321 		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2322 			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2323 
2324 		ipolicer = &nix_hw->ipolicer[layer];
2325 		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
2326 			   ipolicer->band_prof.max,
2327 			   rvu_rsrc_free_count(&ipolicer->band_prof));
2328 	}
2329 	seq_puts(m, "=====================================\n");
2330 
2331 	return 0;
2332 }
2333 
2334 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
2335 
/* Creates the per-NIX-block debugfs directory and its files */
static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
	struct nix_hw *nix_hw;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return;

	/* NIX0 gets "nix", NIX1 gets "nix1" */
	if (blkaddr == BLKADDR_NIX0) {
		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
		nix_hw = &rvu->hw->nix[0];
	} else {
		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
						      rvu->rvu_dbg.root);
		nix_hw = &rvu->hw->nix[1];
	}

	/* Most files take the nix_hw as private data; "qsize" takes rvu */
	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_sq_ctx_fops);
	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_rq_ctx_fops);
	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_cq_ctx_fops);
	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_tx_cache_fops);
	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_rx_cache_fops);
	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
			    &rvu_dbg_nix_qsize_fops);
	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_band_prof_ctx_fops);
	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_band_prof_rsrc_fops);
}
2373 
/* Creates the NPA debugfs directory and its context/statistics files */
static void rvu_dbg_npa_init(struct rvu *rvu)
{
	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);

	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_qsize_fops);
	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_aura_ctx_fops);
	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_pool_ctx_fops);
	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_ndc_cache_fops);
	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_ndc_hits_miss_fops);
}
2389 
/* Reads cumulative NIX RX stat 'idx' for (cgxd, lmac_id) and prints it as
 * "name: value" on success. Expands to the counter value and sets 'err'
 * from the enclosing scope; on failure the expansion value is an
 * uninitialized 'cnt', so callers must check 'err' before using it.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})

/* Same as PRINT_CGX_CUML_NIXRX_STATUS but for the NIX TX counters */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
2409 
/* Dumps link status plus cumulative NIX-level and MAC-level (CGX/RPM)
 * RX/TX statistics for one LMAC to the seq file.
 */
static int cgx_print_stats(struct seq_file *s, int lmac_id)
{
	struct cgx_link_user_info linfo;
	struct mac_ops *mac_ops;
	void *cgxd = s->private;
	u64 ucast, mcast, bcast;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	struct rvu *rvu;

	/* Locate the AF driver instance through its PCI device */
	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	mac_ops = get_mac_ops(cgxd);
	/* There can be no CGX devices at all */
	if (!mac_ops)
		return 0;

	/* Link status */
	seq_puts(s, "\n=======Link Status======\n\n");
	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
	if (err)
		seq_puts(s, "Failed to read link status\n");
	seq_printf(s, "\nLink is %s %d Mbps\n\n",
		   linfo.link_up ? "UP" : "DOWN", linfo.speed);

	/* Rx stats: the PRINT_* macros print the counter and also set
	 * 'err' in this scope, hence the check after every invocation.
	 */
	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
		   mac_ops->name);
	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
	if (err)
		return err;
	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
	if (err)
		return err;
	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
	if (err)
		return err;
	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
	if (err)
		return err;

	/* Tx stats */
	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
		   mac_ops->name);
	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
	if (err)
		return err;
	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
	if (err)
		return err;
	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
	if (err)
		return err;
	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
	if (err)
		return err;

	/* Rx stats: field names differ between CGX (OTx2) and RPM (CN10K) */
	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
		if (err)
			return err;
		if (is_rvu_otx2(rvu))
			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
				   rx_stat);
		else
			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
				   rx_stat);
		stat++;
	}

	/* Tx stats */
	stat = 0;
	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
		if (err)
			return err;

		if (is_rvu_otx2(rvu))
			seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
				   tx_stat);
		else
			seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
				   tx_stat);
		stat++;
	}

	return err;
}
2515 
2516 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2517 {
2518 	struct dentry *current_dir;
2519 	char *buf;
2520 
2521 	current_dir = filp->file->f_path.dentry->d_parent;
2522 	buf = strrchr(current_dir->d_name.name, 'c');
2523 	if (!buf)
2524 		return -EINVAL;
2525 
2526 	return kstrtoint(buf + 1, 10, lmac_id);
2527 }
2528 
/* debugfs "stats" read: resolve the LMAC id from the directory name and
 * dump that LMAC's statistics.
 */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id;
	int err = rvu_dbg_derive_lmacid(filp, &lmac_id);

	if (err)
		return err;

	return cgx_print_stats(filp, lmac_id);
}

RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2541 
/* Dumps the DMAC filter mode and the enabled DMAC CAM entries of one LMAC */
static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
{
	struct pci_dev *pdev = NULL;
	void *cgxd = s->private;
	char *bcast, *mcast;
	u16 index, domain;
	u8 dmac[ETH_ALEN];
	struct rvu *rvu;
	u64 cfg, mac;
	int pf;

	/* Locate the AF driver instance through its PCI device */
	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	/* NOTE(review): PCI domain of the RVU PF devices looks hard-coded
	 * to 2 here - confirm this holds on all supported platforms.
	 */
	domain = 2;

	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
	if (!pdev)
		return 0;

	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";

	seq_puts(s,
		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
	seq_printf(s, "%s  PF%d  %9s  %9s",
		   dev_name(&pdev->dev), pf, bcast, mcast);
	if (cfg & CGX_DMAC_CAM_ACCEPT)
		seq_printf(s, "%12s\n\n", "UNICAST");
	else
		seq_printf(s, "%16s\n\n", "PROMISCUOUS");

	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");

	/* Walk all 32 DMAC CAM entries of this CGX */
	for (index = 0 ; index < 32 ; index++) {
		cfg = cgx_read_dmac_entry(cgxd, index);
		/* Display enabled dmac entries associated with current lmac */
		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
			u64_to_ether_addr(mac, dmac);
			seq_printf(s, "%7d     %pM\n", index, dmac);
		}
	}

	/* Drop the reference taken by pci_get_domain_bus_and_slot() */
	pci_dev_put(pdev);
	return 0;
}
2594 
/* debugfs "mac_filter" read: resolve the LMAC id from the directory name
 * and dump that LMAC's DMAC filter state.
 */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int lmac_id;
	int err = rvu_dbg_derive_lmacid(filp, &lmac_id);

	if (err)
		return err;

	return cgx_print_dmac_flt(filp, lmac_id);
}

RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2607 
/* Creates per-CGX and per-LMAC debugfs directories with "stats" and
 * "mac_filter" files under a root named after the MAC type.
 */
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	unsigned long lmac_bmap;
	int i, lmac_id;
	char dname[20];
	void *cgx;

	if (!cgx_get_cgxcnt_max())
		return;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	if (!mac_ops)
		return;

	/* Root directory carries the MAC type name from mac_ops */
	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
						   rvu->rvu_dbg.root);

	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
		cgx = rvu_cgx_pdata(i, rvu);
		if (!cgx)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgx);
		/* cgx debugfs dir */
		sprintf(dname, "%s%d", mac_ops->name, i);
		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
						      rvu->rvu_dbg.cgx_root);

		for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			/* lmac debugfs dir */
			sprintf(dname, "lmac%d", lmac_id);
			rvu->rvu_dbg.lmac =
				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);

			/* Both files take the cgx device as private data;
			 * the LMAC id is recovered from the directory name.
			 */
			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
					    cgx, &rvu_dbg_cgx_stat_fops);
			debugfs_create_file("mac_filter", 0600,
					    rvu->rvu_dbg.lmac, cgx,
					    &rvu_dbg_cgx_dmac_flt_fops);
		}
	}
}
2650 
2651 /* NPC debugfs APIs */
2652 static void rvu_print_npc_mcam_info(struct seq_file *s,
2653 				    u16 pcifunc, int blkaddr)
2654 {
2655 	struct rvu *rvu = s->private;
2656 	int entry_acnt, entry_ecnt;
2657 	int cntr_acnt, cntr_ecnt;
2658 
2659 	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2660 					  &entry_acnt, &entry_ecnt);
2661 	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2662 					    &cntr_acnt, &cntr_ecnt);
2663 	if (!entry_acnt && !cntr_acnt)
2664 		return;
2665 
2666 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2667 		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2668 			   rvu_get_pf(pcifunc));
2669 	else
2670 		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2671 			   rvu_get_pf(pcifunc),
2672 			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2673 
2674 	if (entry_acnt) {
2675 		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2676 		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2677 	}
2678 	if (cntr_acnt) {
2679 		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2680 		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
2681 	}
2682 }
2683 
2684 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
2685 {
2686 	struct rvu *rvu = filp->private;
2687 	int pf, vf, numvfs, blkaddr;
2688 	struct npc_mcam *mcam;
2689 	u16 pcifunc, counters;
2690 	u64 cfg;
2691 
2692 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2693 	if (blkaddr < 0)
2694 		return -ENODEV;
2695 
2696 	mcam = &rvu->hw->mcam;
2697 	counters = rvu->hw->npc_counters;
2698 
2699 	seq_puts(filp, "\nNPC MCAM info:\n");
2700 	/* MCAM keywidth on receive and transmit sides */
2701 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
2702 	cfg = (cfg >> 32) & 0x07;
2703 	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2704 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2705 		   "224bits" : "448bits"));
2706 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2707 	cfg = (cfg >> 32) & 0x07;
2708 	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2709 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2710 		   "224bits" : "448bits"));
2711 
2712 	mutex_lock(&mcam->lock);
2713 	/* MCAM entries */
2714 	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2715 	seq_printf(filp, "\t\t Reserved \t: %d\n",
2716 		   mcam->total_entries - mcam->bmap_entries);
2717 	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2718 
2719 	/* MCAM counters */
2720 	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2721 	seq_printf(filp, "\t\t Reserved \t: %d\n",
2722 		   counters - mcam->counters.max);
2723 	seq_printf(filp, "\t\t Available \t: %d\n",
2724 		   rvu_rsrc_free_count(&mcam->counters));
2725 
2726 	if (mcam->bmap_entries == mcam->bmap_fcnt) {
2727 		mutex_unlock(&mcam->lock);
2728 		return 0;
2729 	}
2730 
2731 	seq_puts(filp, "\n\t\t Current allocation\n");
2732 	seq_puts(filp, "\t\t====================\n");
2733 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2734 		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2735 		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2736 
2737 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2738 		numvfs = (cfg >> 12) & 0xFF;
2739 		for (vf = 0; vf < numvfs; vf++) {
2740 			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2741 			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2742 		}
2743 	}
2744 
2745 	mutex_unlock(&mcam->lock);
2746 	return 0;
2747 }
2748 
2749 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2750 
/* debugfs 'rx_miss_act_stats' show handler: prints the hit count of the
 * match-stat counter attached to the MCAM RX miss action.
 */
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
					     void *unused)
{
	struct rvu *rvu = filp->private;
	struct npc_mcam *mcam;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;

	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
		   rvu_read64(rvu, blkaddr,
			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));

	return 0;
}
2771 
2772 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2773 
/* Dump an MPLS LSE's TTL field and mask.  Arguments are snapshotted into
 * locals so each is evaluated exactly once, matching RVU_DBG_PRINT_MPLS_LBTCBOS
 * (also drops the stray line-continuation after "while (0)").
 */
#define RVU_DBG_PRINT_MPLS_TTL(_pkt, _mask)                                   \
do {									      \
	typeof(_pkt) (pkt) = (_pkt);					      \
	typeof(_mask) (mask) = (_mask);                                       \
	seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt));     \
	seq_printf(s, "mask 0x%lx\n",                                         \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask));               \
} while (0)
2780 
/* Dump an MPLS LSE's label/TC/BOS fields and their masks; each argument is
 * evaluated exactly once via the local snapshots.
 * (Drops the stray line-continuation after "while (0)" which silently pulled
 * the following blank line into the macro.)
 */
#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask)                               \
do {									      \
	typeof(_pkt) (pkt) = (_pkt);					      \
	typeof(_mask) (mask) = (_mask);                                       \
	seq_printf(s, "%ld %ld %ld\n",                                        \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt),                  \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt),                  \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt));                \
	seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n",                           \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask),                 \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask),                 \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask));               \
} while (0)
2794 
/* Print every match field present in @rule's feature bitmap along with the
 * packet value and mask programmed for it.  One line per field; fields with
 * no printable value fall through to the default case (bare newline).
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	/* rule->features is a 64-bit bitmap of NPC_* field ids */
	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_LXMB:
			if (rule->lxmb == 1)
				seq_puts(s, "\tL2M nibble is set\n");
			else
				seq_puts(s, "\tL2B nibble is set\n");
			break;
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			/* Multi-byte packet fields are stored big-endian */
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_INNER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_itci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		case NPC_IPFRAG_IPV6:
			seq_printf(s, "0x%x ", rule->packet.next_header);
			seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
			break;
		case NPC_IPFRAG_IPV4:
			seq_printf(s, "0x%x ", rule->packet.ip_flag);
			seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
			break;
		/* L4 ports share one layout regardless of protocol */
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		case NPC_TCP_FLAGS:
			seq_printf(s, "%d ", rule->packet.tcp_flags);
			seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
			break;
		case NPC_IPSEC_SPI:
			seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
			seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
			break;
		/* MPLS label stack entries 1-4 */
		case NPC_MPLS1_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
						   rule->mask.mpls_lse[0]);
			break;
		case NPC_MPLS1_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
					       rule->mask.mpls_lse[0]);
			break;
		case NPC_MPLS2_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
						   rule->mask.mpls_lse[1]);
			break;
		case NPC_MPLS2_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
					       rule->mask.mpls_lse[1]);
			break;
		case NPC_MPLS3_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
						   rule->mask.mpls_lse[2]);
			break;
		case NPC_MPLS3_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
					       rule->mask.mpls_lse[2]);
			break;
		case NPC_MPLS4_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
						   rule->mask.mpls_lse[3]);
			break;
		case NPC_MPLS4_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
					       rule->mask.mpls_lse[3]);
			break;
		case NPC_TYPE_ICMP:
			seq_printf(s, "%d ", rule->packet.icmp_type);
			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
			break;
		case NPC_CODE_ICMP:
			seq_printf(s, "%d ", rule->packet.icmp_code);
			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
			break;
		default:
			seq_puts(s, "\n");
			break;
		}
	}
}
2925 
2926 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
2927 					 struct rvu_npc_mcam_rule *rule)
2928 {
2929 	if (is_npc_intf_tx(rule->intf)) {
2930 		switch (rule->tx_action.op) {
2931 		case NIX_TX_ACTIONOP_DROP:
2932 			seq_puts(s, "\taction: Drop\n");
2933 			break;
2934 		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
2935 			seq_puts(s, "\taction: Unicast to default channel\n");
2936 			break;
2937 		case NIX_TX_ACTIONOP_UCAST_CHAN:
2938 			seq_printf(s, "\taction: Unicast to channel %d\n",
2939 				   rule->tx_action.index);
2940 			break;
2941 		case NIX_TX_ACTIONOP_MCAST:
2942 			seq_puts(s, "\taction: Multicast\n");
2943 			break;
2944 		case NIX_TX_ACTIONOP_DROP_VIOL:
2945 			seq_puts(s, "\taction: Lockdown Violation Drop\n");
2946 			break;
2947 		default:
2948 			break;
2949 		}
2950 	} else {
2951 		switch (rule->rx_action.op) {
2952 		case NIX_RX_ACTIONOP_DROP:
2953 			seq_puts(s, "\taction: Drop\n");
2954 			break;
2955 		case NIX_RX_ACTIONOP_UCAST:
2956 			seq_printf(s, "\taction: Direct to queue %d\n",
2957 				   rule->rx_action.index);
2958 			break;
2959 		case NIX_RX_ACTIONOP_RSS:
2960 			seq_puts(s, "\taction: RSS\n");
2961 			break;
2962 		case NIX_RX_ACTIONOP_UCAST_IPSEC:
2963 			seq_puts(s, "\taction: Unicast ipsec\n");
2964 			break;
2965 		case NIX_RX_ACTIONOP_MCAST:
2966 			seq_puts(s, "\taction: Multicast\n");
2967 			break;
2968 		default:
2969 			break;
2970 		}
2971 	}
2972 }
2973 
/* Map an NPC interface id to a printable name; "unknown" otherwise. */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
2991 
/* debugfs 'mcam_rules' show handler: walks mcam->mcam_rules under the MCAM
 * lock and, for each installed rule, prints owner, direction, interface,
 * entry index, match fields, forward target/channel (RX only), action,
 * enable state and hit counter (if one is attached).
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		/* Non-zero FUNC bits => installed by a VF of this PF */
		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules also carry a forwarding target and channel */
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		/* Hit stats exist only when a counter is attached */
		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}
3057 
3058 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
3059 
/* debugfs 'exact_entries' show handler: dumps the exact-match MEM table
 * (one column per way, rows indexed by hash bucket) followed by the CAM
 * table overflow entries.  Both dumps are skipped when empty.
 */
static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
{
	/* Per-way cursor into each way's sorted entry list */
	struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
	struct npc_exact_table_entry *cam_entry;
	struct npc_exact_table *table;
	struct rvu *rvu = s->private;
	int i, j;

	u8 bitmap = 0;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Check if there is at least one entry in mem table */
	if (!table->mem_tbl_entry_cnt)
		goto dump_cam_table;

	/* Print table headers */
	seq_puts(s, "\n\tExact Match MEM Table\n");
	seq_puts(s, "Index\t");

	for (i = 0; i < table->mem_table.ways; i++) {
		/* Start each way cursor at the head of its list */
		mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
							struct npc_exact_table_entry, list);

		seq_printf(s, "Way-%d\t\t\t\t\t", i);
	}

	seq_puts(s, "\n");
	for (i = 0; i < table->mem_table.ways; i++)
		seq_puts(s, "\tChan  MAC                     \t");

	seq_puts(s, "\n\n");

	/* Print mem table entries */
	for (i = 0; i < table->mem_table.depth; i++) {
		/* bitmap collects which ways have an entry at index i */
		bitmap = 0;
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!mem_entry[j])
				continue;

			if (mem_entry[j]->index != i)
				continue;

			bitmap |= BIT(j);
		}

		/* No valid entries */
		if (!bitmap)
			continue;

		seq_printf(s, "%d\t", i);
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!(bitmap & BIT(j))) {
				seq_puts(s, "nil\t\t\t\t\t");
				continue;
			}

			seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
				   mem_entry[j]->mac);
			/* Advance this way's cursor past the printed entry */
			mem_entry[j] = list_next_entry(mem_entry[j], list);
		}
		seq_puts(s, "\n");
	}

dump_cam_table:

	if (!table->cam_tbl_entry_cnt)
		goto done;

	seq_puts(s, "\n\tExact Match CAM Table\n");
	seq_puts(s, "index\tchan\tMAC\n");

	/* Traverse cam table entries */
	list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
		seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
			   cam_entry->mac);
	}

done:
	mutex_unlock(&table->lock);
	return 0;
}
3144 
3145 RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
3146 
3147 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
3148 {
3149 	struct npc_exact_table *table;
3150 	struct rvu *rvu = s->private;
3151 	int i;
3152 
3153 	table = rvu->hw->table;
3154 
3155 	seq_puts(s, "\n\tExact Table Info\n");
3156 	seq_printf(s, "Exact Match Feature : %s\n",
3157 		   rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable");
3158 	if (!rvu->hw->cap.npc_exact_match_enabled)
3159 		return 0;
3160 
3161 	seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
3162 	for (i = 0; i < table->num_drop_rules; i++)
3163 		seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
3164 
3165 	seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
3166 	for (i = 0; i < table->num_drop_rules; i++)
3167 		seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
3168 
3169 	seq_puts(s, "\n\tMEM Table Info\n");
3170 	seq_printf(s, "Ways : %d\n", table->mem_table.ways);
3171 	seq_printf(s, "Depth : %d\n", table->mem_table.depth);
3172 	seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
3173 	seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
3174 	seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
3175 
3176 	seq_puts(s, "\n\tCAM Table Info\n");
3177 	seq_printf(s, "Depth : %d\n", table->cam_table.depth);
3178 
3179 	return 0;
3180 }
3181 
3182 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
3183 
3184 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3185 {
3186 	struct npc_exact_table *table;
3187 	struct rvu *rvu = s->private;
3188 	struct npc_key_field *field;
3189 	u16 chan, pcifunc;
3190 	int blkaddr, i;
3191 	u64 cfg, cam1;
3192 	char *str;
3193 
3194 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3195 	table = rvu->hw->table;
3196 
3197 	field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3198 
3199 	seq_puts(s, "\n\t Exact Hit on drop status\n");
3200 	seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3201 
3202 	for (i = 0; i < table->num_drop_rules; i++) {
3203 		pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3204 		cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3205 
3206 		/* channel will be always in keyword 0 */
3207 		cam1 = rvu_read64(rvu, blkaddr,
3208 				  NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3209 		chan = field->kw_mask[0] & cam1;
3210 
3211 		str = (cfg & 1) ? "enabled" : "disabled";
3212 
3213 		seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
3214 			   rvu_read64(rvu, blkaddr,
3215 				      NPC_AF_MATCH_STATX(table->counter_idx[i])),
3216 			   chan, str);
3217 	}
3218 
3219 	return 0;
3220 }
3221 
3222 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
3223 
3224 static void rvu_dbg_npc_init(struct rvu *rvu)
3225 {
3226 	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
3227 
3228 	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
3229 			    &rvu_dbg_npc_mcam_info_fops);
3230 	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
3231 			    &rvu_dbg_npc_mcam_rules_fops);
3232 
3233 	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
3234 			    &rvu_dbg_npc_rx_miss_act_fops);
3235 
3236 	if (!rvu->hw->cap.npc_exact_match_enabled)
3237 		return;
3238 
3239 	debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
3240 			    &rvu_dbg_npc_exact_entries_fops);
3241 
3242 	debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
3243 			    &rvu_dbg_npc_exact_info_fops);
3244 
3245 	debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
3246 			    &rvu_dbg_npc_exact_drop_cnt_fops);
3247 
3248 }
3249 
3250 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
3251 {
3252 	struct cpt_ctx *ctx = filp->private;
3253 	u64 busy_sts = 0, free_sts = 0;
3254 	u32 e_min = 0, e_max = 0, e, i;
3255 	u16 max_ses, max_ies, max_aes;
3256 	struct rvu *rvu = ctx->rvu;
3257 	int blkaddr = ctx->blkaddr;
3258 	u64 reg;
3259 
3260 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3261 	max_ses = reg & 0xffff;
3262 	max_ies = (reg >> 16) & 0xffff;
3263 	max_aes = (reg >> 32) & 0xffff;
3264 
3265 	switch (eng_type) {
3266 	case CPT_AE_TYPE:
3267 		e_min = max_ses + max_ies;
3268 		e_max = max_ses + max_ies + max_aes;
3269 		break;
3270 	case CPT_SE_TYPE:
3271 		e_min = 0;
3272 		e_max = max_ses;
3273 		break;
3274 	case CPT_IE_TYPE:
3275 		e_min = max_ses;
3276 		e_max = max_ses + max_ies;
3277 		break;
3278 	default:
3279 		return -EINVAL;
3280 	}
3281 
3282 	for (e = e_min, i = 0; e < e_max; e++, i++) {
3283 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
3284 		if (reg & 0x1)
3285 			busy_sts |= 1ULL << i;
3286 
3287 		if (reg & 0x2)
3288 			free_sts |= 1ULL << i;
3289 	}
3290 	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
3291 	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
3292 
3293 	return 0;
3294 }
3295 
/* debugfs 'cpt_ae_sts' show handler: asymmetric-engine status bitmaps. */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}
3300 
3301 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3302 
/* debugfs 'cpt_se_sts' show handler: symmetric-engine status bitmaps. */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}
3307 
3308 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3309 
/* debugfs 'cpt_ie_sts' show handler: IPsec-engine status bitmaps. */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}
3314 
3315 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
3316 
3317 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
3318 {
3319 	struct cpt_ctx *ctx = filp->private;
3320 	u16 max_ses, max_ies, max_aes;
3321 	struct rvu *rvu = ctx->rvu;
3322 	int blkaddr = ctx->blkaddr;
3323 	u32 e_max, e;
3324 	u64 reg;
3325 
3326 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3327 	max_ses = reg & 0xffff;
3328 	max_ies = (reg >> 16) & 0xffff;
3329 	max_aes = (reg >> 32) & 0xffff;
3330 
3331 	e_max = max_ses + max_ies + max_aes;
3332 
3333 	seq_puts(filp, "===========================================\n");
3334 	for (e = 0; e < e_max; e++) {
3335 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
3336 		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
3337 			   reg & 0xff);
3338 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
3339 		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
3340 			   reg);
3341 		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
3342 		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
3343 			   reg);
3344 		seq_puts(filp, "===========================================\n");
3345 	}
3346 	return 0;
3347 }
3348 
3349 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
3350 
/* debugfs 'cpt_lfs_info' show handler: dumps CTL/CTL2/PTR_CTL/CFG for
 * every possible CPT LF of this block; -ENODEV if the block has no LFs.
 */
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	int blkaddr = ctx->blkaddr;
	struct rvu *rvu = ctx->rvu;
	struct rvu_block *block;
	struct rvu_hwinfo *hw;
	u64 reg;
	u32 lf;

	hw = rvu->hw;
	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return -ENODEV;

	seq_puts(filp, "===========================================\n");
	for (lf = 0; lf < block->lf.max; lf++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
		/* Generic per-LF config register at lfcfg_reg + LF stride */
		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
				(lf << block->lfshift));
		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}
3381 
3382 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
3383 
/* debugfs 'cpt_err_info' show handler: dump the CPT AF fault/interrupt
 * and execution-error status registers.
 */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);

	return 0;
}
3408 
3409 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
3410 
/* debugfs 'cpt_pc' show handler: dump CPT performance counters
 * (instruction/NCB request and latency counts, active cycles, clock).
 */
static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
	seq_printf(filp, "CPT clock count pc         %llu\n", reg);

	return 0;
}
3435 
3436 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
3437 
3438 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
3439 {
3440 	struct cpt_ctx *ctx;
3441 
3442 	if (!is_block_implemented(rvu->hw, blkaddr))
3443 		return;
3444 
3445 	if (blkaddr == BLKADDR_CPT0) {
3446 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
3447 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
3448 		ctx->blkaddr = BLKADDR_CPT0;
3449 		ctx->rvu = rvu;
3450 	} else {
3451 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
3452 						      rvu->rvu_dbg.root);
3453 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
3454 		ctx->blkaddr = BLKADDR_CPT1;
3455 		ctx->rvu = rvu;
3456 	}
3457 
3458 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3459 			    &rvu_dbg_cpt_pc_fops);
3460 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3461 			    &rvu_dbg_cpt_ae_sts_fops);
3462 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3463 			    &rvu_dbg_cpt_se_sts_fops);
3464 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3465 			    &rvu_dbg_cpt_ie_sts_fops);
3466 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3467 			    &rvu_dbg_cpt_engines_info_fops);
3468 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3469 			    &rvu_dbg_cpt_lfs_info_fops);
3470 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3471 			    &rvu_dbg_cpt_err_info_fops);
3472 }
3473 
/* Debugfs root directory name depends on the silicon generation. */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	return is_rvu_otx2(rvu) ? "octeontx2" : "cn10k";
}
3481 
/* Create the whole debugfs hierarchy for this RVU AF device.  The root
 * directory is "octeontx2" or "cn10k" depending on silicon generation.
 */
void rvu_dbg_init(struct rvu *rvu)
{
	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);

	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
			    &rvu_dbg_rsrc_status_fops);

	/* LMTST map table exists only on CN10K (non-OcteonTX2) silicon */
	if (!is_rvu_otx2(rvu))
		debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
				    rvu, &rvu_dbg_lmtst_map_table_fops);

	/* Skip the PF<->MAC mapping file when no CGX/RPM blocks exist */
	if (!cgx_get_cgxcnt_max())
		goto create;

	if (is_rvu_otx2(rvu))
		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
	else
		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);

create:
	rvu_dbg_npa_init(rvu);
	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);

	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
	rvu_dbg_cgx_init(rvu);
	rvu_dbg_npc_init(rvu);
	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
	rvu_dbg_mcs_init(rvu);
}
3514 
/* Tear down the entire debugfs tree created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
3519 
3520 #endif /* CONFIG_DEBUG_FS */
3521