1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3 *
4 * Copyright (C) 2019 Marvell.
5 *
6 */
7
8 #ifdef CONFIG_DEBUG_FS
9
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 #include "rvu_npc_hash.h"
22 #include "mcs.h"
23
24 #define DEBUGFS_DIR_NAME "octeontx2"
25
/* Generic CGX per-LMAC statistics counter indices. The same index is
 * used to look up the human-readable label in cgx_rx_stats_fields[]
 * or cgx_tx_stats_fields[] depending on direction.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
47
/* NIX TX stats: per-LF transmit statistic register offsets */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0, /* Unicast packets sent */
	TX_BCAST	= 0x1, /* Broadcast packets sent */
	TX_MCAST	= 0x2, /* Multicast packets sent */
	TX_DROP		= 0x3, /* Packets dropped on transmit */
	TX_OCTS		= 0x4, /* Octets sent */
	TX_STATS_ENUM_LAST,
};
57
/* NIX RX stats: per-LF receive statistic register offsets */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0, /* Octets received */
	RX_UCAST	= 0x1, /* Unicast packets received */
	RX_BCAST	= 0x2, /* Broadcast packets received */
	RX_MCAST	= 0x3, /* Multicast packets received */
	RX_DROP		= 0x4, /* Packets dropped */
	RX_DROP_OCTS	= 0x5, /* Octets of dropped packets */
	RX_FCS		= 0x6, /* Packets with FCS errors */
	RX_ERR		= 0x7, /* Error packets */
	RX_DRP_BCAST	= 0x8, /* Dropped broadcast packets */
	RX_DRP_MCAST	= 0x9, /* Dropped multicast packets */
	RX_DRP_L3BCAST	= 0xa, /* Dropped L3 broadcast packets */
	RX_DRP_L3MCAST	= 0xb, /* Dropped L3 multicast packets */
	RX_STATS_ENUM_LAST,
};
74
/* Human-readable labels for CGX RX counters, indexed by CGX_STATx */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
90
/* Human-readable labels for CGX TX counters, indexed by CGX_STATx */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
111
/* Human-readable labels for RPM MAC RX counters, in hardware counter
 * order (IEEE 802.3 / RMON style statistics).
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets without error", /* fixed: was "with out error" */
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with In-range length Errors", /* fixed: was garbled "a1nrange" (802.3 aInRangeLengthErrors) */
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
157
/* Human-readable labels for RPM MAC TX counters, in hardware counter
 * order (IEEE 802.3 / RMON style statistics).
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
194
/* CPT crypto engine types as encoded by hardware.
 * NOTE(review): AE/SE/IE presumably stand for asymmetric, symmetric and
 * IPsec engines respectively — confirm against the CPT documentation.
 */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
200
/* Sentinel tokens: callers may pass "NULL" as the read/write op name to
 * the FOPS macros below; token pasting then yields rvu_dbg_NULL or
 * rvu_dbg_open_NULL, which expand to a plain NULL pointer.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Declare a seq_file based debugfs file_operations named
 * rvu_dbg_<name>_fops, backed by show callback rvu_dbg_<read_op> and
 * optional write handler rvu_dbg_<write_op>.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Declare a raw (non seq_file) debugfs file_operations named
 * rvu_dbg_<name>_fops with direct read/write handlers.
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
225
226 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
227
rvu_dbg_mcs_port_stats_display(struct seq_file * filp,void * unused,int dir)228 static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
229 {
230 struct mcs *mcs = filp->private;
231 struct mcs_port_stats stats;
232 int lmac;
233
234 seq_puts(filp, "\n port stats\n");
235 mutex_lock(&mcs->stats_lock);
236 for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
237 mcs_get_port_stats(mcs, &stats, lmac, dir);
238 seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
239 seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
240
241 if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
242 seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
243 stats.preempt_err_cnt);
244 if (dir == MCS_TX)
245 seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
246 stats.sectag_insert_err_cnt);
247 }
248 mutex_unlock(&mcs->stats_lock);
249 return 0;
250 }
251
/* debugfs "rx_stats/port" show callback: RX-direction port stats */
static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
258
/* debugfs "tx_stats/port" show callback: TX-direction port stats */
static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
265
rvu_dbg_mcs_sa_stats_display(struct seq_file * filp,void * unused,int dir)266 static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
267 {
268 struct mcs *mcs = filp->private;
269 struct mcs_sa_stats stats;
270 struct rsrc_bmap *map;
271 int sa_id;
272
273 if (dir == MCS_TX) {
274 map = &mcs->tx.sa;
275 mutex_lock(&mcs->stats_lock);
276 for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
277 seq_puts(filp, "\n TX SA stats\n");
278 mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
279 seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
280 stats.pkt_encrypt_cnt);
281
282 seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
283 stats.pkt_protected_cnt);
284 }
285 mutex_unlock(&mcs->stats_lock);
286 return 0;
287 }
288
289 /* RX stats */
290 map = &mcs->rx.sa;
291 mutex_lock(&mcs->stats_lock);
292 for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
293 seq_puts(filp, "\n RX SA stats\n");
294 mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
295 seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
296 seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
297 seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
298 seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
299 seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
300 }
301 mutex_unlock(&mcs->stats_lock);
302 return 0;
303 }
304
/* debugfs "rx_stats/sa" show callback: RX-direction SA stats */
static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
311
/* debugfs "tx_stats/sa" show callback: TX-direction SA stats */
static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
318
rvu_dbg_mcs_tx_sc_stats_display(struct seq_file * filp,void * unused)319 static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
320 {
321 struct mcs *mcs = filp->private;
322 struct mcs_sc_stats stats;
323 struct rsrc_bmap *map;
324 int sc_id;
325
326 map = &mcs->tx.sc;
327 seq_puts(filp, "\n SC stats\n");
328
329 mutex_lock(&mcs->stats_lock);
330 for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
331 mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
332 seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
333 seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
334 seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
335
336 if (mcs->hw->mcs_blks == 1) {
337 seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
338 stats.octet_encrypt_cnt);
339 seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
340 stats.octet_protected_cnt);
341 }
342 }
343 mutex_unlock(&mcs->stats_lock);
344 return 0;
345 }
346
347 RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
348
rvu_dbg_mcs_rx_sc_stats_display(struct seq_file * filp,void * unused)349 static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
350 {
351 struct mcs *mcs = filp->private;
352 struct mcs_sc_stats stats;
353 struct rsrc_bmap *map;
354 int sc_id;
355
356 map = &mcs->rx.sc;
357 seq_puts(filp, "\n SC stats\n");
358
359 mutex_lock(&mcs->stats_lock);
360 for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
361 mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
362 seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
363 seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
364 seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
365 seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
366 seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
367 seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);
368
369 if (mcs->hw->mcs_blks > 1) {
370 seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
371 seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
372 }
373 if (mcs->hw->mcs_blks == 1) {
374 seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
375 stats.octet_decrypt_cnt);
376 seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
377 stats.octet_validate_cnt);
378 }
379 }
380 mutex_unlock(&mcs->stats_lock);
381 return 0;
382 }
383
384 RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
385
rvu_dbg_mcs_flowid_stats_display(struct seq_file * filp,void * unused,int dir)386 static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
387 {
388 struct mcs *mcs = filp->private;
389 struct mcs_flowid_stats stats;
390 struct rsrc_bmap *map;
391 int flow_id;
392
393 seq_puts(filp, "\n Flowid stats\n");
394
395 if (dir == MCS_RX)
396 map = &mcs->rx.flow_ids;
397 else
398 map = &mcs->tx.flow_ids;
399
400 mutex_lock(&mcs->stats_lock);
401 for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
402 mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
403 seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
404 }
405 mutex_unlock(&mcs->stats_lock);
406 return 0;
407 }
408
/* debugfs "tx_stats/flowid" show callback: TX-direction flow id stats */
static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
415
/* debugfs "rx_stats/flowid" show callback: RX-direction flow id stats */
static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
422
/* Dump per-secy (secure entity) TX statistics for every in-use secy:
 * controlled/uncontrolled port packet and octet counters, plus
 * encryption/protection octet totals.
 */
static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->tx.secy;
	seq_puts(filp, "\n MCS TX secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
			   stats.octet_encrypted_cnt);
		seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
			   stats.octet_protected_cnt);
		/* NOTE(review): label reads "Pkts on active sa" but the counter
		 * is pkt_noactivesa_cnt — label possibly missing a "no"; confirm.
		 */
		seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
			   stats.pkt_noactivesa_cnt);
		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
465
/* Dump per-secy (secure entity) RX statistics for every in-use secy:
 * controlled/uncontrolled port counters, decrypt/validate octet totals
 * and various error/drop classifications. The "notag" counter only
 * exists on multi-block MCS hardware.
 */
static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->rx.secy;
	seq_puts(filp, "\n MCS secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
			   stats.octet_decrypted_cnt);
		seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
			   stats.octet_validated_cnt);
		seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
			   stats.pkt_port_disabled_cnt);
		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
		seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
			   stats.pkt_nosa_cnt);
		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
			   stats.pkt_nosaerror_cnt);
		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
			   stats.pkt_tagged_ctl_cnt);
		seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
		if (mcs->hw->mcs_blks > 1)
			seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
				   stats.pkt_notag_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
518
rvu_dbg_mcs_init(struct rvu * rvu)519 static void rvu_dbg_mcs_init(struct rvu *rvu)
520 {
521 struct mcs *mcs;
522 char dname[10];
523 int i;
524
525 if (!rvu->mcs_blk_cnt)
526 return;
527
528 rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
529
530 for (i = 0; i < rvu->mcs_blk_cnt; i++) {
531 mcs = mcs_get_pdata(i);
532
533 sprintf(dname, "mcs%d", i);
534 rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
535 rvu->rvu_dbg.mcs_root);
536
537 rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
538
539 debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
540 &rvu_dbg_mcs_rx_flowid_stats_fops);
541
542 debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
543 &rvu_dbg_mcs_rx_secy_stats_fops);
544
545 debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
546 &rvu_dbg_mcs_rx_sc_stats_fops);
547
548 debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
549 &rvu_dbg_mcs_rx_sa_stats_fops);
550
551 debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
552 &rvu_dbg_mcs_rx_port_stats_fops);
553
554 rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
555
556 debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
557 &rvu_dbg_mcs_tx_flowid_stats_fops);
558
559 debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
560 &rvu_dbg_mcs_tx_secy_stats_fops);
561
562 debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
563 &rvu_dbg_mcs_tx_sc_stats_fops);
564
565 debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
566 &rvu_dbg_mcs_tx_sa_stats_fops);
567
568 debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
569 &rvu_dbg_mcs_tx_port_stats_fops);
570 }
571 }
572
573 #define LMT_MAPTBL_ENTRY_SIZE 16
574 /* Dump LMTST map table */
rvu_dbg_lmtst_map_table_display(struct file * filp,char __user * buffer,size_t count,loff_t * ppos)575 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
576 char __user *buffer,
577 size_t count, loff_t *ppos)
578 {
579 struct rvu *rvu = filp->private_data;
580 u64 lmt_addr, val, tbl_base;
581 int pf, vf, num_vfs, hw_vfs;
582 void __iomem *lmt_map_base;
583 int buf_size = 10240;
584 size_t off = 0;
585 int index = 0;
586 char *buf;
587 int ret;
588
589 /* don't allow partial reads */
590 if (*ppos != 0)
591 return 0;
592
593 buf = kzalloc(buf_size, GFP_KERNEL);
594 if (!buf)
595 return -ENOMEM;
596
597 tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
598
599 lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
600 if (!lmt_map_base) {
601 dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
602 kfree(buf);
603 return false;
604 }
605
606 off += scnprintf(&buf[off], buf_size - 1 - off,
607 "\n\t\t\t\t\tLmtst Map Table Entries");
608 off += scnprintf(&buf[off], buf_size - 1 - off,
609 "\n\t\t\t\t\t=======================");
610 off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
611 off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
612 off += scnprintf(&buf[off], buf_size - 1 - off,
613 "Lmtline Base (word 0)\t\t");
614 off += scnprintf(&buf[off], buf_size - 1 - off,
615 "Lmt Map Entry (word 1)");
616 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
617 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
618 off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
619 pf);
620
621 index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
622 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
623 (tbl_base + index));
624 lmt_addr = readq(lmt_map_base + index);
625 off += scnprintf(&buf[off], buf_size - 1 - off,
626 " 0x%016llx\t\t", lmt_addr);
627 index += 8;
628 val = readq(lmt_map_base + index);
629 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
630 val);
631 /* Reading num of VFs per PF */
632 rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
633 for (vf = 0; vf < num_vfs; vf++) {
634 index = (pf * rvu->hw->total_vfs * 16) +
635 ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
636 off += scnprintf(&buf[off], buf_size - 1 - off,
637 "PF%d:VF%d \t\t", pf, vf);
638 off += scnprintf(&buf[off], buf_size - 1 - off,
639 " 0x%llx\t\t", (tbl_base + index));
640 lmt_addr = readq(lmt_map_base + index);
641 off += scnprintf(&buf[off], buf_size - 1 - off,
642 " 0x%016llx\t\t", lmt_addr);
643 index += 8;
644 val = readq(lmt_map_base + index);
645 off += scnprintf(&buf[off], buf_size - 1 - off,
646 " 0x%016llx\n", val);
647 }
648 }
649 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
650
651 ret = min(off, count);
652 if (copy_to_user(buffer, buf, ret))
653 ret = -EFAULT;
654 kfree(buf);
655
656 iounmap(lmt_map_base);
657 if (ret < 0)
658 return ret;
659
660 *ppos = ret;
661 return ret;
662 }
663
664 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
665
/* Render the list of @block LFs attached to @pcifunc into @lfs as a
 * comma separated string with consecutive runs collapsed, e.g.
 * "0-3,7,9-10". @lfs must be large enough for the worst case
 * (callers size it via get_max_column_width()).
 */
static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	/* prev_lf starts out-of-range so the first matching LF can never
	 * be mistaken for a run continuation; seq tracks whether we are
	 * inside an unfinished run.
	 */
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		/* Only LFs mapped to this PF/VF function */
		if (block.fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			/* Extends the current run; emit nothing yet */
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			/* Close the previous run and start a new entry */
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* Close a run left open at the end of the bitmap */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
699
get_max_column_width(struct rvu * rvu)700 static int get_max_column_width(struct rvu *rvu)
701 {
702 int index, pf, vf, lf_str_size = 12, buf_size = 256;
703 struct rvu_block block;
704 u16 pcifunc;
705 char *buf;
706
707 buf = kzalloc(buf_size, GFP_KERNEL);
708 if (!buf)
709 return -ENOMEM;
710
711 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
712 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
713 pcifunc = pf << 10 | vf;
714 if (!pcifunc)
715 continue;
716
717 for (index = 0; index < BLK_COUNT; index++) {
718 block = rvu->hw->block[index];
719 if (!strlen(block.name))
720 continue;
721
722 get_lf_str_list(block, pcifunc, buf);
723 if (lf_str_size <= strlen(buf))
724 lf_str_size = strlen(buf) + 1;
725 }
726 }
727 }
728
729 kfree(buf);
730 return lf_str_size;
731 }
732
/* Dumps current provisioning status of all RVU block LFs:
 * one header row of block names, then one row per PF/VF that has at
 * least one LF attached, each cell being the compact LF list produced
 * by get_lf_str_list(). Partial reads are not supported.
 */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
					  char __user *buffer,
					  size_t count, loff_t *ppos)
{
	int index, off = 0, flag = 0, len = 0, i = 0;
	struct rvu *rvu = filp->private_data;
	int bytes_not_copied = 0;
	struct rvu_block block;
	int pf, vf, pcifunc;
	int buf_size = 2048;
	int lf_str_size;
	char *lfs;
	char *buf;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Get the maximum width of a column */
	lf_str_size = get_max_column_width(rvu);

	lfs = kzalloc(lf_str_size, GFP_KERNEL);
	if (!lfs) {
		kfree(buf);
		return -ENOMEM;
	}
	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
			  "pcifunc");
	for (index = 0; index < BLK_COUNT; index++)
		if (strlen(rvu->hw->block[index].name)) {
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 "%-*s", lf_str_size,
					 rvu->hw->block[index].name);
		}

	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	/* NOTE(review): the user buffer offset is computed as i * off,
	 * which assumes every emitted chunk has the same length as the
	 * current one — confirm whether *ppos should be used instead.
	 */
	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
	if (bytes_not_copied)
		goto out;

	i++;
	*ppos += off;
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		/* vf == 0 denotes the PF itself; hence <= total_vfs */
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			off = 0;
			flag = 0;
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			if (vf) {
				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			} else {
				sprintf(lfs, "PF%d", pf);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			}

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;
				len = 0;
				lfs[len] = '\0';
				get_lf_str_list(block, pcifunc, lfs);
				/* flag marks rows with at least one LF attached */
				if (strlen(lfs))
					flag = 1;

				off += scnprintf(&buf[off], buf_size - 1 - off,
						 "%-*s", lf_str_size, lfs);
			}
			if (flag) {
				off +=	scnprintf(&buf[off],
						  buf_size - 1 - off, "\n");
				bytes_not_copied = copy_to_user(buffer +
								(i * off),
								buf, off);
				if (bytes_not_copied)
					goto out;

				i++;
				*ppos += off;
			}
		}
	}

out:
	kfree(lfs);
	kfree(buf);
	if (bytes_not_copied)
		return -EFAULT;

	return *ppos;
}

RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
838
rvu_dbg_rvu_pf_cgx_map_display(struct seq_file * filp,void * unused)839 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
840 {
841 char cgx[10], lmac[10], chan[10];
842 struct rvu *rvu = filp->private;
843 struct pci_dev *pdev = NULL;
844 struct mac_ops *mac_ops;
845 struct rvu_pfvf *pfvf;
846 int pf, domain, blkid;
847 u8 cgx_id, lmac_id;
848 u16 pcifunc;
849
850 domain = 2;
851 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
852 /* There can be no CGX devices at all */
853 if (!mac_ops)
854 return 0;
855 seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\tCHAN\n",
856 mac_ops->name);
857 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
858 if (!is_pf_cgxmapped(rvu, pf))
859 continue;
860
861 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
862 if (!pdev)
863 continue;
864
865 cgx[0] = 0;
866 lmac[0] = 0;
867 pcifunc = pf << 10;
868 pfvf = rvu_get_pfvf(rvu, pcifunc);
869
870 if (pfvf->nix_blkaddr == BLKADDR_NIX0)
871 blkid = 0;
872 else
873 blkid = 1;
874
875 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
876 &lmac_id);
877 sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
878 sprintf(lmac, "LMAC%d", lmac_id);
879 sprintf(chan, "%d",
880 rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0));
881 seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\t%s\n",
882 dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac,
883 chan);
884
885 pci_dev_put(pdev);
886 }
887 return 0;
888 }
889
890 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
891
rvu_dbg_is_valid_lf(struct rvu * rvu,int blkaddr,int lf,u16 * pcifunc)892 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
893 u16 *pcifunc)
894 {
895 struct rvu_block *block;
896 struct rvu_hwinfo *hw;
897
898 hw = rvu->hw;
899 block = &hw->block[blkaddr];
900
901 if (lf < 0 || lf >= block->lf.max) {
902 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
903 block->lf.max - 1);
904 return false;
905 }
906
907 *pcifunc = block->fn_map[lf];
908 if (!*pcifunc) {
909 dev_warn(rvu->dev,
910 "This LF is not attached to any RVU PFFUNC\n");
911 return false;
912 }
913 return true;
914 }
915
print_npa_qsize(struct seq_file * m,struct rvu_pfvf * pfvf)916 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
917 {
918 char *buf;
919
920 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
921 if (!buf)
922 return;
923
924 if (!pfvf->aura_ctx) {
925 seq_puts(m, "Aura context is not initialized\n");
926 } else {
927 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
928 pfvf->aura_ctx->qsize);
929 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
930 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
931 }
932
933 if (!pfvf->pool_ctx) {
934 seq_puts(m, "Pool context is not initialized\n");
935 } else {
936 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
937 pfvf->pool_ctx->qsize);
938 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
939 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
940 }
941 kfree(buf);
942 }
943
944 /* The 'qsize' entry dumps current Aura/Pool context Qsize
945 * and each context's current enable/disable status in a bitmap.
946 */
rvu_dbg_qsize_display(struct seq_file * filp,void * unsused,int blktype)947 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
948 int blktype)
949 {
950 void (*print_qsize)(struct seq_file *filp,
951 struct rvu_pfvf *pfvf) = NULL;
952 struct dentry *current_dir;
953 struct rvu_pfvf *pfvf;
954 struct rvu *rvu;
955 int qsize_id;
956 u16 pcifunc;
957 int blkaddr;
958
959 rvu = filp->private;
960 switch (blktype) {
961 case BLKTYPE_NPA:
962 qsize_id = rvu->rvu_dbg.npa_qsize_id;
963 print_qsize = print_npa_qsize;
964 break;
965
966 case BLKTYPE_NIX:
967 qsize_id = rvu->rvu_dbg.nix_qsize_id;
968 print_qsize = print_nix_qsize;
969 break;
970
971 default:
972 return -EINVAL;
973 }
974
975 if (blktype == BLKTYPE_NPA) {
976 blkaddr = BLKADDR_NPA;
977 } else {
978 current_dir = filp->file->f_path.dentry->d_parent;
979 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
980 BLKADDR_NIX1 : BLKADDR_NIX0);
981 }
982
983 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
984 return -EINVAL;
985
986 pfvf = rvu_get_pfvf(rvu, pcifunc);
987 print_qsize(filp, pfvf);
988
989 return 0;
990 }
991
rvu_dbg_qsize_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int blktype)992 static ssize_t rvu_dbg_qsize_write(struct file *filp,
993 const char __user *buffer, size_t count,
994 loff_t *ppos, int blktype)
995 {
996 char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
997 struct seq_file *seqfile = filp->private_data;
998 char *cmd_buf, *cmd_buf_tmp, *subtoken;
999 struct rvu *rvu = seqfile->private;
1000 struct dentry *current_dir;
1001 int blkaddr;
1002 u16 pcifunc;
1003 int ret, lf;
1004
1005 cmd_buf = memdup_user_nul(buffer, count);
1006 if (IS_ERR(cmd_buf))
1007 return -ENOMEM;
1008
1009 cmd_buf_tmp = strchr(cmd_buf, '\n');
1010 if (cmd_buf_tmp) {
1011 *cmd_buf_tmp = '\0';
1012 count = cmd_buf_tmp - cmd_buf + 1;
1013 }
1014
1015 cmd_buf_tmp = cmd_buf;
1016 subtoken = strsep(&cmd_buf, " ");
1017 ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
1018 if (cmd_buf)
1019 ret = -EINVAL;
1020
1021 if (ret < 0 || !strncmp(subtoken, "help", 4)) {
1022 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
1023 goto qsize_write_done;
1024 }
1025
1026 if (blktype == BLKTYPE_NPA) {
1027 blkaddr = BLKADDR_NPA;
1028 } else {
1029 current_dir = filp->f_path.dentry->d_parent;
1030 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
1031 BLKADDR_NIX1 : BLKADDR_NIX0);
1032 }
1033
1034 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
1035 ret = -EINVAL;
1036 goto qsize_write_done;
1037 }
1038 if (blktype == BLKTYPE_NPA)
1039 rvu->rvu_dbg.npa_qsize_id = lf;
1040 else
1041 rvu->rvu_dbg.nix_qsize_id = lf;
1042
1043 qsize_write_done:
1044 kfree(cmd_buf_tmp);
1045 return ret ? ret : count;
1046 }
1047
rvu_dbg_npa_qsize_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1048 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
1049 const char __user *buffer,
1050 size_t count, loff_t *ppos)
1051 {
1052 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1053 BLKTYPE_NPA);
1054 }
1055
rvu_dbg_npa_qsize_display(struct seq_file * filp,void * unused)1056 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
1057 {
1058 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
1059 }
1060
1061 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
1062
/* Dumps given NPA Aura's context */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	/* Word 0: backing pool pointer */
	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	/* Word 1: enable, caching and drop configuration */
	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	/* Word 2: buffer count and NIX backpressure IDs */
	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	/* Word 3: limit and flow-control configuration */
	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be is only printed on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	/* Word 4: flow-control update address */
	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	/* Word 5: drop and interrupt status/configuration */
	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	/* Word 6: threshold; fc_msh_dst only on non-OcteonTx2 silicon */
	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
1110
/* Dumps given NPA Pool's context */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	/* Word 0: stack base address */
	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	/* Word 1: enable/alignment/caching and buffer geometry */
	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	/* Word 2: stack page accounting */
	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	/* Word 3: operation counter */
	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	/* Word 4: averaging and flow-control configuration */
	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be is only printed on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	/* Word 5: flow-control update address */
	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	/* Words 6-7: valid buffer pointer range */
	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	/* Word 8: interrupt status and configuration */
	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst is only printed on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
1157
/* Reads aura/pool's ctx from admin queue */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Pick up the LF/id/all selection recorded by the matching
	 * write handler (see write_npa_ctx()).
	 */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an admin-queue READ instruction for the context type */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			   max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps every id starting at 0; otherwise dump just one */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;

		/* Skip if queue is uninitialized */
		if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
			continue;

		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			   aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
1243
/* Record which NPA LF and which aura/pool id(s) subsequent context
 * reads should target, after validating the LF and the id range.
 * Returns 0 on success, -EINVAL on any invalid selection.
 */
static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
{
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->aura_ctx->qsize;
		break;
	case NPA_AQ_CTYPE_POOL:
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->pool_ctx->qsize;
		break;
	}

	/* For an unknown ctype max_id stays 0, so this also rejects it */
	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			 max_id - 1);
		return -EINVAL;
	}

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
		break;

	case NPA_AQ_CTYPE_POOL:
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1294
/* Parse "<npalf> [<id>|all]" from a debugfs write into @npalf, @id and
 * @all. Trims a trailing newline and updates @count accordingly.
 * @cmd_buf must be at least *count + 1 bytes (a NUL is appended).
 * Returns 0 on success, negative errno on bad input.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	char *nl, *token;
	int ret;

	if (copy_from_user(cmd_buf, buffer, *count))
		return -EFAULT;

	cmd_buf[*count] = '\0';

	nl = strchr(cmd_buf, '\n');
	if (nl) {
		*nl = '\0';
		*count = nl - cmd_buf + 1;
	}

	/* First token: the NPA LF number */
	token = strsep(&cmd_buf, " ");
	if (!token)
		return -EINVAL;
	ret = kstrtoint(token, 10, npalf);
	if (ret < 0)
		return ret;

	/* Second token: either "all" or a specific aura/pool id */
	token = strsep(&cmd_buf, " ");
	if (token && strcmp(token, "all") == 0) {
		*all = true;
	} else {
		if (!token)
			return -EINVAL;
		ret = kstrtoint(token, 10, id);
		if (ret < 0)
			return ret;
	}

	/* Anything left over is an error */
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
1332
rvu_dbg_npa_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)1333 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
1334 const char __user *buffer,
1335 size_t count, loff_t *ppos, int ctype)
1336 {
1337 char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
1338 "aura" : "pool";
1339 struct seq_file *seqfp = filp->private_data;
1340 struct rvu *rvu = seqfp->private;
1341 int npalf, id = 0, ret;
1342 bool all = false;
1343
1344 if ((*ppos != 0) || !count)
1345 return -EINVAL;
1346
1347 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1348 if (!cmd_buf)
1349 return count;
1350 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1351 &npalf, &id, &all);
1352 if (ret < 0) {
1353 dev_info(rvu->dev,
1354 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1355 ctype_string, ctype_string);
1356 goto done;
1357 } else {
1358 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1359 }
1360 done:
1361 kfree(cmd_buf);
1362 return ret ? ret : count;
1363 }
1364
rvu_dbg_npa_aura_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1365 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1366 const char __user *buffer,
1367 size_t count, loff_t *ppos)
1368 {
1369 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1370 NPA_AQ_CTYPE_AURA);
1371 }
1372
rvu_dbg_npa_aura_ctx_display(struct seq_file * filp,void * unused)1373 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
1374 {
1375 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
1376 }
1377
1378 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1379
rvu_dbg_npa_pool_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1380 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1381 const char __user *buffer,
1382 size_t count, loff_t *ppos)
1383 {
1384 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1385 NPA_AQ_CTYPE_POOL);
1386 }
1387
rvu_dbg_npa_pool_ctx_display(struct seq_file * filp,void * unused)1388 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
1389 {
1390 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
1391 }
1392
1393 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1394
ndc_cache_stats(struct seq_file * s,int blk_addr,int ctype,int transaction)1395 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1396 int ctype, int transaction)
1397 {
1398 u64 req, out_req, lat, cant_alloc;
1399 struct nix_hw *nix_hw;
1400 struct rvu *rvu;
1401 int port;
1402
1403 if (blk_addr == BLKADDR_NDC_NPA0) {
1404 rvu = s->private;
1405 } else {
1406 nix_hw = s->private;
1407 rvu = nix_hw->rvu;
1408 }
1409
1410 for (port = 0; port < NDC_MAX_PORT; port++) {
1411 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1412 (port, ctype, transaction));
1413 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1414 (port, ctype, transaction));
1415 out_req = rvu_read64(rvu, blk_addr,
1416 NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1417 (port, ctype, transaction));
1418 cant_alloc = rvu_read64(rvu, blk_addr,
1419 NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1420 (port, transaction));
1421 seq_printf(s, "\nPort:%d\n", port);
1422 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1423 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1424 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1425 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1426 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1427 }
1428 }
1429
ndc_blk_cache_stats(struct seq_file * s,int idx,int blk_addr)1430 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1431 {
1432 seq_puts(s, "\n***** CACHE mode read stats *****\n");
1433 ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1434 seq_puts(s, "\n***** CACHE mode write stats *****\n");
1435 ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1436 seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1437 ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1438 seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1439 ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1440 return 0;
1441 }
1442
rvu_dbg_npa_ndc_cache_display(struct seq_file * filp,void * unused)1443 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
1444 {
1445 return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1446 }
1447
1448 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1449
ndc_blk_hits_miss_stats(struct seq_file * s,int idx,int blk_addr)1450 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1451 {
1452 struct nix_hw *nix_hw;
1453 struct rvu *rvu;
1454 int bank, max_bank;
1455 u64 ndc_af_const;
1456
1457 if (blk_addr == BLKADDR_NDC_NPA0) {
1458 rvu = s->private;
1459 } else {
1460 nix_hw = s->private;
1461 rvu = nix_hw->rvu;
1462 }
1463
1464 ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
1465 max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
1466 for (bank = 0; bank < max_bank; bank++) {
1467 seq_printf(s, "BANK:%d\n", bank);
1468 seq_printf(s, "\tHits:\t%lld\n",
1469 (u64)rvu_read64(rvu, blk_addr,
1470 NDC_AF_BANKX_HIT_PC(bank)));
1471 seq_printf(s, "\tMiss:\t%lld\n",
1472 (u64)rvu_read64(rvu, blk_addr,
1473 NDC_AF_BANKX_MISS_PC(bank)));
1474 }
1475 return 0;
1476 }
1477
rvu_dbg_nix_ndc_rx_cache_display(struct seq_file * filp,void * unused)1478 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1479 {
1480 struct nix_hw *nix_hw = filp->private;
1481 int blkaddr = 0;
1482 int ndc_idx = 0;
1483
1484 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1485 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1486 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1487
1488 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1489 }
1490
1491 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1492
rvu_dbg_nix_ndc_tx_cache_display(struct seq_file * filp,void * unused)1493 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1494 {
1495 struct nix_hw *nix_hw = filp->private;
1496 int blkaddr = 0;
1497 int ndc_idx = 0;
1498
1499 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1500 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1501 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1502
1503 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1504 }
1505
1506 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1507
rvu_dbg_npa_ndc_hits_miss_display(struct seq_file * filp,void * unused)1508 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
1509 void *unused)
1510 {
1511 return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1512 }
1513
1514 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1515
rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file * filp,void * unused)1516 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1517 void *unused)
1518 {
1519 struct nix_hw *nix_hw = filp->private;
1520 int ndc_idx = NPA0_U;
1521 int blkaddr = 0;
1522
1523 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1524 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1525
1526 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1527 }
1528
1529 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1530
rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file * filp,void * unused)1531 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1532 void *unused)
1533 {
1534 struct nix_hw *nix_hw = filp->private;
1535 int ndc_idx = NPA0_U;
1536 int blkaddr = 0;
1537
1538 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1539 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1540
1541 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1542 }
1543
1544 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1545
print_nix_cn10k_sq_ctx(struct seq_file * m,struct nix_cn10k_sq_ctx_s * sq_ctx)1546 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1547 struct nix_cn10k_sq_ctx_s *sq_ctx)
1548 {
1549 seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1550 sq_ctx->ena, sq_ctx->qint_idx);
1551 seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1552 sq_ctx->substream, sq_ctx->sdp_mcast);
1553 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1554 sq_ctx->cq, sq_ctx->sqe_way_mask);
1555
1556 seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1557 sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1558 seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1559 sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1560 seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1561 sq_ctx->default_chan, sq_ctx->sqb_count);
1562
1563 seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1564 seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1565 seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1566 sq_ctx->sqb_aura, sq_ctx->sq_int);
1567 seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1568 sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1569
1570 seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1571 sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1572 seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1573 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1574 seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1575 sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1576 seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1577 sq_ctx->tail_offset, sq_ctx->smenq_offset);
1578 seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1579 sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1580
1581 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1582 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1583 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1584 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1585 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1586 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1587 sq_ctx->smenq_next_sqb);
1588
1589 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1590
1591 seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1592 seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1593 sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1594 seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1595 sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1596 seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1597 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1598
1599 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1600 (u64)sq_ctx->scm_lso_rem);
1601 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1602 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1603 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1604 (u64)sq_ctx->dropped_octs);
1605 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1606 (u64)sq_ctx->dropped_pkts);
1607 }
1608
/* Print the SQ -> SMQ -> TL4 -> TL3 -> TL2 -> TL1 scheduler hierarchy
 * for the SQ in @rsp, by walking the per-level PARENT registers.
 */
static void print_tm_tree(struct seq_file *m,
			  struct nix_aq_enq_rsp *rsp, u64 sq)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;
	int blkaddr = nix_hw->blkaddr;
	u16 smq, tl4, tl3, tl2, tl1;
	u64 cfg;

	/* Each level's PARENT register yields the index one level up */
	smq = sq_ctx->smq;
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(smq));
	tl4 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK, cfg);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(tl4));
	tl3 = FIELD_GET(NIX_AF_TL4_PARENT_MASK, cfg);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(tl3));
	tl2 = FIELD_GET(NIX_AF_TL3_PARENT_MASK, cfg);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(tl2));
	tl1 = FIELD_GET(NIX_AF_TL2_PARENT_MASK, cfg);

	seq_printf(m,
		   "SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n",
		   sq, smq, tl4, tl3, tl2, tl1);
}
1637
1638 /*dumps given tm_tree registers*/
rvu_dbg_nix_tm_tree_display(struct seq_file * m,void * unused)1639 static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused)
1640 {
1641 int qidx, nixlf, rc, id, max_id = 0;
1642 struct nix_hw *nix_hw = m->private;
1643 struct rvu *rvu = nix_hw->rvu;
1644 struct nix_aq_enq_req aq_req;
1645 struct nix_aq_enq_rsp rsp;
1646 struct rvu_pfvf *pfvf;
1647 u16 pcifunc;
1648
1649 nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1650 id = rvu->rvu_dbg.nix_tm_ctx.id;
1651
1652 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1653 return -EINVAL;
1654
1655 pfvf = rvu_get_pfvf(rvu, pcifunc);
1656 max_id = pfvf->sq_ctx->qsize;
1657
1658 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1659 aq_req.hdr.pcifunc = pcifunc;
1660 aq_req.ctype = NIX_AQ_CTYPE_SQ;
1661 aq_req.op = NIX_AQ_INSTOP_READ;
1662 seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1663 for (qidx = id; qidx < max_id; qidx++) {
1664 aq_req.qidx = qidx;
1665
1666 /* Skip SQ's if not initialized */
1667 if (!test_bit(qidx, pfvf->sq_bmap))
1668 continue;
1669
1670 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1671
1672 if (rc) {
1673 seq_printf(m, "Failed to read SQ(%d) context\n",
1674 aq_req.qidx);
1675 continue;
1676 }
1677 print_tm_tree(m, &rsp, aq_req.qidx);
1678 }
1679 return 0;
1680 }
1681
rvu_dbg_nix_tm_tree_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1682 static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp,
1683 const char __user *buffer,
1684 size_t count, loff_t *ppos)
1685 {
1686 struct seq_file *m = filp->private_data;
1687 struct nix_hw *nix_hw = m->private;
1688 struct rvu *rvu = nix_hw->rvu;
1689 struct rvu_pfvf *pfvf;
1690 u16 pcifunc;
1691 u64 nixlf;
1692 int ret;
1693
1694 ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
1695 if (ret)
1696 return ret;
1697
1698 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1699 return -EINVAL;
1700
1701 pfvf = rvu_get_pfvf(rvu, pcifunc);
1702 if (!pfvf->sq_ctx) {
1703 dev_warn(rvu->dev, "SQ context is not initialized\n");
1704 return -EINVAL;
1705 }
1706
1707 rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
1708 return count;
1709 }
1710
1711 RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write);
1712
print_tm_topo(struct seq_file * m,u64 schq,u32 lvl)1713 static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl)
1714 {
1715 struct nix_hw *nix_hw = m->private;
1716 struct rvu *rvu = nix_hw->rvu;
1717 int blkaddr, link, link_level;
1718 struct rvu_hwinfo *hw;
1719
1720 hw = rvu->hw;
1721 blkaddr = nix_hw->blkaddr;
1722 if (lvl == NIX_TXSCH_LVL_MDQ) {
1723 seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq,
1724 rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)));
1725 seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq,
1726 rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq)));
1727 seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT =0x%llx\n", schq,
1728 rvu_read64(rvu, blkaddr,
1729 NIX_AF_MDQX_OUT_MD_COUNT(schq)));
1730 seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq,
1731 rvu_read64(rvu, blkaddr,
1732 NIX_AF_MDQX_SCHEDULE(schq)));
1733 seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq,
1734 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq)));
1735 seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq,
1736 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq)));
1737 seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq,
1738 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq)));
1739 seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq,
1740 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq)));
1741 seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq,
1742 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)));
1743 seq_puts(m, "\n");
1744 }
1745
1746 if (lvl == NIX_TXSCH_LVL_TL4) {
1747 seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq,
1748 rvu_read64(rvu, blkaddr,
1749 NIX_AF_TL4X_SDP_LINK_CFG(schq)));
1750 seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq,
1751 rvu_read64(rvu, blkaddr,
1752 NIX_AF_TL4X_SCHEDULE(schq)));
1753 seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq,
1754 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq)));
1755 seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq,
1756 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq)));
1757 seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq,
1758 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq)));
1759 seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq,
1760 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq)));
1761 seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq,
1762 rvu_read64(rvu, blkaddr,
1763 NIX_AF_TL4X_TOPOLOGY(schq)));
1764 seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq,
1765 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq)));
1766 seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1767 rvu_read64(rvu, blkaddr,
1768 NIX_AF_TL4X_MD_DEBUG0(schq)));
1769 seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1770 rvu_read64(rvu, blkaddr,
1771 NIX_AF_TL4X_MD_DEBUG1(schq)));
1772 seq_puts(m, "\n");
1773 }
1774
1775 if (lvl == NIX_TXSCH_LVL_TL3) {
1776 seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq,
1777 rvu_read64(rvu, blkaddr,
1778 NIX_AF_TL3X_SCHEDULE(schq)));
1779 seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq,
1780 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq)));
1781 seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq,
1782 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq)));
1783 seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq,
1784 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq)));
1785 seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq,
1786 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq)));
1787 seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq,
1788 rvu_read64(rvu, blkaddr,
1789 NIX_AF_TL3X_TOPOLOGY(schq)));
1790 seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", schq,
1791 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq)));
1792 seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1793 rvu_read64(rvu, blkaddr,
1794 NIX_AF_TL3X_MD_DEBUG0(schq)));
1795 seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1796 rvu_read64(rvu, blkaddr,
1797 NIX_AF_TL3X_MD_DEBUG1(schq)));
1798
1799 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1800 & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1801 if (lvl == link_level) {
1802 seq_printf(m,
1803 "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1804 schq, rvu_read64(rvu, blkaddr,
1805 NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1806 for (link = 0; link < hw->cgx_links; link++)
1807 seq_printf(m,
1808 "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1809 schq, link,
1810 rvu_read64(rvu, blkaddr,
1811 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1812 }
1813 seq_puts(m, "\n");
1814 }
1815
1816 if (lvl == NIX_TXSCH_LVL_TL2) {
1817 seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq,
1818 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq)));
1819 seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq,
1820 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq)));
1821 seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq,
1822 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq)));
1823 seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq,
1824 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq)));
1825 seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq,
1826 rvu_read64(rvu, blkaddr,
1827 NIX_AF_TL2X_TOPOLOGY(schq)));
1828 seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq,
1829 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq)));
1830 seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1831 rvu_read64(rvu, blkaddr,
1832 NIX_AF_TL2X_MD_DEBUG0(schq)));
1833 seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1834 rvu_read64(rvu, blkaddr,
1835 NIX_AF_TL2X_MD_DEBUG1(schq)));
1836
1837 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1838 & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1839 if (lvl == link_level) {
1840 seq_printf(m,
1841 "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1842 schq, rvu_read64(rvu, blkaddr,
1843 NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1844 for (link = 0; link < hw->cgx_links; link++)
1845 seq_printf(m,
1846 "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1847 schq, link, rvu_read64(rvu, blkaddr,
1848 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1849 }
1850 seq_puts(m, "\n");
1851 }
1852
1853 if (lvl == NIX_TXSCH_LVL_TL1) {
1854 seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n",
1855 schq,
1856 rvu_read64(rvu, blkaddr,
1857 NIX_AF_TX_LINKX_NORM_CREDIT(schq)));
1858 seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq,
1859 rvu_read64(rvu, blkaddr,
1860 NIX_AF_TX_LINKX_HW_XOFF(schq)));
1861 seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq,
1862 rvu_read64(rvu, blkaddr,
1863 NIX_AF_TL1X_SCHEDULE(schq)));
1864 seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq,
1865 rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq)));
1866 seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq,
1867 rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq)));
1868 seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq,
1869 rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)));
1870 seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq,
1871 rvu_read64(rvu, blkaddr,
1872 NIX_AF_TL1X_TOPOLOGY(schq)));
1873 seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1874 rvu_read64(rvu, blkaddr,
1875 NIX_AF_TL1X_MD_DEBUG0(schq)));
1876 seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1877 rvu_read64(rvu, blkaddr,
1878 NIX_AF_TL1X_MD_DEBUG1(schq)));
1879 seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n",
1880 schq,
1881 rvu_read64(rvu, blkaddr,
1882 NIX_AF_TL1X_DROPPED_PACKETS(schq)));
1883 seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq,
1884 rvu_read64(rvu, blkaddr,
1885 NIX_AF_TL1X_DROPPED_BYTES(schq)));
1886 seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq,
1887 rvu_read64(rvu, blkaddr,
1888 NIX_AF_TL1X_RED_PACKETS(schq)));
1889 seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq,
1890 rvu_read64(rvu, blkaddr,
1891 NIX_AF_TL1X_RED_BYTES(schq)));
1892 seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq,
1893 rvu_read64(rvu, blkaddr,
1894 NIX_AF_TL1X_YELLOW_PACKETS(schq)));
1895 seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq,
1896 rvu_read64(rvu, blkaddr,
1897 NIX_AF_TL1X_YELLOW_BYTES(schq)));
1898 seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq,
1899 rvu_read64(rvu, blkaddr,
1900 NIX_AF_TL1X_GREEN_PACKETS(schq)));
1901 seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq,
1902 rvu_read64(rvu, blkaddr,
1903 NIX_AF_TL1X_GREEN_BYTES(schq)));
1904 seq_puts(m, "\n");
1905 }
1906 }
1907
1908 /*dumps given tm_topo registers*/
rvu_dbg_nix_tm_topo_display(struct seq_file * m,void * unused)1909 static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused)
1910 {
1911 struct nix_hw *nix_hw = m->private;
1912 struct rvu *rvu = nix_hw->rvu;
1913 struct nix_aq_enq_req aq_req;
1914 struct nix_txsch *txsch;
1915 int nixlf, lvl, schq;
1916 u16 pcifunc;
1917
1918 nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1919
1920 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1921 return -EINVAL;
1922
1923 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1924 aq_req.hdr.pcifunc = pcifunc;
1925 aq_req.ctype = NIX_AQ_CTYPE_SQ;
1926 aq_req.op = NIX_AQ_INSTOP_READ;
1927 seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1928
1929 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1930 txsch = &nix_hw->txsch[lvl];
1931 for (schq = 0; schq < txsch->schq.max; schq++) {
1932 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc)
1933 print_tm_topo(m, schq, lvl);
1934 }
1935 }
1936 return 0;
1937 }
1938
rvu_dbg_nix_tm_topo_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1939 static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp,
1940 const char __user *buffer,
1941 size_t count, loff_t *ppos)
1942 {
1943 struct seq_file *m = filp->private_data;
1944 struct nix_hw *nix_hw = m->private;
1945 struct rvu *rvu = nix_hw->rvu;
1946 struct rvu_pfvf *pfvf;
1947 u16 pcifunc;
1948 u64 nixlf;
1949 int ret;
1950
1951 ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
1952 if (ret)
1953 return ret;
1954
1955 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1956 return -EINVAL;
1957
1958 pfvf = rvu_get_pfvf(rvu, pcifunc);
1959 if (!pfvf->sq_ctx) {
1960 dev_warn(rvu->dev, "SQ context is not initialized\n");
1961 return -EINVAL;
1962 }
1963
1964 rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
1965 return count;
1966 }
1967
1968 RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write);
1969
/* Dumps given nix_sq's context (OTx2 layout; CN10K is delegated) */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K silicon uses a different SQ context layout: reinterpret the
	 * same response words and hand off to the CN10K-specific dumper.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	/* W0: basic SQ configuration */
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	/* W1: SQB accounting and scheduler (SMQ) binding */
	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	/* W2: interrupt and aura state */
	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	/* W3: in-flight SMQ/SQE progress state */
	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4-W8: SQB pointer chain (head/tail and enqueue cursors) */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	/* W9: VF inner LSO state */
	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* W10-W15: byte/packet statistics (bitfields widened to u64) */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
2039
print_nix_cn10k_rq_ctx(struct seq_file * m,struct nix_cn10k_rq_ctx_s * rq_ctx)2040 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
2041 struct nix_cn10k_rq_ctx_s *rq_ctx)
2042 {
2043 seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
2044 rq_ctx->ena, rq_ctx->sso_ena);
2045 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
2046 rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
2047 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
2048 rq_ctx->cq, rq_ctx->lenerr_dis);
2049 seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
2050 rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
2051 seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
2052 rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
2053 seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
2054 rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
2055 seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
2056
2057 seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
2058 rq_ctx->spb_aura, rq_ctx->lpb_aura);
2059 seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
2060 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
2061 rq_ctx->sso_grp, rq_ctx->sso_tt);
2062 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
2063 rq_ctx->pb_caching, rq_ctx->wqe_caching);
2064 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
2065 rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
2066 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
2067 rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
2068 seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
2069 rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
2070
2071 seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
2072 seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
2073 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
2074 seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
2075 rq_ctx->wqe_skip, rq_ctx->spb_ena);
2076 seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
2077 rq_ctx->lpb_sizem1, rq_ctx->first_skip);
2078 seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
2079 rq_ctx->later_skip, rq_ctx->xqe_imm_size);
2080 seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
2081 rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
2082
2083 seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
2084 rq_ctx->xqe_drop, rq_ctx->xqe_pass);
2085 seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
2086 rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
2087 seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
2088 rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
2089 seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
2090 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
2091
2092 seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
2093 rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
2094 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
2095 rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
2096 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
2097 rq_ctx->rq_int, rq_ctx->rq_int_ena);
2098 seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
2099
2100 seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
2101 rq_ctx->ltag, rq_ctx->good_utag);
2102 seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
2103 rq_ctx->bad_utag, rq_ctx->flow_tagw);
2104 seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
2105 rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
2106 seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
2107 rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
2108 seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
2109
2110 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
2111 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
2112 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
2113 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
2114 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
2115 }
2116
/* Dumps given nix_rq's context (OTx2 layout; CN10K is delegated) */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K silicon uses a different RQ context layout: reinterpret the
	 * same response words and hand off to the CN10K-specific dumper.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	/* W0: enables and CQ/aura binding */
	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	/* W1: drop enables, caching, SSO binding, auras */
	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	/* W2: buffer layout parameters */
	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	/* W3: SPB/WQE/XQE pass-drop thresholds */
	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	/* W4: interrupt state and LPB pass-drop thresholds */
	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	/* W5: flow tagging */
	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6-W10: statistics counters (bitfields widened to u64) */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
2185
/* Dumps given nix_cq's context; CN10K-only fields are printed when the
 * silicon is not OTx2.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* W0: ring base address */
	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	/* W1: write pointer, averaging and interrupt/backpressure binding */
	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	if (!is_rvu_otx2(rvu)) {
		/* CN10K only: local BPID split across three bitfields;
		 * also print the reassembled value for convenience.
		 */
		seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
		seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
		seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
		seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
			   cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
			   cq_ctx->lbpid_low);
		seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
	}

	/* W2: ring head/tail and averaging state */
	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	/* W3: error interrupts, sizing and drop/backpressure thresholds */
	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	if (!is_rvu_otx2(rvu)) {
		/* CN10K only */
		seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
		seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
			   cq_ctx->cpt_drop_err_en);
	}
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
2233
rvu_dbg_nix_queue_ctx_display(struct seq_file * filp,void * unused,int ctype)2234 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
2235 void *unused, int ctype)
2236 {
2237 void (*print_nix_ctx)(struct seq_file *filp,
2238 struct nix_aq_enq_rsp *rsp) = NULL;
2239 struct nix_hw *nix_hw = filp->private;
2240 struct rvu *rvu = nix_hw->rvu;
2241 struct nix_aq_enq_req aq_req;
2242 struct nix_aq_enq_rsp rsp;
2243 char *ctype_string = NULL;
2244 int qidx, rc, max_id = 0;
2245 struct rvu_pfvf *pfvf;
2246 int nixlf, id, all;
2247 u16 pcifunc;
2248
2249 switch (ctype) {
2250 case NIX_AQ_CTYPE_CQ:
2251 nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
2252 id = rvu->rvu_dbg.nix_cq_ctx.id;
2253 all = rvu->rvu_dbg.nix_cq_ctx.all;
2254 break;
2255
2256 case NIX_AQ_CTYPE_SQ:
2257 nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
2258 id = rvu->rvu_dbg.nix_sq_ctx.id;
2259 all = rvu->rvu_dbg.nix_sq_ctx.all;
2260 break;
2261
2262 case NIX_AQ_CTYPE_RQ:
2263 nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
2264 id = rvu->rvu_dbg.nix_rq_ctx.id;
2265 all = rvu->rvu_dbg.nix_rq_ctx.all;
2266 break;
2267
2268 default:
2269 return -EINVAL;
2270 }
2271
2272 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
2273 return -EINVAL;
2274
2275 pfvf = rvu_get_pfvf(rvu, pcifunc);
2276 if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
2277 seq_puts(filp, "SQ context is not initialized\n");
2278 return -EINVAL;
2279 } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
2280 seq_puts(filp, "RQ context is not initialized\n");
2281 return -EINVAL;
2282 } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
2283 seq_puts(filp, "CQ context is not initialized\n");
2284 return -EINVAL;
2285 }
2286
2287 if (ctype == NIX_AQ_CTYPE_SQ) {
2288 max_id = pfvf->sq_ctx->qsize;
2289 ctype_string = "sq";
2290 print_nix_ctx = print_nix_sq_ctx;
2291 } else if (ctype == NIX_AQ_CTYPE_RQ) {
2292 max_id = pfvf->rq_ctx->qsize;
2293 ctype_string = "rq";
2294 print_nix_ctx = print_nix_rq_ctx;
2295 } else if (ctype == NIX_AQ_CTYPE_CQ) {
2296 max_id = pfvf->cq_ctx->qsize;
2297 ctype_string = "cq";
2298 print_nix_ctx = print_nix_cq_ctx;
2299 }
2300
2301 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
2302 aq_req.hdr.pcifunc = pcifunc;
2303 aq_req.ctype = ctype;
2304 aq_req.op = NIX_AQ_INSTOP_READ;
2305 if (all)
2306 id = 0;
2307 else
2308 max_id = id + 1;
2309 for (qidx = id; qidx < max_id; qidx++) {
2310 aq_req.qidx = qidx;
2311 seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
2312 ctype_string, nixlf, aq_req.qidx);
2313 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
2314 if (rc) {
2315 seq_puts(filp, "Failed to read the context\n");
2316 return -EINVAL;
2317 }
2318 print_nix_ctx(filp, &rsp);
2319 }
2320 return 0;
2321 }
2322
/* Validate a user-supplied (nixlf, queue id) selection and latch it in
 * rvu_dbg state for the matching context-display handler.
 */
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string,
			       struct seq_file *m)
{
	struct nix_hw *nix_hw = m->private;
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* The queue id must fall inside the initialized context's qsize.
	 * No default case: an unknown ctype leaves max_id at 0 and fails
	 * the range check below, matching the original if/else chain.
	 */
	switch (ctype) {
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->sq_ctx->qsize;
		break;
	case NIX_AQ_CTYPE_RQ:
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->rq_ctx->qsize;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->cq_ctx->qsize;
		break;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
			 ctype_string, max_id - 1);
		return -EINVAL;
	}

	/* Store the validated selection for the display handler */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
		break;
	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
		break;
	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
2385
rvu_dbg_nix_queue_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)2386 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
2387 const char __user *buffer,
2388 size_t count, loff_t *ppos,
2389 int ctype)
2390 {
2391 struct seq_file *m = filp->private_data;
2392 struct nix_hw *nix_hw = m->private;
2393 struct rvu *rvu = nix_hw->rvu;
2394 char *cmd_buf, *ctype_string;
2395 int nixlf, id = 0, ret;
2396 bool all = false;
2397
2398 if ((*ppos != 0) || !count)
2399 return -EINVAL;
2400
2401 switch (ctype) {
2402 case NIX_AQ_CTYPE_SQ:
2403 ctype_string = "sq";
2404 break;
2405 case NIX_AQ_CTYPE_RQ:
2406 ctype_string = "rq";
2407 break;
2408 case NIX_AQ_CTYPE_CQ:
2409 ctype_string = "cq";
2410 break;
2411 default:
2412 return -EINVAL;
2413 }
2414
2415 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
2416
2417 if (!cmd_buf)
2418 return count;
2419
2420 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
2421 &nixlf, &id, &all);
2422 if (ret < 0) {
2423 dev_info(rvu->dev,
2424 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
2425 ctype_string, ctype_string);
2426 goto done;
2427 } else {
2428 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
2429 ctype_string, m);
2430 }
2431 done:
2432 kfree(cmd_buf);
2433 return ret ? ret : count;
2434 }
2435
/* debugfs write: select SQ context(s) to dump (thin wrapper) */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_SQ);
}
2443
/* debugfs read: dump the selected SQ context(s) (thin wrapper) */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}
2448
2449 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
2450
/* debugfs write: select RQ context(s) to dump (thin wrapper) */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_RQ);
}
2458
/* debugfs read: dump the selected RQ context(s) (thin wrapper) */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
}
2463
2464 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
2465
/* debugfs write: select CQ context(s) to dump (thin wrapper) */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_CQ);
}
2473
/* debugfs read: dump the selected CQ context(s) (thin wrapper) */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}
2478
2479 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
2480
print_nix_qctx_qsize(struct seq_file * filp,int qsize,unsigned long * bmap,char * qtype)2481 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
2482 unsigned long *bmap, char *qtype)
2483 {
2484 char *buf;
2485
2486 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2487 if (!buf)
2488 return;
2489
2490 bitmap_print_to_pagebuf(false, buf, bmap, qsize);
2491 seq_printf(filp, "%s context count : %d\n", qtype, qsize);
2492 seq_printf(filp, "%s context ena/dis bitmap : %s\n",
2493 qtype, buf);
2494 kfree(buf);
2495 }
2496
print_nix_qsize(struct seq_file * filp,struct rvu_pfvf * pfvf)2497 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
2498 {
2499 if (!pfvf->cq_ctx)
2500 seq_puts(filp, "cq context is not initialized\n");
2501 else
2502 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
2503 "cq");
2504
2505 if (!pfvf->rq_ctx)
2506 seq_puts(filp, "rq context is not initialized\n");
2507 else
2508 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
2509 "rq");
2510
2511 if (!pfvf->sq_ctx)
2512 seq_puts(filp, "sq context is not initialized\n");
2513 else
2514 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
2515 "sq");
2516 }
2517
/* debugfs write: select the NIX LF for the qsize dump (thin wrapper) */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}
2525
/* debugfs read: dump NIX queue sizes/bitmaps (thin wrapper) */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}
2530
2531 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
2532
print_band_prof_ctx(struct seq_file * m,struct nix_bandprof_s * prof)2533 static void print_band_prof_ctx(struct seq_file *m,
2534 struct nix_bandprof_s *prof)
2535 {
2536 char *str;
2537
2538 switch (prof->pc_mode) {
2539 case NIX_RX_PC_MODE_VLAN:
2540 str = "VLAN";
2541 break;
2542 case NIX_RX_PC_MODE_DSCP:
2543 str = "DSCP";
2544 break;
2545 case NIX_RX_PC_MODE_GEN:
2546 str = "Generic";
2547 break;
2548 case NIX_RX_PC_MODE_RSVD:
2549 str = "Reserved";
2550 break;
2551 }
2552 seq_printf(m, "W0: pc_mode\t\t%s\n", str);
2553 str = (prof->icolor == 3) ? "Color blind" :
2554 (prof->icolor == 0) ? "Green" :
2555 (prof->icolor == 1) ? "Yellow" : "Red";
2556 seq_printf(m, "W0: icolor\t\t%s\n", str);
2557 seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
2558 seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
2559 seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
2560 seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
2561 seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
2562 seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
2563 seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
2564 seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
2565
2566 seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
2567 str = (prof->lmode == 0) ? "byte" : "packet";
2568 seq_printf(m, "W1: lmode\t\t%s\n", str);
2569 seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
2570 seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
2571 seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
2572 seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
2573 str = (prof->gc_action == 0) ? "PASS" :
2574 (prof->gc_action == 1) ? "DROP" : "RED";
2575 seq_printf(m, "W1: gc_action\t\t%s\n", str);
2576 str = (prof->yc_action == 0) ? "PASS" :
2577 (prof->yc_action == 1) ? "DROP" : "RED";
2578 seq_printf(m, "W1: yc_action\t\t%s\n", str);
2579 str = (prof->rc_action == 0) ? "PASS" :
2580 (prof->rc_action == 1) ? "DROP" : "RED";
2581 seq_printf(m, "W1: rc_action\t\t%s\n", str);
2582 seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
2583 seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
2584 seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
2585
2586 seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
2587 seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
2588 seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
2589 seq_printf(m, "W4: green_pkt_pass\t%lld\n",
2590 (u64)prof->green_pkt_pass);
2591 seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
2592 (u64)prof->yellow_pkt_pass);
2593 seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
2594 seq_printf(m, "W7: green_octs_pass\t%lld\n",
2595 (u64)prof->green_octs_pass);
2596 seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
2597 (u64)prof->yellow_octs_pass);
2598 seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
2599 seq_printf(m, "W10: green_pkt_drop\t%lld\n",
2600 (u64)prof->green_pkt_drop);
2601 seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
2602 (u64)prof->yellow_pkt_drop);
2603 seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
2604 seq_printf(m, "W13: green_octs_drop\t%lld\n",
2605 (u64)prof->green_octs_drop);
2606 seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
2607 (u64)prof->yellow_octs_drop);
2608 seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
2609 seq_puts(m, "==============================\n");
2610 }
2611
/* Dump every allocated bandwidth profile context, per policer layer,
 * reading each profile through the NIX admin queue.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Skip profiles that are not allocated */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* AQ profile index: bits 13:0 = profile, 15:14 = layer */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			/* Report which PF (and VF, if any) owns this profile */
			pcifunc = ipolicer->pfvf_map[idx];
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}
2666
2667 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
2668
rvu_dbg_nix_band_prof_rsrc_display(struct seq_file * m,void * unused)2669 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2670 {
2671 struct nix_hw *nix_hw = m->private;
2672 struct nix_ipolicer *ipolicer;
2673 int layer;
2674 char *str;
2675
2676 /* Ingress policers do not exist on all platforms */
2677 if (!nix_hw->ipolicer)
2678 return 0;
2679
2680 seq_puts(m, "\nBandwidth profile resource free count\n");
2681 seq_puts(m, "=====================================\n");
2682 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2683 if (layer == BAND_PROF_INVAL_LAYER)
2684 continue;
2685 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2686 (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2687
2688 ipolicer = &nix_hw->ipolicer[layer];
2689 seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
2690 ipolicer->band_prof.max,
2691 rvu_rsrc_free_count(&ipolicer->band_prof));
2692 }
2693 seq_puts(m, "=====================================\n");
2694
2695 return 0;
2696 }
2697
2698 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
2699
rvu_dbg_nix_init(struct rvu * rvu,int blkaddr)2700 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
2701 {
2702 struct nix_hw *nix_hw;
2703
2704 if (!is_block_implemented(rvu->hw, blkaddr))
2705 return;
2706
2707 if (blkaddr == BLKADDR_NIX0) {
2708 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
2709 nix_hw = &rvu->hw->nix[0];
2710 } else {
2711 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
2712 rvu->rvu_dbg.root);
2713 nix_hw = &rvu->hw->nix[1];
2714 }
2715
2716 debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw,
2717 &rvu_dbg_nix_tm_tree_fops);
2718 debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw,
2719 &rvu_dbg_nix_tm_topo_fops);
2720 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2721 &rvu_dbg_nix_sq_ctx_fops);
2722 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2723 &rvu_dbg_nix_rq_ctx_fops);
2724 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2725 &rvu_dbg_nix_cq_ctx_fops);
2726 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2727 &rvu_dbg_nix_ndc_tx_cache_fops);
2728 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2729 &rvu_dbg_nix_ndc_rx_cache_fops);
2730 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2731 &rvu_dbg_nix_ndc_tx_hits_miss_fops);
2732 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2733 &rvu_dbg_nix_ndc_rx_hits_miss_fops);
2734 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
2735 &rvu_dbg_nix_qsize_fops);
2736 debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2737 &rvu_dbg_nix_band_prof_ctx_fops);
2738 debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2739 &rvu_dbg_nix_band_prof_rsrc_fops);
2740 }
2741
rvu_dbg_npa_init(struct rvu * rvu)2742 static void rvu_dbg_npa_init(struct rvu *rvu)
2743 {
2744 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2745
2746 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2747 &rvu_dbg_npa_qsize_fops);
2748 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2749 &rvu_dbg_npa_aura_ctx_fops);
2750 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2751 &rvu_dbg_npa_pool_ctx_fops);
2752 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2753 &rvu_dbg_npa_ndc_cache_fops);
2754 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2755 &rvu_dbg_npa_ndc_hits_miss_fops);
2756 }
2757
/* Fetch and print one cumulative NIX RX stat for a CGX/RPM LMAC.
 * Non-hygienic: uses 'rvu', 'cgxd', 'lmac_id', 's' from the calling
 * scope and assigns the result code to the caller's 'err'; the macro
 * evaluates to the fetched counter value.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
({									\
	u64 cnt;							\
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),		\
				     NIX_STATS_RX, &(cnt));		\
	if (!err)							\
		seq_printf(s, "%s: %llu\n", name, cnt);			\
	cnt;								\
})
2767
/* TX counterpart of PRINT_CGX_CUML_NIXRX_STATUS: same scope reliance
 * ('rvu', 'cgxd', 'lmac_id', 's', 'err'), evaluates to the counter.
 */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)				\
({									\
	u64 cnt;							\
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),		\
				     NIX_STATS_TX, &(cnt));		\
	if (!err)							\
		seq_printf(s, "%s: %llu\n", name, cnt);			\
	cnt;								\
})
2777
/* Print link status, cumulative NIX RX/TX stats and raw CGX/RPM MAC
 * stats for one LMAC.  The PRINT_CGX_CUML_NIX*_STATUS macros assign to
 * 'err' as a side effect, hence the check after every use.
 */
static int cgx_print_stats(struct seq_file *s, int lmac_id)
{
	struct cgx_link_user_info linfo;
	struct mac_ops *mac_ops;
	void *cgxd = s->private;
	u64 ucast, mcast, bcast;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	struct rvu *rvu;

	/* Look up the AF driver instance via its PCI device */
	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	mac_ops = get_mac_ops(cgxd);
	/* There can be no CGX devices at all */
	if (!mac_ops)
		return 0;

	/* Link status */
	seq_puts(s, "\n=======Link Status======\n\n");
	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
	if (err)
		seq_puts(s, "Failed to read link status\n");
	seq_printf(s, "\nLink is %s %d Mbps\n\n",
		   linfo.link_up ? "UP" : "DOWN", linfo.speed);

	/* Rx stats */
	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
		   mac_ops->name);
	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
	if (err)
		return err;
	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
	if (err)
		return err;
	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
	if (err)
		return err;
	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
	if (err)
		return err;

	/* Tx stats */
	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
		   mac_ops->name);
	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
	if (err)
		return err;
	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
	if (err)
		return err;
	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
	if (err)
		return err;
	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
	if (err)
		return err;

	/* Raw MAC Rx stats; field-name table depends on CGX vs RPM */
	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
		if (err)
			return err;
		if (is_rvu_otx2(rvu))
			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
				   rx_stat);
		else
			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
				   rx_stat);
		stat++;
	}

	/* Raw MAC Tx stats */
	stat = 0;
	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
		if (err)
			return err;

		if (is_rvu_otx2(rvu))
			seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
				   tx_stat);
		else
			seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
				   tx_stat);
		stat++;
	}

	return err;
}
2883
rvu_dbg_derive_lmacid(struct seq_file * filp,int * lmac_id)2884 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2885 {
2886 struct dentry *current_dir;
2887 char *buf;
2888
2889 current_dir = filp->file->f_path.dentry->d_parent;
2890 buf = strrchr(current_dir->d_name.name, 'c');
2891 if (!buf)
2892 return -EINVAL;
2893
2894 return kstrtoint(buf + 1, 10, lmac_id);
2895 }
2896
/* debugfs "stats": derive the LMAC from the path, then print its stats */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id;
	int err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_stats(filp, lmac_id);
}
2907
2908 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2909
/* Print the DMAC filter configuration for one LMAC: broadcast/multicast
 * accept mode, filter mode (unicast CAM vs promiscuous) and the enabled
 * DMAC CAM entries that belong to this LMAC.
 */
static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
{
	struct pci_dev *pdev = NULL;
	void *cgxd = s->private;
	char *bcast, *mcast;
	u16 index, domain;
	u8 dmac[ETH_ALEN];
	struct rvu *rvu;
	u64 cfg, mac;
	int pf;

	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	/* NOTE(review): assumes RVU PFs sit on PCI domain 2 at bus pf+1,
	 * slot 0 — platform-specific; confirm for new silicon.
	 */
	domain = 2;

	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
	if (!pdev)
		return 0;

	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";

	seq_puts(s,
		 "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
	seq_printf(s, "%s PF%d %9s %9s",
		   dev_name(&pdev->dev), pf, bcast, mcast);
	if (cfg & CGX_DMAC_CAM_ACCEPT)
		seq_printf(s, "%12s\n\n", "UNICAST");
	else
		seq_printf(s, "%16s\n\n", "PROMISCUOUS");

	seq_puts(s, "\nDMAC-INDEX ADDRESS\n");

	/* Walk the 32 DMAC CAM entries */
	for (index = 0 ; index < 32 ; index++) {
		cfg = cgx_read_dmac_entry(cgxd, index);
		/* Display enabled dmac entries associated with current lmac */
		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
			u64_to_ether_addr(mac, dmac);
			seq_printf(s, "%7d %pM\n", index, dmac);
		}
	}

	/* Drop the reference taken by pci_get_domain_bus_and_slot() */
	pci_dev_put(pdev);
	return 0;
}
2962
/* debugfs "mac_filter": derive the LMAC, then show its DMAC filters */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int lmac_id;
	int err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_dmac_flt(filp, lmac_id);
}
2973
2974 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2975
/* Create the CGX/RPM debugfs hierarchy:
 * <root>/<mac>/<mac><N>/lmac<M>/{stats,mac_filter}
 */
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	unsigned long lmac_bmap;
	int i, lmac_id;
	char dname[20];	/* holds "<mac-name><N>" or "lmac<N>" */
	void *cgx;

	if (!cgx_get_cgxcnt_max())
		return;

	/* mac_ops->name is "cgx" or "rpm" depending on silicon */
	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	if (!mac_ops)
		return;

	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
						   rvu->rvu_dbg.root);

	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
		cgx = rvu_cgx_pdata(i, rvu);
		if (!cgx)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgx);
		/* cgx debugfs dir */
		sprintf(dname, "%s%d", mac_ops->name, i);
		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
						      rvu->rvu_dbg.cgx_root);

		for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			/* lmac debugfs dir */
			sprintf(dname, "lmac%d", lmac_id);
			rvu->rvu_dbg.lmac =
				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);

			/* Both files resolve their LMAC from the dir name */
			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
					    cgx, &rvu_dbg_cgx_stat_fops);
			debugfs_create_file("mac_filter", 0600,
					    rvu->rvu_dbg.lmac, cgx,
					    &rvu_dbg_cgx_dmac_flt_fops);
		}
	}
}
3018
3019 /* NPC debugfs APIs */
rvu_print_npc_mcam_info(struct seq_file * s,u16 pcifunc,int blkaddr)3020 static void rvu_print_npc_mcam_info(struct seq_file *s,
3021 u16 pcifunc, int blkaddr)
3022 {
3023 struct rvu *rvu = s->private;
3024 int entry_acnt, entry_ecnt;
3025 int cntr_acnt, cntr_ecnt;
3026
3027 rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
3028 &entry_acnt, &entry_ecnt);
3029 rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
3030 &cntr_acnt, &cntr_ecnt);
3031 if (!entry_acnt && !cntr_acnt)
3032 return;
3033
3034 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
3035 seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
3036 rvu_get_pf(pcifunc));
3037 else
3038 seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
3039 rvu_get_pf(pcifunc),
3040 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
3041
3042 if (entry_acnt) {
3043 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
3044 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
3045 }
3046 if (cntr_acnt) {
3047 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
3048 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
3049 }
3050 }
3051
rvu_dbg_npc_mcam_info_display(struct seq_file * filp,void * unsued)3052 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
3053 {
3054 struct rvu *rvu = filp->private;
3055 int pf, vf, numvfs, blkaddr;
3056 struct npc_mcam *mcam;
3057 u16 pcifunc, counters;
3058 u64 cfg;
3059
3060 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3061 if (blkaddr < 0)
3062 return -ENODEV;
3063
3064 mcam = &rvu->hw->mcam;
3065 counters = rvu->hw->npc_counters;
3066
3067 seq_puts(filp, "\nNPC MCAM info:\n");
3068 /* MCAM keywidth on receive and transmit sides */
3069 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
3070 cfg = (cfg >> 32) & 0x07;
3071 seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
3072 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
3073 "224bits" : "448bits"));
3074 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
3075 cfg = (cfg >> 32) & 0x07;
3076 seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
3077 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
3078 "224bits" : "448bits"));
3079
3080 mutex_lock(&mcam->lock);
3081 /* MCAM entries */
3082 seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
3083 seq_printf(filp, "\t\t Reserved \t: %d\n",
3084 mcam->total_entries - mcam->bmap_entries);
3085 seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
3086
3087 /* MCAM counters */
3088 seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
3089 seq_printf(filp, "\t\t Reserved \t: %d\n",
3090 counters - mcam->counters.max);
3091 seq_printf(filp, "\t\t Available \t: %d\n",
3092 rvu_rsrc_free_count(&mcam->counters));
3093
3094 if (mcam->bmap_entries == mcam->bmap_fcnt) {
3095 mutex_unlock(&mcam->lock);
3096 return 0;
3097 }
3098
3099 seq_puts(filp, "\n\t\t Current allocation\n");
3100 seq_puts(filp, "\t\t====================\n");
3101 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
3102 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
3103 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
3104
3105 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3106 numvfs = (cfg >> 12) & 0xFF;
3107 for (vf = 0; vf < numvfs; vf++) {
3108 pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
3109 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
3110 }
3111 }
3112
3113 mutex_unlock(&mcam->lock);
3114 return 0;
3115 }
3116
3117 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
3118
/* Print the hit count of the MCAM RX miss action's match-stat counter. */
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
					     void *unused)
{
	struct rvu *rvu = filp->private;
	struct npc_mcam *mcam;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;

	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
	/* rx_miss_act_cntr indexes the NPC match-stat register bank */
	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
		   rvu_read64(rvu, blkaddr,
			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));

	return 0;
}
3139
3140 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
3141
/* Print an MPLS LSE's TTL field and its mask; relies on 's' being the
 * seq_file in the calling scope.
 */
#define RVU_DBG_PRINT_MPLS_TTL(pkt, mask)                                     \
do {									      \
	seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt));     \
	seq_printf(s, "mask 0x%lx\n",                                         \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask));               \
} while (0)                                                                   \
3148
/* Print an MPLS LSE's Label/TC/BOS fields and their masks; takes local
 * copies so each argument is evaluated only once.  Relies on 's' being
 * the seq_file in the calling scope.
 */
#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask)                               \
do {									      \
	typeof(_pkt) (pkt) = (_pkt);					      \
	typeof(_mask) (mask) = (_mask);					      \
	seq_printf(s, "%ld %ld %ld\n",					      \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt),		      \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt),		      \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt));		      \
	seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n",			      \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask),		      \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask),		      \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask));		      \
} while (0)                                                                   \
3162
rvu_dbg_npc_mcam_show_flows(struct seq_file * s,struct rvu_npc_mcam_rule * rule)3163 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
3164 struct rvu_npc_mcam_rule *rule)
3165 {
3166 u8 bit;
3167
3168 for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
3169 seq_printf(s, "\t%s ", npc_get_field_name(bit));
3170 switch (bit) {
3171 case NPC_LXMB:
3172 if (rule->lxmb == 1)
3173 seq_puts(s, "\tL2M nibble is set\n");
3174 else
3175 seq_puts(s, "\tL2B nibble is set\n");
3176 break;
3177 case NPC_DMAC:
3178 seq_printf(s, "%pM ", rule->packet.dmac);
3179 seq_printf(s, "mask %pM\n", rule->mask.dmac);
3180 break;
3181 case NPC_SMAC:
3182 seq_printf(s, "%pM ", rule->packet.smac);
3183 seq_printf(s, "mask %pM\n", rule->mask.smac);
3184 break;
3185 case NPC_ETYPE:
3186 seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
3187 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
3188 break;
3189 case NPC_OUTER_VID:
3190 seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
3191 seq_printf(s, "mask 0x%x\n",
3192 ntohs(rule->mask.vlan_tci));
3193 break;
3194 case NPC_INNER_VID:
3195 seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
3196 seq_printf(s, "mask 0x%x\n",
3197 ntohs(rule->mask.vlan_itci));
3198 break;
3199 case NPC_TOS:
3200 seq_printf(s, "%d ", rule->packet.tos);
3201 seq_printf(s, "mask 0x%x\n", rule->mask.tos);
3202 break;
3203 case NPC_SIP_IPV4:
3204 seq_printf(s, "%pI4 ", &rule->packet.ip4src);
3205 seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
3206 break;
3207 case NPC_DIP_IPV4:
3208 seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
3209 seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
3210 break;
3211 case NPC_SIP_IPV6:
3212 seq_printf(s, "%pI6 ", rule->packet.ip6src);
3213 seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
3214 break;
3215 case NPC_DIP_IPV6:
3216 seq_printf(s, "%pI6 ", rule->packet.ip6dst);
3217 seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
3218 break;
3219 case NPC_IPFRAG_IPV6:
3220 seq_printf(s, "0x%x ", rule->packet.next_header);
3221 seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
3222 break;
3223 case NPC_IPFRAG_IPV4:
3224 seq_printf(s, "0x%x ", rule->packet.ip_flag);
3225 seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
3226 break;
3227 case NPC_SPORT_TCP:
3228 case NPC_SPORT_UDP:
3229 case NPC_SPORT_SCTP:
3230 seq_printf(s, "%d ", ntohs(rule->packet.sport));
3231 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
3232 break;
3233 case NPC_DPORT_TCP:
3234 case NPC_DPORT_UDP:
3235 case NPC_DPORT_SCTP:
3236 seq_printf(s, "%d ", ntohs(rule->packet.dport));
3237 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
3238 break;
3239 case NPC_TCP_FLAGS:
3240 seq_printf(s, "%d ", rule->packet.tcp_flags);
3241 seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
3242 break;
3243 case NPC_IPSEC_SPI:
3244 seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
3245 seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
3246 break;
3247 case NPC_MPLS1_LBTCBOS:
3248 RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
3249 rule->mask.mpls_lse[0]);
3250 break;
3251 case NPC_MPLS1_TTL:
3252 RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
3253 rule->mask.mpls_lse[0]);
3254 break;
3255 case NPC_MPLS2_LBTCBOS:
3256 RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
3257 rule->mask.mpls_lse[1]);
3258 break;
3259 case NPC_MPLS2_TTL:
3260 RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
3261 rule->mask.mpls_lse[1]);
3262 break;
3263 case NPC_MPLS3_LBTCBOS:
3264 RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
3265 rule->mask.mpls_lse[2]);
3266 break;
3267 case NPC_MPLS3_TTL:
3268 RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
3269 rule->mask.mpls_lse[2]);
3270 break;
3271 case NPC_MPLS4_LBTCBOS:
3272 RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
3273 rule->mask.mpls_lse[3]);
3274 break;
3275 case NPC_MPLS4_TTL:
3276 RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
3277 rule->mask.mpls_lse[3]);
3278 break;
3279 case NPC_TYPE_ICMP:
3280 seq_printf(s, "%d ", rule->packet.icmp_type);
3281 seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
3282 break;
3283 case NPC_CODE_ICMP:
3284 seq_printf(s, "%d ", rule->packet.icmp_code);
3285 seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
3286 break;
3287 default:
3288 seq_puts(s, "\n");
3289 break;
3290 }
3291 }
3292 }
3293
rvu_dbg_npc_mcam_show_action(struct seq_file * s,struct rvu_npc_mcam_rule * rule)3294 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
3295 struct rvu_npc_mcam_rule *rule)
3296 {
3297 if (is_npc_intf_tx(rule->intf)) {
3298 switch (rule->tx_action.op) {
3299 case NIX_TX_ACTIONOP_DROP:
3300 seq_puts(s, "\taction: Drop\n");
3301 break;
3302 case NIX_TX_ACTIONOP_UCAST_DEFAULT:
3303 seq_puts(s, "\taction: Unicast to default channel\n");
3304 break;
3305 case NIX_TX_ACTIONOP_UCAST_CHAN:
3306 seq_printf(s, "\taction: Unicast to channel %d\n",
3307 rule->tx_action.index);
3308 break;
3309 case NIX_TX_ACTIONOP_MCAST:
3310 seq_puts(s, "\taction: Multicast\n");
3311 break;
3312 case NIX_TX_ACTIONOP_DROP_VIOL:
3313 seq_puts(s, "\taction: Lockdown Violation Drop\n");
3314 break;
3315 default:
3316 break;
3317 }
3318 } else {
3319 switch (rule->rx_action.op) {
3320 case NIX_RX_ACTIONOP_DROP:
3321 seq_puts(s, "\taction: Drop\n");
3322 break;
3323 case NIX_RX_ACTIONOP_UCAST:
3324 seq_printf(s, "\taction: Direct to queue %d\n",
3325 rule->rx_action.index);
3326 break;
3327 case NIX_RX_ACTIONOP_RSS:
3328 seq_puts(s, "\taction: RSS\n");
3329 break;
3330 case NIX_RX_ACTIONOP_UCAST_IPSEC:
3331 seq_puts(s, "\taction: Unicast ipsec\n");
3332 break;
3333 case NIX_RX_ACTIONOP_MCAST:
3334 seq_puts(s, "\taction: Multicast\n");
3335 break;
3336 default:
3337 break;
3338 }
3339 }
3340 }
3341
/* Map an NPC interface id to its printable name. */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
3359
/* Dump every installed MCAM rule: owner PF/VF, direction/interface,
 * matched fields, forward target (RX only), action, enable state and
 * hit counter if one is attached.  Walks mcam->mcam_rules under the
 * mcam lock.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		/* Non-zero FUNC bits => rule was installed by a VF */
		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules carry a forward target and channel match */
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}
3425
3426 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
3427
/* Dump the NPC exact-match tables: the hashed MEM table is printed row
 * by row across its ways, then the overflow CAM table.  Runs under the
 * exact-match table lock.
 */
static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
{
	struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
	struct npc_exact_table_entry *cam_entry;
	struct npc_exact_table *table;
	struct rvu *rvu = s->private;
	int i, j;

	u8 bitmap = 0;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Check if there is at least one entry in mem table */
	if (!table->mem_tbl_entry_cnt)
		goto dump_cam_table;

	/* Print table headers */
	seq_puts(s, "\n\tExact Match MEM Table\n");
	seq_puts(s, "Index\t");

	/* Start a cursor at the head of each way's entry list; the walk
	 * below assumes each list is ordered by rising index.
	 */
	for (i = 0; i < table->mem_table.ways; i++) {
		mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
							struct npc_exact_table_entry, list);

		seq_printf(s, "Way-%d\t\t\t\t\t", i);
	}

	seq_puts(s, "\n");
	for (i = 0; i < table->mem_table.ways; i++)
		seq_puts(s, "\tChan  MAC                     \t");

	seq_puts(s, "\n\n");

	/* Print mem table entries */
	for (i = 0; i < table->mem_table.depth; i++) {
		/* Bit j set => way j has an entry at this row index */
		bitmap = 0;
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!mem_entry[j])
				continue;

			if (mem_entry[j]->index != i)
				continue;

			bitmap |= BIT(j);
		}

		/* No valid entries */
		if (!bitmap)
			continue;

		seq_printf(s, "%d\t", i);
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!(bitmap & BIT(j))) {
				seq_puts(s, "nil\t\t\t\t\t");
				continue;
			}

			seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
				   mem_entry[j]->mac);
			/* Advance this way's cursor past the printed row */
			mem_entry[j] = list_next_entry(mem_entry[j], list);
		}
		seq_puts(s, "\n");
	}

dump_cam_table:

	if (!table->cam_tbl_entry_cnt)
		goto done;

	seq_puts(s, "\n\tExact Match CAM Table\n");
	seq_puts(s, "index\tchan\tMAC\n");

	/* Traverse cam table entries */
	list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
		seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
			   cam_entry->mac);
	}

done:
	mutex_unlock(&table->lock);
	return 0;
}
3512
3513 RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
3514
rvu_dbg_npc_exact_show_info(struct seq_file * s,void * unused)3515 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
3516 {
3517 struct npc_exact_table *table;
3518 struct rvu *rvu = s->private;
3519 int i;
3520
3521 table = rvu->hw->table;
3522
3523 seq_puts(s, "\n\tExact Table Info\n");
3524 seq_printf(s, "Exact Match Feature : %s\n",
3525 rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable");
3526 if (!rvu->hw->cap.npc_exact_match_enabled)
3527 return 0;
3528
3529 seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
3530 for (i = 0; i < table->num_drop_rules; i++)
3531 seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
3532
3533 seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
3534 for (i = 0; i < table->num_drop_rules; i++)
3535 seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
3536
3537 seq_puts(s, "\n\tMEM Table Info\n");
3538 seq_printf(s, "Ways : %d\n", table->mem_table.ways);
3539 seq_printf(s, "Depth : %d\n", table->mem_table.depth);
3540 seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
3541 seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
3542 seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
3543
3544 seq_puts(s, "\n\tCAM Table Info\n");
3545 seq_printf(s, "Depth : %d\n", table->cam_table.depth);
3546
3547 return 0;
3548 }
3549
3550 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
3551
rvu_dbg_npc_exact_drop_cnt(struct seq_file * s,void * unused)3552 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3553 {
3554 struct npc_exact_table *table;
3555 struct rvu *rvu = s->private;
3556 struct npc_key_field *field;
3557 u16 chan, pcifunc;
3558 int blkaddr, i;
3559 u64 cfg, cam1;
3560 char *str;
3561
3562 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3563 table = rvu->hw->table;
3564
3565 field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3566
3567 seq_puts(s, "\n\t Exact Hit on drop status\n");
3568 seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3569
3570 for (i = 0; i < table->num_drop_rules; i++) {
3571 pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3572 cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3573
3574 /* channel will be always in keyword 0 */
3575 cam1 = rvu_read64(rvu, blkaddr,
3576 NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3577 chan = field->kw_mask[0] & cam1;
3578
3579 str = (cfg & 1) ? "enabled" : "disabled";
3580
3581 seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
3582 rvu_read64(rvu, blkaddr,
3583 NPC_AF_MATCH_STATX(table->counter_idx[i])),
3584 chan, str);
3585 }
3586
3587 return 0;
3588 }
3589
3590 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
3591
rvu_dbg_npc_init(struct rvu * rvu)3592 static void rvu_dbg_npc_init(struct rvu *rvu)
3593 {
3594 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
3595
3596 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
3597 &rvu_dbg_npc_mcam_info_fops);
3598 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
3599 &rvu_dbg_npc_mcam_rules_fops);
3600
3601 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
3602 &rvu_dbg_npc_rx_miss_act_fops);
3603
3604 if (!rvu->hw->cap.npc_exact_match_enabled)
3605 return;
3606
3607 debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
3608 &rvu_dbg_npc_exact_entries_fops);
3609
3610 debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
3611 &rvu_dbg_npc_exact_info_fops);
3612
3613 debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
3614 &rvu_dbg_npc_exact_drop_cnt_fops);
3615
3616 }
3617
/* Print FREE/BUSY bitmaps for one class of CPT engines.
 *
 * CPT engines are numbered SE first, then IE, then AE; the per-class
 * counts come from CPT_AF_CONSTANTS1 (SE[15:0], IE[31:16], AE[47:32]).
 * Bit i of each bitmap corresponds to the i-th engine of the class.
 *
 * Returns 0 on success, -EINVAL for an unknown engine type.
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u16 max_ses, max_ies, max_aes;
	u64 busy_sts = 0, free_sts = 0;
	u32 e_min, e_max, e, bit;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	switch (eng_type) {
	case CPT_SE_TYPE:
		e_min = 0;
		e_max = max_ses;
		break;
	case CPT_IE_TYPE:
		e_min = max_ses;
		e_max = max_ses + max_ies;
		break;
	case CPT_AE_TYPE:
		e_min = max_ses + max_ies;
		e_max = max_ses + max_ies + max_aes;
		break;
	default:
		return -EINVAL;
	}

	for (e = e_min, bit = 0; e < e_max; e++, bit++) {
		/* STS bit0 = busy, bit1 = free */
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
		if (reg & 0x1)
			busy_sts |= 1ULL << bit;

		if (reg & 0x2)
			free_sts |= 1ULL << bit;
	}
	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);

	return 0;
}
3663
/* debugfs show: status bitmaps of CPT asymmetric (AE) engines */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3670
/* debugfs show: status bitmaps of CPT symmetric (SE) engines */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3677
/* debugfs show: status bitmaps of CPT IPsec (IE) engines */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
3684
rvu_dbg_cpt_engines_info_display(struct seq_file * filp,void * unused)3685 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
3686 {
3687 struct cpt_ctx *ctx = filp->private;
3688 u16 max_ses, max_ies, max_aes;
3689 struct rvu *rvu = ctx->rvu;
3690 int blkaddr = ctx->blkaddr;
3691 u32 e_max, e;
3692 u64 reg;
3693
3694 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3695 max_ses = reg & 0xffff;
3696 max_ies = (reg >> 16) & 0xffff;
3697 max_aes = (reg >> 32) & 0xffff;
3698
3699 e_max = max_ses + max_ies + max_aes;
3700
3701 seq_puts(filp, "===========================================\n");
3702 for (e = 0; e < e_max; e++) {
3703 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
3704 seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
3705 reg & 0xff);
3706 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
3707 seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
3708 reg);
3709 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
3710 seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
3711 reg);
3712 seq_puts(filp, "===========================================\n");
3713 }
3714 return 0;
3715 }
3716
3717 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
3718
rvu_dbg_cpt_lfs_info_display(struct seq_file * filp,void * unused)3719 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
3720 {
3721 struct cpt_ctx *ctx = filp->private;
3722 int blkaddr = ctx->blkaddr;
3723 struct rvu *rvu = ctx->rvu;
3724 struct rvu_block *block;
3725 struct rvu_hwinfo *hw;
3726 u64 reg;
3727 u32 lf;
3728
3729 hw = rvu->hw;
3730 block = &hw->block[blkaddr];
3731 if (!block->lf.bmap)
3732 return -ENODEV;
3733
3734 seq_puts(filp, "===========================================\n");
3735 for (lf = 0; lf < block->lf.max; lf++) {
3736 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
3737 seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
3738 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
3739 seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
3740 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
3741 seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
3742 reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
3743 (lf << block->lfshift));
3744 seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
3745 seq_puts(filp, "===========================================\n");
3746 }
3747 return 0;
3748 }
3749
3750 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
3751
rvu_dbg_cpt_err_info_display(struct seq_file * filp,void * unused)3752 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
3753 {
3754 struct cpt_ctx *ctx = filp->private;
3755 struct rvu *rvu = ctx->rvu;
3756 int blkaddr = ctx->blkaddr;
3757 u64 reg0, reg1;
3758
3759 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
3760 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
3761 seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
3762 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
3763 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
3764 seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
3765 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
3766 seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
3767 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
3768 seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
3769 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
3770 seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
3771 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
3772 seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);
3773
3774 return 0;
3775 }
3776
3777 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
3778
rvu_dbg_cpt_pc_display(struct seq_file * filp,void * unused)3779 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
3780 {
3781 struct cpt_ctx *ctx = filp->private;
3782 struct rvu *rvu = ctx->rvu;
3783 int blkaddr = ctx->blkaddr;
3784 u64 reg;
3785
3786 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
3787 seq_printf(filp, "CPT instruction requests %llu\n", reg);
3788 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
3789 seq_printf(filp, "CPT instruction latency %llu\n", reg);
3790 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
3791 seq_printf(filp, "CPT NCB read requests %llu\n", reg);
3792 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
3793 seq_printf(filp, "CPT NCB read latency %llu\n", reg);
3794 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
3795 seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
3796 reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
3797 seq_printf(filp, "CPT active cycles pc %llu\n", reg);
3798 reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
3799 seq_printf(filp, "CPT clock count pc %llu\n", reg);
3800
3801 return 0;
3802 }
3803
3804 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
3805
rvu_dbg_cpt_init(struct rvu * rvu,int blkaddr)3806 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
3807 {
3808 struct cpt_ctx *ctx;
3809
3810 if (!is_block_implemented(rvu->hw, blkaddr))
3811 return;
3812
3813 if (blkaddr == BLKADDR_CPT0) {
3814 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
3815 ctx = &rvu->rvu_dbg.cpt_ctx[0];
3816 ctx->blkaddr = BLKADDR_CPT0;
3817 ctx->rvu = rvu;
3818 } else {
3819 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
3820 rvu->rvu_dbg.root);
3821 ctx = &rvu->rvu_dbg.cpt_ctx[1];
3822 ctx->blkaddr = BLKADDR_CPT1;
3823 ctx->rvu = rvu;
3824 }
3825
3826 debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3827 &rvu_dbg_cpt_pc_fops);
3828 debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3829 &rvu_dbg_cpt_ae_sts_fops);
3830 debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3831 &rvu_dbg_cpt_se_sts_fops);
3832 debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3833 &rvu_dbg_cpt_ie_sts_fops);
3834 debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3835 &rvu_dbg_cpt_engines_info_fops);
3836 debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3837 &rvu_dbg_cpt_lfs_info_fops);
3838 debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3839 &rvu_dbg_cpt_err_info_fops);
3840 }
3841
rvu_get_dbg_dir_name(struct rvu * rvu)3842 static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
3843 {
3844 if (!is_rvu_otx2(rvu))
3845 return "cn10k";
3846 else
3847 return "octeontx2";
3848 }
3849
rvu_dbg_init(struct rvu * rvu)3850 void rvu_dbg_init(struct rvu *rvu)
3851 {
3852 rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
3853
3854 debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
3855 &rvu_dbg_rsrc_status_fops);
3856
3857 if (!is_rvu_otx2(rvu))
3858 debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
3859 rvu, &rvu_dbg_lmtst_map_table_fops);
3860
3861 if (!cgx_get_cgxcnt_max())
3862 goto create;
3863
3864 if (is_rvu_otx2(rvu))
3865 debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
3866 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3867 else
3868 debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
3869 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3870
3871 create:
3872 rvu_dbg_npa_init(rvu);
3873 rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
3874
3875 rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
3876 rvu_dbg_cgx_init(rvu);
3877 rvu_dbg_npc_init(rvu);
3878 rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
3879 rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
3880 rvu_dbg_mcs_init(rvu);
3881 }
3882
/* Tear down the entire debugfs tree created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
3887
3888 #endif /* CONFIG_DEBUG_FS */
3889