1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3 *
4 * Copyright (C) 2019 Marvell.
5 *
6 */
7
8 #ifdef CONFIG_DEBUG_FS
9
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 #include "rvu_npc_hash.h"
22 #include "mcs.h"
23
24 #define DEBUGFS_DIR_NAME "octeontx2"
25
/* Generic CGX statistics indices; the per-direction meaning of each index
 * is given by cgx_rx_stats_fields[] / cgx_tx_stats_fields[] below.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
47
/* Human-readable labels for the CGX RX statistics counters, indexed by
 * the CGX_STATx enum above (RX uses indices 0-12 only).
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
63
/* Human-readable labels for the CGX TX statistics counters, indexed by
 * the CGX_STATx enum above (TX uses indices 0-17).
 */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
84
/* Human-readable labels for the RPM RX statistics counters; position in
 * the array corresponds to the hardware counter index.
 *
 * Fixed two garbled user-visible strings: "with out error" -> "without
 * error" and "a1nrange" -> "in-range".  Note: this changes the debugfs
 * output text; debugfs has no stable-ABI guarantee, but any scripts
 * matching the old text would need updating.
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets without error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with in-range length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
130
/* Human-readable labels for the RPM TX statistics counters; position in
 * the array corresponds to the hardware counter index.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
167
/* CPT engine group types (presumably AE = asymmetric, SE = symmetric,
 * IE = IPsec engines, per Marvell CPT naming — confirm against CPT docs).
 * Values start at 1 so 0 can serve as an invalid/unset type.
 */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
173
/* These two defines let callers pass "NULL" as the read_op/write_op
 * argument of the FOPS macros below: token pasting then expands
 * rvu_dbg_##NULL / rvu_dbg_open_##NULL to a plain NULL pointer.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Declare a seq_file-based debugfs file: generates an open handler that
 * binds rvu_dbg_<read_op> as the seq show callback (with inode->i_private
 * as its private data) and a file_operations struct named
 * rvu_dbg_<name>_fops.  write_op may be NULL (see defines above).
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Declare a raw (non-seq_file) debugfs file: read/write callbacks manage
 * their own buffers; simple_open just stashes inode->i_private in
 * file->private_data.
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
198
199 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
200
/* Dump per-port (LMAC) MCS statistics for one direction.
 * @filp: seq_file whose ->private is the struct mcs instance.
 * @dir:  MCS_RX or MCS_TX.
 *
 * Walks every LMAC present in the hardware bitmap and prints TCAM-miss
 * and parser-error counts; preempt errors only exist on multi-block
 * (mcs_blks > 1) silicon and only for RX, sectag-insert errors only for
 * TX.  stats_lock serializes against other stats readers.
 */
static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_port_stats stats;
	int lmac;

	seq_puts(filp, "\n port stats\n");
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
		mcs_get_port_stats(mcs, &stats, lmac, dir);
		seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
		seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);

		if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
			seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
				   stats.preempt_err_cnt);
		if (dir == MCS_TX)
			seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
				   stats.sectag_insert_err_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}
224
/* seq_file show callback: RX flavor of the common port-stats dump. */
static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
}
229
230 RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
231
/* seq_file show callback: TX flavor of the common port-stats dump. */
static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
}
236
237 RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
238
/* Dump per-SA (secure association) MCS statistics for one direction.
 * @filp: seq_file whose ->private is the struct mcs instance.
 * @dir:  MCS_RX or MCS_TX; selects which SA resource bitmap is walked
 *        and which counter set is printed.
 *
 * Only SAs currently allocated (set in the rsrc bitmap) are shown.
 * NOTE(review): the "TX/RX SA stats" header is printed inside the loop,
 * i.e. once per allocated SA — matches the existing output format, so
 * left as-is.
 */
static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_sa_stats stats;
	struct rsrc_bmap *map;
	int sa_id;

	if (dir == MCS_TX) {
		map = &mcs->tx.sa;
		mutex_lock(&mcs->stats_lock);
		for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
			seq_puts(filp, "\n TX SA stats\n");
			mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
			seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
				   stats.pkt_encrypt_cnt);

			seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
				   stats.pkt_protected_cnt);
		}
		mutex_unlock(&mcs->stats_lock);
		return 0;
	}

	/* RX stats */
	map = &mcs->rx.sa;
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
		seq_puts(filp, "\n RX SA stats\n");
		mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
		seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
		seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
		seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}
277
/* seq_file show callback: RX flavor of the common SA-stats dump. */
static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
}
282
283 RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
284
/* seq_file show callback: TX flavor of the common SA-stats dump. */
static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
}
289
290 RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
291
rvu_dbg_mcs_tx_sc_stats_display(struct seq_file * filp,void * unused)292 static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
293 {
294 struct mcs *mcs = filp->private;
295 struct mcs_sc_stats stats;
296 struct rsrc_bmap *map;
297 int sc_id;
298
299 map = &mcs->tx.sc;
300 seq_puts(filp, "\n SC stats\n");
301
302 mutex_lock(&mcs->stats_lock);
303 for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
304 mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
305 seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
306 seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
307 seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
308
309 if (mcs->hw->mcs_blks == 1) {
310 seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
311 stats.octet_encrypt_cnt);
312 seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
313 stats.octet_protected_cnt);
314 }
315 }
316 mutex_unlock(&mcs->stats_lock);
317 return 0;
318 }
319
320 RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
321
/* Dump RX secure-channel (SC) statistics for every allocated SC.
 * Counter availability depends on silicon: delay/ok packet counters
 * exist only on multi-block (mcs_blks > 1) parts, octet counters only
 * on single-block parts.  Holds stats_lock across the whole walk.
 */
static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->rx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
		seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
		seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);

		if (mcs->hw->mcs_blks > 1) {
			seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
			seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
		}
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
				   stats.octet_decrypt_cnt);
			seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
				   stats.octet_validate_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}
356
357 RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
358
rvu_dbg_mcs_flowid_stats_display(struct seq_file * filp,void * unused,int dir)359 static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
360 {
361 struct mcs *mcs = filp->private;
362 struct mcs_flowid_stats stats;
363 struct rsrc_bmap *map;
364 int flow_id;
365
366 seq_puts(filp, "\n Flowid stats\n");
367
368 if (dir == MCS_RX)
369 map = &mcs->rx.flow_ids;
370 else
371 map = &mcs->tx.flow_ids;
372
373 mutex_lock(&mcs->stats_lock);
374 for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
375 mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
376 seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
377 }
378 mutex_unlock(&mcs->stats_lock);
379 return 0;
380 }
381
/* seq_file show callback: TX flavor of the common flow-id stats dump. */
static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
}
386
387 RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
388
/* seq_file show callback: RX flavor of the common flow-id stats dump. */
static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
}
393
394 RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
395
/* Dump TX SecY statistics for every allocated SecY: controlled and
 * uncontrolled port packet/octet counters plus encryption/protection
 * and error counters.  Holds stats_lock across the whole walk.
 *
 * NOTE(review): the "Pkts on active sa" label prints
 * stats.pkt_noactivesa_cnt — the label likely means "no active sa";
 * left unchanged to preserve the existing debugfs output.
 */
static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->tx.secy;
	seq_puts(filp, "\n MCS TX secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
			   stats.octet_encrypted_cnt);
		seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
			   stats.octet_protected_cnt);
		seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
			   stats.pkt_noactivesa_cnt);
		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}
436
437 RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
438
/* Dump RX SecY statistics for every allocated SecY: controlled and
 * uncontrolled port counters, decryption/validation octet counters and
 * the various tagging/SA error counters.  The no-tag counter exists
 * only on multi-block (mcs_blks > 1) silicon.  Holds stats_lock across
 * the whole walk.
 */
static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->rx.secy;
	seq_puts(filp, "\n MCS secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
			   stats.octet_decrypted_cnt);
		seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
			   stats.octet_validated_cnt);
		seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
			   stats.pkt_port_disabled_cnt);
		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
		seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
			   stats.pkt_nosa_cnt);
		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
			   stats.pkt_nosaerror_cnt);
		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
			   stats.pkt_tagged_ctl_cnt);
		seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
		if (mcs->hw->mcs_blks > 1)
			seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
				   stats.pkt_notag_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}
489
490 RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
491
rvu_dbg_mcs_init(struct rvu * rvu)492 static void rvu_dbg_mcs_init(struct rvu *rvu)
493 {
494 struct mcs *mcs;
495 char dname[10];
496 int i;
497
498 if (!rvu->mcs_blk_cnt)
499 return;
500
501 rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
502
503 for (i = 0; i < rvu->mcs_blk_cnt; i++) {
504 mcs = mcs_get_pdata(i);
505
506 sprintf(dname, "mcs%d", i);
507 rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
508 rvu->rvu_dbg.mcs_root);
509
510 rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
511
512 debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
513 &rvu_dbg_mcs_rx_flowid_stats_fops);
514
515 debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
516 &rvu_dbg_mcs_rx_secy_stats_fops);
517
518 debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
519 &rvu_dbg_mcs_rx_sc_stats_fops);
520
521 debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
522 &rvu_dbg_mcs_rx_sa_stats_fops);
523
524 debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
525 &rvu_dbg_mcs_rx_port_stats_fops);
526
527 rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
528
529 debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
530 &rvu_dbg_mcs_tx_flowid_stats_fops);
531
532 debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
533 &rvu_dbg_mcs_tx_secy_stats_fops);
534
535 debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
536 &rvu_dbg_mcs_tx_sc_stats_fops);
537
538 debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
539 &rvu_dbg_mcs_tx_sa_stats_fops);
540
541 debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
542 &rvu_dbg_mcs_tx_port_stats_fops);
543 }
544 }
545
546 #define LMT_MAPTBL_ENTRY_SIZE 16
547 /* Dump LMTST map table */
rvu_dbg_lmtst_map_table_display(struct file * filp,char __user * buffer,size_t count,loff_t * ppos)548 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
549 char __user *buffer,
550 size_t count, loff_t *ppos)
551 {
552 struct rvu *rvu = filp->private_data;
553 u64 lmt_addr, val, tbl_base;
554 int pf, vf, num_vfs, hw_vfs;
555 void __iomem *lmt_map_base;
556 int apr_pfs, apr_vfs;
557 int buf_size = 10240;
558 size_t off = 0;
559 int index = 0;
560 char *buf;
561 int ret;
562
563 /* don't allow partial reads */
564 if (*ppos != 0)
565 return 0;
566
567 buf = kzalloc(buf_size, GFP_KERNEL);
568 if (!buf)
569 return -ENOMEM;
570
571 tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
572 val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
573 apr_vfs = 1 << (val & 0xF);
574 apr_pfs = 1 << ((val >> 4) & 0x7);
575
576 lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
577 LMT_MAPTBL_ENTRY_SIZE);
578 if (!lmt_map_base) {
579 dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
580 kfree(buf);
581 return false;
582 }
583
584 off += scnprintf(&buf[off], buf_size - 1 - off,
585 "\n\t\t\t\t\tLmtst Map Table Entries");
586 off += scnprintf(&buf[off], buf_size - 1 - off,
587 "\n\t\t\t\t\t=======================");
588 off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
589 off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
590 off += scnprintf(&buf[off], buf_size - 1 - off,
591 "Lmtline Base (word 0)\t\t");
592 off += scnprintf(&buf[off], buf_size - 1 - off,
593 "Lmt Map Entry (word 1)");
594 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
595 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
596 off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
597 pf);
598
599 index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
600 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
601 (tbl_base + index));
602 lmt_addr = readq(lmt_map_base + index);
603 off += scnprintf(&buf[off], buf_size - 1 - off,
604 " 0x%016llx\t\t", lmt_addr);
605 index += 8;
606 val = readq(lmt_map_base + index);
607 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
608 val);
609 /* Reading num of VFs per PF */
610 rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
611 for (vf = 0; vf < num_vfs; vf++) {
612 index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
613 ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
614 off += scnprintf(&buf[off], buf_size - 1 - off,
615 "PF%d:VF%d \t\t", pf, vf);
616 off += scnprintf(&buf[off], buf_size - 1 - off,
617 " 0x%llx\t\t", (tbl_base + index));
618 lmt_addr = readq(lmt_map_base + index);
619 off += scnprintf(&buf[off], buf_size - 1 - off,
620 " 0x%016llx\t\t", lmt_addr);
621 index += 8;
622 val = readq(lmt_map_base + index);
623 off += scnprintf(&buf[off], buf_size - 1 - off,
624 " 0x%016llx\n", val);
625 }
626 }
627 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
628
629 ret = min(off, count);
630 if (copy_to_user(buffer, buf, ret))
631 ret = -EFAULT;
632 kfree(buf);
633
634 iounmap(lmt_map_base);
635 if (ret < 0)
636 return ret;
637
638 *ppos = ret;
639 return ret;
640 }
641
642 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
643
/* Render the list of LFs in @block that are attached to @pcifunc into
 * @lfs, compressing consecutive runs: e.g. LFs {0,1,2,5} -> "0-2,5".
 *
 * State machine: prev_lf holds the last emitted/seen LF (initialized to
 * block->lf.max, which can never match lf + 1 on the first iteration);
 * seq flags that we are inside an unclosed run whose start was already
 * printed.  Caller must size @lfs large enough (see
 * get_max_column_width()).
 */
static void get_lf_str_list(const struct rvu_block *block, int pcifunc,
			    char *lfs)
{
	int lf = 0, seq = 0, len = 0, prev_lf = block->lf.max;

	for_each_set_bit(lf, block->lf.bmap, block->lf.max) {
		if (lf >= block->lf.max)
			break;

		if (block->fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			/* Extend the current run without printing yet */
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			/* Close the pending run, then start a new entry */
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* Close a run that was still open when the bitmap ended */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
677
/* Compute the widest LF-list string any (pcifunc, block) pair produces,
 * so the rsrc_attach_status table can be column-aligned.  Returns the
 * width (minimum 12, +1 for the NUL) or -ENOMEM if the scratch buffer
 * cannot be allocated.
 *
 * vf iterates 0..total_vfs inclusive: vf == 0 stands for the PF itself
 * in the pcifunc encoding (pf << 10 | vf); pcifunc 0 is skipped.
 */
static int get_max_column_width(struct rvu *rvu)
{
	int index, pf, vf, lf_str_size = 12, buf_size = 256;
	struct rvu_block block;
	u16 pcifunc;
	char *buf;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;

				get_lf_str_list(&block, pcifunc, buf);
				if (lf_str_size <= strlen(buf))
					lf_str_size = strlen(buf) + 1;
			}
		}
	}

	kfree(buf);
	return lf_str_size;
}
710
711 /* Dumps current provisioning status of all RVU block LFs */
/* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
					  char __user *buffer,
					  size_t count, loff_t *ppos)
{
	int index, off = 0, flag = 0, len = 0, i = 0;
	struct rvu *rvu = filp->private_data;
	int bytes_not_copied = 0;
	struct rvu_block block;
	int pf, vf, pcifunc;
	int buf_size = 2048;
	int lf_str_size;
	char *lfs;
	char *buf;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Get the maximum width of a column */
	lf_str_size = get_max_column_width(rvu);

	lfs = kzalloc(lf_str_size, GFP_KERNEL);
	if (!lfs) {
		kfree(buf);
		return -ENOMEM;
	}
	/* Header row: "pcifunc" plus one column per block that has a name */
	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
			  "pcifunc");
	for (index = 0; index < BLK_COUNT; index++)
		if (strlen(rvu->hw->block[index].name)) {
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 "%-*s", lf_str_size,
					 rvu->hw->block[index].name);
		}

	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	/* Rows are copied to userspace incrementally; i counts copied rows
	 * and the user-buffer offset advances by (i * off) each time.
	 * NOTE(review): this offset math assumes every copied chunk has
	 * the same length `off` as the current one — row widths are fixed
	 * by lf_str_size, but confirm before reworking.
	 */
	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
	if (bytes_not_copied)
		goto out;

	i++;
	*ppos += off;
	/* One row per active pcifunc: vf == 0 is the PF itself, vf > 0 is
	 * VF (vf - 1) in the pcifunc encoding (pf << 10 | vf).
	 */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			off = 0;
			flag = 0;
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			if (vf) {
				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			} else {
				sprintf(lfs, "PF%d", pf);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			}

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;
				len = 0;
				lfs[len] = '\0';
				get_lf_str_list(&block, pcifunc, lfs);
				if (strlen(lfs))
					flag = 1;

				off += scnprintf(&buf[off], buf_size - 1 - off,
						 "%-*s", lf_str_size, lfs);
			}
			/* Only emit the row if at least one LF is attached */
			if (flag) {
				off +=	scnprintf(&buf[off],
						  buf_size - 1 - off, "\n");
				bytes_not_copied = copy_to_user(buffer +
								(i * off),
								buf, off);
				if (bytes_not_copied)
					goto out;

				i++;
				*ppos += off;
			}
		}
	}

out:
	kfree(lfs);
	kfree(buf);
	if (bytes_not_copied)
		return -EFAULT;

	return *ppos;
}
814
815 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
816
/* Show the PF -> CGX/RPM LMAC mapping: one row per CGX-mapped PF with
 * its PCI device name, pcifunc, NIX block, MAC (CGX/RPM) id, LMAC id
 * and base channel.  Returns 0 even when no CGX devices exist (empty
 * output).
 */
static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
{
	char cgx[10], lmac[10], chan[10];
	struct rvu *rvu = filp->private;
	struct pci_dev *pdev = NULL;
	struct mac_ops *mac_ops;
	struct rvu_pfvf *pfvf;
	int pf, domain, blkid;
	u8 cgx_id, lmac_id;
	u16 pcifunc;

	/* PCI domain 2 — presumably where the AF exposes PF devices on
	 * this platform; confirm against the platform PCI topology.
	 */
	domain = 2;
	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	/* There can be no CGX devices at all */
	if (!mac_ops)
		return 0;
	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\tCHAN\n",
		   mac_ops->name);
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
		if (!pdev)
			continue;

		cgx[0] = 0;
		lmac[0] = 0;
		pcifunc = pf << 10;
		pfvf = rvu_get_pfvf(rvu, pcifunc);

		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
			blkid = 0;
		else
			blkid = 1;

		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
				    &lmac_id);
		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
		sprintf(lmac, "LMAC%d", lmac_id);
		sprintf(chan, "%d",
			rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0));
		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\t%s\n",
			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac,
			   chan);

		/* Drop the reference taken by pci_get_domain_bus_and_slot() */
		pci_dev_put(pdev);
	}
	return 0;
}
867
868 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
869
rvu_dbg_is_valid_lf(struct rvu * rvu,int blkaddr,int lf,u16 * pcifunc)870 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
871 u16 *pcifunc)
872 {
873 struct rvu_block *block;
874 struct rvu_hwinfo *hw;
875
876 hw = rvu->hw;
877 block = &hw->block[blkaddr];
878
879 if (lf < 0 || lf >= block->lf.max) {
880 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
881 block->lf.max - 1);
882 return false;
883 }
884
885 *pcifunc = block->fn_map[lf];
886 if (!*pcifunc) {
887 dev_warn(rvu->dev,
888 "This LF is not attached to any RVU PFFUNC\n");
889 return false;
890 }
891 return true;
892 }
893
/* Print NPA aura and pool queue sizes for @pfvf along with a bitmap of
 * which contexts are currently enabled.  Silently returns if the
 * PAGE_SIZE scratch buffer (for bitmap_print_to_pagebuf()) cannot be
 * allocated.
 */
static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
{
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	if (!pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
	} else {
		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
					pfvf->aura_ctx->qsize);
		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
	}

	if (!pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
	} else {
		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
					pfvf->pool_ctx->qsize);
		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
	}
	kfree(buf);
}
921
/* The 'qsize' entry dumps current Aura/Pool context Qsize
 * and each context's current enable/disable status in a bitmap.
 */
static int rvu_dbg_qsize_display(struct seq_file *s, void *unsused,
				 int blktype)
{
	void (*print_qsize)(struct seq_file *s,
			    struct rvu_pfvf *pfvf) = NULL;
	struct rvu_pfvf *pfvf;
	struct rvu *rvu;
	int qsize_id;
	u16 pcifunc;
	int blkaddr;

	rvu = s->private;
	switch (blktype) {
	case BLKTYPE_NPA:
		/* LF to display was set earlier via rvu_dbg_qsize_write() */
		qsize_id = rvu->rvu_dbg.npa_qsize_id;
		print_qsize = print_npa_qsize;
		break;

	case BLKTYPE_NIX:
		qsize_id = rvu->rvu_dbg.nix_qsize_id;
		print_qsize = print_nix_qsize;
		break;

	default:
		return -EINVAL;
	}

	/* NPA has a single block address; NIX instances are disambiguated
	 * by the aux number stored on the debugfs file.
	 */
	if (blktype == BLKTYPE_NPA)
		blkaddr = BLKADDR_NPA;
	else
		blkaddr = debugfs_get_aux_num(s->file);

	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	print_qsize(s, pfvf);

	return 0;
}
965
rvu_dbg_qsize_write(struct file * file,const char __user * buffer,size_t count,loff_t * ppos,int blktype)966 static ssize_t rvu_dbg_qsize_write(struct file *file,
967 const char __user *buffer, size_t count,
968 loff_t *ppos, int blktype)
969 {
970 char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
971 struct seq_file *seqfile = file->private_data;
972 char *cmd_buf, *cmd_buf_tmp, *subtoken;
973 struct rvu *rvu = seqfile->private;
974 int blkaddr;
975 u16 pcifunc;
976 int ret, lf;
977
978 cmd_buf = memdup_user_nul(buffer, count);
979 if (IS_ERR(cmd_buf))
980 return -ENOMEM;
981
982 cmd_buf_tmp = strchr(cmd_buf, '\n');
983 if (cmd_buf_tmp) {
984 *cmd_buf_tmp = '\0';
985 count = cmd_buf_tmp - cmd_buf + 1;
986 }
987
988 cmd_buf_tmp = cmd_buf;
989 subtoken = strsep(&cmd_buf, " ");
990 ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
991 if (cmd_buf)
992 ret = -EINVAL;
993
994 if (ret < 0 || !strncmp(subtoken, "help", 4)) {
995 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
996 goto qsize_write_done;
997 }
998
999 if (blktype == BLKTYPE_NPA)
1000 blkaddr = BLKADDR_NPA;
1001 else
1002 blkaddr = debugfs_get_aux_num(file);
1003
1004 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
1005 ret = -EINVAL;
1006 goto qsize_write_done;
1007 }
1008 if (blktype == BLKTYPE_NPA)
1009 rvu->rvu_dbg.npa_qsize_id = lf;
1010 else
1011 rvu->rvu_dbg.nix_qsize_id = lf;
1012
1013 qsize_write_done:
1014 kfree(cmd_buf_tmp);
1015 return ret ? ret : count;
1016 }
1017
rvu_dbg_npa_qsize_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1018 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
1019 const char __user *buffer,
1020 size_t count, loff_t *ppos)
1021 {
1022 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1023 BLKTYPE_NPA);
1024 }
1025
rvu_dbg_npa_qsize_display(struct seq_file * filp,void * unused)1026 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
1027 {
1028 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
1029 }
1030
1031 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
1032
1033 /* Dumps given NPA Aura's context */
/* Pretty-print every field of an NPA aura context (npa_aura_s) that was
 * read back through the admin queue.  The "Wn:" prefixes name the 64-bit
 * context word the field lives in.  fc_be and fc_msh_dst are printed only
 * on non-OcteonTx2 (CN10K) silicon, where those fields exist.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be exists only on CN10K and later */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst exists only on CN10K and later */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
1080
1081 /* Dumps given NPA Pool's context */
/* Pretty-print every field of an NPA pool context (npa_pool_s) that was
 * read back through the admin queue.  The "Wn:" prefixes name the 64-bit
 * context word the field lives in.  fc_be and fc_msh_dst are printed only
 * on non-OcteonTx2 (CN10K) silicon, where those fields exist.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be exists only on CN10K and later */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst exists only on CN10K and later */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
1127
1128 /* Reads aura/pool's ctx from admin queue */
/* Common display handler for the NPA aura_ctx/pool_ctx debugfs files.
 * Reads one context (or all of them) of the requested @ctype via the NPA
 * admin queue and pretty-prints each into the seq file.  Which LF and
 * which id/all were requested is taken from state recorded earlier by the
 * matching write handler.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Fetch the LF/id/all selection recorded by the write handler */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build a template AQ read request; only aura_id varies per loop */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			   max_id - 1);
		return -EINVAL;
	}

	/* "all" means iterate the full [0, qsize) range; otherwise dump
	 * exactly the one requested id.
	 */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;

		/* Skip if queue is uninitialized.
		 * NOTE(review): only pools are filtered through their bitmap;
		 * auras are read unconditionally — confirm whether aura_bmap
		 * should be checked here too.
		 */
		if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
			continue;

		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			   aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
1213
/* Validate and record the LF/id/all selection for a later aura/pool
 * context dump.  Returns 0 on success, -EINVAL if the LF, the id or the
 * context type is invalid or the corresponding context is uninitialized.
 */
static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
{
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->aura_ctx->qsize;
		break;
	case NPA_AQ_CTYPE_POOL:
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->pool_ctx->qsize;
		break;
	}

	/* An unknown ctype leaves max_id at 0, so this check also rejects it */
	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			 max_id - 1);
		return -EINVAL;
	}

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
		break;

	case NPA_AQ_CTYPE_POOL:
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1264
/* Parse "<npalf> [<id>|all]" written to an aura/pool ctx debugfs file.
 * @cmd_buf is a caller-supplied scratch buffer of at least *count + 1
 * bytes; *count is trimmed to the bytes actually consumed when the input
 * contains a newline.  Returns 0 on success, a negative errno otherwise.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	char *newline, *token;
	int ret;

	if (copy_from_user(cmd_buf, buffer, *count))
		return -EFAULT;

	cmd_buf[*count] = '\0';

	/* Trim at the first newline, accounting only for parsed bytes */
	newline = strchr(cmd_buf, '\n');
	if (newline) {
		*newline = '\0';
		*count = newline - cmd_buf + 1;
	}

	/* First token: the NPA LF number */
	token = strsep(&cmd_buf, " ");
	ret = token ? kstrtoint(token, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;

	/* Second token: either a specific id or the keyword "all" */
	token = strsep(&cmd_buf, " ");
	if (token && strcmp(token, "all") == 0) {
		*all = true;
	} else {
		ret = token ? kstrtoint(token, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}

	/* Any remaining text means extra tokens: reject */
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
1302
rvu_dbg_npa_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)1303 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
1304 const char __user *buffer,
1305 size_t count, loff_t *ppos, int ctype)
1306 {
1307 char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
1308 "aura" : "pool";
1309 struct seq_file *seqfp = filp->private_data;
1310 struct rvu *rvu = seqfp->private;
1311 int npalf, id = 0, ret;
1312 bool all = false;
1313
1314 if ((*ppos != 0) || !count)
1315 return -EINVAL;
1316
1317 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1318 if (!cmd_buf)
1319 return count;
1320 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1321 &npalf, &id, &all);
1322 if (ret < 0) {
1323 dev_info(rvu->dev,
1324 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1325 ctype_string, ctype_string);
1326 goto done;
1327 } else {
1328 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1329 }
1330 done:
1331 kfree(cmd_buf);
1332 return ret ? ret : count;
1333 }
1334
rvu_dbg_npa_aura_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1335 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1336 const char __user *buffer,
1337 size_t count, loff_t *ppos)
1338 {
1339 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1340 NPA_AQ_CTYPE_AURA);
1341 }
1342
rvu_dbg_npa_aura_ctx_display(struct seq_file * filp,void * unused)1343 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
1344 {
1345 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
1346 }
1347
1348 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1349
rvu_dbg_npa_pool_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1350 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1351 const char __user *buffer,
1352 size_t count, loff_t *ppos)
1353 {
1354 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1355 NPA_AQ_CTYPE_POOL);
1356 }
1357
rvu_dbg_npa_pool_ctx_display(struct seq_file * filp,void * unused)1358 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
1359 {
1360 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
1361 }
1362
1363 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1364
ndc_cache_stats(struct seq_file * s,int blk_addr,int ctype,int transaction)1365 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1366 int ctype, int transaction)
1367 {
1368 u64 req, out_req, lat, cant_alloc;
1369 struct nix_hw *nix_hw;
1370 struct rvu *rvu;
1371 int port;
1372
1373 if (blk_addr == BLKADDR_NDC_NPA0) {
1374 rvu = s->private;
1375 } else {
1376 nix_hw = s->private;
1377 rvu = nix_hw->rvu;
1378 }
1379
1380 for (port = 0; port < NDC_MAX_PORT; port++) {
1381 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1382 (port, ctype, transaction));
1383 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1384 (port, ctype, transaction));
1385 out_req = rvu_read64(rvu, blk_addr,
1386 NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1387 (port, ctype, transaction));
1388 cant_alloc = rvu_read64(rvu, blk_addr,
1389 NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1390 (port, transaction));
1391 seq_printf(s, "\nPort:%d\n", port);
1392 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1393 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1394 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1395 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1396 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1397 }
1398 }
1399
ndc_blk_cache_stats(struct seq_file * s,int idx,int blk_addr)1400 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1401 {
1402 seq_puts(s, "\n***** CACHE mode read stats *****\n");
1403 ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1404 seq_puts(s, "\n***** CACHE mode write stats *****\n");
1405 ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1406 seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1407 ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1408 seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1409 ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1410 return 0;
1411 }
1412
rvu_dbg_npa_ndc_cache_display(struct seq_file * filp,void * unused)1413 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
1414 {
1415 return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1416 }
1417
1418 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1419
ndc_blk_hits_miss_stats(struct seq_file * s,int idx,int blk_addr)1420 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1421 {
1422 struct nix_hw *nix_hw;
1423 struct rvu *rvu;
1424 int bank, max_bank;
1425 u64 ndc_af_const;
1426
1427 if (blk_addr == BLKADDR_NDC_NPA0) {
1428 rvu = s->private;
1429 } else {
1430 nix_hw = s->private;
1431 rvu = nix_hw->rvu;
1432 }
1433
1434 ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
1435 max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
1436 for (bank = 0; bank < max_bank; bank++) {
1437 seq_printf(s, "BANK:%d\n", bank);
1438 seq_printf(s, "\tHits:\t%lld\n",
1439 (u64)rvu_read64(rvu, blk_addr,
1440 NDC_AF_BANKX_HIT_PC(bank)));
1441 seq_printf(s, "\tMiss:\t%lld\n",
1442 (u64)rvu_read64(rvu, blk_addr,
1443 NDC_AF_BANKX_MISS_PC(bank)));
1444 }
1445 return 0;
1446 }
1447
rvu_dbg_nix_ndc_rx_cache_display(struct seq_file * filp,void * unused)1448 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1449 {
1450 struct nix_hw *nix_hw = filp->private;
1451 int blkaddr = 0;
1452 int ndc_idx = 0;
1453
1454 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1455 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1456 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1457
1458 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1459 }
1460
1461 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1462
rvu_dbg_nix_ndc_tx_cache_display(struct seq_file * filp,void * unused)1463 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1464 {
1465 struct nix_hw *nix_hw = filp->private;
1466 int blkaddr = 0;
1467 int ndc_idx = 0;
1468
1469 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1470 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1471 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1472
1473 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1474 }
1475
1476 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1477
rvu_dbg_npa_ndc_hits_miss_display(struct seq_file * filp,void * unused)1478 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
1479 void *unused)
1480 {
1481 return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1482 }
1483
1484 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1485
rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file * filp,void * unused)1486 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1487 void *unused)
1488 {
1489 struct nix_hw *nix_hw = filp->private;
1490 int ndc_idx = NPA0_U;
1491 int blkaddr = 0;
1492
1493 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1494 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1495
1496 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1497 }
1498
1499 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1500
rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file * filp,void * unused)1501 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1502 void *unused)
1503 {
1504 struct nix_hw *nix_hw = filp->private;
1505 int ndc_idx = NPA0_U;
1506 int blkaddr = 0;
1507
1508 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1509 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1510
1511 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1512 }
1513
1514 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1515
print_nix_cn10k_sq_ctx(struct seq_file * m,struct nix_cn10k_sq_ctx_s * sq_ctx)1516 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1517 struct nix_cn10k_sq_ctx_s *sq_ctx)
1518 {
1519 seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1520 sq_ctx->ena, sq_ctx->qint_idx);
1521 seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1522 sq_ctx->substream, sq_ctx->sdp_mcast);
1523 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1524 sq_ctx->cq, sq_ctx->sqe_way_mask);
1525
1526 seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1527 sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1528 seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1529 sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1530 seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1531 sq_ctx->default_chan, sq_ctx->sqb_count);
1532
1533 seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1534 seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1535 seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1536 sq_ctx->sqb_aura, sq_ctx->sq_int);
1537 seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1538 sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1539
1540 seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1541 sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1542 seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1543 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1544 seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1545 sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1546 seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1547 sq_ctx->tail_offset, sq_ctx->smenq_offset);
1548 seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1549 sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1550
1551 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1552 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1553 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1554 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1555 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1556 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1557 sq_ctx->smenq_next_sqb);
1558
1559 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1560
1561 seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1562 seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1563 sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1564 seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1565 sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1566 seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1567 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1568
1569 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1570 (u64)sq_ctx->scm_lso_rem);
1571 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1572 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1573 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1574 (u64)sq_ctx->dropped_octs);
1575 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1576 (u64)sq_ctx->dropped_pkts);
1577 }
1578
/* Walk the parent registers of the transmit scheduler hierarchy starting
 * from the SMQ of the SQ described by @rsp and print one
 * "SQ -> SMQ -> TL4 -> TL3 -> TL2 -> TL1" line.
 */
static void print_tm_tree(struct seq_file *m,
			  struct nix_aq_enq_rsp *rsp, u64 sq)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;
	u16 smq, tl4, tl3, tl2, tl1;
	int blkaddr = nix_hw->blkaddr;

	smq = sq_ctx->smq;
	tl4 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK,
			rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(smq)));
	tl3 = FIELD_GET(NIX_AF_TL4_PARENT_MASK,
			rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(tl4)));
	tl2 = FIELD_GET(NIX_AF_TL3_PARENT_MASK,
			rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(tl3)));
	tl1 = FIELD_GET(NIX_AF_TL2_PARENT_MASK,
			rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(tl2)));
	seq_printf(m,
		   "SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n",
		   sq, smq, tl4, tl3, tl2, tl1);
}
1607
/* Dumps the TM scheduler tree (SQ -> SMQ -> ... -> TL1) for the selected NIX LF */
rvu_dbg_nix_tm_tree_display(struct seq_file * m,void * unused)1609 static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused)
1610 {
1611 int qidx, nixlf, rc, id, max_id = 0;
1612 struct nix_hw *nix_hw = m->private;
1613 struct rvu *rvu = nix_hw->rvu;
1614 struct nix_aq_enq_req aq_req;
1615 struct nix_aq_enq_rsp rsp;
1616 struct rvu_pfvf *pfvf;
1617 u16 pcifunc;
1618
1619 nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1620 id = rvu->rvu_dbg.nix_tm_ctx.id;
1621
1622 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1623 return -EINVAL;
1624
1625 pfvf = rvu_get_pfvf(rvu, pcifunc);
1626 max_id = pfvf->sq_ctx->qsize;
1627
1628 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1629 aq_req.hdr.pcifunc = pcifunc;
1630 aq_req.ctype = NIX_AQ_CTYPE_SQ;
1631 aq_req.op = NIX_AQ_INSTOP_READ;
1632 seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1633 for (qidx = id; qidx < max_id; qidx++) {
1634 aq_req.qidx = qidx;
1635
1636 /* Skip SQ's if not initialized */
1637 if (!test_bit(qidx, pfvf->sq_bmap))
1638 continue;
1639
1640 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1641
1642 if (rc) {
1643 seq_printf(m, "Failed to read SQ(%d) context\n",
1644 aq_req.qidx);
1645 continue;
1646 }
1647 print_tm_tree(m, &rsp, aq_req.qidx);
1648 }
1649 return 0;
1650 }
1651
/* Write handler for the 'tm_tree' debugfs file: parse a NIX LF number
 * and, if the LF is valid and has SQ contexts, remember it as the LF the
 * read handler will dump.  Returns @count on success, -errno otherwise.
 */
static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp,
					 const char __user *buffer,
					 size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;
	struct rvu_pfvf *pfvf;
	u16 pcifunc;
	u64 nixlf;
	int ret;

	ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
	if (ret)
		return ret;

	/* NOTE(review): nixlf is a u64 but is narrowed to int below;
	 * values above INT_MAX wrap before the range check — confirm
	 * whether that is acceptable for this debug interface.
	 */
	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->sq_ctx) {
		dev_warn(rvu->dev, "SQ context is not initialized\n");
		return -EINVAL;
	}

	rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
	return count;
}

RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write);
1682
print_tm_topo(struct seq_file * m,u64 schq,u32 lvl)1683 static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl)
1684 {
1685 struct nix_hw *nix_hw = m->private;
1686 struct rvu *rvu = nix_hw->rvu;
1687 int blkaddr, link, link_level;
1688 struct rvu_hwinfo *hw;
1689
1690 hw = rvu->hw;
1691 blkaddr = nix_hw->blkaddr;
1692 if (lvl == NIX_TXSCH_LVL_MDQ) {
1693 seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq,
1694 rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)));
1695 seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq,
1696 rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq)));
1697 seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT =0x%llx\n", schq,
1698 rvu_read64(rvu, blkaddr,
1699 NIX_AF_MDQX_OUT_MD_COUNT(schq)));
1700 seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq,
1701 rvu_read64(rvu, blkaddr,
1702 NIX_AF_MDQX_SCHEDULE(schq)));
1703 seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq,
1704 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq)));
1705 seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq,
1706 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq)));
1707 seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq,
1708 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq)));
1709 seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq,
1710 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq)));
1711 seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq,
1712 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)));
1713 seq_puts(m, "\n");
1714 }
1715
1716 if (lvl == NIX_TXSCH_LVL_TL4) {
1717 seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq,
1718 rvu_read64(rvu, blkaddr,
1719 NIX_AF_TL4X_SDP_LINK_CFG(schq)));
1720 seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq,
1721 rvu_read64(rvu, blkaddr,
1722 NIX_AF_TL4X_SCHEDULE(schq)));
1723 seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq,
1724 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq)));
1725 seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq,
1726 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq)));
1727 seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq,
1728 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq)));
1729 seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq,
1730 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq)));
1731 seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq,
1732 rvu_read64(rvu, blkaddr,
1733 NIX_AF_TL4X_TOPOLOGY(schq)));
1734 seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq,
1735 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq)));
1736 seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1737 rvu_read64(rvu, blkaddr,
1738 NIX_AF_TL4X_MD_DEBUG0(schq)));
1739 seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1740 rvu_read64(rvu, blkaddr,
1741 NIX_AF_TL4X_MD_DEBUG1(schq)));
1742 seq_puts(m, "\n");
1743 }
1744
1745 if (lvl == NIX_TXSCH_LVL_TL3) {
1746 seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq,
1747 rvu_read64(rvu, blkaddr,
1748 NIX_AF_TL3X_SCHEDULE(schq)));
1749 seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq,
1750 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq)));
1751 seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq,
1752 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq)));
1753 seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq,
1754 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq)));
1755 seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq,
1756 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq)));
1757 seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq,
1758 rvu_read64(rvu, blkaddr,
1759 NIX_AF_TL3X_TOPOLOGY(schq)));
1760 seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", schq,
1761 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq)));
1762 seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1763 rvu_read64(rvu, blkaddr,
1764 NIX_AF_TL3X_MD_DEBUG0(schq)));
1765 seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1766 rvu_read64(rvu, blkaddr,
1767 NIX_AF_TL3X_MD_DEBUG1(schq)));
1768
1769 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1770 & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1771 if (lvl == link_level) {
1772 seq_printf(m,
1773 "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1774 schq, rvu_read64(rvu, blkaddr,
1775 NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1776 for (link = 0; link < hw->cgx_links; link++)
1777 seq_printf(m,
1778 "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1779 schq, link,
1780 rvu_read64(rvu, blkaddr,
1781 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1782 }
1783 seq_puts(m, "\n");
1784 }
1785
1786 if (lvl == NIX_TXSCH_LVL_TL2) {
1787 seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq,
1788 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq)));
1789 seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq,
1790 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq)));
1791 seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq,
1792 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq)));
1793 seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq,
1794 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq)));
1795 seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq,
1796 rvu_read64(rvu, blkaddr,
1797 NIX_AF_TL2X_TOPOLOGY(schq)));
1798 seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq,
1799 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq)));
1800 seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1801 rvu_read64(rvu, blkaddr,
1802 NIX_AF_TL2X_MD_DEBUG0(schq)));
1803 seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1804 rvu_read64(rvu, blkaddr,
1805 NIX_AF_TL2X_MD_DEBUG1(schq)));
1806
1807 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1808 & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1809 if (lvl == link_level) {
1810 seq_printf(m,
1811 "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1812 schq, rvu_read64(rvu, blkaddr,
1813 NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1814 for (link = 0; link < hw->cgx_links; link++)
1815 seq_printf(m,
1816 "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1817 schq, link, rvu_read64(rvu, blkaddr,
1818 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1819 }
1820 seq_puts(m, "\n");
1821 }
1822
1823 if (lvl == NIX_TXSCH_LVL_TL1) {
1824 seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n",
1825 schq,
1826 rvu_read64(rvu, blkaddr,
1827 NIX_AF_TX_LINKX_NORM_CREDIT(schq)));
1828 seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq,
1829 rvu_read64(rvu, blkaddr,
1830 NIX_AF_TX_LINKX_HW_XOFF(schq)));
1831 seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq,
1832 rvu_read64(rvu, blkaddr,
1833 NIX_AF_TL1X_SCHEDULE(schq)));
1834 seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq,
1835 rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq)));
1836 seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq,
1837 rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq)));
1838 seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq,
1839 rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)));
1840 seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq,
1841 rvu_read64(rvu, blkaddr,
1842 NIX_AF_TL1X_TOPOLOGY(schq)));
1843 seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1844 rvu_read64(rvu, blkaddr,
1845 NIX_AF_TL1X_MD_DEBUG0(schq)));
1846 seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1847 rvu_read64(rvu, blkaddr,
1848 NIX_AF_TL1X_MD_DEBUG1(schq)));
1849 seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n",
1850 schq,
1851 rvu_read64(rvu, blkaddr,
1852 NIX_AF_TL1X_DROPPED_PACKETS(schq)));
1853 seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq,
1854 rvu_read64(rvu, blkaddr,
1855 NIX_AF_TL1X_DROPPED_BYTES(schq)));
1856 seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq,
1857 rvu_read64(rvu, blkaddr,
1858 NIX_AF_TL1X_RED_PACKETS(schq)));
1859 seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq,
1860 rvu_read64(rvu, blkaddr,
1861 NIX_AF_TL1X_RED_BYTES(schq)));
1862 seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq,
1863 rvu_read64(rvu, blkaddr,
1864 NIX_AF_TL1X_YELLOW_PACKETS(schq)));
1865 seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq,
1866 rvu_read64(rvu, blkaddr,
1867 NIX_AF_TL1X_YELLOW_BYTES(schq)));
1868 seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq,
1869 rvu_read64(rvu, blkaddr,
1870 NIX_AF_TL1X_GREEN_PACKETS(schq)));
1871 seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq,
1872 rvu_read64(rvu, blkaddr,
1873 NIX_AF_TL1X_GREEN_BYTES(schq)));
1874 seq_puts(m, "\n");
1875 }
1876 }
1877
1878 /*dumps given tm_topo registers*/
rvu_dbg_nix_tm_topo_display(struct seq_file * m,void * unused)1879 static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused)
1880 {
1881 struct nix_hw *nix_hw = m->private;
1882 struct rvu *rvu = nix_hw->rvu;
1883 struct nix_aq_enq_req aq_req;
1884 struct nix_txsch *txsch;
1885 int nixlf, lvl, schq;
1886 u16 pcifunc;
1887
1888 nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1889
1890 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1891 return -EINVAL;
1892
1893 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1894 aq_req.hdr.pcifunc = pcifunc;
1895 aq_req.ctype = NIX_AQ_CTYPE_SQ;
1896 aq_req.op = NIX_AQ_INSTOP_READ;
1897 seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1898
1899 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1900 txsch = &nix_hw->txsch[lvl];
1901 for (schq = 0; schq < txsch->schq.max; schq++) {
1902 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc)
1903 print_tm_topo(m, schq, lvl);
1904 }
1905 }
1906 return 0;
1907 }
1908
rvu_dbg_nix_tm_topo_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1909 static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp,
1910 const char __user *buffer,
1911 size_t count, loff_t *ppos)
1912 {
1913 struct seq_file *m = filp->private_data;
1914 struct nix_hw *nix_hw = m->private;
1915 struct rvu *rvu = nix_hw->rvu;
1916 struct rvu_pfvf *pfvf;
1917 u16 pcifunc;
1918 u64 nixlf;
1919 int ret;
1920
1921 ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
1922 if (ret)
1923 return ret;
1924
1925 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1926 return -EINVAL;
1927
1928 pfvf = rvu_get_pfvf(rvu, pcifunc);
1929 if (!pfvf->sq_ctx) {
1930 dev_warn(rvu->dev, "SQ context is not initialized\n");
1931 return -EINVAL;
1932 }
1933
1934 rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
1935 return count;
1936 }
1937
1938 RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write);
1939
1940 /* Dumps given nix_sq's context */
/* Print the decoded NIX SQ (send queue) hardware context from an AQ read
 * response, one "Wn: field" line per context word field.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K (non-OTx2) silicon uses a different SQ context layout;
	 * delegate to the CN10K-specific printer.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4-W8: SQB pointer chain (64-bit addresses). */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* W10-W15: 48-bit statistics counters, widened to u64 for printing. */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
2009
print_nix_cn10k_rq_ctx(struct seq_file * m,struct nix_cn10k_rq_ctx_s * rq_ctx)2010 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
2011 struct nix_cn10k_rq_ctx_s *rq_ctx)
2012 {
2013 seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
2014 rq_ctx->ena, rq_ctx->sso_ena);
2015 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
2016 rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
2017 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
2018 rq_ctx->cq, rq_ctx->lenerr_dis);
2019 seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
2020 rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
2021 seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
2022 rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
2023 seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
2024 rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
2025 seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
2026
2027 seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
2028 rq_ctx->spb_aura, rq_ctx->lpb_aura);
2029 seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
2030 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
2031 rq_ctx->sso_grp, rq_ctx->sso_tt);
2032 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
2033 rq_ctx->pb_caching, rq_ctx->wqe_caching);
2034 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
2035 rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
2036 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
2037 rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
2038 seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
2039 rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
2040
2041 seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
2042 seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
2043 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
2044 seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
2045 rq_ctx->wqe_skip, rq_ctx->spb_ena);
2046 seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
2047 rq_ctx->lpb_sizem1, rq_ctx->first_skip);
2048 seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
2049 rq_ctx->later_skip, rq_ctx->xqe_imm_size);
2050 seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
2051 rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
2052
2053 seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
2054 rq_ctx->xqe_drop, rq_ctx->xqe_pass);
2055 seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
2056 rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
2057 seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
2058 rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
2059 seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
2060 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
2061
2062 seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
2063 rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
2064 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
2065 rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
2066 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
2067 rq_ctx->rq_int, rq_ctx->rq_int_ena);
2068 seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
2069
2070 seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
2071 rq_ctx->ltag, rq_ctx->good_utag);
2072 seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
2073 rq_ctx->bad_utag, rq_ctx->flow_tagw);
2074 seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
2075 rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
2076 seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
2077 rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
2078 seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
2079
2080 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
2081 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
2082 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
2083 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
2084 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
2085 }
2086
2087 /* Dumps given nix_rq's context */
/* Print the decoded NIX RQ (receive queue) hardware context from an AQ
 * read response, one "Wn: field" line per context word field.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K (non-OTx2) silicon uses a different RQ context layout;
	 * delegate to the CN10K-specific printer.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6-W10: 48-bit statistics counters, widened to u64 for printing. */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
2155
2156 /* Dumps given nix_cq's context */
/* Print the decoded NIX CQ (completion queue) hardware context from an AQ
 * read response.  Some fields (lbpid*, lbp_ena, lbp_frac, cpt_drop_err_en)
 * exist only on CN10K-class (non-OTx2) silicon and are printed conditionally.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	/* CN10K-only: local backpressure ID is split across three fields;
	 * also print the reassembled value.
	 */
	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
		seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
		seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
		seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
			   cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
			   cq_ctx->lbpid_low);
		seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
	}

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	/* CN10K-only W3 fields. */
	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
		seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
			   cq_ctx->cpt_drop_err_en);
	}
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
2203
/* Read back and dump the SQ/RQ/CQ context(s) previously selected through
 * the matching debugfs write handler.  @ctype picks the queue type; the
 * saved (lf, id, all) tuple picks the NIX LF and queue index range.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	/* Per-ctype pretty-printer for the AQ response. */
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Fetch the selection previously stored by the write handler. */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	/* The queue type must have been initialized for this PF/VF. */
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all" dumps every queue in [0, qsize); otherwise only [id, id]. */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		/* One AQ read per queue index, via the mbox handler. */
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
2292
/* Validate a (nixlf, id, all) selection for the given queue type and, if
 * valid, stash it in rvu_dbg so the matching display handler can use it.
 * Returns 0 on success or -EINVAL on any validation failure.
 */
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string,
			       struct seq_file *m)
{
	struct nix_hw *nix_hw = m->private;
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* The selected queue type must be initialized; its qsize bounds id. */
	switch (ctype) {
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->sq_ctx->qsize;
		break;
	case NIX_AQ_CTYPE_RQ:
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->rq_ctx->qsize;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->cq_ctx->qsize;
		break;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
			 ctype_string, max_id - 1);
		return -EINVAL;
	}

	/* Record the selection for the display path. */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
		break;
	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
		break;
	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
2355
rvu_dbg_nix_queue_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)2356 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
2357 const char __user *buffer,
2358 size_t count, loff_t *ppos,
2359 int ctype)
2360 {
2361 struct seq_file *m = filp->private_data;
2362 struct nix_hw *nix_hw = m->private;
2363 struct rvu *rvu = nix_hw->rvu;
2364 char *cmd_buf, *ctype_string;
2365 int nixlf, id = 0, ret;
2366 bool all = false;
2367
2368 if ((*ppos != 0) || !count)
2369 return -EINVAL;
2370
2371 switch (ctype) {
2372 case NIX_AQ_CTYPE_SQ:
2373 ctype_string = "sq";
2374 break;
2375 case NIX_AQ_CTYPE_RQ:
2376 ctype_string = "rq";
2377 break;
2378 case NIX_AQ_CTYPE_CQ:
2379 ctype_string = "cq";
2380 break;
2381 default:
2382 return -EINVAL;
2383 }
2384
2385 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
2386
2387 if (!cmd_buf)
2388 return count;
2389
2390 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
2391 &nixlf, &id, &all);
2392 if (ret < 0) {
2393 dev_info(rvu->dev,
2394 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
2395 ctype_string, ctype_string);
2396 goto done;
2397 } else {
2398 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
2399 ctype_string, m);
2400 }
2401 done:
2402 kfree(cmd_buf);
2403 return ret ? ret : count;
2404 }
2405
/* debugfs write: select which SQ context(s) to dump. */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_SQ);
}

/* debugfs read: dump the previously selected SQ context(s). */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}

RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
2420
/* debugfs write: select which RQ context(s) to dump. */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_RQ);
}

/* debugfs read: dump the previously selected RQ context(s). */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
}

RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
2435
/* debugfs write: select which CQ context(s) to dump. */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_CQ);
}

/* debugfs read: dump the previously selected CQ context(s). */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}

RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
2450
print_nix_qctx_qsize(struct seq_file * filp,int qsize,unsigned long * bmap,char * qtype)2451 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
2452 unsigned long *bmap, char *qtype)
2453 {
2454 char *buf;
2455
2456 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2457 if (!buf)
2458 return;
2459
2460 bitmap_print_to_pagebuf(false, buf, bmap, qsize);
2461 seq_printf(filp, "%s context count : %d\n", qtype, qsize);
2462 seq_printf(filp, "%s context ena/dis bitmap : %s\n",
2463 qtype, buf);
2464 kfree(buf);
2465 }
2466
print_nix_qsize(struct seq_file * filp,struct rvu_pfvf * pfvf)2467 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
2468 {
2469 if (!pfvf->cq_ctx)
2470 seq_puts(filp, "cq context is not initialized\n");
2471 else
2472 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
2473 "cq");
2474
2475 if (!pfvf->rq_ctx)
2476 seq_puts(filp, "rq context is not initialized\n");
2477 else
2478 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
2479 "rq");
2480
2481 if (!pfvf->sq_ctx)
2482 seq_puts(filp, "sq context is not initialized\n");
2483 else
2484 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
2485 "sq");
2486 }
2487
/* debugfs write: select the NIX LF whose queue sizes should be dumped
 * (delegates to the block-generic qsize handler).
 */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}

/* debugfs read: dump CQ/RQ/SQ sizes and bitmaps for the selected LF. */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}

RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
2502
print_band_prof_ctx(struct seq_file * m,struct nix_bandprof_s * prof)2503 static void print_band_prof_ctx(struct seq_file *m,
2504 struct nix_bandprof_s *prof)
2505 {
2506 char *str;
2507
2508 switch (prof->pc_mode) {
2509 case NIX_RX_PC_MODE_VLAN:
2510 str = "VLAN";
2511 break;
2512 case NIX_RX_PC_MODE_DSCP:
2513 str = "DSCP";
2514 break;
2515 case NIX_RX_PC_MODE_GEN:
2516 str = "Generic";
2517 break;
2518 case NIX_RX_PC_MODE_RSVD:
2519 str = "Reserved";
2520 break;
2521 }
2522 seq_printf(m, "W0: pc_mode\t\t%s\n", str);
2523 str = (prof->icolor == 3) ? "Color blind" :
2524 (prof->icolor == 0) ? "Green" :
2525 (prof->icolor == 1) ? "Yellow" : "Red";
2526 seq_printf(m, "W0: icolor\t\t%s\n", str);
2527 seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
2528 seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
2529 seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
2530 seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
2531 seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
2532 seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
2533 seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
2534 seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
2535
2536 seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
2537 str = (prof->lmode == 0) ? "byte" : "packet";
2538 seq_printf(m, "W1: lmode\t\t%s\n", str);
2539 seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
2540 seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
2541 seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
2542 seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
2543 str = (prof->gc_action == 0) ? "PASS" :
2544 (prof->gc_action == 1) ? "DROP" : "RED";
2545 seq_printf(m, "W1: gc_action\t\t%s\n", str);
2546 str = (prof->yc_action == 0) ? "PASS" :
2547 (prof->yc_action == 1) ? "DROP" : "RED";
2548 seq_printf(m, "W1: yc_action\t\t%s\n", str);
2549 str = (prof->rc_action == 0) ? "PASS" :
2550 (prof->rc_action == 1) ? "DROP" : "RED";
2551 seq_printf(m, "W1: rc_action\t\t%s\n", str);
2552 seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
2553 seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
2554 seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
2555
2556 seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
2557 seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
2558 seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
2559 seq_printf(m, "W4: green_pkt_pass\t%lld\n",
2560 (u64)prof->green_pkt_pass);
2561 seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
2562 (u64)prof->yellow_pkt_pass);
2563 seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
2564 seq_printf(m, "W7: green_octs_pass\t%lld\n",
2565 (u64)prof->green_octs_pass);
2566 seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
2567 (u64)prof->yellow_octs_pass);
2568 seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
2569 seq_printf(m, "W10: green_pkt_drop\t%lld\n",
2570 (u64)prof->green_pkt_drop);
2571 seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
2572 (u64)prof->yellow_pkt_drop);
2573 seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
2574 seq_printf(m, "W13: green_octs_drop\t%lld\n",
2575 (u64)prof->green_octs_drop);
2576 seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
2577 (u64)prof->yellow_octs_drop);
2578 seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
2579 seq_puts(m, "==============================\n");
2580 }
2581
/* Walk all ingress-policer layers, read each allocated bandwidth profile's
 * context via the AQ, and print it along with the owning PF/VF.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Skip profiles that are not allocated. */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* Profile index encodes layer in bits [15:14]. */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			/* FUNC bits zero => owned by a PF, else by its VF. */
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}
2636
2637 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
2638
rvu_dbg_nix_band_prof_rsrc_display(struct seq_file * m,void * unused)2639 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2640 {
2641 struct nix_hw *nix_hw = m->private;
2642 struct nix_ipolicer *ipolicer;
2643 int layer;
2644 char *str;
2645
2646 /* Ingress policers do not exist on all platforms */
2647 if (!nix_hw->ipolicer)
2648 return 0;
2649
2650 seq_puts(m, "\nBandwidth profile resource free count\n");
2651 seq_puts(m, "=====================================\n");
2652 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2653 if (layer == BAND_PROF_INVAL_LAYER)
2654 continue;
2655 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2656 (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2657
2658 ipolicer = &nix_hw->ipolicer[layer];
2659 seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
2660 ipolicer->band_prof.max,
2661 rvu_rsrc_free_count(&ipolicer->band_prof));
2662 }
2663 seq_puts(m, "=====================================\n");
2664
2665 return 0;
2666 }
2667
2668 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
2669
/* Create the per-NIX-block debugfs directory ("nix" or "nix1") and
 * populate it with queue-context, NDC-cache and ingress-policer files.
 * No-op if the given NIX block is not implemented on this silicon.
 */
static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
	struct nix_hw *nix_hw;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return;

	if (blkaddr == BLKADDR_NIX0) {
		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
		nix_hw = &rvu->hw->nix[0];
	} else {
		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
						      rvu->rvu_dbg.root);
		nix_hw = &rvu->hw->nix[1];
	}

	debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_tm_tree_fops);
	debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_tm_topo_fops);
	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_sq_ctx_fops);
	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_rq_ctx_fops);
	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_cq_ctx_fops);
	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_tx_cache_fops);
	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_rx_cache_fops);
	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
	/* "qsize" needs the block address at read time; stash it as the
	 * file's aux number instead of in the private pointer.
	 */
	debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu,
				    blkaddr, &rvu_dbg_nix_qsize_fops);
	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_band_prof_ctx_fops);
	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_band_prof_rsrc_fops);
}
2711
/* Create the NPA debugfs directory and its queue/context/NDC files. */
static void rvu_dbg_npa_init(struct rvu *rvu)
{
	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);

	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_qsize_fops);
	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_aura_ctx_fops);
	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_pool_ctx_fops);
	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_ndc_cache_fops);
	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_ndc_hits_miss_fops);
}
2727
/* Fetch one cumulative NIX RX stat for the current cgx/lmac and print it.
 * Statement-expression: evaluates to the counter value. Relies on 'rvu',
 * 'cgxd', 'lmac_id', 's' and 'err' being in the caller's scope; 'err' is
 * written so the caller must check it after each use.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
({									\
	u64 cnt;							\
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),		\
				     NIX_STATS_RX, &(cnt));		\
	if (!err)							\
		seq_printf(s, "%s: %llu\n", name, cnt);			\
	cnt;								\
})
2737
/* TX counterpart of PRINT_CGX_CUML_NIXRX_STATUS: fetch and print one
 * cumulative NIX TX stat; same caller-scope requirements ('rvu', 'cgxd',
 * 'lmac_id', 's', 'err') and same "check err after use" contract.
 */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)				\
({									\
	u64 cnt;							\
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),		\
				     NIX_STATS_TX, &(cnt));		\
	if (!err)							\
		seq_printf(s, "%s: %llu\n", name, cnt);			\
	cnt;								\
})
2747
cgx_print_stats(struct seq_file * s,int lmac_id)2748 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2749 {
2750 struct cgx_link_user_info linfo;
2751 struct mac_ops *mac_ops;
2752 void *cgxd = s->private;
2753 u64 ucast, mcast, bcast;
2754 int stat = 0, err = 0;
2755 u64 tx_stat, rx_stat;
2756 struct rvu *rvu;
2757
2758 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2759 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2760 if (!rvu)
2761 return -ENODEV;
2762
2763 mac_ops = get_mac_ops(cgxd);
2764 /* There can be no CGX devices at all */
2765 if (!mac_ops)
2766 return 0;
2767
2768 /* Link status */
2769 seq_puts(s, "\n=======Link Status======\n\n");
2770 err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2771 if (err)
2772 seq_puts(s, "Failed to read link status\n");
2773 seq_printf(s, "\nLink is %s %d Mbps\n\n",
2774 linfo.link_up ? "UP" : "DOWN", linfo.speed);
2775
2776 /* Rx stats */
2777 seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2778 mac_ops->name);
2779 ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2780 if (err)
2781 return err;
2782 mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2783 if (err)
2784 return err;
2785 bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2786 if (err)
2787 return err;
2788 seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2789 PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2790 if (err)
2791 return err;
2792 PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2793 if (err)
2794 return err;
2795 PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2796 if (err)
2797 return err;
2798
2799 /* Tx stats */
2800 seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2801 mac_ops->name);
2802 ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2803 if (err)
2804 return err;
2805 mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2806 if (err)
2807 return err;
2808 bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2809 if (err)
2810 return err;
2811 seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2812 PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2813 if (err)
2814 return err;
2815 PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2816 if (err)
2817 return err;
2818
2819 /* Rx stats */
2820 seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2821 while (stat < mac_ops->rx_stats_cnt) {
2822 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2823 if (err)
2824 return err;
2825 if (is_rvu_otx2(rvu))
2826 seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2827 rx_stat);
2828 else
2829 seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2830 rx_stat);
2831 stat++;
2832 }
2833
2834 /* Tx stats */
2835 stat = 0;
2836 seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2837 while (stat < mac_ops->tx_stats_cnt) {
2838 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2839 if (err)
2840 return err;
2841
2842 if (is_rvu_otx2(rvu))
2843 seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2844 tx_stat);
2845 else
2846 seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2847 tx_stat);
2848 stat++;
2849 }
2850
2851 return err;
2852 }
2853
/* Recover the LMAC id that was stashed as the debugfs file's aux number
 * when the file was created with debugfs_create_file_aux_num().
 */
static int rvu_dbg_derive_lmacid(struct seq_file *s)
{
	return debugfs_get_aux_num(s->file);
}
2858
/* seq_file show callback for the per-LMAC "stats" debugfs file. */
static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused)
{
	return cgx_print_stats(s, rvu_dbg_derive_lmacid(s));
}
2863
2864 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2865
/* Dump the DMAC filter configuration for one LMAC: broadcast/multicast
 * accept mode, unicast vs promiscuous mode, and every enabled DMAC CAM
 * entry that belongs to this LMAC.
 */
static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
{
	struct pci_dev *pdev = NULL;
	void *cgxd = s->private;
	char *bcast, *mcast;
	u16 index, domain;
	u8 dmac[ETH_ALEN];
	struct rvu *rvu;
	u64 cfg, mac;
	int pf;

	/* NOTE(review): the pci_get_device() reference used to look up
	 * 'rvu' is never released; confirm whether a pci_dev_put() is
	 * needed here as well.
	 */
	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	/* NOTE(review): PCI domain is hard-coded to 2 — presumably the
	 * fixed domain of RVU PFs on this platform; verify.
	 */
	domain = 2;

	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
	if (!pdev)
		return 0;

	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";

	seq_puts(s,
		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
	seq_printf(s, "%s  PF%d  %9s  %9s",
		   dev_name(&pdev->dev), pf, bcast, mcast);
	if (cfg & CGX_DMAC_CAM_ACCEPT)
		seq_printf(s, "%12s\n\n", "UNICAST");
	else
		seq_printf(s, "%16s\n\n", "PROMISCUOUS");

	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");

	for (index = 0 ; index < 32 ; index++) {
		cfg = cgx_read_dmac_entry(cgxd, index);
		/* Display enabled dmac entries associated with current lmac */
		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
			u64_to_ether_addr(mac, dmac);
			seq_printf(s, "%7d     %pM\n", index, dmac);
		}
	}

	pci_dev_put(pdev);
	return 0;
}
2918
/* seq_file show callback for the per-LMAC "mac_filter" debugfs file. */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused)
{
	return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s));
}
2923
2924 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2925
rvu_dbg_cgx_init(struct rvu * rvu)2926 static void rvu_dbg_cgx_init(struct rvu *rvu)
2927 {
2928 struct mac_ops *mac_ops;
2929 unsigned long lmac_bmap;
2930 int i, lmac_id;
2931 char dname[20];
2932 void *cgx;
2933
2934 if (!cgx_get_cgxcnt_max())
2935 return;
2936
2937 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2938 if (!mac_ops)
2939 return;
2940
2941 rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2942 rvu->rvu_dbg.root);
2943
2944 for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2945 cgx = rvu_cgx_pdata(i, rvu);
2946 if (!cgx)
2947 continue;
2948 lmac_bmap = cgx_get_lmac_bmap(cgx);
2949 /* cgx debugfs dir */
2950 sprintf(dname, "%s%d", mac_ops->name, i);
2951 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2952 rvu->rvu_dbg.cgx_root);
2953
2954 for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
2955 /* lmac debugfs dir */
2956 sprintf(dname, "lmac%d", lmac_id);
2957 rvu->rvu_dbg.lmac =
2958 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2959
2960 debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac,
2961 cgx, lmac_id, &rvu_dbg_cgx_stat_fops);
2962 debugfs_create_file_aux_num("mac_filter", 0600,
2963 rvu->rvu_dbg.lmac, cgx, lmac_id,
2964 &rvu_dbg_cgx_dmac_flt_fops);
2965 }
2966 }
2967 }
2968
2969 /* NPC debugfs APIs */
/* NPC debugfs APIs */
/* Print, for one PF/VF, the number of MCAM entries and counters it has
 * allocated and enabled. Prints nothing if the function owns neither.
 */
static void rvu_print_npc_mcam_info(struct seq_file *s,
				    u16 pcifunc, int blkaddr)
{
	struct rvu *rvu = s->private;
	int entry_acnt, entry_ecnt;
	int cntr_acnt, cntr_ecnt;

	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
					  &entry_acnt, &entry_ecnt);
	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
					    &cntr_acnt, &cntr_ecnt);
	/* Skip functions that own nothing to keep the listing compact */
	if (!entry_acnt && !cntr_acnt)
		return;

	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
			   rvu_get_pf(pcifunc));
	else
		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
			   rvu_get_pf(pcifunc),
			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);

	if (entry_acnt) {
		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
	}
	if (cntr_acnt) {
		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
	}
}
3001
/* Summarize NPC MCAM state: RX/TX key widths, total/reserved/available
 * entries and counters, then the per-PF/VF allocation breakdown.
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
	struct rvu *rvu = filp->private;
	int pf, vf, numvfs, blkaddr;
	struct npc_mcam *mcam;
	u16 pcifunc, counters;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;
	counters = rvu->hw->npc_counters;

	seq_puts(filp, "\nNPC MCAM info:\n");
	/* MCAM keywidth on receive and transmit sides */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
	cfg = (cfg >> 32) & 0x07;	/* key type lives in bits [34:32] */
	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));

	/* Hold the MCAM lock so the counts below are self-consistent */
	mutex_lock(&mcam->lock);
	/* MCAM entries */
	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   mcam->total_entries - mcam->bmap_entries);
	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

	/* MCAM counters */
	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   counters - mcam->counters.max);
	seq_printf(filp, "\t\t Available \t: %d\n",
		   rvu_rsrc_free_count(&mcam->counters));

	/* Nothing allocated yet => skip the per-function breakdown */
	if (mcam->bmap_entries == mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return 0;
	}

	seq_puts(filp, "\n\t\t Current allocation\n");
	seq_puts(filp, "\t\t====================\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

		/* Number of VFs behind this PF, from its privilege config */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		numvfs = (cfg >> 12) & 0xFF;
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}
3066
3067 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
3068
/* Print the match-stat counter wired to the NPC RX MCAM miss action,
 * i.e. how many received packets matched no MCAM rule.
 */
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
					     void *unused)
{
	struct rvu *rvu = filp->private;
	struct npc_mcam *mcam;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;

	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
		   rvu_read64(rvu, blkaddr,
			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));

	return 0;
}
3089
3090 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
3091
/* Print the MPLS TTL field of an LSE match value and its mask.
 * Relies on seq_file 's' being in the caller's scope.
 *
 * Fix: the macro ended with a stray line-continuation backslash after
 * "} while (0)", which silently appended the following source line to
 * the macro body; remove it.
 */
#define RVU_DBG_PRINT_MPLS_TTL(pkt, mask)                                     \
do {                                                                          \
	seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt));     \
	seq_printf(s, "mask 0x%lx\n",                                         \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask));               \
} while (0)
3098
/* Print the MPLS Label/TC/BOS fields of an LSE match value and mask.
 * Arguments are copied into locals so each is evaluated exactly once.
 * Relies on seq_file 's' being in the caller's scope.
 *
 * Fix: removed the stray line-continuation backslash after
 * "} while (0)", which silently extended the macro onto the next line.
 */
#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask)                               \
do {                                                                          \
	typeof(_pkt) (pkt) = (_pkt);                                          \
	typeof(_mask) (mask) = (_mask);                                       \
	seq_printf(s, "%ld %ld %ld\n",                                        \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt),                  \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt),                  \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt));                \
	seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n",                           \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask),                 \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask),                 \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask));               \
} while (0)
3112
/* Print every match field set in an MCAM rule's feature bitmap, one line
 * per field, as "<name> <value> mask <mask>". Unhandled feature bits
 * print only the field name.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	/* rule->features is a 64-bit bitmap of NPC_* field ids */
	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_LXMB:
			if (rule->lxmb == 1)
				seq_puts(s, "\tL2M nibble is set\n");
			else
				seq_puts(s, "\tL2B nibble is set\n");
			break;
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			/* Packet fields are stored big-endian; convert for display */
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_INNER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_itci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		case NPC_IPFRAG_IPV6:
			seq_printf(s, "0x%x ", rule->packet.next_header);
			seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
			break;
		case NPC_IPFRAG_IPV4:
			seq_printf(s, "0x%x ", rule->packet.ip_flag);
			seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
			break;
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		case NPC_TCP_FLAGS:
			seq_printf(s, "%d ", rule->packet.tcp_flags);
			seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
			break;
		case NPC_IPSEC_SPI:
			seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
			seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
			break;
		/* Up to four MPLS label stack entries can be matched */
		case NPC_MPLS1_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
						   rule->mask.mpls_lse[0]);
			break;
		case NPC_MPLS1_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
					       rule->mask.mpls_lse[0]);
			break;
		case NPC_MPLS2_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
						   rule->mask.mpls_lse[1]);
			break;
		case NPC_MPLS2_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
					       rule->mask.mpls_lse[1]);
			break;
		case NPC_MPLS3_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
						   rule->mask.mpls_lse[2]);
			break;
		case NPC_MPLS3_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
					       rule->mask.mpls_lse[2]);
			break;
		case NPC_MPLS4_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
						   rule->mask.mpls_lse[3]);
			break;
		case NPC_MPLS4_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
					       rule->mask.mpls_lse[3]);
			break;
		case NPC_TYPE_ICMP:
			seq_printf(s, "%d ", rule->packet.icmp_type);
			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
			break;
		case NPC_CODE_ICMP:
			seq_printf(s, "%d ", rule->packet.icmp_code);
			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
			break;
		default:
			seq_puts(s, "\n");
			break;
		}
	}
}
3243
/* Print an MCAM rule's action in human-readable form, using the TX or
 * RX action opcode depending on the rule's interface direction.
 * Unknown opcodes print nothing.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (is_npc_intf_tx(rule->intf)) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			break;
		}
	}
}
3291
/* Map an NPC interface id to its display name; "unknown" for anything
 * other than NIX0/NIX1 RX/TX.
 */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
3309
/* List every installed MCAM rule: owner PF/VF, direction/interface,
 * entry index, match fields, forwarding target (RX only), action,
 * enabled state, and hit count when a counter is attached.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	/* Hold the MCAM lock while walking the rule list */
	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			/* FUNC field is 1-based VF id */
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}
3375
3376 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
3377
/* Dump the NPC exact-match tables: the multi-way hashed MEM table first
 * (one row per depth index, one column per way), then the overflow CAM
 * table. Empty tables are skipped.
 */
static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
{
	struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
	struct npc_exact_table_entry *cam_entry;
	struct npc_exact_table *table;
	struct rvu *rvu = s->private;
	int i, j;

	u8 bitmap = 0;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Check if there is at least one entry in mem table */
	if (!table->mem_tbl_entry_cnt)
		goto dump_cam_table;

	/* Print table headers */
	seq_puts(s, "\n\tExact Match MEM Table\n");
	seq_puts(s, "Index\t");

	/* Per-way lists are sorted by index; track a cursor per way */
	for (i = 0; i < table->mem_table.ways; i++) {
		mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
							struct npc_exact_table_entry, list);

		seq_printf(s, "Way-%d\t\t\t\t\t", i);
	}

	seq_puts(s, "\n");
	for (i = 0; i < table->mem_table.ways; i++)
		seq_puts(s, "\tChan  MAC                     \t");

	seq_puts(s, "\n\n");

	/* Print mem table entries */
	for (i = 0; i < table->mem_table.depth; i++) {
		bitmap = 0;
		/* Mark which ways have an entry at this depth index */
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!mem_entry[j])
				continue;

			if (mem_entry[j]->index != i)
				continue;

			bitmap |= BIT(j);
		}

		/* No valid entries */
		if (!bitmap)
			continue;

		seq_printf(s, "%d\t", i);
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!(bitmap & BIT(j))) {
				seq_puts(s, "nil\t\t\t\t\t");
				continue;
			}

			seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
				   mem_entry[j]->mac);
			/* Advance this way's cursor to its next entry */
			mem_entry[j] = list_next_entry(mem_entry[j], list);
		}
		seq_puts(s, "\n");
	}

dump_cam_table:

	if (!table->cam_tbl_entry_cnt)
		goto done;

	seq_puts(s, "\n\tExact Match CAM Table\n");
	seq_puts(s, "index\tchan\tMAC\n");

	/* Traverse cam table entries */
	list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
		seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
			   cam_entry->mac);
	}

done:
	mutex_unlock(&table->lock);
	return 0;
}
3462
3463 RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
3464
/* Print exact-match feature status, per-drop-rule MAC filter counts and
 * promisc state, and the MEM/CAM table geometry.
 */
static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
{
	struct npc_exact_table *table;
	struct rvu *rvu = s->private;
	int i;

	table = rvu->hw->table;

	seq_puts(s, "\n\tExact Table Info\n");
	/* NOTE(review): "disable" is likely a typo for "disabled"; left
	 * untouched since debugfs output may be parsed by scripts.
	 */
	seq_printf(s, "Exact Match Feature : %s\n",
		   rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable");
	if (!rvu->hw->cap.npc_exact_match_enabled)
		return 0;

	seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
	for (i = 0; i < table->num_drop_rules; i++)
		seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);

	seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
	for (i = 0; i < table->num_drop_rules; i++)
		seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");

	seq_puts(s, "\n\tMEM Table Info\n");
	seq_printf(s, "Ways : %d\n", table->mem_table.ways);
	seq_printf(s, "Depth : %d\n", table->mem_table.depth);
	seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
	seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
	seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);

	seq_puts(s, "\n\tCAM Table Info\n");
	seq_printf(s, "Depth : %d\n", table->cam_table.depth);

	return 0;
}
3499
3500 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
3501
rvu_dbg_npc_exact_drop_cnt(struct seq_file * s,void * unused)3502 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3503 {
3504 struct npc_exact_table *table;
3505 struct rvu *rvu = s->private;
3506 struct npc_key_field *field;
3507 u16 chan, pcifunc;
3508 int blkaddr, i;
3509 u64 cfg, cam1;
3510 char *str;
3511
3512 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3513 table = rvu->hw->table;
3514
3515 field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3516
3517 seq_puts(s, "\n\t Exact Hit on drop status\n");
3518 seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3519
3520 for (i = 0; i < table->num_drop_rules; i++) {
3521 pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3522 cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3523
3524 /* channel will be always in keyword 0 */
3525 cam1 = rvu_read64(rvu, blkaddr,
3526 NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3527 chan = field->kw_mask[0] & cam1;
3528
3529 str = (cfg & 1) ? "enabled" : "disabled";
3530
3531 seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
3532 rvu_read64(rvu, blkaddr,
3533 NPC_AF_MATCH_STATX(table->counter_idx[i])),
3534 chan, str);
3535 }
3536
3537 return 0;
3538 }
3539
3540 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
3541
/* Create the NPC debugfs directory; exact-match files are created only
 * when the silicon supports the exact-match feature.
 */
static void rvu_dbg_npc_init(struct rvu *rvu)
{
	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);

	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
			    &rvu_dbg_npc_mcam_info_fops);
	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
			    &rvu_dbg_npc_mcam_rules_fops);

	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
			    &rvu_dbg_npc_rx_miss_act_fops);

	if (!rvu->hw->cap.npc_exact_match_enabled)
		return;

	debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
			    &rvu_dbg_npc_exact_entries_fops);

	debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
			    &rvu_dbg_npc_exact_info_fops);

	debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
			    &rvu_dbg_npc_exact_drop_cnt_fops);

}
3567
/* Print FREE/BUSY status bitmasks for all CPT engines of one type
 * (AE/SE/IE). Engine numbering is contiguous: SEs first, then IEs,
 * then AEs, with per-type counts read from CPT_AF_CONSTANTS1.
 * Returns -EINVAL for an unknown engine type.
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct cpt_ctx *ctx = filp->private;
	u64 busy_sts = 0, free_sts = 0;
	u32 e_min = 0, e_max = 0, e, i;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	/* Translate engine type to its [e_min, e_max) global range */
	switch (eng_type) {
	case CPT_AE_TYPE:
		e_min = max_ses + max_ies;
		e_max = max_ses + max_ies + max_aes;
		break;
	case CPT_SE_TYPE:
		e_min = 0;
		e_max = max_ses;
		break;
	case CPT_IE_TYPE:
		e_min = max_ses;
		e_max = max_ses + max_ies;
		break;
	default:
		return -EINVAL;
	}

	/* NOTE(review): assumes at most 64 engines per type — a 65th
	 * would shift by >= 64 (UB) and overflow the u64 masks; confirm
	 * against the hardware limits.
	 */
	for (e = e_min, i = 0; e < e_max; e++, i++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
		if (reg & 0x1)		/* bit 0: engine busy */
			busy_sts |= 1ULL << i;

		if (reg & 0x2)		/* bit 1: engine free */
			free_sts |= 1ULL << i;
	}
	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);

	return 0;
}
3613
/* debugfs "cpt_ae_sts": status bitmaps for asymmetric (AE) engines */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3620
/* debugfs "cpt_se_sts": status bitmaps for symmetric (SE) engines */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3627
/* debugfs "cpt_ie_sts": status bitmaps for IPsec (IE) engines */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
3634
rvu_dbg_cpt_engines_info_display(struct seq_file * filp,void * unused)3635 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
3636 {
3637 struct cpt_ctx *ctx = filp->private;
3638 u16 max_ses, max_ies, max_aes;
3639 struct rvu *rvu = ctx->rvu;
3640 int blkaddr = ctx->blkaddr;
3641 u32 e_max, e;
3642 u64 reg;
3643
3644 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3645 max_ses = reg & 0xffff;
3646 max_ies = (reg >> 16) & 0xffff;
3647 max_aes = (reg >> 32) & 0xffff;
3648
3649 e_max = max_ses + max_ies + max_aes;
3650
3651 seq_puts(filp, "===========================================\n");
3652 for (e = 0; e < e_max; e++) {
3653 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
3654 seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
3655 reg & 0xff);
3656 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
3657 seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
3658 reg);
3659 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
3660 seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
3661 reg);
3662 seq_puts(filp, "===========================================\n");
3663 }
3664 return 0;
3665 }
3666
3667 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
3668
rvu_dbg_cpt_lfs_info_display(struct seq_file * filp,void * unused)3669 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
3670 {
3671 struct cpt_ctx *ctx = filp->private;
3672 int blkaddr = ctx->blkaddr;
3673 struct rvu *rvu = ctx->rvu;
3674 struct rvu_block *block;
3675 struct rvu_hwinfo *hw;
3676 u64 reg;
3677 u32 lf;
3678
3679 hw = rvu->hw;
3680 block = &hw->block[blkaddr];
3681 if (!block->lf.bmap)
3682 return -ENODEV;
3683
3684 seq_puts(filp, "===========================================\n");
3685 for (lf = 0; lf < block->lf.max; lf++) {
3686 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
3687 seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
3688 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
3689 seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
3690 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
3691 seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
3692 reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
3693 (lf << block->lfshift));
3694 seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
3695 seq_puts(filp, "===========================================\n");
3696 }
3697 return 0;
3698 }
3699
3700 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
3701
/* debugfs "cpt_err_info": dump CPT fault/interrupt status registers
 * (FLT, PSN, RVU, RAS and engine error info) for the block.
 */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
3728
/* debugfs "cpt_pc": dump CPT performance counters (instruction request
 * and latency, NCB read request/latency, UC-fill reads, active cycles
 * and clock count).
 */
static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
	seq_printf(filp, "CPT instruction requests %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
	seq_printf(filp, "CPT instruction latency %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
	seq_printf(filp, "CPT NCB read requests %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
	seq_printf(filp, "CPT NCB read latency %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
	seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
	seq_printf(filp, "CPT active cycles pc %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
	seq_printf(filp, "CPT clock count pc %llu\n", reg);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
3755
rvu_dbg_cpt_init(struct rvu * rvu,int blkaddr)3756 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
3757 {
3758 struct cpt_ctx *ctx;
3759
3760 if (!is_block_implemented(rvu->hw, blkaddr))
3761 return;
3762
3763 if (blkaddr == BLKADDR_CPT0) {
3764 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
3765 ctx = &rvu->rvu_dbg.cpt_ctx[0];
3766 ctx->blkaddr = BLKADDR_CPT0;
3767 ctx->rvu = rvu;
3768 } else {
3769 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
3770 rvu->rvu_dbg.root);
3771 ctx = &rvu->rvu_dbg.cpt_ctx[1];
3772 ctx->blkaddr = BLKADDR_CPT1;
3773 ctx->rvu = rvu;
3774 }
3775
3776 debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3777 &rvu_dbg_cpt_pc_fops);
3778 debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3779 &rvu_dbg_cpt_ae_sts_fops);
3780 debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3781 &rvu_dbg_cpt_se_sts_fops);
3782 debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3783 &rvu_dbg_cpt_ie_sts_fops);
3784 debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3785 &rvu_dbg_cpt_engines_info_fops);
3786 debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3787 &rvu_dbg_cpt_lfs_info_fops);
3788 debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3789 &rvu_dbg_cpt_err_info_fops);
3790 }
3791
/* Root debugfs directory name: OcteonTx2 silicon keeps the legacy
 * "octeontx2" name; newer silicon uses "cn10k".
 */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	return is_rvu_otx2(rvu) ? "octeontx2" : "cn10k";
}
3799
rvu_dbg_init(struct rvu * rvu)3800 void rvu_dbg_init(struct rvu *rvu)
3801 {
3802 rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
3803
3804 debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
3805 &rvu_dbg_rsrc_status_fops);
3806
3807 if (!is_rvu_otx2(rvu))
3808 debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
3809 rvu, &rvu_dbg_lmtst_map_table_fops);
3810
3811 if (!cgx_get_cgxcnt_max())
3812 goto create;
3813
3814 if (is_rvu_otx2(rvu))
3815 debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
3816 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3817 else
3818 debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
3819 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3820
3821 create:
3822 rvu_dbg_npa_init(rvu);
3823 rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
3824
3825 rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
3826 rvu_dbg_cgx_init(rvu);
3827 rvu_dbg_npc_init(rvu);
3828 rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
3829 rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
3830 rvu_dbg_mcs_init(rvu);
3831 }
3832
/* Tear down the whole debugfs tree created by rvu_dbg_init() */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
3837
3838 #endif /* CONFIG_DEBUG_FS */
3839