1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3 *
4 * Copyright (C) 2019 Marvell.
5 *
6 */
7
8 #ifdef CONFIG_DEBUG_FS
9
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 #include "rvu_npc_hash.h"
22 #include "mcs.h"
23
24 #define DEBUGFS_DIR_NAME "octeontx2"
25
/* Generic per-register stat indices; used to index the
 * cgx_rx_stats_fields[] and cgx_tx_stats_fields[] name tables below.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
47
/* Human-readable names for CGX RX statistics, indexed by CGX_STATx.
 * The strings are emitted verbatim to debugfs, so treat them as ABI.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
63
/* Human-readable names for CGX TX statistics, indexed by CGX_STATx.
 * The strings are emitted verbatim to debugfs, so treat them as ABI.
 */
static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Transmit underflow and were truncated",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};
84
/* Human-readable names for RPM (CN10K MAC) RX statistics, in hardware
 * register order. Emitted verbatim to debugfs, so treat as ABI.
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets with out error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	/* NOTE(review): "a1nrange" looks garbled (probably "in range"),
	 * but the string is user-visible debugfs output — left unchanged.
	 */
	"Packets received with a1nrange length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
130
/* Human-readable names for RPM (CN10K MAC) TX statistics, in hardware
 * register order. Emitted verbatim to debugfs, so treat as ABI.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
167
/* CPT engine types (hardware encoding starts at 1).
 * NOTE(review): presumably AE = asymmetric, SE = symmetric, IE = IPsec
 * engine — confirm against the CPT hardware documentation.
 */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
173
/* Placeholders so the FOPS macros below can be instantiated with NULL
 * for a missing read or write handler (the token "NULL" expands to
 * rvu_dbg_NULL / rvu_dbg_open_NULL, which are plain NULL).
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Define file_operations for a seq_file based debugfs entry:
 * reads go through single_open() with rvu_dbg_<read_op> as the show
 * callback, writes (if any) through rvu_dbg_<write_op>.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Define file_operations for a raw (non-seq_file) debugfs entry with
 * direct read/write handlers.
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
198
199 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
200
rvu_dbg_mcs_port_stats_display(struct seq_file * filp,void * unused,int dir)201 static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
202 {
203 struct mcs *mcs = filp->private;
204 struct mcs_port_stats stats;
205 int lmac;
206
207 seq_puts(filp, "\n port stats\n");
208 mutex_lock(&mcs->stats_lock);
209 for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
210 mcs_get_port_stats(mcs, &stats, lmac, dir);
211 seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
212 seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
213
214 if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
215 seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
216 stats.preempt_err_cnt);
217 if (dir == MCS_TX)
218 seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
219 stats.sectag_insert_err_cnt);
220 }
221 mutex_unlock(&mcs->stats_lock);
222 return 0;
223 }
224
/* seq_file show handler: MCS RX per-port stats. */
static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
231
/* seq_file show handler: MCS TX per-port stats. */
static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
238
rvu_dbg_mcs_sa_stats_display(struct seq_file * filp,void * unused,int dir)239 static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
240 {
241 struct mcs *mcs = filp->private;
242 struct mcs_sa_stats stats;
243 struct rsrc_bmap *map;
244 int sa_id;
245
246 if (dir == MCS_TX) {
247 map = &mcs->tx.sa;
248 mutex_lock(&mcs->stats_lock);
249 for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
250 seq_puts(filp, "\n TX SA stats\n");
251 mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
252 seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
253 stats.pkt_encrypt_cnt);
254
255 seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
256 stats.pkt_protected_cnt);
257 }
258 mutex_unlock(&mcs->stats_lock);
259 return 0;
260 }
261
262 /* RX stats */
263 map = &mcs->rx.sa;
264 mutex_lock(&mcs->stats_lock);
265 for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
266 seq_puts(filp, "\n RX SA stats\n");
267 mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
268 seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
269 seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
270 seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
271 seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
272 seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
273 }
274 mutex_unlock(&mcs->stats_lock);
275 return 0;
276 }
277
/* seq_file show handler: MCS RX per-SA stats. */
static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
284
/* seq_file show handler: MCS TX per-SA stats. */
static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
291
/* seq_file show handler: per-SC (secure channel) TX counters for every
 * allocated TX SC.
 */
static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->tx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
		seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);

		/* Octet counters are only reported on single-MCS-block HW */
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
				   stats.octet_encrypt_cnt);
			seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
				   stats.octet_protected_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
321
/* seq_file show handler: per-SC (secure channel) RX counters for every
 * allocated RX SC. Which counters exist depends on the MCS block count.
 */
static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->rx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
		seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
		seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);

		/* Delay/ok counters only exist on multi-block MCS HW */
		if (mcs->hw->mcs_blks > 1) {
			seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
			seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
		}
		/* Octet counters only exist on single-block MCS HW */
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
				   stats.octet_decrypt_cnt);
			seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
				   stats.octet_validate_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
358
rvu_dbg_mcs_flowid_stats_display(struct seq_file * filp,void * unused,int dir)359 static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
360 {
361 struct mcs *mcs = filp->private;
362 struct mcs_flowid_stats stats;
363 struct rsrc_bmap *map;
364 int flow_id;
365
366 seq_puts(filp, "\n Flowid stats\n");
367
368 if (dir == MCS_RX)
369 map = &mcs->rx.flow_ids;
370 else
371 map = &mcs->tx.flow_ids;
372
373 mutex_lock(&mcs->stats_lock);
374 for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
375 mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
376 seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
377 }
378 mutex_unlock(&mcs->stats_lock);
379 return 0;
380 }
381
/* seq_file show handler: MCS TX flow-ID stats. */
static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
388
/* seq_file show handler: MCS RX flow-ID stats. */
static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
395
/* seq_file show handler: per-SecY TX counters (controlled/uncontrolled
 * port traffic, encryption/protection octets, error counters) for every
 * allocated TX SecY.
 */
static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->tx.secy;
	seq_puts(filp, "\n MCS TX secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
			   stats.octet_encrypted_cnt);
		seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
			   stats.octet_protected_cnt);
		seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
			   stats.pkt_noactivesa_cnt);
		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
438
/* seq_file show handler: per-SecY RX counters (controlled/uncontrolled
 * port traffic, validation octets, tagging/SA error counters) for every
 * allocated RX SecY.
 */
static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->rx.secy;
	seq_puts(filp, "\n MCS secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
			   stats.octet_decrypted_cnt);
		seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
			   stats.octet_validated_cnt);
		seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
			   stats.pkt_port_disabled_cnt);
		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
		seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
			   stats.pkt_nosa_cnt);
		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
			   stats.pkt_nosaerror_cnt);
		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
			   stats.pkt_tagged_ctl_cnt);
		seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
		/* "no tag" counter only exists on multi-block MCS HW */
		if (mcs->hw->mcs_blks > 1)
			seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
				   stats.pkt_notag_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
491
rvu_dbg_mcs_init(struct rvu * rvu)492 static void rvu_dbg_mcs_init(struct rvu *rvu)
493 {
494 struct mcs *mcs;
495 char dname[10];
496 int i;
497
498 if (!rvu->mcs_blk_cnt)
499 return;
500
501 rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
502
503 for (i = 0; i < rvu->mcs_blk_cnt; i++) {
504 mcs = mcs_get_pdata(i);
505
506 sprintf(dname, "mcs%d", i);
507 rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
508 rvu->rvu_dbg.mcs_root);
509
510 rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
511
512 debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
513 &rvu_dbg_mcs_rx_flowid_stats_fops);
514
515 debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
516 &rvu_dbg_mcs_rx_secy_stats_fops);
517
518 debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
519 &rvu_dbg_mcs_rx_sc_stats_fops);
520
521 debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
522 &rvu_dbg_mcs_rx_sa_stats_fops);
523
524 debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
525 &rvu_dbg_mcs_rx_port_stats_fops);
526
527 rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
528
529 debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
530 &rvu_dbg_mcs_tx_flowid_stats_fops);
531
532 debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
533 &rvu_dbg_mcs_tx_secy_stats_fops);
534
535 debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
536 &rvu_dbg_mcs_tx_sc_stats_fops);
537
538 debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
539 &rvu_dbg_mcs_tx_sa_stats_fops);
540
541 debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
542 &rvu_dbg_mcs_tx_port_stats_fops);
543 }
544 }
545
546 #define LMT_MAPTBL_ENTRY_SIZE 16
547 /* Dump LMTST map table */
rvu_dbg_lmtst_map_table_display(struct file * filp,char __user * buffer,size_t count,loff_t * ppos)548 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
549 char __user *buffer,
550 size_t count, loff_t *ppos)
551 {
552 struct rvu *rvu = filp->private_data;
553 u64 lmt_addr, val, tbl_base;
554 int pf, vf, num_vfs, hw_vfs;
555 void __iomem *lmt_map_base;
556 int apr_pfs, apr_vfs;
557 int buf_size = 10240;
558 size_t off = 0;
559 int index = 0;
560 char *buf;
561 int ret;
562
563 /* don't allow partial reads */
564 if (*ppos != 0)
565 return 0;
566
567 buf = kzalloc(buf_size, GFP_KERNEL);
568 if (!buf)
569 return -ENOMEM;
570
571 tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
572 val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
573 apr_vfs = 1 << (val & 0xF);
574 apr_pfs = 1 << ((val >> 4) & 0x7);
575
576 lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
577 LMT_MAPTBL_ENTRY_SIZE);
578 if (!lmt_map_base) {
579 dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
580 kfree(buf);
581 return false;
582 }
583
584 off += scnprintf(&buf[off], buf_size - 1 - off,
585 "\n\t\t\t\t\tLmtst Map Table Entries");
586 off += scnprintf(&buf[off], buf_size - 1 - off,
587 "\n\t\t\t\t\t=======================");
588 off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
589 off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
590 off += scnprintf(&buf[off], buf_size - 1 - off,
591 "Lmtline Base (word 0)\t\t");
592 off += scnprintf(&buf[off], buf_size - 1 - off,
593 "Lmt Map Entry (word 1)");
594 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
595 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
596 off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
597 pf);
598
599 index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
600 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
601 (tbl_base + index));
602 lmt_addr = readq(lmt_map_base + index);
603 off += scnprintf(&buf[off], buf_size - 1 - off,
604 " 0x%016llx\t\t", lmt_addr);
605 index += 8;
606 val = readq(lmt_map_base + index);
607 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
608 val);
609 /* Reading num of VFs per PF */
610 rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
611 for (vf = 0; vf < num_vfs; vf++) {
612 index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
613 ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
614 off += scnprintf(&buf[off], buf_size - 1 - off,
615 "PF%d:VF%d \t\t", pf, vf);
616 off += scnprintf(&buf[off], buf_size - 1 - off,
617 " 0x%llx\t\t", (tbl_base + index));
618 lmt_addr = readq(lmt_map_base + index);
619 off += scnprintf(&buf[off], buf_size - 1 - off,
620 " 0x%016llx\t\t", lmt_addr);
621 index += 8;
622 val = readq(lmt_map_base + index);
623 off += scnprintf(&buf[off], buf_size - 1 - off,
624 " 0x%016llx\n", val);
625 }
626 }
627 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
628
629 ret = min(off, count);
630 if (copy_to_user(buffer, buf, ret))
631 ret = -EFAULT;
632 kfree(buf);
633
634 iounmap(lmt_map_base);
635 if (ret < 0)
636 return ret;
637
638 *ppos = ret;
639 return ret;
640 }
641
642 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
643
get_lf_str_list(const struct rvu_block * block,int pcifunc,char * lfs)644 static void get_lf_str_list(const struct rvu_block *block, int pcifunc,
645 char *lfs)
646 {
647 int lf = 0, seq = 0, len = 0, prev_lf = block->lf.max;
648
649 for_each_set_bit(lf, block->lf.bmap, block->lf.max) {
650 if (lf >= block->lf.max)
651 break;
652
653 if (block->fn_map[lf] != pcifunc)
654 continue;
655
656 if (lf == prev_lf + 1) {
657 prev_lf = lf;
658 seq = 1;
659 continue;
660 }
661
662 if (seq)
663 len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
664 else
665 len += (len ? sprintf(lfs + len, ",%d", lf) :
666 sprintf(lfs + len, "%d", lf));
667
668 prev_lf = lf;
669 seq = 0;
670 }
671
672 if (seq)
673 len += sprintf(lfs + len, "-%d", prev_lf);
674
675 lfs[len] = '\0';
676 }
677
get_max_column_width(struct rvu * rvu)678 static int get_max_column_width(struct rvu *rvu)
679 {
680 int index, pf, vf, lf_str_size = 12, buf_size = 256;
681 struct rvu_block block;
682 u16 pcifunc;
683 char *buf;
684
685 buf = kzalloc(buf_size, GFP_KERNEL);
686 if (!buf)
687 return -ENOMEM;
688
689 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
690 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
691 pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
692 if (!pcifunc)
693 continue;
694
695 for (index = 0; index < BLK_COUNT; index++) {
696 block = rvu->hw->block[index];
697 if (!strlen(block.name))
698 continue;
699
700 get_lf_str_list(&block, pcifunc, buf);
701 if (lf_str_size <= strlen(buf))
702 lf_str_size = strlen(buf) + 1;
703 }
704 }
705 }
706
707 kfree(buf);
708 return lf_str_size;
709 }
710
711 /* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
					  char __user *buffer,
					  size_t count, loff_t *ppos)
{
	int index, off = 0, flag = 0, len = 0, i = 0;
	struct rvu *rvu = filp->private_data;
	int bytes_not_copied = 0;
	struct rvu_block block;
	int pf, vf, pcifunc;
	int buf_size = 2048;
	int lf_str_size;
	char *lfs;
	char *buf;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Get the maximum width of a column */
	lf_str_size = get_max_column_width(rvu);
	/* NOTE(review): get_max_column_width() may return -ENOMEM; a
	 * negative size makes the kzalloc() below fail, so the error is
	 * funnelled into the !lfs path — confirm this is intentional.
	 */

	lfs = kzalloc(lf_str_size, GFP_KERNEL);
	if (!lfs) {
		kfree(buf);
		return -ENOMEM;
	}
	/* Header row: "pcifunc" plus one column per implemented block */
	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
			  "pcifunc");
	for (index = 0; index < BLK_COUNT; index++)
		if (strlen(rvu->hw->block[index].name)) {
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 "%-*s", lf_str_size,
					 rvu->hw->block[index].name);
		}

	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	/* NOTE(review): the user-buffer offset is computed as i * off,
	 * where 'off' is the size of the *current* row and 'i' counts rows
	 * copied so far — this assumes all rows are the same length;
	 * verify against actual output widths.
	 */
	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
	if (bytes_not_copied)
		goto out;

	i++;
	*ppos += off;
	/* One row per PF/VF that has at least one LF attached */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			off = 0;
			flag = 0;
			pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
			if (!pcifunc)
				continue;

			if (vf) {
				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			} else {
				sprintf(lfs, "PF%d", pf);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			}

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;
				len = 0;
				lfs[len] = '\0';
				get_lf_str_list(&block, pcifunc, lfs);
				if (strlen(lfs))
					flag = 1;

				off += scnprintf(&buf[off], buf_size - 1 - off,
						 "%-*s", lf_str_size, lfs);
			}
			/* Only emit the row if some block had LFs attached */
			if (flag) {
				off +=	scnprintf(&buf[off],
						  buf_size - 1 - off, "\n");
				bytes_not_copied = copy_to_user(buffer +
								(i * off),
								buf, off);
				if (bytes_not_copied)
					goto out;

				i++;
				*ppos += off;
			}
		}
	}

out:
	kfree(lfs);
	kfree(buf);
	if (bytes_not_copied)
		return -EFAULT;

	return *ppos;
}

RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
816
/* seq_file show handler: one row per CGX-mapped PF showing the PCI
 * device, pcifunc, NIX block, CGX/RPM unit, LMAC and base channel.
 */
static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
{
	char cgx[10], lmac[10], chan[10];
	struct rvu *rvu = filp->private;
	struct pci_dev *pdev = NULL;
	struct mac_ops *mac_ops;
	struct rvu_pfvf *pfvf;
	int pf, domain, blkid;
	u8 cgx_id, lmac_id;
	u16 pcifunc;

	/* NOTE(review): PCI domain is hard-coded to 2 here — confirm this
	 * holds for all supported platforms.
	 */
	domain = 2;
	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	/* There can be no CGX devices at all */
	if (!mac_ops)
		return 0;
	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\tCHAN\n",
		   mac_ops->name);
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		/* Takes a reference on pdev; released via pci_dev_put() */
		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
		if (!pdev)
			continue;

		cgx[0] = 0;
		lmac[0] = 0;
		pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
		pfvf = rvu_get_pfvf(rvu, pcifunc);

		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
			blkid = 0;
		else
			blkid = 1;

		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
				    &lmac_id);
		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
		sprintf(lmac, "LMAC%d", lmac_id);
		sprintf(chan, "%d",
			rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0));
		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\t%s\n",
			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac,
			   chan);

		pci_dev_put(pdev);
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
869
/* seq_file show handler: dump firmware-provided data (PTP clock info,
 * SDP channel configuration, and per-PF/VF MAC addresses).
 * Returns -EAGAIN if the firmware data region is not mapped yet.
 */
static int rvu_dbg_rvu_fwdata_display(struct seq_file *s, void *unused)
{
	struct rvu *rvu = s->private;
	struct rvu_fwdata *fwdata;
	u8 mac[ETH_ALEN];
	int count = 0, i;

	if (!rvu->fwdata)
		return -EAGAIN;

	fwdata = rvu->fwdata;
	seq_puts(s, "\nRVU Firmware Data:\n");
	seq_puts(s, "\n\t\tPTP INFORMATION\n");
	seq_puts(s, "\t\t===============\n");
	seq_printf(s, "\t\texternal clockrate \t :%x\n",
		   fwdata->ptp_ext_clk_rate);
	seq_printf(s, "\t\texternal timestamp \t :%x\n",
		   fwdata->ptp_ext_tstamp);
	seq_puts(s, "\n");

	seq_puts(s, "\n\t\tSDP CHANNEL INFORMATION\n");
	seq_puts(s, "\t\t=======================\n");
	seq_printf(s, "\t\tValid \t\t\t :%x\n", fwdata->channel_data.valid);
	seq_printf(s, "\t\tNode ID \t\t :%x\n",
		   fwdata->channel_data.info.node_id);
	seq_printf(s, "\t\tNumber of VFs  \t\t :%x\n",
		   fwdata->channel_data.info.max_vfs);
	seq_printf(s, "\t\tNumber of PF-Rings \t :%x\n",
		   fwdata->channel_data.info.num_pf_rings);
	seq_printf(s, "\t\tPF SRN \t\t\t :%x\n",
		   fwdata->channel_data.info.pf_srn);
	seq_puts(s, "\n");

	/* PF MACs: skip all-zero (unassigned) entries */
	seq_puts(s, "\n\t\tPF-INDEX  MACADDRESS\n");
	seq_puts(s, "\t\t====================\n");
	for (i = 0; i < PF_MACNUM_MAX; i++) {
		u64_to_ether_addr(fwdata->pf_macs[i], mac);
		if (!is_zero_ether_addr(mac)) {
			seq_printf(s, "\t\t  %d       %pM\n", i, mac);
			count++;
		}
	}

	if (!count)
		seq_puts(s, "\t\tNo valid address found\n");

	/* VF MACs: same scheme as above */
	seq_puts(s, "\n\t\tVF-INDEX  MACADDRESS\n");
	seq_puts(s, "\t\t====================\n");
	count = 0;
	for (i = 0; i < VF_MACNUM_MAX; i++) {
		u64_to_ether_addr(fwdata->vf_macs[i], mac);
		if (!is_zero_ether_addr(mac)) {
			seq_printf(s, "\t\t  %d       %pM\n", i, mac);
			count++;
		}
	}

	if (!count)
		seq_puts(s, "\t\tNo valid address found\n");

	return 0;
}

RVU_DEBUG_SEQ_FOPS(rvu_fwdata, rvu_fwdata_display, NULL);
934
rvu_dbg_is_valid_lf(struct rvu * rvu,int blkaddr,int lf,u16 * pcifunc)935 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
936 u16 *pcifunc)
937 {
938 struct rvu_block *block;
939 struct rvu_hwinfo *hw;
940
941 hw = rvu->hw;
942 block = &hw->block[blkaddr];
943
944 if (lf < 0 || lf >= block->lf.max) {
945 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
946 block->lf.max - 1);
947 return false;
948 }
949
950 *pcifunc = block->fn_map[lf];
951 if (!*pcifunc) {
952 dev_warn(rvu->dev,
953 "This LF is not attached to any RVU PFFUNC\n");
954 return false;
955 }
956 return true;
957 }
958
print_npa_qsize(struct seq_file * m,struct rvu_pfvf * pfvf)959 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
960 {
961 char *buf;
962
963 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
964 if (!buf)
965 return;
966
967 if (!pfvf->aura_ctx) {
968 seq_puts(m, "Aura context is not initialized\n");
969 } else {
970 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
971 pfvf->aura_ctx->qsize);
972 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
973 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
974 }
975
976 if (!pfvf->pool_ctx) {
977 seq_puts(m, "Pool context is not initialized\n");
978 } else {
979 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
980 pfvf->pool_ctx->qsize);
981 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
982 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
983 }
984 kfree(buf);
985 }
986
987 /* The 'qsize' entry dumps current Aura/Pool context Qsize
988 * and each context's current enable/disable status in a bitmap.
989 */
rvu_dbg_qsize_display(struct seq_file * s,void * unsused,int blktype)990 static int rvu_dbg_qsize_display(struct seq_file *s, void *unsused,
991 int blktype)
992 {
993 void (*print_qsize)(struct seq_file *s,
994 struct rvu_pfvf *pfvf) = NULL;
995 struct rvu_pfvf *pfvf;
996 struct rvu *rvu;
997 int qsize_id;
998 u16 pcifunc;
999 int blkaddr;
1000
1001 rvu = s->private;
1002 switch (blktype) {
1003 case BLKTYPE_NPA:
1004 qsize_id = rvu->rvu_dbg.npa_qsize_id;
1005 print_qsize = print_npa_qsize;
1006 break;
1007
1008 case BLKTYPE_NIX:
1009 qsize_id = rvu->rvu_dbg.nix_qsize_id;
1010 print_qsize = print_nix_qsize;
1011 break;
1012
1013 default:
1014 return -EINVAL;
1015 }
1016
1017 if (blktype == BLKTYPE_NPA)
1018 blkaddr = BLKADDR_NPA;
1019 else
1020 blkaddr = debugfs_get_aux_num(s->file);
1021
1022 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
1023 return -EINVAL;
1024
1025 pfvf = rvu_get_pfvf(rvu, pcifunc);
1026 print_qsize(s, pfvf);
1027
1028 return 0;
1029 }
1030
rvu_dbg_qsize_write(struct file * file,const char __user * buffer,size_t count,loff_t * ppos,int blktype)1031 static ssize_t rvu_dbg_qsize_write(struct file *file,
1032 const char __user *buffer, size_t count,
1033 loff_t *ppos, int blktype)
1034 {
1035 char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
1036 struct seq_file *seqfile = file->private_data;
1037 char *cmd_buf, *cmd_buf_tmp, *subtoken;
1038 struct rvu *rvu = seqfile->private;
1039 int blkaddr;
1040 u16 pcifunc;
1041 int ret, lf;
1042
1043 cmd_buf = memdup_user_nul(buffer, count);
1044 if (IS_ERR(cmd_buf))
1045 return -ENOMEM;
1046
1047 cmd_buf_tmp = strchr(cmd_buf, '\n');
1048 if (cmd_buf_tmp) {
1049 *cmd_buf_tmp = '\0';
1050 count = cmd_buf_tmp - cmd_buf + 1;
1051 }
1052
1053 cmd_buf_tmp = cmd_buf;
1054 subtoken = strsep(&cmd_buf, " ");
1055 ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
1056 if (cmd_buf)
1057 ret = -EINVAL;
1058
1059 if (ret < 0 || !strncmp(subtoken, "help", 4)) {
1060 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
1061 goto qsize_write_done;
1062 }
1063
1064 if (blktype == BLKTYPE_NPA)
1065 blkaddr = BLKADDR_NPA;
1066 else
1067 blkaddr = debugfs_get_aux_num(file);
1068
1069 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
1070 ret = -EINVAL;
1071 goto qsize_write_done;
1072 }
1073 if (blktype == BLKTYPE_NPA)
1074 rvu->rvu_dbg.npa_qsize_id = lf;
1075 else
1076 rvu->rvu_dbg.nix_qsize_id = lf;
1077
1078 qsize_write_done:
1079 kfree(cmd_buf_tmp);
1080 return ret ? ret : count;
1081 }
1082
rvu_dbg_npa_qsize_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1083 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
1084 const char __user *buffer,
1085 size_t count, loff_t *ppos)
1086 {
1087 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1088 BLKTYPE_NPA);
1089 }
1090
rvu_dbg_npa_qsize_display(struct seq_file * filp,void * unused)1091 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
1092 {
1093 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
1094 }
1095
1096 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
1097
1098 /* Dumps given NPA Aura's context */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	/* "Wn:" prefixes name the 64-bit word of the aura context that each
	 * field belongs to.
	 */
	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be is only dumped on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst is only dumped on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
1145
1146 /* Dumps given NPA Pool's context */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	/* "Wn:" prefixes name the 64-bit word of the pool context that each
	 * field belongs to.
	 */
	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be is only dumped on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst is only dumped on non-OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
1192
1193 /* Reads aura/pool's ctx from admin queue */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Fetch the LF/id/all selection previously stored by the
	 * corresponding debugfs write handler (see write_npa_ctx()).
	 */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build a READ instruction for the NPA admin queue */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			   max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps ids [0, qsize); otherwise just the selected id */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;

		/* Skip if queue is uninitialized */
		if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
			continue;

		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			   aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
1278
/* Validate and store the aura/pool context selection written via debugfs,
 * for later use by rvu_dbg_npa_ctx_display(). Returns 0 on success or
 * -EINVAL on a bad LF, uninitialized context or out-of-range id.
 */
static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
{
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->aura_ctx->qsize;
		break;
	case NPA_AQ_CTYPE_POOL:
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->pool_ctx->qsize;
		break;
	default:
		break;	/* max_id stays 0; rejected by the range check below */
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			 max_id - 1);
		return -EINVAL;
	}

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
		break;

	case NPA_AQ_CTYPE_POOL:
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1329
/* Parse "<npalf> <id|all>" from a debugfs write into @npalf, @id and @all.
 *
 * @cmd_buf must hold at least *count + 1 bytes; the user data is copied in
 * and NUL-terminated here. On return *count is trimmed to the consumed
 * length if a newline terminated the input.
 *
 * Returns 0 on success, -EFAULT if the copy from userspace fails, -EINVAL
 * for missing/extra tokens, or a kstrtoint() error for bad numbers.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	/* First token: the NPA LF index (decimal) */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	/* Second token: a context id, or the keyword "all" */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	/* Any leftover text means the input was malformed */
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
1367
rvu_dbg_npa_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)1368 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
1369 const char __user *buffer,
1370 size_t count, loff_t *ppos, int ctype)
1371 {
1372 char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
1373 "aura" : "pool";
1374 struct seq_file *seqfp = filp->private_data;
1375 struct rvu *rvu = seqfp->private;
1376 int npalf, id = 0, ret;
1377 bool all = false;
1378
1379 if ((*ppos != 0) || !count)
1380 return -EINVAL;
1381
1382 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1383 if (!cmd_buf)
1384 return count;
1385 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1386 &npalf, &id, &all);
1387 if (ret < 0) {
1388 dev_info(rvu->dev,
1389 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1390 ctype_string, ctype_string);
1391 goto done;
1392 } else {
1393 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1394 }
1395 done:
1396 kfree(cmd_buf);
1397 return ret ? ret : count;
1398 }
1399
rvu_dbg_npa_aura_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1400 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1401 const char __user *buffer,
1402 size_t count, loff_t *ppos)
1403 {
1404 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1405 NPA_AQ_CTYPE_AURA);
1406 }
1407
rvu_dbg_npa_aura_ctx_display(struct seq_file * filp,void * unused)1408 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
1409 {
1410 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
1411 }
1412
1413 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1414
rvu_dbg_npa_pool_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1415 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1416 const char __user *buffer,
1417 size_t count, loff_t *ppos)
1418 {
1419 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1420 NPA_AQ_CTYPE_POOL);
1421 }
1422
rvu_dbg_npa_pool_ctx_display(struct seq_file * filp,void * unused)1423 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
1424 {
1425 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
1426 }
1427
1428 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1429
ndc_cache_stats(struct seq_file * s,int blk_addr,int ctype,int transaction)1430 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1431 int ctype, int transaction)
1432 {
1433 u64 req, out_req, lat, cant_alloc;
1434 struct nix_hw *nix_hw;
1435 struct rvu *rvu;
1436 int port;
1437
1438 if (blk_addr == BLKADDR_NDC_NPA0) {
1439 rvu = s->private;
1440 } else {
1441 nix_hw = s->private;
1442 rvu = nix_hw->rvu;
1443 }
1444
1445 for (port = 0; port < NDC_MAX_PORT; port++) {
1446 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1447 (port, ctype, transaction));
1448 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1449 (port, ctype, transaction));
1450 out_req = rvu_read64(rvu, blk_addr,
1451 NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1452 (port, ctype, transaction));
1453 cant_alloc = rvu_read64(rvu, blk_addr,
1454 NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1455 (port, transaction));
1456 seq_printf(s, "\nPort:%d\n", port);
1457 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1458 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1459 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1460 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1461 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1462 }
1463 }
1464
ndc_blk_cache_stats(struct seq_file * s,int idx,int blk_addr)1465 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1466 {
1467 seq_puts(s, "\n***** CACHE mode read stats *****\n");
1468 ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1469 seq_puts(s, "\n***** CACHE mode write stats *****\n");
1470 ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1471 seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1472 ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1473 seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1474 ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1475 return 0;
1476 }
1477
rvu_dbg_npa_ndc_cache_display(struct seq_file * filp,void * unused)1478 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
1479 {
1480 return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1481 }
1482
1483 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1484
ndc_blk_hits_miss_stats(struct seq_file * s,int idx,int blk_addr)1485 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1486 {
1487 struct nix_hw *nix_hw;
1488 struct rvu *rvu;
1489 int bank, max_bank;
1490 u64 ndc_af_const;
1491
1492 if (blk_addr == BLKADDR_NDC_NPA0) {
1493 rvu = s->private;
1494 } else {
1495 nix_hw = s->private;
1496 rvu = nix_hw->rvu;
1497 }
1498
1499 ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
1500 max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
1501 for (bank = 0; bank < max_bank; bank++) {
1502 seq_printf(s, "BANK:%d\n", bank);
1503 seq_printf(s, "\tHits:\t%lld\n",
1504 (u64)rvu_read64(rvu, blk_addr,
1505 NDC_AF_BANKX_HIT_PC(bank)));
1506 seq_printf(s, "\tMiss:\t%lld\n",
1507 (u64)rvu_read64(rvu, blk_addr,
1508 NDC_AF_BANKX_MISS_PC(bank)));
1509 }
1510 return 0;
1511 }
1512
rvu_dbg_nix_ndc_rx_cache_display(struct seq_file * filp,void * unused)1513 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1514 {
1515 struct nix_hw *nix_hw = filp->private;
1516 int blkaddr = 0;
1517 int ndc_idx = 0;
1518
1519 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1520 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1521 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1522
1523 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1524 }
1525
1526 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1527
rvu_dbg_nix_ndc_tx_cache_display(struct seq_file * filp,void * unused)1528 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1529 {
1530 struct nix_hw *nix_hw = filp->private;
1531 int blkaddr = 0;
1532 int ndc_idx = 0;
1533
1534 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1535 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1536 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1537
1538 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1539 }
1540
1541 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1542
rvu_dbg_npa_ndc_hits_miss_display(struct seq_file * filp,void * unused)1543 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
1544 void *unused)
1545 {
1546 return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1547 }
1548
1549 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1550
rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file * filp,void * unused)1551 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1552 void *unused)
1553 {
1554 struct nix_hw *nix_hw = filp->private;
1555 int ndc_idx = NPA0_U;
1556 int blkaddr = 0;
1557
1558 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1559 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1560
1561 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1562 }
1563
1564 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1565
rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file * filp,void * unused)1566 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1567 void *unused)
1568 {
1569 struct nix_hw *nix_hw = filp->private;
1570 int ndc_idx = NPA0_U;
1571 int blkaddr = 0;
1572
1573 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1574 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1575
1576 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1577 }
1578
1579 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1580
print_nix_cn10k_sq_ctx(struct seq_file * m,struct nix_cn10k_sq_ctx_s * sq_ctx)1581 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1582 struct nix_cn10k_sq_ctx_s *sq_ctx)
1583 {
1584 seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1585 sq_ctx->ena, sq_ctx->qint_idx);
1586 seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1587 sq_ctx->substream, sq_ctx->sdp_mcast);
1588 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1589 sq_ctx->cq, sq_ctx->sqe_way_mask);
1590
1591 seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1592 sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1593 seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1594 sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1595 seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1596 sq_ctx->default_chan, sq_ctx->sqb_count);
1597
1598 seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1599 seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1600 seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1601 sq_ctx->sqb_aura, sq_ctx->sq_int);
1602 seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1603 sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1604
1605 seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1606 sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1607 seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1608 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1609 seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1610 sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1611 seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1612 sq_ctx->tail_offset, sq_ctx->smenq_offset);
1613 seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1614 sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1615
1616 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1617 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1618 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1619 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1620 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1621 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1622 sq_ctx->smenq_next_sqb);
1623
1624 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1625
1626 seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1627 seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1628 sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1629 seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1630 sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1631 seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1632 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1633
1634 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1635 (u64)sq_ctx->scm_lso_rem);
1636 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1637 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1638 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1639 (u64)sq_ctx->dropped_octs);
1640 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1641 (u64)sq_ctx->dropped_pkts);
1642 }
1643
/* Print one SQ's transmit scheduler path by walking PARENT registers
 * upwards from the SQ's SMQ: SQ -> SMQ -> TL4 -> TL3 -> TL2 -> TL1.
 */
static void print_tm_tree(struct seq_file *m,
			  struct nix_aq_enq_rsp *rsp, u64 sq)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;
	int blkaddr = nix_hw->blkaddr;
	u16 smq, tl4, tl3, tl2, tl1;
	u64 regval;

	smq = sq_ctx->smq;

	regval = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(smq));
	tl4 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK, regval);

	regval = rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(tl4));
	tl3 = FIELD_GET(NIX_AF_TL4_PARENT_MASK, regval);

	regval = rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(tl3));
	tl2 = FIELD_GET(NIX_AF_TL3_PARENT_MASK, regval);

	regval = rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(tl2));
	tl1 = FIELD_GET(NIX_AF_TL2_PARENT_MASK, regval);
	seq_printf(m,
		   "SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n",
		   sq, smq, tl4, tl3, tl2, tl1);
}
1672
/* Dumps the given LF's tm_tree (scheduler hierarchy) registers */
rvu_dbg_nix_tm_tree_display(struct seq_file * m,void * unused)1674 static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused)
1675 {
1676 int qidx, nixlf, rc, id, max_id = 0;
1677 struct nix_hw *nix_hw = m->private;
1678 struct rvu *rvu = nix_hw->rvu;
1679 struct nix_aq_enq_req aq_req;
1680 struct nix_aq_enq_rsp rsp;
1681 struct rvu_pfvf *pfvf;
1682 u16 pcifunc;
1683
1684 nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1685 id = rvu->rvu_dbg.nix_tm_ctx.id;
1686
1687 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1688 return -EINVAL;
1689
1690 pfvf = rvu_get_pfvf(rvu, pcifunc);
1691 max_id = pfvf->sq_ctx->qsize;
1692
1693 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1694 aq_req.hdr.pcifunc = pcifunc;
1695 aq_req.ctype = NIX_AQ_CTYPE_SQ;
1696 aq_req.op = NIX_AQ_INSTOP_READ;
1697 seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1698 for (qidx = id; qidx < max_id; qidx++) {
1699 aq_req.qidx = qidx;
1700
1701 /* Skip SQ's if not initialized */
1702 if (!test_bit(qidx, pfvf->sq_bmap))
1703 continue;
1704
1705 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1706
1707 if (rc) {
1708 seq_printf(m, "Failed to read SQ(%d) context\n",
1709 aq_req.qidx);
1710 continue;
1711 }
1712 print_tm_tree(m, &rsp, aq_req.qidx);
1713 }
1714 return 0;
1715 }
1716
rvu_dbg_nix_tm_tree_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1717 static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp,
1718 const char __user *buffer,
1719 size_t count, loff_t *ppos)
1720 {
1721 struct seq_file *m = filp->private_data;
1722 struct nix_hw *nix_hw = m->private;
1723 struct rvu *rvu = nix_hw->rvu;
1724 struct rvu_pfvf *pfvf;
1725 u16 pcifunc;
1726 u64 nixlf;
1727 int ret;
1728
1729 ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
1730 if (ret)
1731 return ret;
1732
1733 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1734 return -EINVAL;
1735
1736 pfvf = rvu_get_pfvf(rvu, pcifunc);
1737 if (!pfvf->sq_ctx) {
1738 dev_warn(rvu->dev, "SQ context is not initialized\n");
1739 return -EINVAL;
1740 }
1741
1742 rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
1743 return count;
1744 }
1745
1746 RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write);
1747
print_tm_topo(struct seq_file * m,u64 schq,u32 lvl)1748 static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl)
1749 {
1750 struct nix_hw *nix_hw = m->private;
1751 struct rvu *rvu = nix_hw->rvu;
1752 int blkaddr, link, link_level;
1753 struct rvu_hwinfo *hw;
1754
1755 hw = rvu->hw;
1756 blkaddr = nix_hw->blkaddr;
1757 if (lvl == NIX_TXSCH_LVL_MDQ) {
1758 seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq,
1759 rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)));
1760 seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq,
1761 rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq)));
1762 seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT =0x%llx\n", schq,
1763 rvu_read64(rvu, blkaddr,
1764 NIX_AF_MDQX_OUT_MD_COUNT(schq)));
1765 seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq,
1766 rvu_read64(rvu, blkaddr,
1767 NIX_AF_MDQX_SCHEDULE(schq)));
1768 seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq,
1769 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq)));
1770 seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq,
1771 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq)));
1772 seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq,
1773 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq)));
1774 seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq,
1775 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq)));
1776 seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq,
1777 rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)));
1778 seq_puts(m, "\n");
1779 }
1780
1781 if (lvl == NIX_TXSCH_LVL_TL4) {
1782 seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq,
1783 rvu_read64(rvu, blkaddr,
1784 NIX_AF_TL4X_SDP_LINK_CFG(schq)));
1785 seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq,
1786 rvu_read64(rvu, blkaddr,
1787 NIX_AF_TL4X_SCHEDULE(schq)));
1788 seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq,
1789 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq)));
1790 seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq,
1791 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq)));
1792 seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq,
1793 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq)));
1794 seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq,
1795 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq)));
1796 seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq,
1797 rvu_read64(rvu, blkaddr,
1798 NIX_AF_TL4X_TOPOLOGY(schq)));
1799 seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq,
1800 rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq)));
1801 seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1802 rvu_read64(rvu, blkaddr,
1803 NIX_AF_TL4X_MD_DEBUG0(schq)));
1804 seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1805 rvu_read64(rvu, blkaddr,
1806 NIX_AF_TL4X_MD_DEBUG1(schq)));
1807 seq_puts(m, "\n");
1808 }
1809
1810 if (lvl == NIX_TXSCH_LVL_TL3) {
1811 seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq,
1812 rvu_read64(rvu, blkaddr,
1813 NIX_AF_TL3X_SCHEDULE(schq)));
1814 seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq,
1815 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq)));
1816 seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq,
1817 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq)));
1818 seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq,
1819 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq)));
1820 seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq,
1821 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq)));
1822 seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq,
1823 rvu_read64(rvu, blkaddr,
1824 NIX_AF_TL3X_TOPOLOGY(schq)));
1825 seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", schq,
1826 rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq)));
1827 seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1828 rvu_read64(rvu, blkaddr,
1829 NIX_AF_TL3X_MD_DEBUG0(schq)));
1830 seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1831 rvu_read64(rvu, blkaddr,
1832 NIX_AF_TL3X_MD_DEBUG1(schq)));
1833
1834 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1835 & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1836 if (lvl == link_level) {
1837 seq_printf(m,
1838 "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1839 schq, rvu_read64(rvu, blkaddr,
1840 NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1841 for (link = 0; link < hw->cgx_links; link++)
1842 seq_printf(m,
1843 "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1844 schq, link,
1845 rvu_read64(rvu, blkaddr,
1846 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1847 }
1848 seq_puts(m, "\n");
1849 }
1850
1851 if (lvl == NIX_TXSCH_LVL_TL2) {
1852 seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq,
1853 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq)));
1854 seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq,
1855 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq)));
1856 seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq,
1857 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq)));
1858 seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq,
1859 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq)));
1860 seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq,
1861 rvu_read64(rvu, blkaddr,
1862 NIX_AF_TL2X_TOPOLOGY(schq)));
1863 seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq,
1864 rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq)));
1865 seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1866 rvu_read64(rvu, blkaddr,
1867 NIX_AF_TL2X_MD_DEBUG0(schq)));
1868 seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1869 rvu_read64(rvu, blkaddr,
1870 NIX_AF_TL2X_MD_DEBUG1(schq)));
1871
1872 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
1873 & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1874 if (lvl == link_level) {
1875 seq_printf(m,
1876 "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
1877 schq, rvu_read64(rvu, blkaddr,
1878 NIX_AF_TL3_TL2X_BP_STATUS(schq)));
1879 for (link = 0; link < hw->cgx_links; link++)
1880 seq_printf(m,
1881 "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
1882 schq, link, rvu_read64(rvu, blkaddr,
1883 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
1884 }
1885 seq_puts(m, "\n");
1886 }
1887
1888 if (lvl == NIX_TXSCH_LVL_TL1) {
1889 seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n",
1890 schq,
1891 rvu_read64(rvu, blkaddr,
1892 NIX_AF_TX_LINKX_NORM_CREDIT(schq)));
1893 seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq,
1894 rvu_read64(rvu, blkaddr,
1895 NIX_AF_TX_LINKX_HW_XOFF(schq)));
1896 seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq,
1897 rvu_read64(rvu, blkaddr,
1898 NIX_AF_TL1X_SCHEDULE(schq)));
1899 seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq,
1900 rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq)));
1901 seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq,
1902 rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq)));
1903 seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq,
1904 rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)));
1905 seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq,
1906 rvu_read64(rvu, blkaddr,
1907 NIX_AF_TL1X_TOPOLOGY(schq)));
1908 seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq,
1909 rvu_read64(rvu, blkaddr,
1910 NIX_AF_TL1X_MD_DEBUG0(schq)));
1911 seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq,
1912 rvu_read64(rvu, blkaddr,
1913 NIX_AF_TL1X_MD_DEBUG1(schq)));
1914 seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n",
1915 schq,
1916 rvu_read64(rvu, blkaddr,
1917 NIX_AF_TL1X_DROPPED_PACKETS(schq)));
1918 seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq,
1919 rvu_read64(rvu, blkaddr,
1920 NIX_AF_TL1X_DROPPED_BYTES(schq)));
1921 seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq,
1922 rvu_read64(rvu, blkaddr,
1923 NIX_AF_TL1X_RED_PACKETS(schq)));
1924 seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq,
1925 rvu_read64(rvu, blkaddr,
1926 NIX_AF_TL1X_RED_BYTES(schq)));
1927 seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq,
1928 rvu_read64(rvu, blkaddr,
1929 NIX_AF_TL1X_YELLOW_PACKETS(schq)));
1930 seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq,
1931 rvu_read64(rvu, blkaddr,
1932 NIX_AF_TL1X_YELLOW_BYTES(schq)));
1933 seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq,
1934 rvu_read64(rvu, blkaddr,
1935 NIX_AF_TL1X_GREEN_PACKETS(schq)));
1936 seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq,
1937 rvu_read64(rvu, blkaddr,
1938 NIX_AF_TL1X_GREEN_BYTES(schq)));
1939 seq_puts(m, "\n");
1940 }
1941 }
1942
1943 /*dumps given tm_topo registers*/
rvu_dbg_nix_tm_topo_display(struct seq_file * m,void * unused)1944 static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused)
1945 {
1946 struct nix_hw *nix_hw = m->private;
1947 struct rvu *rvu = nix_hw->rvu;
1948 struct nix_aq_enq_req aq_req;
1949 struct nix_txsch *txsch;
1950 int nixlf, lvl, schq;
1951 u16 pcifunc;
1952
1953 nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
1954
1955 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1956 return -EINVAL;
1957
1958 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1959 aq_req.hdr.pcifunc = pcifunc;
1960 aq_req.ctype = NIX_AQ_CTYPE_SQ;
1961 aq_req.op = NIX_AQ_INSTOP_READ;
1962 seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
1963
1964 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1965 txsch = &nix_hw->txsch[lvl];
1966 for (schq = 0; schq < txsch->schq.max; schq++) {
1967 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc)
1968 print_tm_topo(m, schq, lvl);
1969 }
1970 }
1971 return 0;
1972 }
1973
rvu_dbg_nix_tm_topo_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)1974 static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp,
1975 const char __user *buffer,
1976 size_t count, loff_t *ppos)
1977 {
1978 struct seq_file *m = filp->private_data;
1979 struct nix_hw *nix_hw = m->private;
1980 struct rvu *rvu = nix_hw->rvu;
1981 struct rvu_pfvf *pfvf;
1982 u16 pcifunc;
1983 u64 nixlf;
1984 int ret;
1985
1986 ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
1987 if (ret)
1988 return ret;
1989
1990 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1991 return -EINVAL;
1992
1993 pfvf = rvu_get_pfvf(rvu, pcifunc);
1994 if (!pfvf->sq_ctx) {
1995 dev_warn(rvu->dev, "SQ context is not initialized\n");
1996 return -EINVAL;
1997 }
1998
1999 rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
2000 return count;
2001 }
2002
2003 RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write);
2004
/* Dump a NIX SQ (send queue) hardware context word-by-word (W0..W15)
 * from an admin-queue read response.  Non-OTx2 (CN10K) silicon uses a
 * different context layout, so dumping is delegated to the CN10K helper.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K context structure differs from OTx2; same raw words,
	 * reinterpreted with the CN10K layout.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	/* W3: SMQ scheduling / meta-descriptor state */
	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4..W8: SQB (send queue buffer) pointers */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	/* W9: LSO (large send offload) configuration */
	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* W10..W15: statistics counters */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
2074
print_nix_cn10k_rq_ctx(struct seq_file * m,struct nix_cn10k_rq_ctx_s * rq_ctx)2075 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
2076 struct nix_cn10k_rq_ctx_s *rq_ctx)
2077 {
2078 seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
2079 rq_ctx->ena, rq_ctx->sso_ena);
2080 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
2081 rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
2082 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
2083 rq_ctx->cq, rq_ctx->lenerr_dis);
2084 seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
2085 rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
2086 seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
2087 rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
2088 seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
2089 rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
2090 seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
2091
2092 seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
2093 rq_ctx->spb_aura, rq_ctx->lpb_aura);
2094 seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
2095 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
2096 rq_ctx->sso_grp, rq_ctx->sso_tt);
2097 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
2098 rq_ctx->pb_caching, rq_ctx->wqe_caching);
2099 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
2100 rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
2101 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
2102 rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
2103 seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
2104 rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
2105
2106 seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
2107 seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
2108 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
2109 seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
2110 rq_ctx->wqe_skip, rq_ctx->spb_ena);
2111 seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
2112 rq_ctx->lpb_sizem1, rq_ctx->first_skip);
2113 seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
2114 rq_ctx->later_skip, rq_ctx->xqe_imm_size);
2115 seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
2116 rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
2117
2118 seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
2119 rq_ctx->xqe_drop, rq_ctx->xqe_pass);
2120 seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
2121 rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
2122 seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
2123 rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
2124 seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
2125 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
2126
2127 seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
2128 rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
2129 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
2130 rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
2131 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
2132 rq_ctx->rq_int, rq_ctx->rq_int_ena);
2133 seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
2134
2135 seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
2136 rq_ctx->ltag, rq_ctx->good_utag);
2137 seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
2138 rq_ctx->bad_utag, rq_ctx->flow_tagw);
2139 seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
2140 rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
2141 seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
2142 rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
2143 seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
2144
2145 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
2146 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
2147 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
2148 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
2149 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
2150 }
2151
/* Dump a NIX RQ (receive queue) hardware context word-by-word (W0..W10)
 * from an admin-queue read response.  Non-OTx2 (CN10K) silicon uses a
 * different context layout, so dumping is delegated to the CN10K helper.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K context structure differs from OTx2; same raw words,
	 * reinterpreted with the CN10K layout.
	 */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	/* W3/W4: backpressure drop/pass thresholds per pool/aura */
	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6..W10: statistics counters */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
2220
/* Dump a NIX CQ (completion queue) hardware context word-by-word
 * (W0..W3) from an admin-queue read response.  Some fields (lbpid,
 * lbp_ena, lbp_frac, cpt_drop_err_en) exist only on non-OTx2 (CN10K)
 * silicon and are printed conditionally.
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
		seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
		seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
		/* Recombine the three lbpid bit-fields into the full value:
		 * high occupies bits [8:6], med [5:3], low [2:0].
		 */
		seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
			   cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
			   cq_ctx->lbpid_low);
		seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
	}

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
		seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
			   cq_ctx->cpt_drop_err_en);
	}
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
2268
/* Common debugfs read handler for the sq_ctx/rq_ctx/cq_ctx files.
 * Reads the previously-selected nixlf and queue id (or "all") for the
 * given context type, fetches each context from hardware via the NIX
 * admin queue mailbox, and pretty-prints it with the matching
 * print_nix_*_ctx() helper.  Returns 0 on success, -EINVAL on an
 * invalid LF/ctype, uninitialized context, or AQ read failure.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Selection state was stored by the corresponding write handler */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick the per-type printer and the upper bound for "all" mode */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all": dump ids [0, qsize); otherwise just the single id */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
2357
/* Validate a nixlf/queue-id selection for the given context type and,
 * if valid, store it in rvu_dbg so the matching read handler knows
 * which context(s) to dump.  Returns 0 on success, -EINVAL on an
 * invalid LF, uninitialized context, out-of-range id, or bad ctype.
 */
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string,
			       struct seq_file *m)
{
	struct nix_hw *nix_hw = m->private;
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Context must already be set up for this PF/VF; max_id stays 0
	 * for an unknown ctype, which makes the range check below fail.
	 */
	switch (ctype) {
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->sq_ctx->qsize;
		break;
	case NIX_AQ_CTYPE_RQ:
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->rq_ctx->qsize;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->cq_ctx->qsize;
		break;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
			 ctype_string, max_id - 1);
		return -EINVAL;
	}

	/* Record the selection for the display path */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
2420
rvu_dbg_nix_queue_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)2421 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
2422 const char __user *buffer,
2423 size_t count, loff_t *ppos,
2424 int ctype)
2425 {
2426 struct seq_file *m = filp->private_data;
2427 struct nix_hw *nix_hw = m->private;
2428 struct rvu *rvu = nix_hw->rvu;
2429 char *cmd_buf, *ctype_string;
2430 int nixlf, id = 0, ret;
2431 bool all = false;
2432
2433 if ((*ppos != 0) || !count)
2434 return -EINVAL;
2435
2436 switch (ctype) {
2437 case NIX_AQ_CTYPE_SQ:
2438 ctype_string = "sq";
2439 break;
2440 case NIX_AQ_CTYPE_RQ:
2441 ctype_string = "rq";
2442 break;
2443 case NIX_AQ_CTYPE_CQ:
2444 ctype_string = "cq";
2445 break;
2446 default:
2447 return -EINVAL;
2448 }
2449
2450 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
2451
2452 if (!cmd_buf)
2453 return count;
2454
2455 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
2456 &nixlf, &id, &all);
2457 if (ret < 0) {
2458 dev_info(rvu->dev,
2459 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
2460 ctype_string, ctype_string);
2461 goto done;
2462 } else {
2463 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
2464 ctype_string, m);
2465 }
2466 done:
2467 kfree(cmd_buf);
2468 return ret ? ret : count;
2469 }
2470
/* debugfs write handler for sq_ctx: delegates to the common queue-context
 * writer with the SQ context type.
 */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_SQ);
}
2478
/* debugfs read handler for sq_ctx: delegates to the common queue-context
 * dumper with the SQ context type.
 */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}
2483
2484 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
2485
/* debugfs write handler for rq_ctx: delegates to the common queue-context
 * writer with the RQ context type.
 */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_RQ);
}
2493
/* debugfs read handler for rq_ctx: delegates to the common queue-context
 * dumper with the RQ context type.
 */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
}
2498
2499 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
2500
/* debugfs write handler for cq_ctx: delegates to the common queue-context
 * writer with the CQ context type.
 */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_CQ);
}
2508
/* debugfs read handler for cq_ctx: delegates to the common queue-context
 * dumper with the CQ context type.
 */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}
2513
2514 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
2515
print_nix_qctx_qsize(struct seq_file * filp,int qsize,unsigned long * bmap,char * qtype)2516 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
2517 unsigned long *bmap, char *qtype)
2518 {
2519 char *buf;
2520
2521 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2522 if (!buf)
2523 return;
2524
2525 bitmap_print_to_pagebuf(false, buf, bmap, qsize);
2526 seq_printf(filp, "%s context count : %d\n", qtype, qsize);
2527 seq_printf(filp, "%s context ena/dis bitmap : %s\n",
2528 qtype, buf);
2529 kfree(buf);
2530 }
2531
print_nix_qsize(struct seq_file * filp,struct rvu_pfvf * pfvf)2532 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
2533 {
2534 if (!pfvf->cq_ctx)
2535 seq_puts(filp, "cq context is not initialized\n");
2536 else
2537 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
2538 "cq");
2539
2540 if (!pfvf->rq_ctx)
2541 seq_puts(filp, "rq context is not initialized\n");
2542 else
2543 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
2544 "rq");
2545
2546 if (!pfvf->sq_ctx)
2547 seq_puts(filp, "sq context is not initialized\n");
2548 else
2549 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
2550 "sq");
2551 }
2552
/* debugfs write handler for nix qsize: delegates to the generic qsize
 * writer with the NIX block type.
 */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}
2560
/* debugfs read handler for nix qsize: delegates to the generic qsize
 * dumper with the NIX block type.
 */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}
2565
2566 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
2567
/* Dump one NIX ingress bandwidth (policer) profile word-by-word
 * (W0..W15): policer mode and color configuration, rate/burst
 * exponents and mantissas, per-color actions, and per-color
 * pass/drop statistics.
 */
static void print_band_prof_ctx(struct seq_file *m,
				struct nix_bandprof_s *prof)
{
	char *str;

	/* NOTE(review): no default case — str stays uninitialized if
	 * pc_mode falls outside the four enum values.  Presumably
	 * pc_mode is a 2-bit field so all cases are covered; confirm
	 * against the nix_bandprof_s definition.
	 */
	switch (prof->pc_mode) {
	case NIX_RX_PC_MODE_VLAN:
		str = "VLAN";
		break;
	case NIX_RX_PC_MODE_DSCP:
		str = "DSCP";
		break;
	case NIX_RX_PC_MODE_GEN:
		str = "Generic";
		break;
	case NIX_RX_PC_MODE_RSVD:
		str = "Reserved";
		break;
	}
	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
	str = (prof->icolor == 3) ? "Color blind" :
		(prof->icolor == 0) ? "Green" :
		(prof->icolor == 1) ? "Yellow" : "Red";
	seq_printf(m, "W0: icolor\t\t%s\n", str);
	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);

	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
	str = (prof->lmode == 0) ? "byte" : "packet";
	seq_printf(m, "W1: lmode\t\t%s\n", str);
	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
	/* Per-color actions: 0 = PASS, 1 = DROP, otherwise RED */
	str = (prof->gc_action == 0) ? "PASS" :
		(prof->gc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: gc_action\t\t%s\n", str);
	str = (prof->yc_action == 0) ? "PASS" :
		(prof->yc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: yc_action\t\t%s\n", str);
	str = (prof->rc_action == 0) ? "PASS" :
		(prof->rc_action == 1) ? "DROP" : "RED";
	seq_printf(m, "W1: rc_action\t\t%s\n", str);
	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
	seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);

	/* W2..W15: timestamp, accumulators and per-color statistics */
	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
		   (u64)prof->green_pkt_pass);
	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
		   (u64)prof->yellow_pkt_pass);
	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
	seq_printf(m, "W7: green_octs_pass\t%lld\n",
		   (u64)prof->green_octs_pass);
	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
		   (u64)prof->yellow_octs_pass);
	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
		   (u64)prof->green_pkt_drop);
	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
		   (u64)prof->yellow_pkt_drop);
	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
	seq_printf(m, "W13: green_octs_drop\t%lld\n",
		   (u64)prof->green_octs_drop);
	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
		   (u64)prof->yellow_octs_drop);
	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
	seq_puts(m, "==============================\n");
}
2646
/* Dump the hardware context of every allocated ingress-policer (bandwidth
 * profile) on all layers of this NIX block.  Contexts are fetched through
 * the NIX admin queue and pretty-printed by print_band_prof_ctx().
 * Always returns 0 so the debugfs read itself succeeds; AQ errors are
 * logged and truncate the dump.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Only dump profiles that are currently allocated */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* AQ profile index: low 14 bits select the profile,
			 * the layer is encoded in bits 15:14.
			 */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			/* FUNC bits zero means the profile belongs to a PF,
			 * otherwise FUNC-1 is the VF index within that PF.
			 */
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(rvu->pdev, pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(rvu->pdev, pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
2703
rvu_dbg_nix_band_prof_rsrc_display(struct seq_file * m,void * unused)2704 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2705 {
2706 struct nix_hw *nix_hw = m->private;
2707 struct nix_ipolicer *ipolicer;
2708 int layer;
2709 char *str;
2710
2711 /* Ingress policers do not exist on all platforms */
2712 if (!nix_hw->ipolicer)
2713 return 0;
2714
2715 seq_puts(m, "\nBandwidth profile resource free count\n");
2716 seq_puts(m, "=====================================\n");
2717 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2718 if (layer == BAND_PROF_INVAL_LAYER)
2719 continue;
2720 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2721 (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2722
2723 ipolicer = &nix_hw->ipolicer[layer];
2724 seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
2725 ipolicer->band_prof.max,
2726 rvu_rsrc_free_count(&ipolicer->band_prof));
2727 }
2728 seq_puts(m, "=====================================\n");
2729
2730 return 0;
2731 }
2732
2733 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
2734
rvu_dbg_nix_init(struct rvu * rvu,int blkaddr)2735 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
2736 {
2737 struct nix_hw *nix_hw;
2738
2739 if (!is_block_implemented(rvu->hw, blkaddr))
2740 return;
2741
2742 if (blkaddr == BLKADDR_NIX0) {
2743 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
2744 nix_hw = &rvu->hw->nix[0];
2745 } else {
2746 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
2747 rvu->rvu_dbg.root);
2748 nix_hw = &rvu->hw->nix[1];
2749 }
2750
2751 debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw,
2752 &rvu_dbg_nix_tm_tree_fops);
2753 debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw,
2754 &rvu_dbg_nix_tm_topo_fops);
2755 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2756 &rvu_dbg_nix_sq_ctx_fops);
2757 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2758 &rvu_dbg_nix_rq_ctx_fops);
2759 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2760 &rvu_dbg_nix_cq_ctx_fops);
2761 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2762 &rvu_dbg_nix_ndc_tx_cache_fops);
2763 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2764 &rvu_dbg_nix_ndc_rx_cache_fops);
2765 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2766 &rvu_dbg_nix_ndc_tx_hits_miss_fops);
2767 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2768 &rvu_dbg_nix_ndc_rx_hits_miss_fops);
2769 debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu,
2770 blkaddr, &rvu_dbg_nix_qsize_fops);
2771 debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2772 &rvu_dbg_nix_band_prof_ctx_fops);
2773 debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2774 &rvu_dbg_nix_band_prof_rsrc_fops);
2775 }
2776
rvu_dbg_npa_init(struct rvu * rvu)2777 static void rvu_dbg_npa_init(struct rvu *rvu)
2778 {
2779 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2780
2781 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2782 &rvu_dbg_npa_qsize_fops);
2783 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2784 &rvu_dbg_npa_aura_ctx_fops);
2785 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2786 &rvu_dbg_npa_pool_ctx_fops);
2787 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2788 &rvu_dbg_npa_ndc_cache_fops);
2789 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2790 &rvu_dbg_npa_ndc_hits_miss_fops);
2791 }
2792
/* Fetch one cumulative NIX RX statistic for the current CGX/LMAC and print
 * it if the query succeeded.  Evaluates to the raw counter value.
 * Expects 'rvu', 'cgxd', 'lmac_id', 'err' and 's' in the caller's scope;
 * 'err' is set as a side effect.  NOTE(review): the yielded value is
 * uninitialized when the query fails - callers must check 'err' first.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
({									\
	u64 cnt;							\
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),		\
				     NIX_STATS_RX, &(cnt));		\
	if (!err)							\
		seq_printf(s, "%s: %llu\n", name, cnt);			\
	cnt;								\
})

/* TX counterpart: same contract, but reads NIX_STATS_TX counters. */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)				\
({									\
	u64 cnt;							\
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),		\
				     NIX_STATS_TX, &(cnt));		\
	if (!err)							\
		seq_printf(s, "%s: %llu\n", name, cnt);			\
	cnt;								\
})
2812
cgx_print_stats(struct seq_file * s,int lmac_id)2813 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2814 {
2815 struct cgx_link_user_info linfo;
2816 struct mac_ops *mac_ops;
2817 void *cgxd = s->private;
2818 u64 ucast, mcast, bcast;
2819 int stat = 0, err = 0;
2820 u64 tx_stat, rx_stat;
2821 struct rvu *rvu;
2822
2823 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2824 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2825 if (!rvu)
2826 return -ENODEV;
2827
2828 mac_ops = get_mac_ops(cgxd);
2829 /* There can be no CGX devices at all */
2830 if (!mac_ops)
2831 return 0;
2832
2833 /* Link status */
2834 seq_puts(s, "\n=======Link Status======\n\n");
2835 err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2836 if (err)
2837 seq_puts(s, "Failed to read link status\n");
2838 seq_printf(s, "\nLink is %s %d Mbps\n\n",
2839 linfo.link_up ? "UP" : "DOWN", linfo.speed);
2840
2841 /* Rx stats */
2842 seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2843 mac_ops->name);
2844 ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2845 if (err)
2846 return err;
2847 mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2848 if (err)
2849 return err;
2850 bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2851 if (err)
2852 return err;
2853 seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2854 PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2855 if (err)
2856 return err;
2857 PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2858 if (err)
2859 return err;
2860 PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2861 if (err)
2862 return err;
2863
2864 /* Tx stats */
2865 seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2866 mac_ops->name);
2867 ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2868 if (err)
2869 return err;
2870 mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2871 if (err)
2872 return err;
2873 bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2874 if (err)
2875 return err;
2876 seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2877 PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2878 if (err)
2879 return err;
2880 PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2881 if (err)
2882 return err;
2883
2884 /* Rx stats */
2885 seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2886 while (stat < mac_ops->rx_stats_cnt) {
2887 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2888 if (err)
2889 return err;
2890 if (is_rvu_otx2(rvu))
2891 seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2892 rx_stat);
2893 else
2894 seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2895 rx_stat);
2896 stat++;
2897 }
2898
2899 /* Tx stats */
2900 stat = 0;
2901 seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2902 while (stat < mac_ops->tx_stats_cnt) {
2903 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2904 if (err)
2905 return err;
2906
2907 if (is_rvu_otx2(rvu))
2908 seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2909 tx_stat);
2910 else
2911 seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2912 tx_stat);
2913 stat++;
2914 }
2915
2916 return err;
2917 }
2918
rvu_dbg_derive_lmacid(struct seq_file * s)2919 static int rvu_dbg_derive_lmacid(struct seq_file *s)
2920 {
2921 return debugfs_get_aux_num(s->file);
2922 }
2923
/* seq_file show callback for the per-LMAC "stats" debugfs file. */
static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused)
{
	int lmac = rvu_dbg_derive_lmacid(s);

	return cgx_print_stats(s, lmac);
}

RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2930
/* Dump the DMAC filter configuration (broadcast/multicast/unicast accept
 * modes) and the enabled DMAC CAM entries for one LMAC.
 * NOTE(review): the pci_get_device() reference on the AF device is not
 * released here - confirm whether that is intentional.
 */
static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
{
	struct pci_dev *pdev = NULL;
	void *cgxd = s->private;
	char *bcast, *mcast;
	u16 index, domain;
	u8 dmac[ETH_ALEN];
	struct rvu *rvu;
	u64 cfg, mac;
	int pf;

	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	/* PCI domain of the RVU PFs; presumably fixed at 2 on this silicon -
	 * TODO confirm against the platform's PCI topology.
	 */
	domain = 2;

	/* RVU PF devices sit at bus = pf + 1, slot 0 in that domain */
	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
	if (!pdev)
		return 0;

	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";

	seq_puts(s,
		 "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
	seq_printf(s, "%s PF%d %9s %9s",
		   dev_name(&pdev->dev), pf, bcast, mcast);
	/* CAM-accept set means only programmed unicast DMACs pass */
	if (cfg & CGX_DMAC_CAM_ACCEPT)
		seq_printf(s, "%12s\n\n", "UNICAST");
	else
		seq_printf(s, "%16s\n\n", "PROMISCUOUS");

	seq_puts(s, "\nDMAC-INDEX ADDRESS\n");

	for (index = 0 ; index < 32 ; index++) {
		cfg = cgx_read_dmac_entry(cgxd, index);
		/* Display enabled dmac entries associated with current lmac */
		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
			u64_to_ether_addr(mac, dmac);
			seq_printf(s, "%7d %pM\n", index, dmac);
		}
	}

	pci_dev_put(pdev);
	return 0;
}
2983
/* seq_file show callback for the per-LMAC "mac_filter" debugfs file. */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused)
{
	int lmac = rvu_dbg_derive_lmacid(s);

	return cgx_print_dmac_flt(s, lmac);
}

RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2990
/* Dump the firmware-shared per-LMAC data: supported/advertised link info,
 * LMAC config, the raw SFP EEPROM contents and PHY/FEC statistics.
 * Returns -EAGAIN until the firmware data region has been mapped.
 */
static int cgx_print_fwdata(struct seq_file *s, int lmac_id)
{
	struct cgx_lmac_fwdata_s *fwdata;
	void *cgxd = s->private;
	struct phy_s *phy;
	struct rvu *rvu;
	int cgx_id, i;

	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	/* Firmware data not (yet) shared with the AF driver */
	if (!rvu->fwdata)
		return -EAGAIN;

	cgx_id = cgx_get_cgxid(cgxd);

	/* USX-capable MACs use a differently laid-out fwdata table */
	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
		fwdata = &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
	else
		fwdata = &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id];

	seq_puts(s, "\nFIRMWARE SHARED:\n");
	seq_puts(s, "\t\tSUPPORTED LINK INFORMATION\t\t\n");
	seq_puts(s, "\t\t==========================\n");
	seq_printf(s, "\t\t Link modes \t\t :%llx\n",
		   fwdata->supported_link_modes);
	seq_printf(s, "\t\t Autoneg \t\t :%llx\n", fwdata->supported_an);
	seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->supported_fec);
	seq_puts(s, "\n");

	seq_puts(s, "\t\tADVERTISED LINK INFORMATION\t\t\n");
	seq_puts(s, "\t\t==========================\n");
	seq_printf(s, "\t\t Link modes \t\t :%llx\n",
		   (u64)fwdata->advertised_link_modes);
	seq_printf(s, "\t\t Autoneg \t\t :%x\n", fwdata->advertised_an);
	seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->advertised_fec);
	seq_puts(s, "\n");

	seq_puts(s, "\t\tLMAC CONFIG\t\t\n");
	seq_puts(s, "\t\t============\n");
	seq_printf(s, "\t\t rw_valid \t\t :%x\n", fwdata->rw_valid);
	seq_printf(s, "\t\t lmac_type \t\t :%x\n", fwdata->lmac_type);
	seq_printf(s, "\t\t portm_idx \t\t :%x\n", fwdata->portm_idx);
	seq_printf(s, "\t\t mgmt_port \t\t :%x\n", fwdata->mgmt_port);
	seq_printf(s, "\t\t Link modes own \t :%llx\n",
		   (u64)fwdata->advertised_link_modes_own);
	seq_puts(s, "\n");

	/* Raw EEPROM dump, 16 bytes per line, hex with no separators */
	seq_puts(s, "\n\t\tEEPROM DATA\n");
	seq_puts(s, "\t\t===========\n");
	seq_printf(s, "\t\t sff_id \t\t :%x\n", fwdata->sfp_eeprom.sff_id);
	seq_puts(s, "\t\t data \t\t\t :\n");
	seq_puts(s, "\t\t");
	for (i = 0; i < SFP_EEPROM_SIZE; i++) {
		seq_printf(s, "%x", fwdata->sfp_eeprom.buf[i]);
		if ((i + 1) % 16 == 0) {
			seq_puts(s, "\n");
			seq_puts(s, "\t\t");
		}
	}
	seq_puts(s, "\n");

	phy = &fwdata->phy;
	seq_puts(s, "\n\t\tPHY INFORMATION\n");
	seq_puts(s, "\t\t===============\n");
	seq_printf(s, "\t\t Mod type configurable \t\t :%x\n",
		   phy->misc.can_change_mod_type);
	seq_printf(s, "\t\t Mod type \t\t\t :%x\n", phy->misc.mod_type);
	seq_printf(s, "\t\t Support FEC \t\t\t :%x\n", phy->misc.has_fec_stats);
	seq_printf(s, "\t\t RSFEC corrected words \t\t :%x\n",
		   phy->fec_stats.rsfec_corr_cws);
	seq_printf(s, "\t\t RSFEC uncorrected words \t :%x\n",
		   phy->fec_stats.rsfec_uncorr_cws);
	seq_printf(s, "\t\t BRFEC corrected words \t\t :%x\n",
		   phy->fec_stats.brfec_corr_blks);
	seq_printf(s, "\t\t BRFEC uncorrected words \t :%x\n",
		   phy->fec_stats.brfec_uncorr_blks);
	seq_puts(s, "\n");

	return 0;
}
3074
/* seq_file show callback for the per-LMAC "fwdata" debugfs file. */
static int rvu_dbg_cgx_fwdata_display(struct seq_file *s, void *unused)
{
	int lmac = rvu_dbg_derive_lmacid(s);

	return cgx_print_fwdata(s, lmac);
}

RVU_DEBUG_SEQ_FOPS(cgx_fwdata, cgx_fwdata_display, NULL);
3081
rvu_dbg_cgx_init(struct rvu * rvu)3082 static void rvu_dbg_cgx_init(struct rvu *rvu)
3083 {
3084 struct mac_ops *mac_ops;
3085 unsigned long lmac_bmap;
3086 int i, lmac_id;
3087 char dname[20];
3088 void *cgx;
3089
3090 if (!cgx_get_cgxcnt_max())
3091 return;
3092
3093 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
3094 if (!mac_ops)
3095 return;
3096
3097 rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
3098 rvu->rvu_dbg.root);
3099
3100 for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
3101 cgx = rvu_cgx_pdata(i, rvu);
3102 if (!cgx)
3103 continue;
3104 lmac_bmap = cgx_get_lmac_bmap(cgx);
3105 /* cgx debugfs dir */
3106 sprintf(dname, "%s%d", mac_ops->name, i);
3107 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
3108 rvu->rvu_dbg.cgx_root);
3109
3110 for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
3111 /* lmac debugfs dir */
3112 sprintf(dname, "lmac%d", lmac_id);
3113 rvu->rvu_dbg.lmac =
3114 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
3115
3116 debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac,
3117 cgx, lmac_id, &rvu_dbg_cgx_stat_fops);
3118 debugfs_create_file_aux_num("mac_filter", 0600,
3119 rvu->rvu_dbg.lmac, cgx, lmac_id,
3120 &rvu_dbg_cgx_dmac_flt_fops);
3121 debugfs_create_file("fwdata", 0600,
3122 rvu->rvu_dbg.lmac, cgx,
3123 &rvu_dbg_cgx_fwdata_fops);
3124 }
3125 }
3126 }
3127
3128 /* NPC debugfs APIs */
/* Print MCAM entry/counter allocation info for one PF or VF; silent when
 * the function owns neither entries nor counters.
 */
static void rvu_print_npc_mcam_info(struct seq_file *s,
				    u16 pcifunc, int blkaddr)
{
	struct rvu *rvu = s->private;
	int e_alloc, e_enabled;
	int c_alloc, c_enabled;
	int pf, vf;

	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
					  &e_alloc, &e_enabled);
	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
					    &c_alloc, &c_enabled);
	if (!e_alloc && !c_alloc)
		return;

	pf = rvu_get_pf(rvu->pdev, pcifunc);
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n", pf, vf);
	} else {
		seq_printf(s, "\n\t\t Device \t\t: PF%d\n", pf);
	}

	if (e_alloc) {
		seq_printf(s, "\t\t Entries allocated \t: %d\n", e_alloc);
		seq_printf(s, "\t\t Entries enabled \t: %d\n", e_enabled);
	}
	if (c_alloc) {
		seq_printf(s, "\t\t Counters allocated \t: %d\n", c_alloc);
		seq_printf(s, "\t\t Counters enabled \t: %d\n", c_enabled);
	}
}
3160
/* Summarize global NPC MCAM state (key widths, entry/counter totals) and,
 * when entries are in use, the per-PF/VF allocation breakdown.
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
	struct rvu *rvu = filp->private;
	int pf, vf, numvfs, blkaddr;
	struct npc_mcam *mcam;
	u16 pcifunc, counters;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;
	counters = rvu->hw->npc_counters;

	seq_puts(filp, "\nNPC MCAM info:\n");
	/* MCAM keywidth on receive and transmit sides */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
	/* Key width is a 3-bit field in bits 34:32 of the KEX config */
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));

	/* mcam->lock protects the bitmaps and the per-PFVF bookkeeping
	 * read by rvu_print_npc_mcam_info() below.
	 */
	mutex_lock(&mcam->lock);
	/* MCAM entries */
	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   mcam->total_entries - mcam->bmap_entries);
	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

	/* MCAM counters */
	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   counters - mcam->counters.max);
	seq_printf(filp, "\t\t Available \t: %d\n",
		   rvu_rsrc_free_count(&mcam->counters));

	/* All entries free: nothing allocated, skip the per-device walk */
	if (mcam->bmap_entries == mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return 0;
	}

	seq_puts(filp, "\n\t\t Current allocation\n");
	seq_puts(filp, "\t\t====================\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

		/* Number of VFs enabled on this PF lives in bits 19:12 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		numvfs = (cfg >> 12) & 0xFF;
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
3227
rvu_dbg_npc_rx_miss_stats_display(struct seq_file * filp,void * unused)3228 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
3229 void *unused)
3230 {
3231 struct rvu *rvu = filp->private;
3232 struct npc_mcam *mcam;
3233 int blkaddr;
3234
3235 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3236 if (blkaddr < 0)
3237 return -ENODEV;
3238
3239 mcam = &rvu->hw->mcam;
3240
3241 seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
3242 seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
3243 rvu_read64(rvu, blkaddr,
3244 NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
3245
3246 return 0;
3247 }
3248
3249 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
3250
/* Print the MPLS TTL field of one LSE plus its mask; expects a seq_file
 * 's' in the caller's scope.  The stray '\' after "while (0)" is removed:
 * it continued the macro onto the following source line, which only
 * happened to be harmless because that line was blank.
 */
#define RVU_DBG_PRINT_MPLS_TTL(pkt, mask)				      \
do {									      \
	seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt));     \
	seq_printf(s, "mask 0x%lx\n",					      \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask));		      \
} while (0)

/* Print the MPLS label/TC/BOS fields of one LSE plus their masks.
 * Arguments are captured into locals once to avoid double evaluation.
 */
#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask)				      \
do {									      \
	typeof(_pkt) (pkt) = (_pkt);					      \
	typeof(_mask) (mask) = (_mask);					      \
	seq_printf(s, "%ld %ld %ld\n",					      \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt),		      \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt),		      \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt));		      \
	seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n",			      \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask),		      \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask),		      \
		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask));		      \
} while (0)
3271
/* Print every match field present in an MCAM rule's feature bitmap,
 * alongside its mask.  Each set bit in rule->features selects one case;
 * fields carried in network byte order are converted before printing.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_LXMB:
			if (rule->lxmb == 1)
				seq_puts(s, "\tL2M nibble is set\n");
			else
				seq_puts(s, "\tL2B nibble is set\n");
			break;
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_INNER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_itci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		case NPC_IPFRAG_IPV6:
			seq_printf(s, "0x%x ", rule->packet.next_header);
			seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
			break;
		case NPC_IPFRAG_IPV4:
			seq_printf(s, "0x%x ", rule->packet.ip_flag);
			seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
			break;
		/* sport/dport share one packet field across L4 protocols */
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		case NPC_TCP_FLAGS:
			seq_printf(s, "%d ", rule->packet.tcp_flags);
			seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
			break;
		case NPC_IPSEC_SPI:
			seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
			seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
			break;
		/* Up to four MPLS label stack entries, each with a
		 * label/TC/BOS triple and a TTL field.
		 */
		case NPC_MPLS1_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
						   rule->mask.mpls_lse[0]);
			break;
		case NPC_MPLS1_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
					       rule->mask.mpls_lse[0]);
			break;
		case NPC_MPLS2_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
						   rule->mask.mpls_lse[1]);
			break;
		case NPC_MPLS2_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
					       rule->mask.mpls_lse[1]);
			break;
		case NPC_MPLS3_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
						   rule->mask.mpls_lse[2]);
			break;
		case NPC_MPLS3_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
					       rule->mask.mpls_lse[2]);
			break;
		case NPC_MPLS4_LBTCBOS:
			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
						   rule->mask.mpls_lse[3]);
			break;
		case NPC_MPLS4_TTL:
			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
					       rule->mask.mpls_lse[3]);
			break;
		case NPC_TYPE_ICMP:
			seq_printf(s, "%d ", rule->packet.icmp_type);
			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
			break;
		case NPC_CODE_ICMP:
			seq_printf(s, "%d ", rule->packet.icmp_code);
			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
			break;
		default:
			/* Field has no value to show; just end the line */
			seq_puts(s, "\n");
			break;
		}
	}
}
3402
/* Print the action of an MCAM rule; TX and RX interfaces have distinct
 * action-op encodings.  Unknown ops print nothing.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (is_npc_intf_tx(rule->intf)) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			break;
		}
	}
}
3450
/* Map an NPC interface id to a human-readable name. */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
3468
/* Walk the software list of installed MCAM rules and dump each rule's
 * owner, interface, match fields, action, enable state and hit counter.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	/* mcam->lock protects the rule list against concurrent install/delete */
	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = rvu_get_pf(rvu->pdev, iter->owner);
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		/* Non-zero FUNC bits identify a VF; FUNC-1 is the VF index */
		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						   "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules additionally carry a forwarding target + channel */
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = rvu_get_pf(rvu->pdev, target);
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
3536
/* Dump the NPC exact-match tables: the N-way set-associative MEM table
 * (one column per way, rows by index) followed by the overflow CAM table.
 */
static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
{
	/* Per-way cursor into the sorted-by-index entry lists */
	struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
	struct npc_exact_table_entry *cam_entry;
	struct npc_exact_table *table;
	struct rvu *rvu = s->private;
	int i, j;

	u8 bitmap = 0;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Check if there is at least one entry in mem table */
	if (!table->mem_tbl_entry_cnt)
		goto dump_cam_table;

	/* Print table headers */
	seq_puts(s, "\n\tExact Match MEM Table\n");
	seq_puts(s, "Index\t");

	for (i = 0; i < table->mem_table.ways; i++) {
		mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
							struct npc_exact_table_entry, list);

		seq_printf(s, "Way-%d\t\t\t\t\t", i);
	}

	seq_puts(s, "\n");
	for (i = 0; i < table->mem_table.ways; i++)
		seq_puts(s, "\tChan  MAC                     \t");

	seq_puts(s, "\n\n");

	/* Print mem table entries */
	for (i = 0; i < table->mem_table.depth; i++) {
		/* Bit j set means way j has an entry at this row index */
		bitmap = 0;
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!mem_entry[j])
				continue;

			if (mem_entry[j]->index != i)
				continue;

			bitmap |= BIT(j);
		}

		/* No valid entries */
		if (!bitmap)
			continue;

		seq_printf(s, "%d\t", i);
		for (j = 0; j < table->mem_table.ways; j++) {
			if (!(bitmap & BIT(j))) {
				seq_puts(s, "nil\t\t\t\t\t");
				continue;
			}

			seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
				   mem_entry[j]->mac);
			/* Advance this way's cursor to its next entry */
			mem_entry[j] = list_next_entry(mem_entry[j], list);
		}
		seq_puts(s, "\n");
	}

dump_cam_table:

	if (!table->cam_tbl_entry_cnt)
		goto done;

	seq_puts(s, "\n\tExact Match CAM Table\n");
	seq_puts(s, "index\tchan\tMAC\n");

	/* Traverse cam table entries */
	list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
		seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
			   cam_entry->mac);
	}

done:
	mutex_unlock(&table->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
3623
rvu_dbg_npc_exact_show_info(struct seq_file * s,void * unused)3624 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
3625 {
3626 struct npc_exact_table *table;
3627 struct rvu *rvu = s->private;
3628 int i;
3629
3630 table = rvu->hw->table;
3631
3632 seq_puts(s, "\n\tExact Table Info\n");
3633 seq_printf(s, "Exact Match Feature : %s\n",
3634 rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable");
3635 if (!rvu->hw->cap.npc_exact_match_enabled)
3636 return 0;
3637
3638 seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
3639 for (i = 0; i < table->num_drop_rules; i++)
3640 seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
3641
3642 seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
3643 for (i = 0; i < table->num_drop_rules; i++)
3644 seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
3645
3646 seq_puts(s, "\n\tMEM Table Info\n");
3647 seq_printf(s, "Ways : %d\n", table->mem_table.ways);
3648 seq_printf(s, "Depth : %d\n", table->mem_table.depth);
3649 seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
3650 seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
3651 seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
3652
3653 seq_puts(s, "\n\tCAM Table Info\n");
3654 seq_printf(s, "Depth : %d\n", table->cam_table.depth);
3655
3656 return 0;
3657 }
3658
3659 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
3660
rvu_dbg_npc_exact_drop_cnt(struct seq_file * s,void * unused)3661 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3662 {
3663 struct npc_exact_table *table;
3664 struct rvu *rvu = s->private;
3665 struct npc_key_field *field;
3666 u16 chan, pcifunc;
3667 int blkaddr, i;
3668 u64 cfg, cam1;
3669 char *str;
3670
3671 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3672 table = rvu->hw->table;
3673
3674 field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3675
3676 seq_puts(s, "\n\t Exact Hit on drop status\n");
3677 seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3678
3679 for (i = 0; i < table->num_drop_rules; i++) {
3680 pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3681 cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3682
3683 /* channel will be always in keyword 0 */
3684 cam1 = rvu_read64(rvu, blkaddr,
3685 NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3686 chan = field->kw_mask[0] & cam1;
3687
3688 str = (cfg & 1) ? "enabled" : "disabled";
3689
3690 seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
3691 rvu_read64(rvu, blkaddr,
3692 NPC_AF_MATCH_STATX(table->counter_idx[i])),
3693 chan, str);
3694 }
3695
3696 return 0;
3697 }
3698
3699 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
3700
rvu_dbg_npc_init(struct rvu * rvu)3701 static void rvu_dbg_npc_init(struct rvu *rvu)
3702 {
3703 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
3704
3705 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
3706 &rvu_dbg_npc_mcam_info_fops);
3707 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
3708 &rvu_dbg_npc_mcam_rules_fops);
3709
3710 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
3711 &rvu_dbg_npc_rx_miss_act_fops);
3712
3713 if (!rvu->hw->cap.npc_exact_match_enabled)
3714 return;
3715
3716 debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
3717 &rvu_dbg_npc_exact_entries_fops);
3718
3719 debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
3720 &rvu_dbg_npc_exact_info_fops);
3721
3722 debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
3723 &rvu_dbg_npc_exact_drop_cnt_fops);
3724
3725 }
3726
/* Print FREE/BUSY bitmaps for one class of CPT engines.
 *
 * CPT_AF_CONSTANTS1 packs the SE/IE/AE engine counts; engines are
 * numbered SE first, then IE, then AE, so the requested type selects a
 * contiguous [first, last) range. Bit i of each map corresponds to the
 * i-th engine of that range; bit0 of EXEX_STS is busy, bit1 is free.
 *
 * Returns 0, or -EINVAL for an unknown engine type.
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 busy_map = 0, free_map = 0;
	u16 num_ses, num_ies, num_aes;
	u32 first = 0, last = 0;
	u32 eng, bit;
	u64 val;

	val = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	num_ses = val & 0xffff;
	num_ies = (val >> 16) & 0xffff;
	num_aes = (val >> 32) & 0xffff;

	switch (eng_type) {
	case CPT_SE_TYPE:
		first = 0;
		last = num_ses;
		break;
	case CPT_IE_TYPE:
		first = num_ses;
		last = num_ses + num_ies;
		break;
	case CPT_AE_TYPE:
		first = num_ses + num_ies;
		last = num_ses + num_ies + num_aes;
		break;
	default:
		return -EINVAL;
	}

	for (eng = first, bit = 0; eng < last; eng++, bit++) {
		val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng));
		if (val & 0x1)
			busy_map |= 1ULL << bit;

		if (val & 0x2)
			free_map |= 1ULL << bit;
	}

	seq_printf(filp, "FREE STS : 0x%016llx\n", free_map);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_map);

	return 0;
}
3772
/* debugfs show: FREE/BUSY status of CPT AE engines (presumably the
 * asymmetric-crypto engine class — confirm against CPT HW docs).
 */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3779
/* debugfs show: FREE/BUSY status of CPT SE engines (presumably the
 * symmetric-crypto engine class — confirm against CPT HW docs).
 */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3786
/* debugfs show: FREE/BUSY status of CPT IE engines (presumably the
 * IPsec engine class — confirm against CPT HW docs).
 */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
3793
rvu_dbg_cpt_engines_info_display(struct seq_file * filp,void * unused)3794 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
3795 {
3796 struct cpt_ctx *ctx = filp->private;
3797 u16 max_ses, max_ies, max_aes;
3798 struct rvu *rvu = ctx->rvu;
3799 int blkaddr = ctx->blkaddr;
3800 u32 e_max, e;
3801 u64 reg;
3802
3803 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3804 max_ses = reg & 0xffff;
3805 max_ies = (reg >> 16) & 0xffff;
3806 max_aes = (reg >> 32) & 0xffff;
3807
3808 e_max = max_ses + max_ies + max_aes;
3809
3810 seq_puts(filp, "===========================================\n");
3811 for (e = 0; e < e_max; e++) {
3812 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
3813 seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
3814 reg & 0xff);
3815 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
3816 seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
3817 reg);
3818 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
3819 seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
3820 reg);
3821 seq_puts(filp, "===========================================\n");
3822 }
3823 return 0;
3824 }
3825
3826 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
3827
rvu_dbg_cpt_lfs_info_display(struct seq_file * filp,void * unused)3828 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
3829 {
3830 struct cpt_ctx *ctx = filp->private;
3831 int blkaddr = ctx->blkaddr;
3832 struct rvu *rvu = ctx->rvu;
3833 struct rvu_block *block;
3834 struct rvu_hwinfo *hw;
3835 u64 reg;
3836 u32 lf;
3837
3838 hw = rvu->hw;
3839 block = &hw->block[blkaddr];
3840 if (!block->lf.bmap)
3841 return -ENODEV;
3842
3843 seq_puts(filp, "===========================================\n");
3844 for (lf = 0; lf < block->lf.max; lf++) {
3845 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
3846 seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
3847 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
3848 seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
3849 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
3850 seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
3851 reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
3852 (lf << block->lfshift));
3853 seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
3854 seq_puts(filp, "===========================================\n");
3855 }
3856 return 0;
3857 }
3858
3859 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
3860
rvu_dbg_cpt_err_info_display(struct seq_file * filp,void * unused)3861 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
3862 {
3863 struct cpt_ctx *ctx = filp->private;
3864 struct rvu *rvu = ctx->rvu;
3865 int blkaddr = ctx->blkaddr;
3866 u64 reg0, reg1;
3867
3868 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
3869 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
3870 seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
3871 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
3872 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
3873 seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
3874 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
3875 seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
3876 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
3877 seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
3878 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
3879 seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
3880 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
3881 seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);
3882
3883 return 0;
3884 }
3885
3886 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
3887
rvu_dbg_cpt_pc_display(struct seq_file * filp,void * unused)3888 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
3889 {
3890 struct cpt_ctx *ctx = filp->private;
3891 struct rvu *rvu = ctx->rvu;
3892 int blkaddr = ctx->blkaddr;
3893 u64 reg;
3894
3895 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
3896 seq_printf(filp, "CPT instruction requests %llu\n", reg);
3897 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
3898 seq_printf(filp, "CPT instruction latency %llu\n", reg);
3899 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
3900 seq_printf(filp, "CPT NCB read requests %llu\n", reg);
3901 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
3902 seq_printf(filp, "CPT NCB read latency %llu\n", reg);
3903 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
3904 seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
3905 reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
3906 seq_printf(filp, "CPT active cycles pc %llu\n", reg);
3907 reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
3908 seq_printf(filp, "CPT clock count pc %llu\n", reg);
3909
3910 return 0;
3911 }
3912
3913 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
3914
rvu_dbg_cpt_init(struct rvu * rvu,int blkaddr)3915 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
3916 {
3917 struct cpt_ctx *ctx;
3918
3919 if (!is_block_implemented(rvu->hw, blkaddr))
3920 return;
3921
3922 if (blkaddr == BLKADDR_CPT0) {
3923 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
3924 ctx = &rvu->rvu_dbg.cpt_ctx[0];
3925 ctx->blkaddr = BLKADDR_CPT0;
3926 ctx->rvu = rvu;
3927 } else {
3928 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
3929 rvu->rvu_dbg.root);
3930 ctx = &rvu->rvu_dbg.cpt_ctx[1];
3931 ctx->blkaddr = BLKADDR_CPT1;
3932 ctx->rvu = rvu;
3933 }
3934
3935 debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3936 &rvu_dbg_cpt_pc_fops);
3937 debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3938 &rvu_dbg_cpt_ae_sts_fops);
3939 debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3940 &rvu_dbg_cpt_se_sts_fops);
3941 debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3942 &rvu_dbg_cpt_ie_sts_fops);
3943 debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3944 &rvu_dbg_cpt_engines_info_fops);
3945 debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3946 &rvu_dbg_cpt_lfs_info_fops);
3947 debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3948 &rvu_dbg_cpt_err_info_fops);
3949 }
3950
/* Root debugfs directory name: legacy "octeontx2" on OcteonTx2
 * silicon, "cn10k" on newer parts.
 */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	return is_rvu_otx2(rvu) ? "octeontx2" : "cn10k";
}
3958
rvu_dbg_init(struct rvu * rvu)3959 void rvu_dbg_init(struct rvu *rvu)
3960 {
3961 rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
3962
3963 debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
3964 &rvu_dbg_rsrc_status_fops);
3965
3966 if (!is_rvu_otx2(rvu))
3967 debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
3968 rvu, &rvu_dbg_lmtst_map_table_fops);
3969
3970 debugfs_create_file("rvu_fwdata", 0444, rvu->rvu_dbg.root, rvu,
3971 &rvu_dbg_rvu_fwdata_fops);
3972
3973 if (!cgx_get_cgxcnt_max())
3974 goto create;
3975
3976 if (is_rvu_otx2(rvu))
3977 debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
3978 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3979 else
3980 debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
3981 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3982
3983 create:
3984 rvu_dbg_npa_init(rvu);
3985 rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
3986
3987 rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
3988 rvu_dbg_cgx_init(rvu);
3989 rvu_dbg_npc_init(rvu);
3990 rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
3991 rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
3992 rvu_dbg_mcs_init(rvu);
3993 }
3994
/* Tear down the entire debugfs tree created by rvu_dbg_init();
 * debugfs_remove_recursive() is NULL-safe, so this is harmless even if
 * init never ran.
 */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
3999
4000 #endif /* CONFIG_DEBUG_FS */
4001