1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function Devlink
3 *
4 * Copyright (C) 2020 Marvell.
5 *
6 */
7
8 #include <linux/bitfield.h>
9
10 #include "rvu.h"
11 #include "rvu_reg.h"
12 #include "rvu_struct.h"
13 #include "rvu_npc_hash.h"
14
15 #define DRV_NAME "octeontx2-af"
16
/* Begin a "name: { ... }" pair in a devlink formatted message; the caller
 * must close it with rvu_report_pair_end().
 */
static void rvu_report_pair_start(struct devlink_fmsg *f, const char *pair_name)
{
	devlink_fmsg_pair_nest_start(f, pair_name);
	devlink_fmsg_obj_nest_start(f);
}
22
/* Close the object and pair opened by rvu_report_pair_start(). */
static void rvu_report_pair_end(struct devlink_fmsg *f)
{
	devlink_fmsg_obj_nest_end(f);
	devlink_fmsg_pair_nest_end(f);
}
28
rvu_common_request_irq(struct rvu * rvu,int offset,const char * name,irq_handler_t fn)29 static bool rvu_common_request_irq(struct rvu *rvu, int offset,
30 const char *name, irq_handler_t fn)
31 {
32 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
33 int rc;
34
35 sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
36 rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
37 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
38 if (rc)
39 dev_warn(rvu->dev, "Failed to register %s irq\n", name);
40 else
41 rvu->irq_allocated[offset] = true;
42
43 return rvu->irq_allocated[offset];
44 }
45
rvu_nix_intr_work(struct work_struct * work)46 static void rvu_nix_intr_work(struct work_struct *work)
47 {
48 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
49
50 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
51 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
52 "NIX_AF_RVU Error",
53 rvu_nix_health_reporter->nix_event_ctx);
54 }
55
/*
 * Hard-IRQ handler for the NIX AF RVU interrupt vector.
 *
 * Snapshots NIX_AF_RVU_INT into the shared event context, acks the
 * pending bits (write-1-to-clear), masks the whole source, and defers
 * the devlink health report to the devlink workqueue.  The source is
 * re-enabled by rvu_hw_nix_intr_recover().
 */
static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
	/* Saved value is what the dump/recover callbacks will see. */
	nix_event_context->nix_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
	/* Mask the source until recover re-arms it via the W1S register. */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);

	return IRQ_HANDLED;
}
80
rvu_nix_gen_work(struct work_struct * work)81 static void rvu_nix_gen_work(struct work_struct *work)
82 {
83 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
84
85 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
86 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
87 "NIX_AF_GEN Error",
88 rvu_nix_health_reporter->nix_event_ctx);
89 }
90
/*
 * Hard-IRQ handler for the NIX AF general interrupt vector.
 *
 * Snapshots NIX_AF_GEN_INT into the event context, acks the pending
 * bits (W1C), masks the source and defers reporting to the devlink
 * workqueue.  rvu_hw_nix_gen_recover() re-enables the source.
 */
static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
	nix_event_context->nix_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
	/* Mask the source until recover re-arms it via the W1S register. */
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);

	return IRQ_HANDLED;
}
115
rvu_nix_err_work(struct work_struct * work)116 static void rvu_nix_err_work(struct work_struct *work)
117 {
118 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
119
120 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
121 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
122 "NIX_AF_ERR Error",
123 rvu_nix_health_reporter->nix_event_ctx);
124 }
125
/*
 * Hard-IRQ handler for the NIX AF error interrupt vector.
 *
 * Snapshots NIX_AF_ERR_INT into the event context, acks the pending
 * bits (W1C), masks the source and defers reporting to the devlink
 * workqueue.  rvu_hw_nix_err_recover() re-enables the source.
 */
static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
	nix_event_context->nix_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
	/* Mask the source until recover re-arms it via the W1S register. */
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);

	return IRQ_HANDLED;
}
150
rvu_nix_ras_work(struct work_struct * work)151 static void rvu_nix_ras_work(struct work_struct *work)
152 {
153 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
154
155 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
156 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
157 "NIX_AF_RAS Error",
158 rvu_nix_health_reporter->nix_event_ctx);
159 }
160
rvu_nix_af_rvu_ras_handler(int irq,void * rvu_irq)161 static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
162 {
163 struct rvu_nix_event_ctx *nix_event_context;
164 struct rvu_devlink *rvu_dl = rvu_irq;
165 struct rvu *rvu;
166 int blkaddr;
167 u64 intr;
168
169 rvu = rvu_dl->rvu;
170 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
171 if (blkaddr < 0)
172 return IRQ_NONE;
173
174 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
175 intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
176 nix_event_context->nix_af_rvu_ras = intr;
177
178 /* Clear interrupts */
179 rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
180 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
181 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
182
183 return IRQ_HANDLED;
184 }
185
/*
 * Disable and free the NIX AF interrupt vectors used by the health
 * reporters.  Safe to call on a partially-registered state: only
 * vectors flagged in rvu->irq_allocated[] are freed.
 */
static void rvu_nix_unregister_interrupts(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	int offs, i, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return;

	/* MSI-X vector offset of the NIX AF interrupts; zero means the
	 * offset was never configured and nothing was registered.
	 */
	offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
	if (!offs)
		return;

	/* Mask all four sources before freeing their handlers. */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);

	if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
		free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
			 rvu_dl);
		rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
	}

	/* Remaining vectors (GEN, ERR, POISON) share the rvu_dl cookie. */
	for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
			rvu->irq_allocated[offs + i] = false;
		}
}
216
rvu_nix_register_interrupts(struct rvu * rvu)217 static int rvu_nix_register_interrupts(struct rvu *rvu)
218 {
219 int blkaddr, base;
220 bool rc;
221
222 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
223 if (blkaddr < 0)
224 return blkaddr;
225
226 /* Get NIX AF MSIX vectors offset. */
227 base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
228 if (!base) {
229 dev_warn(rvu->dev,
230 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
231 blkaddr - BLKADDR_NIX0);
232 return 0;
233 }
234 /* Register and enable NIX_AF_RVU_INT interrupt */
235 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
236 "NIX_AF_RVU_INT",
237 rvu_nix_af_rvu_intr_handler);
238 if (!rc)
239 goto err;
240 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
241
242 /* Register and enable NIX_AF_GEN_INT interrupt */
243 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
244 "NIX_AF_GEN_INT",
245 rvu_nix_af_rvu_gen_handler);
246 if (!rc)
247 goto err;
248 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
249
250 /* Register and enable NIX_AF_ERR_INT interrupt */
251 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
252 "NIX_AF_ERR_INT",
253 rvu_nix_af_rvu_err_handler);
254 if (!rc)
255 goto err;
256 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
257
258 /* Register and enable NIX_AF_RAS interrupt */
259 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
260 "NIX_AF_RAS",
261 rvu_nix_af_rvu_ras_handler);
262 if (!rc)
263 goto err;
264 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
265
266 return 0;
267 err:
268 rvu_nix_unregister_interrupts(rvu);
269 return rc;
270 }
271
rvu_nix_report_show(struct devlink_fmsg * fmsg,void * ctx,enum nix_af_rvu_health health_reporter)272 static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
273 enum nix_af_rvu_health health_reporter)
274 {
275 struct rvu_nix_event_ctx *nix_event_context;
276 u64 intr_val;
277
278 nix_event_context = ctx;
279 switch (health_reporter) {
280 case NIX_AF_RVU_INTR:
281 intr_val = nix_event_context->nix_af_rvu_int;
282 rvu_report_pair_start(fmsg, "NIX_AF_RVU");
283 devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
284 nix_event_context->nix_af_rvu_int);
285 if (intr_val & BIT_ULL(0))
286 devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
287 rvu_report_pair_end(fmsg);
288 break;
289 case NIX_AF_RVU_GEN:
290 intr_val = nix_event_context->nix_af_rvu_gen;
291 rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
292 devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
293 nix_event_context->nix_af_rvu_gen);
294 if (intr_val & BIT_ULL(0))
295 devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
296 if (intr_val & BIT_ULL(1))
297 devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
298 if (intr_val & BIT_ULL(4))
299 devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
300 rvu_report_pair_end(fmsg);
301 break;
302 case NIX_AF_RVU_ERR:
303 intr_val = nix_event_context->nix_af_rvu_err;
304 rvu_report_pair_start(fmsg, "NIX_AF_ERR");
305 devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
306 nix_event_context->nix_af_rvu_err);
307 if (intr_val & BIT_ULL(14))
308 devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
309 if (intr_val & BIT_ULL(13))
310 devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
311 if (intr_val & BIT_ULL(12))
312 devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
313 if (intr_val & BIT_ULL(6))
314 devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
315 if (intr_val & BIT_ULL(5))
316 devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
317 if (intr_val & BIT_ULL(4))
318 devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
319 if (intr_val & BIT_ULL(3))
320 devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
321 if (intr_val & BIT_ULL(2))
322 devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
323 if (intr_val & BIT_ULL(1))
324 devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
325 if (intr_val & BIT_ULL(0))
326 devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
327 rvu_report_pair_end(fmsg);
328 break;
329 case NIX_AF_RVU_RAS:
330 intr_val = nix_event_context->nix_af_rvu_err;
331 rvu_report_pair_start(fmsg, "NIX_AF_RAS");
332 devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
333 nix_event_context->nix_af_rvu_err);
334 devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
335 if (intr_val & BIT_ULL(34))
336 devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
337 if (intr_val & BIT_ULL(33))
338 devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
339 if (intr_val & BIT_ULL(32))
340 devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
341 if (intr_val & BIT_ULL(4))
342 devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
343 if (intr_val & BIT_ULL(3))
344 devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
345 if (intr_val & BIT_ULL(2))
346 devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
347 if (intr_val & BIT_ULL(1))
348 devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
349 if (intr_val & BIT_ULL(0))
350 devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
351 rvu_report_pair_end(fmsg);
352 break;
353 default:
354 return -EINVAL;
355 }
356
357 return 0;
358 }
359
rvu_hw_nix_intr_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)360 static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
361 struct devlink_fmsg *fmsg, void *ctx,
362 struct netlink_ext_ack *netlink_extack)
363 {
364 struct rvu *rvu = devlink_health_reporter_priv(reporter);
365 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
366 struct rvu_nix_event_ctx *nix_ctx;
367
368 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
369
370 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
371 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
372 }
373
rvu_hw_nix_intr_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)374 static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
375 void *ctx, struct netlink_ext_ack *netlink_extack)
376 {
377 struct rvu *rvu = devlink_health_reporter_priv(reporter);
378 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
379 int blkaddr;
380
381 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
382 if (blkaddr < 0)
383 return blkaddr;
384
385 if (nix_event_ctx->nix_af_rvu_int)
386 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
387
388 return 0;
389 }
390
rvu_hw_nix_gen_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)391 static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
392 struct devlink_fmsg *fmsg, void *ctx,
393 struct netlink_ext_ack *netlink_extack)
394 {
395 struct rvu *rvu = devlink_health_reporter_priv(reporter);
396 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
397 struct rvu_nix_event_ctx *nix_ctx;
398
399 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
400
401 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
402 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
403 }
404
rvu_hw_nix_gen_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)405 static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
406 void *ctx, struct netlink_ext_ack *netlink_extack)
407 {
408 struct rvu *rvu = devlink_health_reporter_priv(reporter);
409 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
410 int blkaddr;
411
412 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
413 if (blkaddr < 0)
414 return blkaddr;
415
416 if (nix_event_ctx->nix_af_rvu_gen)
417 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
418
419 return 0;
420 }
421
rvu_hw_nix_err_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)422 static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
423 struct devlink_fmsg *fmsg, void *ctx,
424 struct netlink_ext_ack *netlink_extack)
425 {
426 struct rvu *rvu = devlink_health_reporter_priv(reporter);
427 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
428 struct rvu_nix_event_ctx *nix_ctx;
429
430 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
431
432 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
433 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
434 }
435
rvu_hw_nix_err_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)436 static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
437 void *ctx, struct netlink_ext_ack *netlink_extack)
438 {
439 struct rvu *rvu = devlink_health_reporter_priv(reporter);
440 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
441 int blkaddr;
442
443 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
444 if (blkaddr < 0)
445 return blkaddr;
446
447 if (nix_event_ctx->nix_af_rvu_err)
448 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
449
450 return 0;
451 }
452
rvu_hw_nix_ras_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)453 static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
454 struct devlink_fmsg *fmsg, void *ctx,
455 struct netlink_ext_ack *netlink_extack)
456 {
457 struct rvu *rvu = devlink_health_reporter_priv(reporter);
458 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
459 struct rvu_nix_event_ctx *nix_ctx;
460
461 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
462
463 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
464 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
465 }
466
rvu_hw_nix_ras_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)467 static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
468 void *ctx, struct netlink_ext_ack *netlink_extack)
469 {
470 struct rvu *rvu = devlink_health_reporter_priv(reporter);
471 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
472 int blkaddr;
473
474 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
475 if (blkaddr < 0)
476 return blkaddr;
477
478 if (nix_event_ctx->nix_af_rvu_int)
479 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
480
481 return 0;
482 }
483
484 RVU_REPORTERS(hw_nix_intr);
485 RVU_REPORTERS(hw_nix_gen);
486 RVU_REPORTERS(hw_nix_err);
487 RVU_REPORTERS(hw_nix_ras);
488
489 static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
490
/*
 * Allocate the NIX event context, create the four devlink health
 * reporters (intr/gen/err/ras), create the shared devlink workqueue and
 * initialize the deferred-report work items.
 *
 * Returns 0 on success, a negative errno otherwise.
 *
 * NOTE(review): on a mid-function failure, earlier allocations and
 * reporters are not torn down here — presumably the caller relies on
 * rvu_nix_health_reporters_destroy() for cleanup; confirm the error
 * path at the call site.
 */
static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *rvu_reporters;
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc_obj(*rvu_reporters);
	if (!rvu_reporters)
		return -ENOMEM;

	/* Publish early so IRQ handlers and destroy can find the state. */
	rvu_dl->rvu_nix_health_reporter = rvu_reporters;
	nix_event_context = kzalloc_obj(*nix_event_context);
	if (!nix_event_context)
		return -ENOMEM;

	rvu_reporters->nix_event_ctx = nix_event_context;
	rvu_reporters->rvu_hw_nix_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_nix_intr_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
	}

	rvu_reporters->rvu_hw_nix_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_nix_gen_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
	}

	rvu_reporters->rvu_hw_nix_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_nix_err_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
	}

	rvu_reporters->rvu_hw_nix_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_nix_ras_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
	}

	/* Workqueue on which the IRQ handlers queue the report work. */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);

	return 0;
}
558
rvu_nix_health_reporters_create(struct rvu_devlink * rvu_dl)559 static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
560 {
561 struct rvu *rvu = rvu_dl->rvu;
562 int err;
563
564 err = rvu_nix_register_reporters(rvu_dl);
565 if (err) {
566 dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
567 err);
568 return err;
569 }
570 rvu_nix_register_interrupts(rvu);
571
572 return 0;
573 }
574
/*
 * Tear down the NIX health reporters, their interrupts and the event
 * context allocated by rvu_nix_register_reporters().
 */
static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *nix_reporters;
	struct rvu *rvu = rvu_dl->rvu;

	nix_reporters = rvu_dl->rvu_nix_health_reporter;

	/* The ras reporter is created last, so a NULL here means setup
	 * never completed and there is nothing to destroy.
	 * NOTE(review): nix_reporters itself is dereferenced unchecked —
	 * presumably this is only called after a successful create;
	 * confirm the caller.
	 */
	if (!nix_reporters->rvu_hw_nix_ras_reporter)
		return;
	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);

	rvu_nix_unregister_interrupts(rvu);
	kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
	kfree(rvu_dl->rvu_nix_health_reporter);
}
600
rvu_npa_intr_work(struct work_struct * work)601 static void rvu_npa_intr_work(struct work_struct *work)
602 {
603 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
604
605 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
606 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
607 "NPA_AF_RVU Error",
608 rvu_npa_health_reporter->npa_event_ctx);
609 }
610
/*
 * Hard-IRQ handler for the NPA AF RVU interrupt vector.
 *
 * Snapshots NPA_AF_RVU_INT into the event context, acks the pending
 * bits (W1C), masks the source and defers reporting to the devlink
 * workqueue.
 */
static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
	npa_event_context->npa_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
	/* Mask the source until a recover callback re-arms it. */
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);

	return IRQ_HANDLED;
}
635
rvu_npa_gen_work(struct work_struct * work)636 static void rvu_npa_gen_work(struct work_struct *work)
637 {
638 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
639
640 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
641 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
642 "NPA_AF_GEN Error",
643 rvu_npa_health_reporter->npa_event_ctx);
644 }
645
/*
 * Hard-IRQ handler for the NPA AF general interrupt vector.
 *
 * Snapshots NPA_AF_GEN_INT into the event context, acks the pending
 * bits (W1C), masks the source and defers reporting to the devlink
 * workqueue.
 */
static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
	npa_event_context->npa_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
	/* Mask the source until a recover callback re-arms it. */
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);

	return IRQ_HANDLED;
}
670
rvu_npa_err_work(struct work_struct * work)671 static void rvu_npa_err_work(struct work_struct *work)
672 {
673 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
674
675 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
676 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
677 "NPA_AF_ERR Error",
678 rvu_npa_health_reporter->npa_event_ctx);
679 }
680
/*
 * Hard-IRQ handler for the NPA AF error interrupt vector.
 *
 * Snapshots NPA_AF_ERR_INT into the event context, acks the pending
 * bits (W1C), masks the source and defers reporting to the devlink
 * workqueue.
 */
static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
	npa_event_context->npa_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
	/* Mask the source until a recover callback re-arms it. */
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);

	return IRQ_HANDLED;
}
704
rvu_npa_ras_work(struct work_struct * work)705 static void rvu_npa_ras_work(struct work_struct *work)
706 {
707 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
708
709 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
710 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
711 "HW NPA_AF_RAS Error reported",
712 rvu_npa_health_reporter->npa_event_ctx);
713 }
714
/*
 * Hard-IRQ handler for the NPA AF RAS (poison) interrupt vector.
 *
 * Snapshots NPA_AF_RAS into the event context, acks the pending bits
 * (W1C), masks the source and defers reporting to the devlink
 * workqueue.
 */
static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
	npa_event_context->npa_af_rvu_ras = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
	/* Mask the source until a recover callback re-arms it. */
	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);

	return IRQ_HANDLED;
}
739
rvu_npa_unregister_interrupts(struct rvu * rvu)740 static void rvu_npa_unregister_interrupts(struct rvu *rvu)
741 {
742 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
743 int i, offs, blkaddr;
744 u64 reg;
745
746 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
747 if (blkaddr < 0)
748 return;
749
750 reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
751 offs = reg & 0x3FF;
752
753 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
754 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
755 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
756 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
757
758 for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
759 if (rvu->irq_allocated[offs + i]) {
760 free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
761 rvu->irq_allocated[offs + i] = false;
762 }
763 }
764
rvu_npa_register_interrupts(struct rvu * rvu)765 static int rvu_npa_register_interrupts(struct rvu *rvu)
766 {
767 int blkaddr, base;
768 bool rc;
769
770 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
771 if (blkaddr < 0)
772 return blkaddr;
773
774 /* Get NPA AF MSIX vectors offset. */
775 base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
776 if (!base) {
777 dev_warn(rvu->dev,
778 "Failed to get NPA_AF_INT vector offsets\n");
779 return 0;
780 }
781
782 /* Register and enable NPA_AF_RVU_INT interrupt */
783 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU,
784 "NPA_AF_RVU_INT",
785 rvu_npa_af_rvu_intr_handler);
786 if (!rc)
787 goto err;
788 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
789
790 /* Register and enable NPA_AF_GEN_INT interrupt */
791 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
792 "NPA_AF_RVU_GEN",
793 rvu_npa_af_gen_intr_handler);
794 if (!rc)
795 goto err;
796 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
797
798 /* Register and enable NPA_AF_ERR_INT interrupt */
799 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
800 "NPA_AF_ERR_INT",
801 rvu_npa_af_err_intr_handler);
802 if (!rc)
803 goto err;
804 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
805
806 /* Register and enable NPA_AF_RAS interrupt */
807 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
808 "NPA_AF_RAS",
809 rvu_npa_af_ras_intr_handler);
810 if (!rc)
811 goto err;
812 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
813
814 return 0;
815 err:
816 rvu_npa_unregister_interrupts(rvu);
817 return rc;
818 }
819
/*
 * Decode one saved NPA interrupt snapshot from @ctx into @fmsg.
 *
 * @health_reporter selects which register snapshot to decode; each case
 * prints the raw 64-bit value followed by a human-readable line per set
 * bit.  Returns 0, or -EINVAL for an unknown reporter id.
 */
static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
			       enum npa_af_rvu_health health_reporter)
{
	struct rvu_npa_event_ctx *npa_event_context;
	unsigned int alloc_dis, free_dis;
	u64 intr_val;

	npa_event_context = ctx;
	switch (health_reporter) {
	case NPA_AF_RVU_GEN:
		intr_val = npa_event_context->npa_af_rvu_gen;
		rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
					  npa_event_context->npa_af_rvu_gen);
		if (intr_val & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");

		/* Bits [15:0]: per-input-queue "free disabled" flags. */
		free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
		if (free_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
		if (free_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
		if (free_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
		if (free_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");

		/* Bits [31:16]: per-input-queue "alloc disabled" flags. */
		alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
		if (alloc_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
		if (alloc_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
		if (alloc_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
		if (alloc_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");

		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_ERR:
		rvu_report_pair_start(fmsg, "NPA_AF_ERR");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
					  npa_event_context->npa_af_rvu_err);
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(14))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(13))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(12))
			devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_RAS:
		rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
					  npa_event_context->npa_af_rvu_ras);
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_INTR:
		rvu_report_pair_start(fmsg, "NPA_AF_RVU");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
					  npa_event_context->npa_af_rvu_int);
		if (npa_event_context->npa_af_rvu_int & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
		rvu_report_pair_end(fmsg);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
913
rvu_hw_npa_intr_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)914 static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
915 struct devlink_fmsg *fmsg, void *ctx,
916 struct netlink_ext_ack *netlink_extack)
917 {
918 struct rvu *rvu = devlink_health_reporter_priv(reporter);
919 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
920 struct rvu_npa_event_ctx *npa_ctx;
921
922 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
923
924 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
925 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
926 }
927
rvu_hw_npa_intr_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)928 static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
929 void *ctx, struct netlink_ext_ack *netlink_extack)
930 {
931 struct rvu *rvu = devlink_health_reporter_priv(reporter);
932 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
933 int blkaddr;
934
935 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
936 if (blkaddr < 0)
937 return blkaddr;
938
939 if (npa_event_ctx->npa_af_rvu_int)
940 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
941
942 return 0;
943 }
944
rvu_hw_npa_gen_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)945 static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
946 struct devlink_fmsg *fmsg, void *ctx,
947 struct netlink_ext_ack *netlink_extack)
948 {
949 struct rvu *rvu = devlink_health_reporter_priv(reporter);
950 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
951 struct rvu_npa_event_ctx *npa_ctx;
952
953 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
954
955 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
956 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
957 }
958
rvu_hw_npa_gen_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)959 static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
960 void *ctx, struct netlink_ext_ack *netlink_extack)
961 {
962 struct rvu *rvu = devlink_health_reporter_priv(reporter);
963 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
964 int blkaddr;
965
966 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
967 if (blkaddr < 0)
968 return blkaddr;
969
970 if (npa_event_ctx->npa_af_rvu_gen)
971 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
972
973 return 0;
974 }
975
rvu_hw_npa_err_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)976 static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
977 struct devlink_fmsg *fmsg, void *ctx,
978 struct netlink_ext_ack *netlink_extack)
979 {
980 struct rvu *rvu = devlink_health_reporter_priv(reporter);
981 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
982 struct rvu_npa_event_ctx *npa_ctx;
983
984 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
985
986 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
987 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
988 }
989
rvu_hw_npa_err_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)990 static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
991 void *ctx, struct netlink_ext_ack *netlink_extack)
992 {
993 struct rvu *rvu = devlink_health_reporter_priv(reporter);
994 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
995 int blkaddr;
996
997 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
998 if (blkaddr < 0)
999 return blkaddr;
1000
1001 if (npa_event_ctx->npa_af_rvu_err)
1002 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
1003
1004 return 0;
1005 }
1006
rvu_hw_npa_ras_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)1007 static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
1008 struct devlink_fmsg *fmsg, void *ctx,
1009 struct netlink_ext_ack *netlink_extack)
1010 {
1011 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1012 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1013 struct rvu_npa_event_ctx *npa_ctx;
1014
1015 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1016
1017 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
1018 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
1019 }
1020
rvu_hw_npa_ras_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)1021 static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
1022 void *ctx, struct netlink_ext_ack *netlink_extack)
1023 {
1024 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1025 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1026 int blkaddr;
1027
1028 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1029 if (blkaddr < 0)
1030 return blkaddr;
1031
1032 if (npa_event_ctx->npa_af_rvu_ras)
1033 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
1034
1035 return 0;
1036 }
1037
/* Instantiate the devlink health-reporter ops for each NPA reporter;
 * the <name>_reporter_ops structures generated here are referenced by
 * rvu_npa_register_reporters() below.
 */
RVU_REPORTERS(hw_npa_intr);
RVU_REPORTERS(hw_npa_gen);
RVU_REPORTERS(hw_npa_err);
RVU_REPORTERS(hw_npa_ras);

/* Forward declaration so earlier teardown paths can reference it. */
static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
1044
/* Allocate the NPA health-reporter bookkeeping and event context,
 * create the four NPA devlink health reporters (intr/gen/err/ras),
 * create the workqueue used by the IRQ handlers to schedule reports,
 * and wire up the per-reporter work items.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): error paths return without freeing earlier
 * allocations or destroying already-created reporters; cleanup is
 * left to rvu_npa_health_reporters_destroy(), which however returns
 * early when registration did not complete, so partial-failure paths
 * appear to leak — confirm and address separately.
 */
static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *rvu_reporters;
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc_obj(*rvu_reporters);
	if (!rvu_reporters)
		return -ENOMEM;

	rvu_dl->rvu_npa_health_reporter = rvu_reporters;
	npa_event_context = kzalloc_obj(*npa_event_context);
	if (!npa_event_context)
		return -ENOMEM;

	rvu_reporters->npa_event_ctx = npa_event_context;
	rvu_reporters->rvu_hw_npa_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_npa_intr_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
	}

	rvu_reporters->rvu_hw_npa_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_npa_gen_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
	}

	rvu_reporters->rvu_hw_npa_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_npa_err_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
	}

	rvu_reporters->rvu_hw_npa_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_npa_ras_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
	}

	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);

	return 0;
}
1112
rvu_npa_health_reporters_create(struct rvu_devlink * rvu_dl)1113 static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
1114 {
1115 struct rvu *rvu = rvu_dl->rvu;
1116 int err;
1117
1118 err = rvu_npa_register_reporters(rvu_dl);
1119 if (err) {
1120 dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
1121 err);
1122 return err;
1123 }
1124 rvu_npa_register_interrupts(rvu);
1125
1126 return 0;
1127 }
1128
rvu_npa_health_reporters_destroy(struct rvu_devlink * rvu_dl)1129 static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
1130 {
1131 struct rvu_npa_health_reporters *npa_reporters;
1132 struct rvu *rvu = rvu_dl->rvu;
1133
1134 npa_reporters = rvu_dl->rvu_npa_health_reporter;
1135
1136 if (!npa_reporters->rvu_hw_npa_ras_reporter)
1137 return;
1138 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
1139 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
1140
1141 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
1142 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
1143
1144 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
1145 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
1146
1147 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
1148 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
1149
1150 rvu_npa_unregister_interrupts(rvu);
1151 kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
1152 kfree(rvu_dl->rvu_npa_health_reporter);
1153 }
1154
rvu_health_reporters_create(struct rvu * rvu)1155 static int rvu_health_reporters_create(struct rvu *rvu)
1156 {
1157 struct rvu_devlink *rvu_dl;
1158 int err;
1159
1160 rvu_dl = rvu->rvu_dl;
1161 err = rvu_npa_health_reporters_create(rvu_dl);
1162 if (err)
1163 return err;
1164
1165 return rvu_nix_health_reporters_create(rvu_dl);
1166 }
1167
rvu_health_reporters_destroy(struct rvu * rvu)1168 static void rvu_health_reporters_destroy(struct rvu *rvu)
1169 {
1170 struct rvu_devlink *rvu_dl;
1171
1172 if (!rvu->rvu_dl)
1173 return;
1174
1175 rvu_dl = rvu->rvu_dl;
1176 rvu_npa_health_reporters_destroy(rvu_dl);
1177 rvu_nix_health_reporters_destroy(rvu_dl);
1178 }
1179
1180 /* Devlink Params APIs */
/* Validate a requested DWRR MTU before rvu_af_dl_dwrr_mtu_set() writes
 * it to hardware.
 *
 * Rejects the request when the silicon lacks a common DWRR MTU
 * register, when the value is neither a power of two up to 64K nor one
 * of the special values 9728/10240, or when any NIX LF is active (all
 * SMQs must be free before the MTU may change).
 *
 * Returns 0 on success or a negative errno with an extack message.
 */
static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
				       union devlink_param_value val,
				       struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	int dwrr_mtu = val.vu32;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.nix_common_dwrr_mtu) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Setting DWRR_MTU is not supported on this silicon");
		return -EOPNOTSUPP;
	}

	if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
	    (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
		/* Fixed typo in the supported-values list: "8.16" -> "8,16" */
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid, supported MTUs are 0,2,4,8,16,32,64....4K,8K,32K,64K and 9728, 10240");
		return -EINVAL;
	}

	nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
	if (!nix_hw)
		return -ENODEV;

	/* All SMQs free <=> no NIX LF initialized; only then is it safe
	 * to change the DWRR MTU.
	 */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing DWRR MTU is not supported when there are active NIXLFs");
		NL_SET_ERR_MSG_MOD(extack,
				   "Make sure none of the PF/VF interfaces are initialized and retry");
		return -EOPNOTSUPP;
	}

	return 0;
}
1219
/* Write the validated DWRR MTU (converted from bytes to the hardware
 * encoding) into the NIX0 AF DWRR MTU register for the RPM link type.
 */
static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx,
				  struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 reg;

	reg = nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM);
	rvu_write64(rvu, BLKADDR_NIX0, reg,
		    convert_bytes_to_dwrr_mtu(ctx->val.vu32));

	return 0;
}
1234
/* Read the current DWRR MTU from the NIX0 AF register and report it in
 * bytes; -EOPNOTSUPP when the silicon has no common DWRR MTU register.
 */
static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx,
				  struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 reg, dwrr_mtu;

	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		return -EOPNOTSUPP;

	reg = nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM);
	dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, reg);
	ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);

	return 0;
}
1252
/* Driver-specific devlink parameter IDs; numbering starts past the
 * generic devlink parameter ID space.
 */
enum rvu_af_dl_param_id {
	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
	RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
	RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
	RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
	RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
1261
/* Report whether the NPC exact-match table feature is active, as the
 * string "enabled" or "disabled".
 */
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
					struct devlink_param_gset_ctx *ctx,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	const char *state;

	state = rvu_npc_exact_has_match_table(rvu_dl->rvu) ?
			"enabled" : "disabled";
	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", state);

	return 0;
}
1277
/* Set handler for the exact-match parameter: the only supported action
 * (enforced by the validate callback) is disabling the feature.
 */
static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
					    struct devlink_param_gset_ctx *ctx,
					    struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	rvu_npc_exact_disable_feature(rvu_dl->rvu);

	return 0;
}
1289
/* Validate the exact-match disable request: the string value must
 * parse to exactly 1, and the feature must still be in a state where
 * disabling is possible (before any configuration was applied).
 */
static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 enable;

	if (kstrtoull(val.vstr, 10, &enable)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 value is supported");
		return -EINVAL;
	}

	if (enable != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only disabling exact match feature is supported");
		return -EINVAL;
	}

	if (!rvu_npc_exact_can_disable_feature(rvu)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't disable exact match feature; Please try before any configuration");
		return -EFAULT;
	}

	return 0;
}
1317
/* Report the high-priority MCAM zone size as a percentage of the
 * total bitmap entries.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx,
						    struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;

	ctx->val.vu8 = (u8)((mcam->hprio_count * 100) / mcam->bmap_entries);

	return 0;
}
1333
/* Resize the MCAM priority zones: the high-priority zone takes the
 * requested percentage of entries, the low-priority zone takes half of
 * what remains (anchored at the end of the bitmap), and the middle
 * zone implicitly gets the rest.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx,
						    struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;
	u32 hprio_entries;

	hprio_entries = (mcam->bmap_entries * ctx->val.vu8) / 100;
	mcam->hprio_count = hprio_entries;
	mcam->hprio_end = hprio_entries;
	mcam->lprio_count = (mcam->bmap_entries - hprio_entries) / 2;
	mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;

	return 0;
}
1352
/* Validate the requested high-priority zone percentage: must lie in
 * [12, 100] and no MCAM entry may currently be allocated (resizing the
 * zones with live entries would corrupt the allocation bookkeeping).
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink, u32 id,
							 union devlink_param_value val,
							 struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;

	/* The percent of high prio zone must range from 12% to 100% of
	 * unreserved mcam space.
	 */
	if (val.vu8 < 12 || val.vu8 > 100) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam high zone percent must be between 12% to 100%");
		return -EINVAL;
	}

	/* A free count below the total means entries are in use. */
	if (mcam->bmap_fcnt < mcam->bmap_entries) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam entries have already been assigned, can't resize");
		return -EPERM;
	}

	return 0;
}
1380
/* Report whether counters on the NPC default rules are enabled. */
static int rvu_af_dl_npc_def_rule_cntr_get(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx,
					   struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	ctx->val.vbool = rvu_dl->rvu->def_rule_cntr_en;

	return 0;
}
1392
/* Enable/disable counters on the NPC default rules; the cached flag is
 * updated only when the hardware configuration succeeded.
 */
static int rvu_af_dl_npc_def_rule_cntr_set(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx,
					   struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	int err;

	err = npc_config_cntr_default_entries(rvu, ctx->val.vbool);
	if (err)
		return err;

	rvu->def_rule_cntr_en = ctx->val.vbool;
	return 0;
}
1407
/* Report the current NIX LF count. */
static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
				   struct devlink_param_gset_ctx *ctx,
				   struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	ctx->val.vu16 = (u16)rvu_get_nixlf_count(rvu_dl->rvu);

	return 0;
}
1419
/* Apply a new maximum NIX LF count to every NIX block, then rebuild
 * the MCAM resources, whose sizing depends on the LF count (the
 * validate callback has already ensured no MCAM entry is in use).
 */
static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id,
				   struct devlink_param_gset_ctx *ctx,
				   struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	int blkaddr;

	/* Tear down MCAM state before changing the LF count it is
	 * derived from.
	 */
	npc_mcam_rsrcs_deinit(rvu);

	for (blkaddr = rvu_get_next_nix_blkaddr(rvu, 0); blkaddr;
	     blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr))
		rvu->hw->block[blkaddr].lf.max = ctx->val.vu16;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	npc_mcam_rsrcs_init(rvu, blkaddr);

	return 0;
}
1442
/* Validate a requested maximum NIX LF count: it must not exceed the
 * hardware limit of either NIX block (read from NIX_AF_CONST2), and no
 * MCAM entry may currently be allocated, since changing the LF count
 * forces an MCAM resource rebuild.
 *
 * Returns 0 on success or a negative errno with an extack message.
 */
static int rvu_af_dl_nix_maxlf_validate(struct devlink *devlink, u32 id,
					union devlink_param_value val,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u16 max_nix0_lf, max_nix1_lf;
	struct npc_mcam *mcam;
	u64 cfg;

	cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
	max_nix0_lf = cfg & 0xFFF;
	cfg = rvu_read64(rvu, BLKADDR_NIX1, NIX_AF_CONST2);
	max_nix1_lf = cfg & 0xFFF;

	/* Do not allow user to modify maximum NIX LFs while mcam entries
	 * have already been assigned.
	 */
	mcam = &rvu->hw->mcam;
	if (mcam->bmap_fcnt < mcam->bmap_entries) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam entries have already been assigned, can't resize");
		return -EPERM;
	}

	if (max_nix0_lf && val.vu16 > max_nix0_lf) {
		NL_SET_ERR_MSG_MOD(extack,
				   "requested nixlf is greater than the max supported nix0_lf");
		return -EPERM;
	}

	if (max_nix1_lf && val.vu16 > max_nix1_lf) {
		NL_SET_ERR_MSG_MOD(extack,
				   "requested nixlf is greater than the max supported nix1_lf");
		/* Was -EINVAL; use -EPERM to match the identical NIX0
		 * check above.
		 */
		return -EPERM;
	}

	return 0;
}
1482
/* Runtime devlink parameters always registered for the AF device:
 * DWRR MTU, MCAM high-zone sizing, default-rule counters and the
 * maximum NIX LF count.
 */
static const struct devlink_param rvu_af_dl_params[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
			     "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
			     "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_mcam_high_zone_percent_get,
			     rvu_af_dl_npc_mcam_high_zone_percent_set,
			     rvu_af_dl_npc_mcam_high_zone_percent_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
			     "npc_def_rule_cntr", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_def_rule_cntr_get,
			     rvu_af_dl_npc_def_rule_cntr_set, NULL),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
			     "nix_maxlf", DEVLINK_PARAM_TYPE_U16,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_nix_maxlf_get,
			     rvu_af_dl_nix_maxlf_set,
			     rvu_af_dl_nix_maxlf_validate),
};
1507
/* Exact-match parameter, registered separately because it only exists
 * on silicon with an exact-match table (see rvu_register_dl()).
 */
static const struct devlink_param rvu_af_dl_param_exact_match[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_npc_exact_feature_get,
			     rvu_af_npc_exact_feature_disable,
			     rvu_af_npc_exact_feature_validate),
};
1516
1517 /* Devlink switch mode */
/* Report the current eswitch mode; not supported when the device runs
 * in representor mode.
 */
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;

	if (rvu->rep_mode)
		return -EOPNOTSUPP;

	*mode = rvu->rswitch.mode;

	return 0;
}
1532
/* Switch between legacy and switchdev eswitch modes; a no-op when the
 * requested mode is already active.
 */
static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_switch *rswitch = &rvu->rswitch;

	if (mode != DEVLINK_ESWITCH_MODE_LEGACY &&
	    mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EINVAL;

	if (rswitch->mode == mode)
		return 0;

	rswitch->mode = mode;
	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		rvu_switch_enable(rvu);
	else
		rvu_switch_disable(rvu);

	return 0;
}
1558
/* devlink operations for the AF device: only eswitch mode get/set are
 * implemented.
 */
static const struct devlink_ops rvu_devlink_ops = {
	.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
	.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
1563
/* Allocate and register the devlink instance for the AF device:
 * create the health reporters, register the always-present parameter
 * set, conditionally register the exact-match parameter (CN10K-B
 * only), then publish the devlink instance.
 *
 * Returns 0 on success or a negative errno; on failure the devlink
 * instance and reporters are torn down again.
 *
 * NOTE(review): on the error paths rvu->rvu_dl is left pointing into
 * the freed devlink private area after devlink_free() — harmless if
 * probe aborts and rvu_unregister_dl() is never called, but worth
 * confirming.
 */
int rvu_register_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
			   rvu->dev);
	if (!dl) {
		dev_warn(rvu->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	/* Cross-link the devlink private data and the rvu instance. */
	rvu_dl = devlink_priv(dl);
	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	err = rvu_health_reporters_create(rvu);
	if (err) {
		dev_err(rvu->dev,
			"devlink health reporter creation failed with error %d\n", err);
		goto err_dl_health;
	}

	err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
	if (err) {
		dev_err(rvu->dev,
			"devlink params register failed with error %d", err);
		goto err_dl_health;
	}

	/* Register exact match devlink only for CN10K-B */
	if (!rvu_npc_exact_has_match_table(rvu))
		goto done;

	err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
				      ARRAY_SIZE(rvu_af_dl_param_exact_match));
	if (err) {
		dev_err(rvu->dev,
			"devlink exact match params register failed with error %d", err);
		goto err_dl_exact_match;
	}

done:
	devlink_register(dl);
	return 0;

err_dl_exact_match:
	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

err_dl_health:
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
	return err;
}
1620
rvu_unregister_dl(struct rvu * rvu)1621 void rvu_unregister_dl(struct rvu *rvu)
1622 {
1623 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1624 struct devlink *dl = rvu_dl->dl;
1625
1626 devlink_unregister(dl);
1627
1628 devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
1629
1630 /* Unregister exact match devlink only for CN10K-B */
1631 if (rvu_npc_exact_has_match_table(rvu))
1632 devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
1633 ARRAY_SIZE(rvu_af_dl_param_exact_match));
1634
1635 rvu_health_reporters_destroy(rvu);
1636 devlink_free(dl);
1637 }
1638