xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c (revision a7ddedc84c59a645ef970b992f7cda5bffc70cc0)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function Devlink
3  *
4  * Copyright (C) 2020 Marvell.
5  *
6  */
7 
8 #include <linux/bitfield.h>
9 
10 #include "rvu.h"
11 #include "rvu_reg.h"
12 #include "rvu_struct.h"
13 #include "rvu_npc_hash.h"
14 
15 #define DRV_NAME "octeontx2-af"
16 
/* Open a named "name: { ... }" section in a devlink formatted message.
 * Must be balanced by a matching rvu_report_pair_end().
 */
static void rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
	devlink_fmsg_pair_nest_start(fmsg, name);
	devlink_fmsg_obj_nest_start(fmsg);
}
22 
/* Close the object and pair nests opened by rvu_report_pair_start().
 * Nests unwind in reverse order of creation.
 */
static void rvu_report_pair_end(struct devlink_fmsg *fmsg)
{
	devlink_fmsg_obj_nest_end(fmsg);
	devlink_fmsg_pair_nest_end(fmsg);
}
28 
29 static bool rvu_common_request_irq(struct rvu *rvu, int offset,
30 				   const char *name, irq_handler_t fn)
31 {
32 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
33 	int rc;
34 
35 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
36 	rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
37 			 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
38 	if (rc)
39 		dev_warn(rvu->dev, "Failed to register %s irq\n", name);
40 	else
41 		rvu->irq_allocated[offset] = true;
42 
43 	return rvu->irq_allocated[offset];
44 }
45 
/* Workqueue handler for NIX_AF_RVU events: forwards the context latched
 * by rvu_nix_af_rvu_intr_handler() to the devlink health framework from
 * process (workqueue) context.
 */
static void rvu_nix_intr_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
			      "NIX_AF_RVU Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}
55 
/* Hard-irq handler for NIX_AF_RVU_INT: latch the cause bits into the
 * shared event context, ack them (write-1-to-clear), mask this interrupt
 * class (re-armed by the reporter's recover op) and defer the actual
 * health report to the workqueue.
 */
static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
	/* Snapshot for the dump/report callbacks; a new interrupt of the
	 * same class overwrites this latched value.
	 */
	nix_event_context->nix_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);

	return IRQ_HANDLED;
}
80 
/* Workqueue handler for NIX_AF_GEN events: forwards the context latched
 * by rvu_nix_af_rvu_gen_handler() to the devlink health framework.
 */
static void rvu_nix_gen_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
			      "NIX_AF_GEN Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}
90 
/* Hard-irq handler for NIX_AF_GEN_INT: latch, ack, mask (re-armed by the
 * gen reporter's recover op) and queue deferred reporting.
 */
static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
	nix_event_context->nix_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);

	return IRQ_HANDLED;
}
115 
/* Workqueue handler for NIX_AF_ERR events: forwards the context latched
 * by rvu_nix_af_rvu_err_handler() to the devlink health framework.
 */
static void rvu_nix_err_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
			      "NIX_AF_ERR Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}
125 
/* Hard-irq handler for NIX_AF_ERR_INT: latch, ack, mask (re-armed by the
 * err reporter's recover op) and queue deferred reporting.
 */
static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
	nix_event_context->nix_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);

	return IRQ_HANDLED;
}
150 
/* Workqueue handler for NIX_AF_RAS events: forwards the context latched
 * by rvu_nix_af_rvu_ras_handler() to the devlink health framework.
 */
static void rvu_nix_ras_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
			      "NIX_AF_RAS Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}
160 
161 static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
162 {
163 	struct rvu_nix_event_ctx *nix_event_context;
164 	struct rvu_devlink *rvu_dl = rvu_irq;
165 	struct rvu *rvu;
166 	int blkaddr;
167 	u64 intr;
168 
169 	rvu = rvu_dl->rvu;
170 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
171 	if (blkaddr < 0)
172 		return IRQ_NONE;
173 
174 	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
175 	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
176 	nix_event_context->nix_af_rvu_ras = intr;
177 
178 	/* Clear interrupts */
179 	rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
180 	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
181 	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
182 
183 	return IRQ_HANDLED;
184 }
185 
/* Mask all NIX AF health interrupts and free the vectors registered by
 * rvu_nix_register_interrupts(). Safe to call on a partially registered
 * set: only vectors flagged in rvu->irq_allocated[] are freed.
 */
static void rvu_nix_unregister_interrupts(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	int offs, i, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return;

	/* MSIX vector base for this block; zero means the offsets were
	 * never configured, so nothing was registered.
	 */
	offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
	if (!offs)
		return;

	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);

	if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
		free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
			 rvu_dl);
		rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
	}

	/* Remaining vectors (GEN, ERR, RAS) share the same dev_id. */
	for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
			rvu->irq_allocated[offs + i] = false;
		}
}
216 
217 static int rvu_nix_register_interrupts(struct rvu *rvu)
218 {
219 	int blkaddr, base;
220 	bool rc;
221 
222 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
223 	if (blkaddr < 0)
224 		return blkaddr;
225 
226 	/* Get NIX AF MSIX vectors offset. */
227 	base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
228 	if (!base) {
229 		dev_warn(rvu->dev,
230 			 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
231 			 blkaddr - BLKADDR_NIX0);
232 		return 0;
233 	}
234 	/* Register and enable NIX_AF_RVU_INT interrupt */
235 	rc = rvu_common_request_irq(rvu, base +  NIX_AF_INT_VEC_RVU,
236 				    "NIX_AF_RVU_INT",
237 				    rvu_nix_af_rvu_intr_handler);
238 	if (!rc)
239 		goto err;
240 	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
241 
242 	/* Register and enable NIX_AF_GEN_INT interrupt */
243 	rc = rvu_common_request_irq(rvu, base +  NIX_AF_INT_VEC_GEN,
244 				    "NIX_AF_GEN_INT",
245 				    rvu_nix_af_rvu_gen_handler);
246 	if (!rc)
247 		goto err;
248 	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
249 
250 	/* Register and enable NIX_AF_ERR_INT interrupt */
251 	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
252 				    "NIX_AF_ERR_INT",
253 				    rvu_nix_af_rvu_err_handler);
254 	if (!rc)
255 		goto err;
256 	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
257 
258 	/* Register and enable NIX_AF_RAS interrupt */
259 	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
260 				    "NIX_AF_RAS",
261 				    rvu_nix_af_rvu_ras_handler);
262 	if (!rc)
263 		goto err;
264 	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
265 
266 	return 0;
267 err:
268 	rvu_nix_unregister_interrupts(rvu);
269 	return rc;
270 }
271 
272 static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
273 			       enum nix_af_rvu_health health_reporter)
274 {
275 	struct rvu_nix_event_ctx *nix_event_context;
276 	u64 intr_val;
277 
278 	nix_event_context = ctx;
279 	switch (health_reporter) {
280 	case NIX_AF_RVU_INTR:
281 		intr_val = nix_event_context->nix_af_rvu_int;
282 		rvu_report_pair_start(fmsg, "NIX_AF_RVU");
283 		devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
284 					  nix_event_context->nix_af_rvu_int);
285 		if (intr_val & BIT_ULL(0))
286 			devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
287 		rvu_report_pair_end(fmsg);
288 		break;
289 	case NIX_AF_RVU_GEN:
290 		intr_val = nix_event_context->nix_af_rvu_gen;
291 		rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
292 		devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
293 					  nix_event_context->nix_af_rvu_gen);
294 		if (intr_val & BIT_ULL(0))
295 			devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
296 		if (intr_val & BIT_ULL(1))
297 			devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
298 		if (intr_val & BIT_ULL(4))
299 			devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
300 		rvu_report_pair_end(fmsg);
301 		break;
302 	case NIX_AF_RVU_ERR:
303 		intr_val = nix_event_context->nix_af_rvu_err;
304 		rvu_report_pair_start(fmsg, "NIX_AF_ERR");
305 		devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
306 					  nix_event_context->nix_af_rvu_err);
307 		if (intr_val & BIT_ULL(14))
308 			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
309 		if (intr_val & BIT_ULL(13))
310 			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
311 		if (intr_val & BIT_ULL(12))
312 			devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
313 		if (intr_val & BIT_ULL(6))
314 			devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
315 		if (intr_val & BIT_ULL(5))
316 			devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
317 		if (intr_val & BIT_ULL(4))
318 			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
319 		if (intr_val & BIT_ULL(3))
320 			devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
321 		if (intr_val & BIT_ULL(2))
322 			devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
323 		if (intr_val & BIT_ULL(1))
324 			devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
325 		if (intr_val & BIT_ULL(0))
326 			devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
327 		rvu_report_pair_end(fmsg);
328 		break;
329 	case NIX_AF_RVU_RAS:
330 		intr_val = nix_event_context->nix_af_rvu_err;
331 		rvu_report_pair_start(fmsg, "NIX_AF_RAS");
332 		devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
333 					  nix_event_context->nix_af_rvu_err);
334 		devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
335 		if (intr_val & BIT_ULL(34))
336 			devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
337 		if (intr_val & BIT_ULL(33))
338 			devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
339 		if (intr_val & BIT_ULL(32))
340 			devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
341 		if (intr_val & BIT_ULL(4))
342 			devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
343 		if (intr_val & BIT_ULL(3))
344 			devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
345 		if (intr_val & BIT_ULL(2))
346 			devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
347 		if (intr_val & BIT_ULL(1))
348 			devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
349 		if (intr_val & BIT_ULL(0))
350 			devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
351 		rvu_report_pair_end(fmsg);
352 		break;
353 	default:
354 		return -EINVAL;
355 	}
356 
357 	return 0;
358 }
359 
360 static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
361 				struct devlink_fmsg *fmsg, void *ctx,
362 				struct netlink_ext_ack *netlink_extack)
363 {
364 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
365 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
366 	struct rvu_nix_event_ctx *nix_ctx;
367 
368 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
369 
370 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
371 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
372 }
373 
/* devlink "recover" op for hw_nix_intr: re-arm NIX_AF_RVU_INT (masked by
 * the hard-irq handler) if the latched context shows a pending cause.
 */
static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
				   void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_int)
		rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);

	return 0;
}
390 
391 static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
392 			       struct devlink_fmsg *fmsg, void *ctx,
393 			       struct netlink_ext_ack *netlink_extack)
394 {
395 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
396 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
397 	struct rvu_nix_event_ctx *nix_ctx;
398 
399 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
400 
401 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
402 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
403 }
404 
/* devlink "recover" op for hw_nix_gen: re-arm NIX_AF_GEN_INT if the
 * latched context shows a pending cause.
 */
static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_gen)
		rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);

	return 0;
}
421 
422 static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
423 			       struct devlink_fmsg *fmsg, void *ctx,
424 			       struct netlink_ext_ack *netlink_extack)
425 {
426 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
427 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
428 	struct rvu_nix_event_ctx *nix_ctx;
429 
430 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
431 
432 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
433 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
434 }
435 
/* devlink "recover" op for hw_nix_err: re-arm NIX_AF_ERR_INT if the
 * latched context shows a pending cause.
 */
static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_err)
		rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);

	return 0;
}
452 
453 static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
454 			       struct devlink_fmsg *fmsg, void *ctx,
455 			       struct netlink_ext_ack *netlink_extack)
456 {
457 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
458 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
459 	struct rvu_nix_event_ctx *nix_ctx;
460 
461 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
462 
463 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
464 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
465 }
466 
467 static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
468 				  void *ctx, struct netlink_ext_ack *netlink_extack)
469 {
470 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
471 	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
472 	int blkaddr;
473 
474 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
475 	if (blkaddr < 0)
476 		return blkaddr;
477 
478 	if (nix_event_ctx->nix_af_rvu_int)
479 		rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
480 
481 	return 0;
482 }
483 
/* Instantiate the devlink health reporter ops for each NIX event class.
 * NOTE(review): RVU_REPORTERS() is defined elsewhere (rvu.h?); it
 * presumably bundles the matching *_dump/*_recover callbacks above into
 * a <name>_reporter_ops structure — confirm against the macro definition.
 */
RVU_REPORTERS(hw_nix_intr);
RVU_REPORTERS(hw_nix_gen);
RVU_REPORTERS(hw_nix_err);
RVU_REPORTERS(hw_nix_ras);
488 
489 static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
490 
/* Allocate the NIX health-reporter state, create the four devlink
 * reporters (intr/gen/err/ras), create the shared workqueue and init the
 * deferred-report work items.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): on mid-function failure, objects created earlier
 * (rvu_reporters, nix_event_context, already-created reporters) are not
 * released here, and the caller returns the error without invoking
 * rvu_nix_health_reporters_destroy() — verify the teardown path or this
 * leaks on error.
 */
static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *rvu_reporters;
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	rvu_dl->rvu_nix_health_reporter = rvu_reporters;
	nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
	if (!nix_event_context)
		return -ENOMEM;

	rvu_reporters->nix_event_ctx = nix_event_context;
	rvu_reporters->rvu_hw_nix_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_nix_intr_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
	}

	rvu_reporters->rvu_hw_nix_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_nix_gen_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
	}

	rvu_reporters->rvu_hw_nix_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_nix_err_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
	}

	rvu_reporters->rvu_hw_nix_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_nix_ras_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
	}

	/* Shared by NIX and NPA interrupt handlers for deferred reports. */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);

	return 0;
}
558 
559 static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
560 {
561 	struct rvu *rvu = rvu_dl->rvu;
562 	int err;
563 
564 	err = rvu_nix_register_reporters(rvu_dl);
565 	if (err) {
566 		dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
567 			 err);
568 		return err;
569 	}
570 	rvu_nix_register_interrupts(rvu);
571 
572 	return 0;
573 }
574 
575 static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
576 {
577 	struct rvu_nix_health_reporters *nix_reporters;
578 	struct rvu *rvu = rvu_dl->rvu;
579 
580 	nix_reporters = rvu_dl->rvu_nix_health_reporter;
581 
582 	if (!nix_reporters->rvu_hw_nix_ras_reporter)
583 		return;
584 	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
585 		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);
586 
587 	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
588 		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);
589 
590 	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
591 		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);
592 
593 	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
594 		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);
595 
596 	rvu_nix_unregister_interrupts(rvu);
597 	kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
598 	kfree(rvu_dl->rvu_nix_health_reporter);
599 }
600 
/* Workqueue handler for NPA_AF_RVU events: forwards the context latched
 * by rvu_npa_af_rvu_intr_handler() to the devlink health framework.
 */
static void rvu_npa_intr_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
			      "NPA_AF_RVU Error",
			      rvu_npa_health_reporter->npa_event_ctx);
}
610 
/* Hard-irq handler for NPA_AF_RVU_INT: latch the cause bits, ack them,
 * mask this interrupt class (re-armed by the reporter's recover op) and
 * defer reporting to the workqueue.
 */
static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
	npa_event_context->npa_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);

	return IRQ_HANDLED;
}
635 
/* Workqueue handler for NPA_AF_GEN events: forwards the context latched
 * by rvu_npa_af_gen_intr_handler() to the devlink health framework.
 */
static void rvu_npa_gen_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
			      "NPA_AF_GEN Error",
			      rvu_npa_health_reporter->npa_event_ctx);
}
645 
/* Hard-irq handler for NPA_AF_GEN_INT: latch, ack, mask (re-armed by the
 * gen reporter's recover op) and queue deferred reporting.
 */
static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
	npa_event_context->npa_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);

	return IRQ_HANDLED;
}
670 
/* Workqueue handler for NPA_AF_ERR events: forwards the context latched
 * by rvu_npa_af_err_intr_handler() to the devlink health framework.
 */
static void rvu_npa_err_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
			      "NPA_AF_ERR Error",
			      rvu_npa_health_reporter->npa_event_ctx);
}
680 
/* Hard-irq handler for NPA_AF_ERR_INT: latch, ack, mask (re-armed by the
 * err reporter's recover op) and queue deferred reporting.
 */
static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
	npa_event_context->npa_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);

	return IRQ_HANDLED;
}
704 
/* Workqueue handler for NPA_AF_RAS events: forwards the context latched
 * by rvu_npa_af_ras_intr_handler() to the devlink health framework.
 */
static void rvu_npa_ras_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
			      "HW NPA_AF_RAS Error reported",
			      rvu_npa_health_reporter->npa_event_ctx);
}
714 
/* Hard-irq handler for NPA_AF_RAS (poison) events: latch, ack, mask
 * (re-armed by the ras reporter's recover op) and queue deferred
 * reporting.
 */
static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
	npa_event_context->npa_af_rvu_ras = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);

	return IRQ_HANDLED;
}
739 
740 static void rvu_npa_unregister_interrupts(struct rvu *rvu)
741 {
742 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
743 	int i, offs, blkaddr;
744 	u64 reg;
745 
746 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
747 	if (blkaddr < 0)
748 		return;
749 
750 	reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
751 	offs = reg & 0x3FF;
752 
753 	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
754 	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
755 	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
756 	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
757 
758 	for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
759 		if (rvu->irq_allocated[offs + i]) {
760 			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
761 			rvu->irq_allocated[offs + i] = false;
762 		}
763 }
764 
765 static int rvu_npa_register_interrupts(struct rvu *rvu)
766 {
767 	int blkaddr, base;
768 	bool rc;
769 
770 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
771 	if (blkaddr < 0)
772 		return blkaddr;
773 
774 	/* Get NPA AF MSIX vectors offset. */
775 	base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
776 	if (!base) {
777 		dev_warn(rvu->dev,
778 			 "Failed to get NPA_AF_INT vector offsets\n");
779 		return 0;
780 	}
781 
782 	/* Register and enable NPA_AF_RVU_INT interrupt */
783 	rc = rvu_common_request_irq(rvu, base +  NPA_AF_INT_VEC_RVU,
784 				    "NPA_AF_RVU_INT",
785 				    rvu_npa_af_rvu_intr_handler);
786 	if (!rc)
787 		goto err;
788 	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
789 
790 	/* Register and enable NPA_AF_GEN_INT interrupt */
791 	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
792 				    "NPA_AF_RVU_GEN",
793 				    rvu_npa_af_gen_intr_handler);
794 	if (!rc)
795 		goto err;
796 	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
797 
798 	/* Register and enable NPA_AF_ERR_INT interrupt */
799 	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
800 				    "NPA_AF_ERR_INT",
801 				    rvu_npa_af_err_intr_handler);
802 	if (!rc)
803 		goto err;
804 	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
805 
806 	/* Register and enable NPA_AF_RAS interrupt */
807 	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
808 				    "NPA_AF_RAS",
809 				    rvu_npa_af_ras_intr_handler);
810 	if (!rc)
811 		goto err;
812 	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
813 
814 	return 0;
815 err:
816 	rvu_npa_unregister_interrupts(rvu);
817 	return rc;
818 }
819 
/* Decode one NPA health event into a devlink formatted message.
 * @fmsg: devlink fmsg to emit into
 * @ctx:  struct rvu_npa_event_ctx holding the latched interrupt values
 * @health_reporter: which event class (GEN/ERR/RAS/INTR) to decode
 *
 * Each case prints the raw latched register followed by one line per
 * set cause bit. Returns 0, or -EINVAL for an unknown event class.
 */
static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
			       enum npa_af_rvu_health health_reporter)
{
	struct rvu_npa_event_ctx *npa_event_context;
	unsigned int alloc_dis, free_dis;
	u64 intr_val;

	npa_event_context = ctx;
	switch (health_reporter) {
	case NPA_AF_RVU_GEN:
		intr_val = npa_event_context->npa_af_rvu_gen;
		rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
					  npa_event_context->npa_af_rvu_gen);
		if (intr_val & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");

		/* Bits [15:0] flag "free disabled" per input queue. */
		free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
		if (free_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
		if (free_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
		if (free_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
		if (free_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");

		/* Bits [31:16] flag "alloc disabled" per input queue. */
		alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
		if (alloc_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
		if (alloc_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
		if (alloc_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
		if (alloc_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");

		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_ERR:
		rvu_report_pair_start(fmsg, "NPA_AF_ERR");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
					  npa_event_context->npa_af_rvu_err);
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(14))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(13))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(12))
			devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_RAS:
		rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
					  npa_event_context->npa_af_rvu_ras);
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_INTR:
		rvu_report_pair_start(fmsg, "NPA_AF_RVU");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
					  npa_event_context->npa_af_rvu_int);
		if (npa_event_context->npa_af_rvu_int & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
		rvu_report_pair_end(fmsg);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
913 
914 static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
915 				struct devlink_fmsg *fmsg, void *ctx,
916 				struct netlink_ext_ack *netlink_extack)
917 {
918 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
919 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
920 	struct rvu_npa_event_ctx *npa_ctx;
921 
922 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
923 
924 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
925 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
926 }
927 
928 static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
929 				   void *ctx, struct netlink_ext_ack *netlink_extack)
930 {
931 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
932 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
933 	int blkaddr;
934 
935 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
936 	if (blkaddr < 0)
937 		return blkaddr;
938 
939 	if (npa_event_ctx->npa_af_rvu_int)
940 		rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
941 
942 	return 0;
943 }
944 
945 static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
946 			       struct devlink_fmsg *fmsg, void *ctx,
947 			       struct netlink_ext_ack *netlink_extack)
948 {
949 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
950 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
951 	struct rvu_npa_event_ctx *npa_ctx;
952 
953 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
954 
955 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
956 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
957 }
958 
959 static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
960 				  void *ctx, struct netlink_ext_ack *netlink_extack)
961 {
962 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
963 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
964 	int blkaddr;
965 
966 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
967 	if (blkaddr < 0)
968 		return blkaddr;
969 
970 	if (npa_event_ctx->npa_af_rvu_gen)
971 		rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
972 
973 	return 0;
974 }
975 
976 static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
977 			       struct devlink_fmsg *fmsg, void *ctx,
978 			       struct netlink_ext_ack *netlink_extack)
979 {
980 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
981 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
982 	struct rvu_npa_event_ctx *npa_ctx;
983 
984 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
985 
986 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
987 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
988 }
989 
990 static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
991 				  void *ctx, struct netlink_ext_ack *netlink_extack)
992 {
993 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
994 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
995 	int blkaddr;
996 
997 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
998 	if (blkaddr < 0)
999 		return blkaddr;
1000 
1001 	if (npa_event_ctx->npa_af_rvu_err)
1002 		rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
1003 
1004 	return 0;
1005 }
1006 
1007 static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
1008 			       struct devlink_fmsg *fmsg, void *ctx,
1009 			       struct netlink_ext_ack *netlink_extack)
1010 {
1011 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1012 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1013 	struct rvu_npa_event_ctx *npa_ctx;
1014 
1015 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1016 
1017 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
1018 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
1019 }
1020 
1021 static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
1022 				  void *ctx, struct netlink_ext_ack *netlink_extack)
1023 {
1024 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1025 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1026 	int blkaddr;
1027 
1028 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1029 	if (blkaddr < 0)
1030 		return blkaddr;
1031 
1032 	if (npa_event_ctx->npa_af_rvu_ras)
1033 		rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
1034 
1035 	return 0;
1036 }
1037 
/* Instantiate the devlink health reporter ops for each NPA reporter.
 * RVU_REPORTERS() is defined elsewhere (presumably rvu.h) and is
 * expected to build the *_reporter_ops structures wiring up the
 * rvu_hw_npa_*_dump/_recover callbacks above -- TODO confirm macro body.
 */
RVU_REPORTERS(hw_npa_intr);
RVU_REPORTERS(hw_npa_gen);
RVU_REPORTERS(hw_npa_err);
RVU_REPORTERS(hw_npa_ras);

/* Forward declaration: destroy is referenced before its definition. */
static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
1044 
/* Allocate the NPA health-reporter bookkeeping, create the four devlink
 * health reporters (intr/gen/err/ras), create the shared devlink
 * workqueue and initialize the deferred-report work items.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): on the later failure paths this function returns without
 * freeing what was already allocated; cleanup appears to rely on the
 * caller eventually invoking rvu_npa_health_reporters_destroy() -- but
 * that function bails out early when rvu_hw_npa_ras_reporter is NULL, so
 * a mid-sequence failure looks like it leaks npa_event_context and
 * rvu_reporters. Verify against the teardown path before changing.
 */
static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *rvu_reporters;
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	/* Publish the reporters struct before the event context so the
	 * destroy path can find it.
	 */
	rvu_dl->rvu_npa_health_reporter = rvu_reporters;
	npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
	if (!npa_event_context)
		return -ENOMEM;

	rvu_reporters->npa_event_ctx = npa_event_context;
	rvu_reporters->rvu_hw_npa_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_npa_intr_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
	}

	rvu_reporters->rvu_hw_npa_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_npa_gen_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
	}

	rvu_reporters->rvu_hw_npa_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_npa_err_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
	}

	rvu_reporters->rvu_hw_npa_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl,
					       &rvu_hw_npa_ras_reporter_ops,
					       rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
	}

	/* Shared workqueue used to defer devlink_health_report() calls
	 * out of IRQ context.
	 */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);

	return 0;
}
1112 
1113 static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
1114 {
1115 	struct rvu *rvu = rvu_dl->rvu;
1116 	int err;
1117 
1118 	err = rvu_npa_register_reporters(rvu_dl);
1119 	if (err) {
1120 		dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
1121 			 err);
1122 		return err;
1123 	}
1124 	rvu_npa_register_interrupts(rvu);
1125 
1126 	return 0;
1127 }
1128 
1129 static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
1130 {
1131 	struct rvu_npa_health_reporters *npa_reporters;
1132 	struct rvu *rvu = rvu_dl->rvu;
1133 
1134 	npa_reporters = rvu_dl->rvu_npa_health_reporter;
1135 
1136 	if (!npa_reporters->rvu_hw_npa_ras_reporter)
1137 		return;
1138 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
1139 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
1140 
1141 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
1142 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
1143 
1144 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
1145 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
1146 
1147 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
1148 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
1149 
1150 	rvu_npa_unregister_interrupts(rvu);
1151 	kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
1152 	kfree(rvu_dl->rvu_npa_health_reporter);
1153 }
1154 
1155 static int rvu_health_reporters_create(struct rvu *rvu)
1156 {
1157 	struct rvu_devlink *rvu_dl;
1158 	int err;
1159 
1160 	rvu_dl = rvu->rvu_dl;
1161 	err = rvu_npa_health_reporters_create(rvu_dl);
1162 	if (err)
1163 		return err;
1164 
1165 	return rvu_nix_health_reporters_create(rvu_dl);
1166 }
1167 
1168 static void rvu_health_reporters_destroy(struct rvu *rvu)
1169 {
1170 	struct rvu_devlink *rvu_dl;
1171 
1172 	if (!rvu->rvu_dl)
1173 		return;
1174 
1175 	rvu_dl = rvu->rvu_dl;
1176 	rvu_npa_health_reporters_destroy(rvu_dl);
1177 	rvu_nix_health_reporters_destroy(rvu_dl);
1178 }
1179 
1180 /* Devlink Params APIs */
1181 static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
1182 				       union devlink_param_value val,
1183 				       struct netlink_ext_ack *extack)
1184 {
1185 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1186 	struct rvu *rvu = rvu_dl->rvu;
1187 	int dwrr_mtu = val.vu32;
1188 	struct nix_txsch *txsch;
1189 	struct nix_hw *nix_hw;
1190 
1191 	if (!rvu->hw->cap.nix_common_dwrr_mtu) {
1192 		NL_SET_ERR_MSG_MOD(extack,
1193 				   "Setting DWRR_MTU is not supported on this silicon");
1194 		return -EOPNOTSUPP;
1195 	}
1196 
1197 	if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
1198 	    (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
1199 		NL_SET_ERR_MSG_MOD(extack,
1200 				   "Invalid, supported MTUs are 0,2,4,8.16,32,64....4K,8K,32K,64K and 9728, 10240");
1201 		return -EINVAL;
1202 	}
1203 
1204 	nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
1205 	if (!nix_hw)
1206 		return -ENODEV;
1207 
1208 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1209 	if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
1210 		NL_SET_ERR_MSG_MOD(extack,
1211 				   "Changing DWRR MTU is not supported when there are active NIXLFs");
1212 		NL_SET_ERR_MSG_MOD(extack,
1213 				   "Make sure none of the PF/VF interfaces are initialized and retry");
1214 		return -EOPNOTSUPP;
1215 	}
1216 
1217 	return 0;
1218 }
1219 
1220 static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
1221 				  struct devlink_param_gset_ctx *ctx,
1222 				  struct netlink_ext_ack *extack)
1223 {
1224 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1225 	struct rvu *rvu = rvu_dl->rvu;
1226 	u64 dwrr_mtu;
1227 
1228 	dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
1229 	rvu_write64(rvu, BLKADDR_NIX0,
1230 		    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), dwrr_mtu);
1231 
1232 	return 0;
1233 }
1234 
1235 static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
1236 				  struct devlink_param_gset_ctx *ctx)
1237 {
1238 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1239 	struct rvu *rvu = rvu_dl->rvu;
1240 	u64 dwrr_mtu;
1241 
1242 	if (!rvu->hw->cap.nix_common_dwrr_mtu)
1243 		return -EOPNOTSUPP;
1244 
1245 	dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0,
1246 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
1247 	ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);
1248 
1249 	return 0;
1250 }
1251 
/* Driver-specific devlink parameter IDs; they must start above the
 * generic devlink parameter ID space, hence the BASE entry anchored at
 * DEVLINK_PARAM_GENERIC_ID_MAX.
 */
enum rvu_af_dl_param_id {
	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
	RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
	RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
	RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
	RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
1260 
1261 static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
1262 					struct devlink_param_gset_ctx *ctx)
1263 {
1264 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1265 	struct rvu *rvu = rvu_dl->rvu;
1266 	bool enabled;
1267 
1268 	enabled = rvu_npc_exact_has_match_table(rvu);
1269 
1270 	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
1271 		 enabled ? "enabled" : "disabled");
1272 
1273 	return 0;
1274 }
1275 
1276 static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
1277 					    struct devlink_param_gset_ctx *ctx,
1278 					    struct netlink_ext_ack *extack)
1279 {
1280 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1281 	struct rvu *rvu = rvu_dl->rvu;
1282 
1283 	rvu_npc_exact_disable_feature(rvu);
1284 
1285 	return 0;
1286 }
1287 
1288 static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
1289 					     union devlink_param_value val,
1290 					     struct netlink_ext_ack *extack)
1291 {
1292 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1293 	struct rvu *rvu = rvu_dl->rvu;
1294 	u64 enable;
1295 
1296 	if (kstrtoull(val.vstr, 10, &enable)) {
1297 		NL_SET_ERR_MSG_MOD(extack,
1298 				   "Only 1 value is supported");
1299 		return -EINVAL;
1300 	}
1301 
1302 	if (enable != 1) {
1303 		NL_SET_ERR_MSG_MOD(extack,
1304 				   "Only disabling exact match feature is supported");
1305 		return -EINVAL;
1306 	}
1307 
1308 	if (rvu_npc_exact_can_disable_feature(rvu))
1309 		return 0;
1310 
1311 	NL_SET_ERR_MSG_MOD(extack,
1312 			   "Can't disable exact match feature; Please try before any configuration");
1313 	return -EFAULT;
1314 }
1315 
1316 static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
1317 						    struct devlink_param_gset_ctx *ctx)
1318 {
1319 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1320 	struct rvu *rvu = rvu_dl->rvu;
1321 	struct npc_mcam *mcam;
1322 	u32 percent;
1323 
1324 	mcam = &rvu->hw->mcam;
1325 	percent = (mcam->hprio_count * 100) / mcam->bmap_entries;
1326 	ctx->val.vu8 = (u8)percent;
1327 
1328 	return 0;
1329 }
1330 
1331 static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
1332 						    struct devlink_param_gset_ctx *ctx,
1333 						    struct netlink_ext_ack *extack)
1334 {
1335 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1336 	struct rvu *rvu = rvu_dl->rvu;
1337 	struct npc_mcam *mcam;
1338 	u32 percent;
1339 
1340 	percent = ctx->val.vu8;
1341 	mcam = &rvu->hw->mcam;
1342 	mcam->hprio_count = (mcam->bmap_entries * percent) / 100;
1343 	mcam->hprio_end = mcam->hprio_count;
1344 	mcam->lprio_count = (mcam->bmap_entries - mcam->hprio_count) / 2;
1345 	mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
1346 
1347 	return 0;
1348 }
1349 
1350 static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink, u32 id,
1351 							 union devlink_param_value val,
1352 							 struct netlink_ext_ack *extack)
1353 {
1354 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1355 	struct rvu *rvu = rvu_dl->rvu;
1356 	struct npc_mcam *mcam;
1357 
1358 	/* The percent of high prio zone must range from 12% to 100% of unreserved mcam space */
1359 	if (val.vu8 < 12 || val.vu8 > 100) {
1360 		NL_SET_ERR_MSG_MOD(extack,
1361 				   "mcam high zone percent must be between 12% to 100%");
1362 		return -EINVAL;
1363 	}
1364 
1365 	/* Do not allow user to modify the high priority zone entries while mcam entries
1366 	 * have already been assigned.
1367 	 */
1368 	mcam = &rvu->hw->mcam;
1369 	if (mcam->bmap_fcnt < mcam->bmap_entries) {
1370 		NL_SET_ERR_MSG_MOD(extack,
1371 				   "mcam entries have already been assigned, can't resize");
1372 		return -EPERM;
1373 	}
1374 
1375 	return 0;
1376 }
1377 
1378 static int rvu_af_dl_npc_def_rule_cntr_get(struct devlink *devlink, u32 id,
1379 					   struct devlink_param_gset_ctx *ctx)
1380 {
1381 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1382 	struct rvu *rvu = rvu_dl->rvu;
1383 
1384 	ctx->val.vbool = rvu->def_rule_cntr_en;
1385 
1386 	return 0;
1387 }
1388 
1389 static int rvu_af_dl_npc_def_rule_cntr_set(struct devlink *devlink, u32 id,
1390 					   struct devlink_param_gset_ctx *ctx,
1391 					   struct netlink_ext_ack *extack)
1392 {
1393 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1394 	struct rvu *rvu = rvu_dl->rvu;
1395 	int err;
1396 
1397 	err = npc_config_cntr_default_entries(rvu, ctx->val.vbool);
1398 	if (!err)
1399 		rvu->def_rule_cntr_en = ctx->val.vbool;
1400 
1401 	return err;
1402 }
1403 
1404 static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
1405 				   struct devlink_param_gset_ctx *ctx)
1406 {
1407 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1408 	struct rvu *rvu = rvu_dl->rvu;
1409 
1410 	ctx->val.vu16 = (u16)rvu_get_nixlf_count(rvu);
1411 
1412 	return 0;
1413 }
1414 
/* Apply a new maximum NIX LF count to every NIX block.
 *
 * The NPC MCAM resources are torn down before the per-block LF maximum
 * is changed and re-initialized afterwards -- presumably because MCAM
 * sizing depends on the NIX LF count (TODO confirm against
 * npc_mcam_rsrcs_init()). The validate callback has already checked
 * that no MCAM entries are in use.
 */
static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id,
				   struct devlink_param_gset_ctx *ctx,
				   struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_block *block;
	int blkaddr = 0;

	npc_mcam_rsrcs_deinit(rvu);
	/* Walk every NIX block (NIX0, NIX1, ...) and update its LF max */
	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		block->lf.max = ctx->val.vu16;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	npc_mcam_rsrcs_init(rvu, blkaddr);

	return 0;
}
1437 
1438 static int rvu_af_dl_nix_maxlf_validate(struct devlink *devlink, u32 id,
1439 					union devlink_param_value val,
1440 					struct netlink_ext_ack *extack)
1441 {
1442 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1443 	struct rvu *rvu = rvu_dl->rvu;
1444 	u16 max_nix0_lf, max_nix1_lf;
1445 	struct npc_mcam *mcam;
1446 	u64 cfg;
1447 
1448 	cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
1449 	max_nix0_lf = cfg & 0xFFF;
1450 	cfg = rvu_read64(rvu, BLKADDR_NIX1, NIX_AF_CONST2);
1451 	max_nix1_lf = cfg & 0xFFF;
1452 
1453 	/* Do not allow user to modify maximum NIX LFs while mcam entries
1454 	 * have already been assigned.
1455 	 */
1456 	mcam = &rvu->hw->mcam;
1457 	if (mcam->bmap_fcnt < mcam->bmap_entries) {
1458 		NL_SET_ERR_MSG_MOD(extack,
1459 				   "mcam entries have already been assigned, can't resize");
1460 		return -EPERM;
1461 	}
1462 
1463 	if (max_nix0_lf && val.vu16 > max_nix0_lf) {
1464 		NL_SET_ERR_MSG_MOD(extack,
1465 				   "requested nixlf is greater than the max supported nix0_lf");
1466 		return -EPERM;
1467 	}
1468 
1469 	if (max_nix1_lf && val.vu16 > max_nix1_lf) {
1470 		NL_SET_ERR_MSG_MOD(extack,
1471 				   "requested nixlf is greater than the max supported nix1_lf");
1472 		return -EINVAL;
1473 	}
1474 
1475 	return 0;
1476 }
1477 
/* Runtime devlink parameters always registered for the AF device. */
static const struct devlink_param rvu_af_dl_params[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
			     "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
			     "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_mcam_high_zone_percent_get,
			     rvu_af_dl_npc_mcam_high_zone_percent_set,
			     rvu_af_dl_npc_mcam_high_zone_percent_validate),
	/* No validate callback: any bool is acceptable */
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
			     "npc_def_rule_cntr", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_def_rule_cntr_get,
			     rvu_af_dl_npc_def_rule_cntr_set, NULL),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
			     "nix_maxlf", DEVLINK_PARAM_TYPE_U16,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_nix_maxlf_get,
			     rvu_af_dl_nix_maxlf_set,
			     rvu_af_dl_nix_maxlf_validate),
};
1502 
/* Extra devlink parameter registered only when the silicon has an NPC
 * exact-match table (see rvu_register_dl()).
 */
static const struct devlink_param rvu_af_dl_param_exact_match[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_npc_exact_feature_get,
			     rvu_af_npc_exact_feature_disable,
			     rvu_af_npc_exact_feature_validate),
};
1511 
1512 /* Devlink switch mode */
1513 static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
1514 {
1515 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1516 	struct rvu *rvu = rvu_dl->rvu;
1517 	struct rvu_switch *rswitch;
1518 
1519 	if (rvu->rep_mode)
1520 		return -EOPNOTSUPP;
1521 
1522 	rswitch = &rvu->rswitch;
1523 	*mode = rswitch->mode;
1524 
1525 	return 0;
1526 }
1527 
1528 static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
1529 					struct netlink_ext_ack *extack)
1530 {
1531 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1532 	struct rvu *rvu = rvu_dl->rvu;
1533 	struct rvu_switch *rswitch;
1534 
1535 	rswitch = &rvu->rswitch;
1536 	switch (mode) {
1537 	case DEVLINK_ESWITCH_MODE_LEGACY:
1538 	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
1539 		if (rswitch->mode == mode)
1540 			return 0;
1541 		rswitch->mode = mode;
1542 		if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
1543 			rvu_switch_enable(rvu);
1544 		else
1545 			rvu_switch_disable(rvu);
1546 		break;
1547 	default:
1548 		return -EINVAL;
1549 	}
1550 
1551 	return 0;
1552 }
1553 
/* devlink operations exposed by the AF driver (eswitch mode only). */
static const struct devlink_ops rvu_devlink_ops = {
	.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
	.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
1558 
/* Allocate and register the devlink instance for the AF device:
 * health reporters first, then the common parameters, then (on CN10K-B
 * only) the exact-match parameter, and finally devlink_register() to
 * make the instance visible to userspace.
 *
 * Returns 0 on success or a negative errno; on failure all
 * partially-registered state is unwound and the devlink instance freed.
 */
int rvu_register_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
			   rvu->dev);
	if (!dl) {
		dev_warn(rvu->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	/* Cross-link devlink priv and rvu before creating reporters,
	 * which reach back through rvu->rvu_dl.
	 */
	rvu_dl = devlink_priv(dl);
	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	err = rvu_health_reporters_create(rvu);
	if (err) {
		dev_err(rvu->dev,
			"devlink health reporter creation failed with error %d\n", err);
		goto err_dl_health;
	}

	err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
	if (err) {
		dev_err(rvu->dev,
			"devlink params register failed with error %d", err);
		goto err_dl_health;
	}

	/* Register exact match devlink only for CN10K-B */
	if (!rvu_npc_exact_has_match_table(rvu))
		goto done;

	err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
				      ARRAY_SIZE(rvu_af_dl_param_exact_match));
	if (err) {
		dev_err(rvu->dev,
			"devlink exact match params register failed with error %d", err);
		goto err_dl_exact_match;
	}

done:
	devlink_register(dl);
	return 0;

	/* Unwind order mirrors registration: exact-match params (falls
	 * through), then reporters, then the devlink instance itself.
	 * NOTE(review): rvu->rvu_dl is left pointing at the freed devlink
	 * priv after devlink_free() -- callers apparently never touch it
	 * again on failure, but verify.
	 */
err_dl_exact_match:
	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

err_dl_health:
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
	return err;
}
1615 
/* Tear down the devlink instance: unregister from userspace first, then
 * remove parameters (common, plus exact-match on CN10K-B), destroy the
 * health reporters and free the devlink instance.
 */
void rvu_unregister_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct devlink *dl = rvu_dl->dl;

	devlink_unregister(dl);

	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

	/* Unregister exact match devlink only for CN10K-B */
	if (rvu_npc_exact_has_match_table(rvu))
		devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
					  ARRAY_SIZE(rvu_af_dl_param_exact_match));

	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
}
1633