xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c (revision 7e17d44d61921cab8e52cb30cc082e3ee96abddd)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function Devlink
3  *
4  * Copyright (C) 2020 Marvell.
5  *
6  */
7 
8 #include<linux/bitfield.h>
9 
10 #include "rvu.h"
11 #include "rvu_reg.h"
12 #include "rvu_struct.h"
13 #include "rvu_npc_hash.h"
14 
15 #define DRV_NAME "octeontx2-af"
16 
/* Open a named pair + object nest in the devlink fmsg stream; each
 * report section is closed by rvu_report_pair_end().
 */
static void rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
	devlink_fmsg_pair_nest_start(fmsg, name);
	devlink_fmsg_obj_nest_start(fmsg);
}
22 
/* Close the object + pair nests opened by rvu_report_pair_start(). */
static void rvu_report_pair_end(struct devlink_fmsg *fmsg)
{
	devlink_fmsg_obj_nest_end(fmsg);
	devlink_fmsg_pair_nest_end(fmsg);
}
28 
29 static bool rvu_common_request_irq(struct rvu *rvu, int offset,
30 				   const char *name, irq_handler_t fn)
31 {
32 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
33 	int rc;
34 
35 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
36 	rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
37 			 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
38 	if (rc)
39 		dev_warn(rvu->dev, "Failed to register %s irq\n", name);
40 	else
41 		rvu->irq_allocated[offset] = true;
42 
43 	return rvu->irq_allocated[offset];
44 }
45 
46 static void rvu_nix_intr_work(struct work_struct *work)
47 {
48 	struct rvu_nix_health_reporters *rvu_nix_health_reporter;
49 
50 	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
51 	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
52 			      "NIX_AF_RVU Error",
53 			      rvu_nix_health_reporter->nix_event_ctx);
54 }
55 
/* NIX_AF_RVU_INT hard IRQ handler: latch the interrupt cause into the
 * event context, ack it, mask further NIX_AF_RVU interrupts and defer
 * the devlink health report to rvu_nix_intr_work(). The interrupt stays
 * masked until the reporter's recover op re-enables it.
 */
static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Latch the cause bits for the deferred report/dump. */
	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
	nix_event_context->nix_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);

	return IRQ_HANDLED;
}
80 
81 static void rvu_nix_gen_work(struct work_struct *work)
82 {
83 	struct rvu_nix_health_reporters *rvu_nix_health_reporter;
84 
85 	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
86 	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
87 			      "NIX_AF_GEN Error",
88 			      rvu_nix_health_reporter->nix_event_ctx);
89 }
90 
/* NIX_AF_GEN_INT hard IRQ handler: latch the cause, ack it, mask further
 * general interrupts and defer the health report to rvu_nix_gen_work().
 */
static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Latch the cause bits for the deferred report/dump. */
	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
	nix_event_context->nix_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);

	return IRQ_HANDLED;
}
115 
116 static void rvu_nix_err_work(struct work_struct *work)
117 {
118 	struct rvu_nix_health_reporters *rvu_nix_health_reporter;
119 
120 	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
121 	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
122 			      "NIX_AF_ERR Error",
123 			      rvu_nix_health_reporter->nix_event_ctx);
124 }
125 
/* NIX_AF_ERR_INT hard IRQ handler: latch the cause, ack it, mask further
 * error interrupts and defer the health report to rvu_nix_err_work().
 */
static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Latch the cause bits for the deferred report/dump. */
	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
	nix_event_context->nix_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);

	return IRQ_HANDLED;
}
150 
151 static void rvu_nix_ras_work(struct work_struct *work)
152 {
153 	struct rvu_nix_health_reporters *rvu_nix_health_reporter;
154 
155 	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
156 	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
157 			      "NIX_AF_RAS Error",
158 			      rvu_nix_health_reporter->nix_event_ctx);
159 }
160 
161 static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
162 {
163 	struct rvu_nix_event_ctx *nix_event_context;
164 	struct rvu_devlink *rvu_dl = rvu_irq;
165 	struct rvu *rvu;
166 	int blkaddr;
167 	u64 intr;
168 
169 	rvu = rvu_dl->rvu;
170 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
171 	if (blkaddr < 0)
172 		return IRQ_NONE;
173 
174 	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
175 	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
176 	nix_event_context->nix_af_rvu_ras = intr;
177 
178 	/* Clear interrupts */
179 	rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
180 	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
181 	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
182 
183 	return IRQ_HANDLED;
184 }
185 
/* Mask all NIX AF health interrupts and free the vectors this file
 * registered (RVU, AF_ERR, POISON).
 *
 * NOTE(review): the loop starts at NIX_AF_INT_VEC_AF_ERR, so the
 * NIX_AF_INT_VEC_GEN vector registered in rvu_nix_register_interrupts()
 * does not appear to be freed here — verify against the
 * nix_af_int_vec_e enum ordering in rvu_struct.h.
 */
static void rvu_nix_unregister_interrupts(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	int offs, i, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return;

	/* MSIX vector base for this NIX block; zero means it was never
	 * programmed, so nothing was registered.
	 */
	offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
	if (!offs)
		return;

	/* Mask everything before freeing the handlers. */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);

	if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
		free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
			 rvu_dl);
		rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
	}

	for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
			rvu->irq_allocated[offs + i] = false;
		}
}
216 
217 static int rvu_nix_register_interrupts(struct rvu *rvu)
218 {
219 	int blkaddr, base;
220 	bool rc;
221 
222 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
223 	if (blkaddr < 0)
224 		return blkaddr;
225 
226 	/* Get NIX AF MSIX vectors offset. */
227 	base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
228 	if (!base) {
229 		dev_warn(rvu->dev,
230 			 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
231 			 blkaddr - BLKADDR_NIX0);
232 		return 0;
233 	}
234 	/* Register and enable NIX_AF_RVU_INT interrupt */
235 	rc = rvu_common_request_irq(rvu, base +  NIX_AF_INT_VEC_RVU,
236 				    "NIX_AF_RVU_INT",
237 				    rvu_nix_af_rvu_intr_handler);
238 	if (!rc)
239 		goto err;
240 	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
241 
242 	/* Register and enable NIX_AF_GEN_INT interrupt */
243 	rc = rvu_common_request_irq(rvu, base +  NIX_AF_INT_VEC_GEN,
244 				    "NIX_AF_GEN_INT",
245 				    rvu_nix_af_rvu_gen_handler);
246 	if (!rc)
247 		goto err;
248 	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
249 
250 	/* Register and enable NIX_AF_ERR_INT interrupt */
251 	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
252 				    "NIX_AF_ERR_INT",
253 				    rvu_nix_af_rvu_err_handler);
254 	if (!rc)
255 		goto err;
256 	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
257 
258 	/* Register and enable NIX_AF_RAS interrupt */
259 	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
260 				    "NIX_AF_RAS",
261 				    rvu_nix_af_rvu_ras_handler);
262 	if (!rc)
263 		goto err;
264 	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
265 
266 	return 0;
267 err:
268 	rvu_nix_unregister_interrupts(rvu);
269 	return rc;
270 }
271 
272 static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
273 			       enum nix_af_rvu_health health_reporter)
274 {
275 	struct rvu_nix_event_ctx *nix_event_context;
276 	u64 intr_val;
277 
278 	nix_event_context = ctx;
279 	switch (health_reporter) {
280 	case NIX_AF_RVU_INTR:
281 		intr_val = nix_event_context->nix_af_rvu_int;
282 		rvu_report_pair_start(fmsg, "NIX_AF_RVU");
283 		devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
284 					  nix_event_context->nix_af_rvu_int);
285 		if (intr_val & BIT_ULL(0))
286 			devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
287 		rvu_report_pair_end(fmsg);
288 		break;
289 	case NIX_AF_RVU_GEN:
290 		intr_val = nix_event_context->nix_af_rvu_gen;
291 		rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
292 		devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
293 					  nix_event_context->nix_af_rvu_gen);
294 		if (intr_val & BIT_ULL(0))
295 			devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
296 		if (intr_val & BIT_ULL(1))
297 			devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
298 		if (intr_val & BIT_ULL(4))
299 			devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
300 		rvu_report_pair_end(fmsg);
301 		break;
302 	case NIX_AF_RVU_ERR:
303 		intr_val = nix_event_context->nix_af_rvu_err;
304 		rvu_report_pair_start(fmsg, "NIX_AF_ERR");
305 		devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
306 					  nix_event_context->nix_af_rvu_err);
307 		if (intr_val & BIT_ULL(14))
308 			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
309 		if (intr_val & BIT_ULL(13))
310 			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
311 		if (intr_val & BIT_ULL(12))
312 			devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
313 		if (intr_val & BIT_ULL(6))
314 			devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
315 		if (intr_val & BIT_ULL(5))
316 			devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
317 		if (intr_val & BIT_ULL(4))
318 			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
319 		if (intr_val & BIT_ULL(3))
320 			devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
321 		if (intr_val & BIT_ULL(2))
322 			devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
323 		if (intr_val & BIT_ULL(1))
324 			devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
325 		if (intr_val & BIT_ULL(0))
326 			devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
327 		rvu_report_pair_end(fmsg);
328 		break;
329 	case NIX_AF_RVU_RAS:
330 		intr_val = nix_event_context->nix_af_rvu_err;
331 		rvu_report_pair_start(fmsg, "NIX_AF_RAS");
332 		devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
333 					  nix_event_context->nix_af_rvu_err);
334 		devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
335 		if (intr_val & BIT_ULL(34))
336 			devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
337 		if (intr_val & BIT_ULL(33))
338 			devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
339 		if (intr_val & BIT_ULL(32))
340 			devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
341 		if (intr_val & BIT_ULL(4))
342 			devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
343 		if (intr_val & BIT_ULL(3))
344 			devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
345 		if (intr_val & BIT_ULL(2))
346 			devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
347 		if (intr_val & BIT_ULL(1))
348 			devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
349 		if (intr_val & BIT_ULL(0))
350 			devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
351 		rvu_report_pair_end(fmsg);
352 		break;
353 	default:
354 		return -EINVAL;
355 	}
356 
357 	return 0;
358 }
359 
360 static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
361 				struct devlink_fmsg *fmsg, void *ctx,
362 				struct netlink_ext_ack *netlink_extack)
363 {
364 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
365 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
366 	struct rvu_nix_event_ctx *nix_ctx;
367 
368 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
369 
370 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
371 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
372 }
373 
374 static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
375 				   void *ctx, struct netlink_ext_ack *netlink_extack)
376 {
377 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
378 	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
379 	int blkaddr;
380 
381 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
382 	if (blkaddr < 0)
383 		return blkaddr;
384 
385 	if (nix_event_ctx->nix_af_rvu_int)
386 		rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
387 
388 	return 0;
389 }
390 
391 static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
392 			       struct devlink_fmsg *fmsg, void *ctx,
393 			       struct netlink_ext_ack *netlink_extack)
394 {
395 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
396 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
397 	struct rvu_nix_event_ctx *nix_ctx;
398 
399 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
400 
401 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
402 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
403 }
404 
405 static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
406 				  void *ctx, struct netlink_ext_ack *netlink_extack)
407 {
408 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
409 	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
410 	int blkaddr;
411 
412 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
413 	if (blkaddr < 0)
414 		return blkaddr;
415 
416 	if (nix_event_ctx->nix_af_rvu_gen)
417 		rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
418 
419 	return 0;
420 }
421 
422 static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
423 			       struct devlink_fmsg *fmsg, void *ctx,
424 			       struct netlink_ext_ack *netlink_extack)
425 {
426 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
427 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
428 	struct rvu_nix_event_ctx *nix_ctx;
429 
430 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
431 
432 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
433 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
434 }
435 
436 static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
437 				  void *ctx, struct netlink_ext_ack *netlink_extack)
438 {
439 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
440 	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
441 	int blkaddr;
442 
443 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
444 	if (blkaddr < 0)
445 		return blkaddr;
446 
447 	if (nix_event_ctx->nix_af_rvu_err)
448 		rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
449 
450 	return 0;
451 }
452 
453 static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
454 			       struct devlink_fmsg *fmsg, void *ctx,
455 			       struct netlink_ext_ack *netlink_extack)
456 {
457 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
458 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
459 	struct rvu_nix_event_ctx *nix_ctx;
460 
461 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
462 
463 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
464 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
465 }
466 
467 static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
468 				  void *ctx, struct netlink_ext_ack *netlink_extack)
469 {
470 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
471 	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
472 	int blkaddr;
473 
474 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
475 	if (blkaddr < 0)
476 		return blkaddr;
477 
478 	if (nix_event_ctx->nix_af_rvu_int)
479 		rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
480 
481 	return 0;
482 }
483 
/* Instantiate the devlink_health_reporter_ops for each NIX reporter from
 * the matching *_dump/*_recover pairs above. NOTE(review): RVU_REPORTERS
 * is defined elsewhere (rvu.h/rvu_devlink.h presumably) — confirm it
 * emits rvu_<name>_reporter_ops as used by rvu_nix_register_reporters().
 */
RVU_REPORTERS(hw_nix_intr);
RVU_REPORTERS(hw_nix_gen);
RVU_REPORTERS(hw_nix_err);
RVU_REPORTERS(hw_nix_ras);

static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
490 
/* Allocate the NIX health-reporter state, create the four devlink health
 * reporters, then set up the workqueue and deferred-work items used by
 * the interrupt handlers.
 *
 * NOTE(review): the early error returns leave previously allocated
 * memory and created reporters in place (e.g. rvu_reporters is not freed
 * when nix_event_context allocation fails) — verify whether callers run
 * rvu_nix_health_reporters_destroy() on failure, otherwise these leak.
 */
static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *rvu_reporters;
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	rvu_dl->rvu_nix_health_reporter = rvu_reporters;
	nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
	if (!nix_event_context)
		return -ENOMEM;

	rvu_reporters->nix_event_ctx = nix_event_context;
	rvu_reporters->rvu_hw_nix_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
	}

	rvu_reporters->rvu_hw_nix_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
	}

	rvu_reporters->rvu_hw_nix_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
	}

	rvu_reporters->rvu_hw_nix_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
	}

	/* Workqueue used by the IRQ handlers to defer health reports. */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		goto err;

	INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);

	return 0;
err:
	rvu_nix_health_reporters_destroy(rvu_dl);
	return -ENOMEM;
}
553 
554 static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
555 {
556 	struct rvu *rvu = rvu_dl->rvu;
557 	int err;
558 
559 	err = rvu_nix_register_reporters(rvu_dl);
560 	if (err) {
561 		dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
562 			 err);
563 		return err;
564 	}
565 	rvu_nix_register_interrupts(rvu);
566 
567 	return 0;
568 }
569 
570 static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
571 {
572 	struct rvu_nix_health_reporters *nix_reporters;
573 	struct rvu *rvu = rvu_dl->rvu;
574 
575 	nix_reporters = rvu_dl->rvu_nix_health_reporter;
576 
577 	if (!nix_reporters->rvu_hw_nix_ras_reporter)
578 		return;
579 	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
580 		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);
581 
582 	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
583 		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);
584 
585 	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
586 		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);
587 
588 	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
589 		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);
590 
591 	rvu_nix_unregister_interrupts(rvu);
592 	kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
593 	kfree(rvu_dl->rvu_nix_health_reporter);
594 }
595 
596 static void rvu_npa_intr_work(struct work_struct *work)
597 {
598 	struct rvu_npa_health_reporters *rvu_npa_health_reporter;
599 
600 	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
601 	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
602 			      "NPA_AF_RVU Error",
603 			      rvu_npa_health_reporter->npa_event_ctx);
604 }
605 
/* NPA_AF_RVU_INT hard IRQ handler: latch the cause, ack it, mask further
 * NPA_AF_RVU interrupts and defer the health report to
 * rvu_npa_intr_work(). The interrupt stays masked until the reporter's
 * recover op re-enables it.
 */
static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Latch the cause bits for the deferred report/dump. */
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
	npa_event_context->npa_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);

	return IRQ_HANDLED;
}
630 
631 static void rvu_npa_gen_work(struct work_struct *work)
632 {
633 	struct rvu_npa_health_reporters *rvu_npa_health_reporter;
634 
635 	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
636 	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
637 			      "NPA_AF_GEN Error",
638 			      rvu_npa_health_reporter->npa_event_ctx);
639 }
640 
/* NPA_AF_GEN_INT hard IRQ handler: latch the cause, ack it, mask further
 * general interrupts and defer the health report to rvu_npa_gen_work().
 */
static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Latch the cause bits for the deferred report/dump. */
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
	npa_event_context->npa_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);

	return IRQ_HANDLED;
}
665 
666 static void rvu_npa_err_work(struct work_struct *work)
667 {
668 	struct rvu_npa_health_reporters *rvu_npa_health_reporter;
669 
670 	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
671 	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
672 			      "NPA_AF_ERR Error",
673 			      rvu_npa_health_reporter->npa_event_ctx);
674 }
675 
/* NPA_AF_ERR_INT hard IRQ handler: latch the cause, ack it, mask further
 * error interrupts and defer the health report to rvu_npa_err_work().
 */
static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;
	/* Latch the cause bits for the deferred report/dump. */
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
	npa_event_context->npa_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);

	return IRQ_HANDLED;
}
699 
700 static void rvu_npa_ras_work(struct work_struct *work)
701 {
702 	struct rvu_npa_health_reporters *rvu_npa_health_reporter;
703 
704 	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
705 	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
706 			      "HW NPA_AF_RAS Error reported",
707 			      rvu_npa_health_reporter->npa_event_ctx);
708 }
709 
/* NPA_AF_RAS (poison) hard IRQ handler: latch the cause, ack it, mask
 * further RAS interrupts and defer the health report to
 * rvu_npa_ras_work().
 */
static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Latch the cause bits for the deferred report/dump. */
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
	npa_event_context->npa_af_rvu_ras = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);

	return IRQ_HANDLED;
}
734 
735 static void rvu_npa_unregister_interrupts(struct rvu *rvu)
736 {
737 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
738 	int i, offs, blkaddr;
739 	u64 reg;
740 
741 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
742 	if (blkaddr < 0)
743 		return;
744 
745 	reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
746 	offs = reg & 0x3FF;
747 
748 	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
749 	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
750 	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
751 	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
752 
753 	for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
754 		if (rvu->irq_allocated[offs + i]) {
755 			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
756 			rvu->irq_allocated[offs + i] = false;
757 		}
758 }
759 
760 static int rvu_npa_register_interrupts(struct rvu *rvu)
761 {
762 	int blkaddr, base;
763 	bool rc;
764 
765 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
766 	if (blkaddr < 0)
767 		return blkaddr;
768 
769 	/* Get NPA AF MSIX vectors offset. */
770 	base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
771 	if (!base) {
772 		dev_warn(rvu->dev,
773 			 "Failed to get NPA_AF_INT vector offsets\n");
774 		return 0;
775 	}
776 
777 	/* Register and enable NPA_AF_RVU_INT interrupt */
778 	rc = rvu_common_request_irq(rvu, base +  NPA_AF_INT_VEC_RVU,
779 				    "NPA_AF_RVU_INT",
780 				    rvu_npa_af_rvu_intr_handler);
781 	if (!rc)
782 		goto err;
783 	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
784 
785 	/* Register and enable NPA_AF_GEN_INT interrupt */
786 	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
787 				    "NPA_AF_RVU_GEN",
788 				    rvu_npa_af_gen_intr_handler);
789 	if (!rc)
790 		goto err;
791 	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
792 
793 	/* Register and enable NPA_AF_ERR_INT interrupt */
794 	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
795 				    "NPA_AF_ERR_INT",
796 				    rvu_npa_af_err_intr_handler);
797 	if (!rc)
798 		goto err;
799 	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
800 
801 	/* Register and enable NPA_AF_RAS interrupt */
802 	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
803 				    "NPA_AF_RAS",
804 				    rvu_npa_af_ras_intr_handler);
805 	if (!rc)
806 		goto err;
807 	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
808 
809 	return 0;
810 err:
811 	rvu_npa_unregister_interrupts(rvu);
812 	return rc;
813 }
814 
/* Format one NPA health event context into a devlink fmsg report.
 * @ctx is a struct rvu_npa_event_ctx; @health_reporter selects which
 * latched cause register to decode. Returns 0 or -EINVAL for an
 * unknown reporter type.
 */
static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
			       enum npa_af_rvu_health health_reporter)
{
	struct rvu_npa_event_ctx *npa_event_context;
	unsigned int alloc_dis, free_dis;
	u64 intr_val;

	npa_event_context = ctx;
	switch (health_reporter) {
	case NPA_AF_RVU_GEN:
		intr_val = npa_event_context->npa_af_rvu_gen;
		rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
					  npa_event_context->npa_af_rvu_gen);
		if (intr_val & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");

		/* Bits [15:0] are per-input-queue free-disable flags. */
		free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
		if (free_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
		if (free_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
		if (free_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
		if (free_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");

		/* Bits [31:16] are per-input-queue alloc-disable flags. */
		alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
		if (alloc_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
		if (alloc_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
		if (alloc_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
		if (alloc_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");

		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_ERR:
		rvu_report_pair_start(fmsg, "NPA_AF_ERR");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
					  npa_event_context->npa_af_rvu_err);
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(14))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(13))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(12))
			devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_RAS:
		rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
					  npa_event_context->npa_af_rvu_ras);
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_INTR:
		rvu_report_pair_start(fmsg, "NPA_AF_RVU");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
					  npa_event_context->npa_af_rvu_int);
		if (npa_event_context->npa_af_rvu_int & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
		rvu_report_pair_end(fmsg);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
908 
909 static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
910 				struct devlink_fmsg *fmsg, void *ctx,
911 				struct netlink_ext_ack *netlink_extack)
912 {
913 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
914 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
915 	struct rvu_npa_event_ctx *npa_ctx;
916 
917 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
918 
919 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
920 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
921 }
922 
923 static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
924 				   void *ctx, struct netlink_ext_ack *netlink_extack)
925 {
926 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
927 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
928 	int blkaddr;
929 
930 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
931 	if (blkaddr < 0)
932 		return blkaddr;
933 
934 	if (npa_event_ctx->npa_af_rvu_int)
935 		rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
936 
937 	return 0;
938 }
939 
940 static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
941 			       struct devlink_fmsg *fmsg, void *ctx,
942 			       struct netlink_ext_ack *netlink_extack)
943 {
944 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
945 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
946 	struct rvu_npa_event_ctx *npa_ctx;
947 
948 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
949 
950 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
951 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
952 }
953 
954 static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
955 				  void *ctx, struct netlink_ext_ack *netlink_extack)
956 {
957 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
958 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
959 	int blkaddr;
960 
961 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
962 	if (blkaddr < 0)
963 		return blkaddr;
964 
965 	if (npa_event_ctx->npa_af_rvu_gen)
966 		rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
967 
968 	return 0;
969 }
970 
971 static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
972 			       struct devlink_fmsg *fmsg, void *ctx,
973 			       struct netlink_ext_ack *netlink_extack)
974 {
975 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
976 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
977 	struct rvu_npa_event_ctx *npa_ctx;
978 
979 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
980 
981 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
982 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
983 }
984 
985 static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
986 				  void *ctx, struct netlink_ext_ack *netlink_extack)
987 {
988 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
989 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
990 	int blkaddr;
991 
992 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
993 	if (blkaddr < 0)
994 		return blkaddr;
995 
996 	if (npa_event_ctx->npa_af_rvu_err)
997 		rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
998 
999 	return 0;
1000 }
1001 
1002 static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
1003 			       struct devlink_fmsg *fmsg, void *ctx,
1004 			       struct netlink_ext_ack *netlink_extack)
1005 {
1006 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1007 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1008 	struct rvu_npa_event_ctx *npa_ctx;
1009 
1010 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1011 
1012 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
1013 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
1014 }
1015 
1016 static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
1017 				  void *ctx, struct netlink_ext_ack *netlink_extack)
1018 {
1019 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1020 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1021 	int blkaddr;
1022 
1023 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1024 	if (blkaddr < 0)
1025 		return blkaddr;
1026 
1027 	if (npa_event_ctx->npa_af_rvu_ras)
1028 		rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
1029 
1030 	return 0;
1031 }
1032 
/* Instantiate the devlink health reporter ops (binding the *_dump and
 * *_recover callbacks above) for each NPA error class.
 */
RVU_REPORTERS(hw_npa_intr);
RVU_REPORTERS(hw_npa_gen);
RVU_REPORTERS(hw_npa_err);
RVU_REPORTERS(hw_npa_ras);

static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
1039 
/* Allocate the NPA health-reporter state, create the four devlink health
 * reporters (intr/gen/err/ras) and the workqueue used to defer reporting
 * out of IRQ context, then wire up the per-reporter work items.
 *
 * NOTE(review): the early error returns below do not free the kzalloc'd
 * buffers or destroy already-created reporters; whether the caller's
 * teardown path reclaims them after a mid-sequence failure is not visible
 * here — confirm before relying on these paths.
 */
static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *rvu_reporters;
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	rvu_dl->rvu_npa_health_reporter = rvu_reporters;
	npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
	if (!npa_event_context)
		return -ENOMEM;

	rvu_reporters->npa_event_ctx = npa_event_context;
	rvu_reporters->rvu_hw_npa_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
	}

	rvu_reporters->rvu_hw_npa_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
	}

	rvu_reporters->rvu_hw_npa_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
	}

	rvu_reporters->rvu_hw_npa_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
	}

	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		goto err;

	INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);

	return 0;
err:
	rvu_npa_health_reporters_destroy(rvu_dl);
	return -ENOMEM;
}
1102 
1103 static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
1104 {
1105 	struct rvu *rvu = rvu_dl->rvu;
1106 	int err;
1107 
1108 	err = rvu_npa_register_reporters(rvu_dl);
1109 	if (err) {
1110 		dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
1111 			 err);
1112 		return err;
1113 	}
1114 	rvu_npa_register_interrupts(rvu);
1115 
1116 	return 0;
1117 }
1118 
1119 static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
1120 {
1121 	struct rvu_npa_health_reporters *npa_reporters;
1122 	struct rvu *rvu = rvu_dl->rvu;
1123 
1124 	npa_reporters = rvu_dl->rvu_npa_health_reporter;
1125 
1126 	if (!npa_reporters->rvu_hw_npa_ras_reporter)
1127 		return;
1128 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
1129 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
1130 
1131 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
1132 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
1133 
1134 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
1135 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
1136 
1137 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
1138 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
1139 
1140 	rvu_npa_unregister_interrupts(rvu);
1141 	kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
1142 	kfree(rvu_dl->rvu_npa_health_reporter);
1143 }
1144 
1145 static int rvu_health_reporters_create(struct rvu *rvu)
1146 {
1147 	struct rvu_devlink *rvu_dl;
1148 	int err;
1149 
1150 	rvu_dl = rvu->rvu_dl;
1151 	err = rvu_npa_health_reporters_create(rvu_dl);
1152 	if (err)
1153 		return err;
1154 
1155 	return rvu_nix_health_reporters_create(rvu_dl);
1156 }
1157 
1158 static void rvu_health_reporters_destroy(struct rvu *rvu)
1159 {
1160 	struct rvu_devlink *rvu_dl;
1161 
1162 	if (!rvu->rvu_dl)
1163 		return;
1164 
1165 	rvu_dl = rvu->rvu_dl;
1166 	rvu_npa_health_reporters_destroy(rvu_dl);
1167 	rvu_nix_health_reporters_destroy(rvu_dl);
1168 }
1169 
1170 /* Devlink Params APIs */
1171 static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
1172 				       union devlink_param_value val,
1173 				       struct netlink_ext_ack *extack)
1174 {
1175 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1176 	struct rvu *rvu = rvu_dl->rvu;
1177 	int dwrr_mtu = val.vu32;
1178 	struct nix_txsch *txsch;
1179 	struct nix_hw *nix_hw;
1180 
1181 	if (!rvu->hw->cap.nix_common_dwrr_mtu) {
1182 		NL_SET_ERR_MSG_MOD(extack,
1183 				   "Setting DWRR_MTU is not supported on this silicon");
1184 		return -EOPNOTSUPP;
1185 	}
1186 
1187 	if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
1188 	    (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
1189 		NL_SET_ERR_MSG_MOD(extack,
1190 				   "Invalid, supported MTUs are 0,2,4,8.16,32,64....4K,8K,32K,64K and 9728, 10240");
1191 		return -EINVAL;
1192 	}
1193 
1194 	nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
1195 	if (!nix_hw)
1196 		return -ENODEV;
1197 
1198 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1199 	if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
1200 		NL_SET_ERR_MSG_MOD(extack,
1201 				   "Changing DWRR MTU is not supported when there are active NIXLFs");
1202 		NL_SET_ERR_MSG_MOD(extack,
1203 				   "Make sure none of the PF/VF interfaces are initialized and retry");
1204 		return -EOPNOTSUPP;
1205 	}
1206 
1207 	return 0;
1208 }
1209 
1210 static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
1211 				  struct devlink_param_gset_ctx *ctx)
1212 {
1213 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1214 	struct rvu *rvu = rvu_dl->rvu;
1215 	u64 dwrr_mtu;
1216 
1217 	dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
1218 	rvu_write64(rvu, BLKADDR_NIX0,
1219 		    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), dwrr_mtu);
1220 
1221 	return 0;
1222 }
1223 
1224 static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
1225 				  struct devlink_param_gset_ctx *ctx)
1226 {
1227 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1228 	struct rvu *rvu = rvu_dl->rvu;
1229 	u64 dwrr_mtu;
1230 
1231 	if (!rvu->hw->cap.nix_common_dwrr_mtu)
1232 		return -EOPNOTSUPP;
1233 
1234 	dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0,
1235 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
1236 	ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);
1237 
1238 	return 0;
1239 }
1240 
/* Driver-specific devlink parameter IDs; numbered above the generic
 * devlink parameter ID space to avoid collisions.
 */
enum rvu_af_dl_param_id {
	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
	RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
	RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
};
1247 
1248 static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
1249 					struct devlink_param_gset_ctx *ctx)
1250 {
1251 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1252 	struct rvu *rvu = rvu_dl->rvu;
1253 	bool enabled;
1254 
1255 	enabled = rvu_npc_exact_has_match_table(rvu);
1256 
1257 	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
1258 		 enabled ? "enabled" : "disabled");
1259 
1260 	return 0;
1261 }
1262 
1263 static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
1264 					    struct devlink_param_gset_ctx *ctx)
1265 {
1266 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1267 	struct rvu *rvu = rvu_dl->rvu;
1268 
1269 	rvu_npc_exact_disable_feature(rvu);
1270 
1271 	return 0;
1272 }
1273 
1274 static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
1275 					     union devlink_param_value val,
1276 					     struct netlink_ext_ack *extack)
1277 {
1278 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1279 	struct rvu *rvu = rvu_dl->rvu;
1280 	u64 enable;
1281 
1282 	if (kstrtoull(val.vstr, 10, &enable)) {
1283 		NL_SET_ERR_MSG_MOD(extack,
1284 				   "Only 1 value is supported");
1285 		return -EINVAL;
1286 	}
1287 
1288 	if (enable != 1) {
1289 		NL_SET_ERR_MSG_MOD(extack,
1290 				   "Only disabling exact match feature is supported");
1291 		return -EINVAL;
1292 	}
1293 
1294 	if (rvu_npc_exact_can_disable_feature(rvu))
1295 		return 0;
1296 
1297 	NL_SET_ERR_MSG_MOD(extack,
1298 			   "Can't disable exact match feature; Please try before any configuration");
1299 	return -EFAULT;
1300 }
1301 
1302 static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
1303 						    struct devlink_param_gset_ctx *ctx)
1304 {
1305 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1306 	struct rvu *rvu = rvu_dl->rvu;
1307 	struct npc_mcam *mcam;
1308 	u32 percent;
1309 
1310 	mcam = &rvu->hw->mcam;
1311 	percent = (mcam->hprio_count * 100) / mcam->bmap_entries;
1312 	ctx->val.vu8 = (u8)percent;
1313 
1314 	return 0;
1315 }
1316 
1317 static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
1318 						    struct devlink_param_gset_ctx *ctx)
1319 {
1320 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1321 	struct rvu *rvu = rvu_dl->rvu;
1322 	struct npc_mcam *mcam;
1323 	u32 percent;
1324 
1325 	percent = ctx->val.vu8;
1326 	mcam = &rvu->hw->mcam;
1327 	mcam->hprio_count = (mcam->bmap_entries * percent) / 100;
1328 	mcam->hprio_end = mcam->hprio_count;
1329 	mcam->lprio_count = (mcam->bmap_entries - mcam->hprio_count) / 2;
1330 	mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
1331 
1332 	return 0;
1333 }
1334 
1335 static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink, u32 id,
1336 							 union devlink_param_value val,
1337 							 struct netlink_ext_ack *extack)
1338 {
1339 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1340 	struct rvu *rvu = rvu_dl->rvu;
1341 	struct npc_mcam *mcam;
1342 
1343 	/* The percent of high prio zone must range from 12% to 100% of unreserved mcam space */
1344 	if (val.vu8 < 12 || val.vu8 > 100) {
1345 		NL_SET_ERR_MSG_MOD(extack,
1346 				   "mcam high zone percent must be between 12% to 100%");
1347 		return -EINVAL;
1348 	}
1349 
1350 	/* Do not allow user to modify the high priority zone entries while mcam entries
1351 	 * have already been assigned.
1352 	 */
1353 	mcam = &rvu->hw->mcam;
1354 	if (mcam->bmap_fcnt < mcam->bmap_entries) {
1355 		NL_SET_ERR_MSG_MOD(extack,
1356 				   "mcam entries have already been assigned, can't resize");
1357 		return -EPERM;
1358 	}
1359 
1360 	return 0;
1361 }
1362 
/* Devlink runtime parameters registered for every AF device. */
static const struct devlink_param rvu_af_dl_params[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
			     "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
};
1370 
/* Extra parameters registered only when the NPC exact-match table is
 * present (see rvu_register_dl()).
 */
static const struct devlink_param rvu_af_dl_param_exact_match[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_npc_exact_feature_get,
			     rvu_af_npc_exact_feature_disable,
			     rvu_af_npc_exact_feature_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
			     "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_mcam_high_zone_percent_get,
			     rvu_af_dl_npc_mcam_high_zone_percent_set,
			     rvu_af_dl_npc_mcam_high_zone_percent_validate),
};
1385 
1386 /* Devlink switch mode */
1387 static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
1388 {
1389 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1390 	struct rvu *rvu = rvu_dl->rvu;
1391 	struct rvu_switch *rswitch;
1392 
1393 	rswitch = &rvu->rswitch;
1394 	*mode = rswitch->mode;
1395 
1396 	return 0;
1397 }
1398 
1399 static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
1400 					struct netlink_ext_ack *extack)
1401 {
1402 	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
1403 	struct rvu *rvu = rvu_dl->rvu;
1404 	struct rvu_switch *rswitch;
1405 
1406 	rswitch = &rvu->rswitch;
1407 	switch (mode) {
1408 	case DEVLINK_ESWITCH_MODE_LEGACY:
1409 	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
1410 		if (rswitch->mode == mode)
1411 			return 0;
1412 		rswitch->mode = mode;
1413 		if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
1414 			rvu_switch_enable(rvu);
1415 		else
1416 			rvu_switch_disable(rvu);
1417 		break;
1418 	default:
1419 		return -EINVAL;
1420 	}
1421 
1422 	return 0;
1423 }
1424 
/* AF devlink callbacks: only eswitch mode get/set are implemented. */
static const struct devlink_ops rvu_devlink_ops = {
	.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
	.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
1429 
/* Allocate and register the AF devlink instance: health reporters first,
 * then the common params, then (on CN10K-B only) the exact-match params,
 * and finally devlink_register() to expose everything to userspace.
 * Error paths unwind in reverse order of what succeeded.
 *
 * Returns 0 on success or a negative errno.
 */
int rvu_register_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
			   rvu->dev);
	if (!dl) {
		dev_warn(rvu->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	/* Cross-link devlink priv and rvu before anything can use them. */
	rvu_dl = devlink_priv(dl);
	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	err = rvu_health_reporters_create(rvu);
	if (err) {
		dev_err(rvu->dev,
			"devlink health reporter creation failed with error %d\n", err);
		goto err_dl_health;
	}

	err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
	if (err) {
		dev_err(rvu->dev,
			"devlink params register failed with error %d", err);
		goto err_dl_health;
	}

	/* Register exact match devlink only for CN10K-B */
	if (!rvu_npc_exact_has_match_table(rvu))
		goto done;

	err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
				      ARRAY_SIZE(rvu_af_dl_param_exact_match));
	if (err) {
		dev_err(rvu->dev,
			"devlink exact match params register failed with error %d", err);
		goto err_dl_exact_match;
	}

done:
	devlink_register(dl);
	return 0;

err_dl_exact_match:
	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

err_dl_health:
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
	return err;
}
1486 
/* Tear down the AF devlink instance in reverse registration order:
 * unregister from userspace first, then the params, then the health
 * reporters, and finally free the devlink object.
 */
void rvu_unregister_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct devlink *dl = rvu_dl->dl;

	devlink_unregister(dl);

	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

	/* Unregister exact match devlink only for CN10K-B */
	if (rvu_npc_exact_has_match_table(rvu))
		devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
					  ARRAY_SIZE(rvu_af_dl_param_exact_match));

	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
}
1504