xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function Devlink
3  *
4  * Copyright (C) 2020 Marvell.
5  *
6  */
7 
8 #include <linux/bitfield.h>
9 
10 #include "rvu.h"
11 #include "rvu_reg.h"
12 #include "rvu_struct.h"
13 #include "rvu_npc_hash.h"
14 
15 #define DRV_NAME "octeontx2-af"
16 
/* Open a named pair + object nesting in a devlink fmsg; paired with
 * rvu_report_pair_end(). Used to group one reporter's register dump.
 */
static void rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
	devlink_fmsg_pair_nest_start(fmsg, name);
	devlink_fmsg_obj_nest_start(fmsg);
}
22 
/* Close the object + pair nesting opened by rvu_report_pair_start().
 * Note: nests must close in reverse order of opening.
 */
static void rvu_report_pair_end(struct devlink_fmsg *fmsg)
{
	devlink_fmsg_obj_nest_end(fmsg);
	devlink_fmsg_pair_nest_end(fmsg);
}
28 
rvu_common_request_irq(struct rvu * rvu,int offset,const char * name,irq_handler_t fn)29 static bool rvu_common_request_irq(struct rvu *rvu, int offset,
30 				   const char *name, irq_handler_t fn)
31 {
32 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
33 	int rc;
34 
35 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
36 	rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
37 			 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
38 	if (rc)
39 		dev_warn(rvu->dev, "Failed to register %s irq\n", name);
40 	else
41 		rvu->irq_allocated[offset] = true;
42 
43 	return rvu->irq_allocated[offset];
44 }
45 
/* Deferred work for NIX_AF_RVU interrupts: report the latched event
 * context to devlink health (cannot be done from hard-IRQ context).
 */
static void rvu_nix_intr_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
			      "NIX_AF_RVU Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}
55 
/* Hard-IRQ handler for NIX_AF_RVU_INT: latch the cause bits into the
 * shared event context, acknowledge and mask the interrupt, then defer
 * the devlink health report to workqueue context.
 */
static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
	nix_event_context->nix_af_rvu_int = intr;

	/* W1C: writing the latched bits back clears them; then mask all
	 * RVU interrupts until the reporter's recover op re-enables them.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);

	return IRQ_HANDLED;
}
80 
/* Deferred work for NIX_AF_GEN interrupts: forward the latched event
 * context to the devlink "gen" health reporter.
 */
static void rvu_nix_gen_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
			      "NIX_AF_GEN Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}
90 
/* Hard-IRQ handler for NIX_AF_GEN_INT: latch cause bits, ack (W1C) and
 * mask the interrupt, then schedule the devlink health report.
 */
static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
	nix_event_context->nix_af_rvu_gen = intr;

	/* Ack the latched bits, then mask until recover re-enables. */
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);

	return IRQ_HANDLED;
}
115 
/* Deferred work for NIX_AF_ERR interrupts: forward the latched event
 * context to the devlink "err" health reporter.
 */
static void rvu_nix_err_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
			      "NIX_AF_ERR Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}
125 
/* Hard-IRQ handler for NIX_AF_ERR_INT: latch cause bits, ack (W1C) and
 * mask the interrupt, then schedule the devlink health report.
 */
static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
	nix_event_context->nix_af_rvu_err = intr;

	/* Ack the latched bits, then mask until recover re-enables. */
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);

	return IRQ_HANDLED;
}
150 
/* Deferred work for NIX_AF_RAS (poison) interrupts: forward the latched
 * event context to the devlink "ras" health reporter.
 */
static void rvu_nix_ras_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
			      "NIX_AF_RAS Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}
160 
rvu_nix_af_rvu_ras_handler(int irq,void * rvu_irq)161 static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
162 {
163 	struct rvu_nix_event_ctx *nix_event_context;
164 	struct rvu_devlink *rvu_dl = rvu_irq;
165 	struct rvu *rvu;
166 	int blkaddr;
167 	u64 intr;
168 
169 	rvu = rvu_dl->rvu;
170 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
171 	if (blkaddr < 0)
172 		return IRQ_NONE;
173 
174 	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
175 	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
176 	nix_event_context->nix_af_rvu_ras = intr;
177 
178 	/* Clear interrupts */
179 	rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
180 	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
181 	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
182 
183 	return IRQ_HANDLED;
184 }
185 
/* Mask all NIX AF health interrupts and free the vectors this file
 * requested. Safe to call on a partially-registered set: only vectors
 * flagged in irq_allocated[] are freed.
 */
static void rvu_nix_unregister_interrupts(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	int offs, i, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return;

	/* MSI-X vector base for the NIX AF interrupts; 0 means the offset
	 * was never configured, so nothing was registered.
	 */
	offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
	if (!offs)
		return;

	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);

	if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
		free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
			 rvu_dl);
		rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
	}

	/* NOTE(review): this loop starts at NIX_AF_INT_VEC_AF_ERR, so any
	 * vector between NIX_AF_INT_VEC_RVU and AF_ERR (e.g. the GEN
	 * vector, if numbered there) relies on falling in this range —
	 * verify against the NIX_AF_INT_VEC_* enum.
	 */
	for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
			rvu->irq_allocated[offs + i] = false;
		}
}
216 
rvu_nix_register_interrupts(struct rvu * rvu)217 static int rvu_nix_register_interrupts(struct rvu *rvu)
218 {
219 	int blkaddr, base;
220 	bool rc;
221 
222 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
223 	if (blkaddr < 0)
224 		return blkaddr;
225 
226 	/* Get NIX AF MSIX vectors offset. */
227 	base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
228 	if (!base) {
229 		dev_warn(rvu->dev,
230 			 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
231 			 blkaddr - BLKADDR_NIX0);
232 		return 0;
233 	}
234 	/* Register and enable NIX_AF_RVU_INT interrupt */
235 	rc = rvu_common_request_irq(rvu, base +  NIX_AF_INT_VEC_RVU,
236 				    "NIX_AF_RVU_INT",
237 				    rvu_nix_af_rvu_intr_handler);
238 	if (!rc)
239 		goto err;
240 	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
241 
242 	/* Register and enable NIX_AF_GEN_INT interrupt */
243 	rc = rvu_common_request_irq(rvu, base +  NIX_AF_INT_VEC_GEN,
244 				    "NIX_AF_GEN_INT",
245 				    rvu_nix_af_rvu_gen_handler);
246 	if (!rc)
247 		goto err;
248 	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
249 
250 	/* Register and enable NIX_AF_ERR_INT interrupt */
251 	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
252 				    "NIX_AF_ERR_INT",
253 				    rvu_nix_af_rvu_err_handler);
254 	if (!rc)
255 		goto err;
256 	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
257 
258 	/* Register and enable NIX_AF_RAS interrupt */
259 	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
260 				    "NIX_AF_RAS",
261 				    rvu_nix_af_rvu_ras_handler);
262 	if (!rc)
263 		goto err;
264 	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
265 
266 	return 0;
267 err:
268 	rvu_nix_unregister_interrupts(rvu);
269 	return rc;
270 }
271 
rvu_nix_report_show(struct devlink_fmsg * fmsg,void * ctx,enum nix_af_rvu_health health_reporter)272 static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
273 			       enum nix_af_rvu_health health_reporter)
274 {
275 	struct rvu_nix_event_ctx *nix_event_context;
276 	u64 intr_val;
277 
278 	nix_event_context = ctx;
279 	switch (health_reporter) {
280 	case NIX_AF_RVU_INTR:
281 		intr_val = nix_event_context->nix_af_rvu_int;
282 		rvu_report_pair_start(fmsg, "NIX_AF_RVU");
283 		devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
284 					  nix_event_context->nix_af_rvu_int);
285 		if (intr_val & BIT_ULL(0))
286 			devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
287 		rvu_report_pair_end(fmsg);
288 		break;
289 	case NIX_AF_RVU_GEN:
290 		intr_val = nix_event_context->nix_af_rvu_gen;
291 		rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
292 		devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
293 					  nix_event_context->nix_af_rvu_gen);
294 		if (intr_val & BIT_ULL(0))
295 			devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
296 		if (intr_val & BIT_ULL(1))
297 			devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
298 		if (intr_val & BIT_ULL(4))
299 			devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
300 		rvu_report_pair_end(fmsg);
301 		break;
302 	case NIX_AF_RVU_ERR:
303 		intr_val = nix_event_context->nix_af_rvu_err;
304 		rvu_report_pair_start(fmsg, "NIX_AF_ERR");
305 		devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
306 					  nix_event_context->nix_af_rvu_err);
307 		if (intr_val & BIT_ULL(14))
308 			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
309 		if (intr_val & BIT_ULL(13))
310 			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
311 		if (intr_val & BIT_ULL(12))
312 			devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
313 		if (intr_val & BIT_ULL(6))
314 			devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
315 		if (intr_val & BIT_ULL(5))
316 			devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
317 		if (intr_val & BIT_ULL(4))
318 			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
319 		if (intr_val & BIT_ULL(3))
320 			devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
321 		if (intr_val & BIT_ULL(2))
322 			devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
323 		if (intr_val & BIT_ULL(1))
324 			devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
325 		if (intr_val & BIT_ULL(0))
326 			devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
327 		rvu_report_pair_end(fmsg);
328 		break;
329 	case NIX_AF_RVU_RAS:
330 		intr_val = nix_event_context->nix_af_rvu_err;
331 		rvu_report_pair_start(fmsg, "NIX_AF_RAS");
332 		devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
333 					  nix_event_context->nix_af_rvu_err);
334 		devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
335 		if (intr_val & BIT_ULL(34))
336 			devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
337 		if (intr_val & BIT_ULL(33))
338 			devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
339 		if (intr_val & BIT_ULL(32))
340 			devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
341 		if (intr_val & BIT_ULL(4))
342 			devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
343 		if (intr_val & BIT_ULL(3))
344 			devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
345 		if (intr_val & BIT_ULL(2))
346 			devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
347 		if (intr_val & BIT_ULL(1))
348 			devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
349 		if (intr_val & BIT_ULL(0))
350 			devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
351 		rvu_report_pair_end(fmsg);
352 		break;
353 	default:
354 		return -EINVAL;
355 	}
356 
357 	return 0;
358 }
359 
rvu_hw_nix_intr_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)360 static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
361 				struct devlink_fmsg *fmsg, void *ctx,
362 				struct netlink_ext_ack *netlink_extack)
363 {
364 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
365 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
366 	struct rvu_nix_event_ctx *nix_ctx;
367 
368 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
369 
370 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
371 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
372 }
373 
/* devlink "recover" op for the NIX RVU interrupt reporter: re-enable
 * NIX_AF_RVU interrupt delivery (masked by the IRQ handler) when the
 * event context shows a latched cause.
 */
static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
				   void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_int)
		rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);

	return 0;
}
390 
rvu_hw_nix_gen_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)391 static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
392 			       struct devlink_fmsg *fmsg, void *ctx,
393 			       struct netlink_ext_ack *netlink_extack)
394 {
395 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
396 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
397 	struct rvu_nix_event_ctx *nix_ctx;
398 
399 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
400 
401 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
402 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
403 }
404 
/* devlink "recover" op for the NIX GEN reporter: re-enable
 * NIX_AF_GEN interrupt delivery when a cause was latched.
 */
static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_gen)
		rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);

	return 0;
}
421 
rvu_hw_nix_err_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)422 static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
423 			       struct devlink_fmsg *fmsg, void *ctx,
424 			       struct netlink_ext_ack *netlink_extack)
425 {
426 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
427 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
428 	struct rvu_nix_event_ctx *nix_ctx;
429 
430 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
431 
432 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
433 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
434 }
435 
/* devlink "recover" op for the NIX ERR reporter: re-enable
 * NIX_AF_ERR interrupt delivery when a cause was latched.
 */
static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_err)
		rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);

	return 0;
}
452 
rvu_hw_nix_ras_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)453 static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
454 			       struct devlink_fmsg *fmsg, void *ctx,
455 			       struct netlink_ext_ack *netlink_extack)
456 {
457 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
458 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
459 	struct rvu_nix_event_ctx *nix_ctx;
460 
461 	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
462 
463 	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
464 		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
465 }
466 
rvu_hw_nix_ras_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)467 static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
468 				  void *ctx, struct netlink_ext_ack *netlink_extack)
469 {
470 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
471 	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
472 	int blkaddr;
473 
474 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
475 	if (blkaddr < 0)
476 		return blkaddr;
477 
478 	if (nix_event_ctx->nix_af_rvu_int)
479 		rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
480 
481 	return 0;
482 }
483 
/* Instantiate the devlink_health_reporter_ops structures
 * (rvu_<name>_reporter_ops) from the <name>_dump/<name>_recover
 * callbacks above; RVU_REPORTERS() is defined in a driver header.
 */
RVU_REPORTERS(hw_nix_intr);
RVU_REPORTERS(hw_nix_gen);
RVU_REPORTERS(hw_nix_err);
RVU_REPORTERS(hw_nix_ras);
488 
489 static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
490 
/* Allocate the NIX health-reporter state, create the four devlink
 * reporters, create the shared workqueue and init the deferred-work
 * items.
 *
 * Return: 0 on success, negative errno on failure.
 *
 * NOTE(review): on the error paths below, previously-created reporters,
 * the event context and the reporters struct are not freed here — this
 * appears to rely on the caller/teardown path; verify that
 * rvu_nix_health_reporters_destroy() runs on every failure.
 */
static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *rvu_reporters;
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	rvu_dl->rvu_nix_health_reporter = rvu_reporters;
	nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
	if (!nix_event_context)
		return -ENOMEM;

	rvu_reporters->nix_event_ctx = nix_event_context;
	rvu_reporters->rvu_hw_nix_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
	}

	rvu_reporters->rvu_hw_nix_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
	}

	rvu_reporters->rvu_hw_nix_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
	}

	rvu_reporters->rvu_hw_nix_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
	}

	/* Shared workqueue that runs all four deferred health reports. */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);

	return 0;
}
550 
/* Create the NIX health reporters, then hook up the NIX AF interrupts.
 *
 * Return: 0 on success, negative errno if reporter creation fails.
 *
 * NOTE(review): the return value of rvu_nix_register_interrupts() is
 * deliberately(?) ignored — a vector-registration failure leaves the
 * reporters usable for manual dumps only; confirm this is intended.
 */
static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
{
	struct rvu *rvu = rvu_dl->rvu;
	int err;

	err = rvu_nix_register_reporters(rvu_dl);
	if (err) {
		dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
			 err);
		return err;
	}
	rvu_nix_register_interrupts(rvu);

	return 0;
}
566 
/* Tear down the NIX health reporters, interrupts and allocations.
 *
 * NOTE(review): the early return when rvu_hw_nix_ras_reporter is NULL
 * (i.e. reporter creation never completed) skips destroying the earlier
 * reporters and freeing the event context / reporters struct — verify
 * the partial-failure path does not leak.
 */
static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *nix_reporters;
	struct rvu *rvu = rvu_dl->rvu;

	nix_reporters = rvu_dl->rvu_nix_health_reporter;

	if (!nix_reporters->rvu_hw_nix_ras_reporter)
		return;
	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);

	rvu_nix_unregister_interrupts(rvu);
	kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
	kfree(rvu_dl->rvu_nix_health_reporter);
}
592 
/* Deferred work for NPA_AF_RVU interrupts: report the latched event
 * context to devlink health.
 */
static void rvu_npa_intr_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
			      "NPA_AF_RVU Error",
			      rvu_npa_health_reporter->npa_event_ctx);
}
602 
/* Hard-IRQ handler for NPA_AF_RVU_INT: latch cause bits, ack (W1C) and
 * mask the interrupt, then schedule the devlink health report.
 */
static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
	npa_event_context->npa_af_rvu_int = intr;

	/* Ack the latched bits, then mask until recover re-enables. */
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);

	return IRQ_HANDLED;
}
627 
/* Deferred work for NPA_AF_GEN interrupts: forward the latched event
 * context to the devlink "gen" health reporter.
 */
static void rvu_npa_gen_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
			      "NPA_AF_GEN Error",
			      rvu_npa_health_reporter->npa_event_ctx);
}
637 
/* Hard-IRQ handler for NPA_AF_GEN_INT: latch cause bits, ack (W1C) and
 * mask the interrupt, then schedule the devlink health report.
 */
static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
	npa_event_context->npa_af_rvu_gen = intr;

	/* Ack the latched bits, then mask until recover re-enables. */
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);

	return IRQ_HANDLED;
}
662 
/* Deferred work for NPA_AF_ERR interrupts: forward the latched event
 * context to the devlink "err" health reporter.
 */
static void rvu_npa_err_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
			      "NPA_AF_ERR Error",
			      rvu_npa_health_reporter->npa_event_ctx);
}
672 
/* Hard-IRQ handler for NPA_AF_ERR_INT: latch cause bits, ack (W1C) and
 * mask the interrupt, then schedule the devlink health report.
 */
static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
	npa_event_context->npa_af_rvu_err = intr;

	/* Ack the latched bits, then mask until recover re-enables. */
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);

	return IRQ_HANDLED;
}
696 
/* Deferred work for NPA_AF_RAS (poison) interrupts: forward the latched
 * event context to the devlink "ras" health reporter.
 */
static void rvu_npa_ras_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
			      "HW NPA_AF_RAS Error reported",
			      rvu_npa_health_reporter->npa_event_ctx);
}
706 
/* Hard-IRQ handler for NPA_AF_RAS: latch the poison cause bits, ack
 * (W1C) and mask the interrupt, then schedule the health report.
 */
static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
	npa_event_context->npa_af_rvu_ras = intr;

	/* Ack the latched bits, then mask until recover re-enables. */
	rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);

	return IRQ_HANDLED;
}
731 
rvu_npa_unregister_interrupts(struct rvu * rvu)732 static void rvu_npa_unregister_interrupts(struct rvu *rvu)
733 {
734 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
735 	int i, offs, blkaddr;
736 	u64 reg;
737 
738 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
739 	if (blkaddr < 0)
740 		return;
741 
742 	reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
743 	offs = reg & 0x3FF;
744 
745 	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
746 	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
747 	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
748 	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
749 
750 	for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
751 		if (rvu->irq_allocated[offs + i]) {
752 			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
753 			rvu->irq_allocated[offs + i] = false;
754 		}
755 }
756 
rvu_npa_register_interrupts(struct rvu * rvu)757 static int rvu_npa_register_interrupts(struct rvu *rvu)
758 {
759 	int blkaddr, base;
760 	bool rc;
761 
762 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
763 	if (blkaddr < 0)
764 		return blkaddr;
765 
766 	/* Get NPA AF MSIX vectors offset. */
767 	base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
768 	if (!base) {
769 		dev_warn(rvu->dev,
770 			 "Failed to get NPA_AF_INT vector offsets\n");
771 		return 0;
772 	}
773 
774 	/* Register and enable NPA_AF_RVU_INT interrupt */
775 	rc = rvu_common_request_irq(rvu, base +  NPA_AF_INT_VEC_RVU,
776 				    "NPA_AF_RVU_INT",
777 				    rvu_npa_af_rvu_intr_handler);
778 	if (!rc)
779 		goto err;
780 	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
781 
782 	/* Register and enable NPA_AF_GEN_INT interrupt */
783 	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
784 				    "NPA_AF_RVU_GEN",
785 				    rvu_npa_af_gen_intr_handler);
786 	if (!rc)
787 		goto err;
788 	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
789 
790 	/* Register and enable NPA_AF_ERR_INT interrupt */
791 	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
792 				    "NPA_AF_ERR_INT",
793 				    rvu_npa_af_err_intr_handler);
794 	if (!rc)
795 		goto err;
796 	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
797 
798 	/* Register and enable NPA_AF_RAS interrupt */
799 	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
800 				    "NPA_AF_RAS",
801 				    rvu_npa_af_ras_intr_handler);
802 	if (!rc)
803 		goto err;
804 	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
805 
806 	return 0;
807 err:
808 	rvu_npa_unregister_interrupts(rvu);
809 	return rc;
810 }
811 
rvu_npa_report_show(struct devlink_fmsg * fmsg,void * ctx,enum npa_af_rvu_health health_reporter)812 static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
813 			       enum npa_af_rvu_health health_reporter)
814 {
815 	struct rvu_npa_event_ctx *npa_event_context;
816 	unsigned int alloc_dis, free_dis;
817 	u64 intr_val;
818 
819 	npa_event_context = ctx;
820 	switch (health_reporter) {
821 	case NPA_AF_RVU_GEN:
822 		intr_val = npa_event_context->npa_af_rvu_gen;
823 		rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
824 		devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
825 					  npa_event_context->npa_af_rvu_gen);
826 		if (intr_val & BIT_ULL(32))
827 			devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
828 
829 		free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
830 		if (free_dis & BIT(NPA_INPQ_NIX0_RX))
831 			devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
832 		if (free_dis & BIT(NPA_INPQ_NIX0_TX))
833 			devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
834 		if (free_dis & BIT(NPA_INPQ_NIX1_RX))
835 			devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
836 		if (free_dis & BIT(NPA_INPQ_NIX1_TX))
837 			devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
838 		if (free_dis & BIT(NPA_INPQ_SSO))
839 			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
840 		if (free_dis & BIT(NPA_INPQ_TIM))
841 			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
842 		if (free_dis & BIT(NPA_INPQ_DPI))
843 			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
844 		if (free_dis & BIT(NPA_INPQ_AURA_OP))
845 			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
846 
847 		alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
848 		if (alloc_dis & BIT(NPA_INPQ_NIX0_RX))
849 			devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
850 		if (alloc_dis & BIT(NPA_INPQ_NIX0_TX))
851 			devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
852 		if (alloc_dis & BIT(NPA_INPQ_NIX1_RX))
853 			devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
854 		if (alloc_dis & BIT(NPA_INPQ_NIX1_TX))
855 			devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
856 		if (alloc_dis & BIT(NPA_INPQ_SSO))
857 			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
858 		if (alloc_dis & BIT(NPA_INPQ_TIM))
859 			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
860 		if (alloc_dis & BIT(NPA_INPQ_DPI))
861 			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
862 		if (alloc_dis & BIT(NPA_INPQ_AURA_OP))
863 			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
864 
865 		rvu_report_pair_end(fmsg);
866 		break;
867 	case NPA_AF_RVU_ERR:
868 		rvu_report_pair_start(fmsg, "NPA_AF_ERR");
869 		devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
870 					  npa_event_context->npa_af_rvu_err);
871 		if (npa_event_context->npa_af_rvu_err & BIT_ULL(14))
872 			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
873 		if (npa_event_context->npa_af_rvu_err & BIT_ULL(13))
874 			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
875 		if (npa_event_context->npa_af_rvu_err & BIT_ULL(12))
876 			devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
877 		rvu_report_pair_end(fmsg);
878 		break;
879 	case NPA_AF_RVU_RAS:
880 		rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
881 		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
882 					  npa_event_context->npa_af_rvu_ras);
883 		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34))
884 			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
885 		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33))
886 			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
887 		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32))
888 			devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
889 		rvu_report_pair_end(fmsg);
890 		break;
891 	case NPA_AF_RVU_INTR:
892 		rvu_report_pair_start(fmsg, "NPA_AF_RVU");
893 		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
894 					  npa_event_context->npa_af_rvu_int);
895 		if (npa_event_context->npa_af_rvu_int & BIT_ULL(0))
896 			devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
897 		rvu_report_pair_end(fmsg);
898 		break;
899 	default:
900 		return -EINVAL;
901 	}
902 
903 	return 0;
904 }
905 
rvu_hw_npa_intr_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)906 static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
907 				struct devlink_fmsg *fmsg, void *ctx,
908 				struct netlink_ext_ack *netlink_extack)
909 {
910 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
911 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
912 	struct rvu_npa_event_ctx *npa_ctx;
913 
914 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
915 
916 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
917 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
918 }
919 
rvu_hw_npa_intr_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)920 static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
921 				   void *ctx, struct netlink_ext_ack *netlink_extack)
922 {
923 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
924 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
925 	int blkaddr;
926 
927 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
928 	if (blkaddr < 0)
929 		return blkaddr;
930 
931 	if (npa_event_ctx->npa_af_rvu_int)
932 		rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
933 
934 	return 0;
935 }
936 
rvu_hw_npa_gen_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)937 static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
938 			       struct devlink_fmsg *fmsg, void *ctx,
939 			       struct netlink_ext_ack *netlink_extack)
940 {
941 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
942 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
943 	struct rvu_npa_event_ctx *npa_ctx;
944 
945 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
946 
947 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
948 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
949 }
950 
rvu_hw_npa_gen_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)951 static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
952 				  void *ctx, struct netlink_ext_ack *netlink_extack)
953 {
954 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
955 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
956 	int blkaddr;
957 
958 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
959 	if (blkaddr < 0)
960 		return blkaddr;
961 
962 	if (npa_event_ctx->npa_af_rvu_gen)
963 		rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
964 
965 	return 0;
966 }
967 
rvu_hw_npa_err_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)968 static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
969 			       struct devlink_fmsg *fmsg, void *ctx,
970 			       struct netlink_ext_ack *netlink_extack)
971 {
972 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
973 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
974 	struct rvu_npa_event_ctx *npa_ctx;
975 
976 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
977 
978 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
979 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
980 }
981 
rvu_hw_npa_err_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)982 static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
983 				  void *ctx, struct netlink_ext_ack *netlink_extack)
984 {
985 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
986 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
987 	int blkaddr;
988 
989 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
990 	if (blkaddr < 0)
991 		return blkaddr;
992 
993 	if (npa_event_ctx->npa_af_rvu_err)
994 		rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
995 
996 	return 0;
997 }
998 
rvu_hw_npa_ras_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)999 static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
1000 			       struct devlink_fmsg *fmsg, void *ctx,
1001 			       struct netlink_ext_ack *netlink_extack)
1002 {
1003 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1004 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1005 	struct rvu_npa_event_ctx *npa_ctx;
1006 
1007 	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1008 
1009 	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
1010 		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
1011 }
1012 
rvu_hw_npa_ras_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)1013 static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
1014 				  void *ctx, struct netlink_ext_ack *netlink_extack)
1015 {
1016 	struct rvu *rvu = devlink_health_reporter_priv(reporter);
1017 	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1018 	int blkaddr;
1019 
1020 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1021 	if (blkaddr < 0)
1022 		return blkaddr;
1023 
1024 	if (npa_event_ctx->npa_af_rvu_ras)
1025 		rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
1026 
1027 	return 0;
1028 }
1029 
/* Instantiate the devlink_health_reporter_ops structures (named
 * rvu_<name>_reporter_ops, wiring up the _dump/_recover callbacks above)
 * for each NPA reporter via the RVU_REPORTERS() helper macro.
 */
RVU_REPORTERS(hw_npa_intr);
RVU_REPORTERS(hw_npa_gen);
RVU_REPORTERS(hw_npa_err);
RVU_REPORTERS(hw_npa_ras);

/* Forward declaration: needed by rvu_npa_health_reporters_create(). */
static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
1036 
/* Allocate the NPA health-reporter state, create one devlink health
 * reporter per NPA interrupt class (RVU/GEN/ERR/RAS), and set up the
 * workqueue plus work items used to schedule reports from IRQ context.
 *
 * Returns 0 on success or a negative errno.  On failure, resources
 * allocated so far are NOT freed here: the caller chain unwinds through
 * rvu_register_dl(), whose error path calls
 * rvu_health_reporters_destroy() to release them.
 */
static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *rvu_reporters;
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	/* Publish the struct before filling it so the destroy path can
	 * find whatever was created.
	 */
	rvu_dl->rvu_npa_health_reporter = rvu_reporters;
	npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
	if (!npa_event_context)
		return -ENOMEM;

	rvu_reporters->npa_event_ctx = npa_event_context;
	rvu_reporters->rvu_hw_npa_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
	}

	rvu_reporters->rvu_hw_npa_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
	}

	rvu_reporters->rvu_hw_npa_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
	}

	rvu_reporters->rvu_hw_npa_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
	}

	/* Work items queued here run devlink_health_report() outside of
	 * hard-IRQ context.
	 */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);

	return 0;
}
1096 
rvu_npa_health_reporters_create(struct rvu_devlink * rvu_dl)1097 static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
1098 {
1099 	struct rvu *rvu = rvu_dl->rvu;
1100 	int err;
1101 
1102 	err = rvu_npa_register_reporters(rvu_dl);
1103 	if (err) {
1104 		dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
1105 			 err);
1106 		return err;
1107 	}
1108 	rvu_npa_register_interrupts(rvu);
1109 
1110 	return 0;
1111 }
1112 
rvu_npa_health_reporters_destroy(struct rvu_devlink * rvu_dl)1113 static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
1114 {
1115 	struct rvu_npa_health_reporters *npa_reporters;
1116 	struct rvu *rvu = rvu_dl->rvu;
1117 
1118 	npa_reporters = rvu_dl->rvu_npa_health_reporter;
1119 
1120 	if (!npa_reporters->rvu_hw_npa_ras_reporter)
1121 		return;
1122 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
1123 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
1124 
1125 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
1126 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
1127 
1128 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
1129 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
1130 
1131 	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
1132 		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
1133 
1134 	rvu_npa_unregister_interrupts(rvu);
1135 	kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
1136 	kfree(rvu_dl->rvu_npa_health_reporter);
1137 }
1138 
rvu_health_reporters_create(struct rvu * rvu)1139 static int rvu_health_reporters_create(struct rvu *rvu)
1140 {
1141 	struct rvu_devlink *rvu_dl;
1142 	int err;
1143 
1144 	rvu_dl = rvu->rvu_dl;
1145 	err = rvu_npa_health_reporters_create(rvu_dl);
1146 	if (err)
1147 		return err;
1148 
1149 	return rvu_nix_health_reporters_create(rvu_dl);
1150 }
1151 
rvu_health_reporters_destroy(struct rvu * rvu)1152 static void rvu_health_reporters_destroy(struct rvu *rvu)
1153 {
1154 	struct rvu_devlink *rvu_dl;
1155 
1156 	if (!rvu->rvu_dl)
1157 		return;
1158 
1159 	rvu_dl = rvu->rvu_dl;
1160 	rvu_npa_health_reporters_destroy(rvu_dl);
1161 	rvu_nix_health_reporters_destroy(rvu_dl);
1162 }
1163 
1164 /* Devlink Params APIs */
/* Validate a requested DWRR MTU before rvu_af_dl_dwrr_mtu_set() writes
 * it to hardware.  Accepted values are powers of two up to 64K, plus the
 * special sizes 9728 and 10240.  Rejected when the silicon lacks the
 * common DWRR MTU capability or when any SMQ is in use (active NIXLFs).
 *
 * NOTE(review): the error message lists 0 as supported, but 0 fails the
 * is_power_of_2() test below and is therefore rejected — confirm the
 * intended behavior for 0.
 */
static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
				       union devlink_param_value val,
				       struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	int dwrr_mtu = val.vu32;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.nix_common_dwrr_mtu) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Setting DWRR_MTU is not supported on this silicon");
		return -EOPNOTSUPP;
	}

	if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
	    (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
		/* Fixed typo in the supported-value list: "8.16" -> "8,16" */
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid, supported MTUs are 0,2,4,8,16,32,64....4K,8K,32K,64K and 9728, 10240");
		return -EINVAL;
	}

	nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
	if (!nix_hw)
		return -ENODEV;

	/* All SMQs must be free: a free count below max means some NIXLF
	 * already holds transmit schedulers.
	 */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing DWRR MTU is not supported when there are active NIXLFs");
		NL_SET_ERR_MSG_MOD(extack,
				   "Make sure none of the PF/VF interfaces are initialized and retry");
		return -EOPNOTSUPP;
	}

	return 0;
}
1203 
/* Apply a validated DWRR MTU: convert the byte value to the hardware
 * encoding and write it to the NIX0 DWRR MTU register.
 */
static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx,
				  struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 mtu_reg = nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM);

	rvu_write64(rvu, BLKADDR_NIX0, mtu_reg,
		    convert_bytes_to_dwrr_mtu(ctx->val.vu32));

	return 0;
}
1218 
/* Report the current DWRR MTU, converted from the hardware encoding
 * back to bytes.  -EOPNOTSUPP on silicon without the capability.
 */
static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 mtu_reg;

	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		return -EOPNOTSUPP;

	mtu_reg = nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM);
	ctx->val.vu32 =
		convert_dwrr_mtu_to_bytes(rvu_read64(rvu, BLKADDR_NIX0, mtu_reg));

	return 0;
}
1235 
/* Driver-specific devlink parameter ids, numbered after the generic
 * devlink parameter id space.
 */
enum rvu_af_dl_param_id {
	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
	RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
	RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
	RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
1243 
/* Report whether the NPC exact-match feature is currently enabled, as
 * the string "enabled" or "disabled".
 */
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
					struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;

	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
		 rvu_npc_exact_has_match_table(rvu) ? "enabled" : "disabled");

	return 0;
}
1258 
/* "set" handler for the exact-match parameter: the only supported action
 * (enforced by the validate callback) is disabling the feature.
 */
static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
					    struct devlink_param_gset_ctx *ctx,
					    struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	rvu_npc_exact_disable_feature(rvu_dl->rvu);

	return 0;
}
1270 
/* Validate the exact-match parameter: only the literal value "1"
 * (disable) is accepted, and only while the feature can still be
 * disabled (no configuration has been done yet).
 */
static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 enable;

	if (kstrtoull(val.vstr, 10, &enable)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 value is supported");
		return -EINVAL;
	}

	if (enable != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only disabling exact match feature is supported");
		return -EINVAL;
	}

	if (!rvu_npc_exact_can_disable_feature(rvu)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't disable exact match feature; Please try before any configuration");
		return -EFAULT;
	}

	return 0;
}
1298 
/* Report the current high-priority MCAM zone size as a percentage of
 * the total bitmap entries.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;

	ctx->val.vu8 = (u8)((mcam->hprio_count * 100) / mcam->bmap_entries);

	return 0;
}
1313 
/* Resize the MCAM priority zones from the requested high-zone percent:
 * the high zone takes that share of the bitmap, the low zone takes half
 * of the remainder at the top of the bitmap.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx,
						    struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;
	u32 hprio, lprio;

	hprio = (mcam->bmap_entries * ctx->val.vu8) / 100;
	lprio = (mcam->bmap_entries - hprio) / 2;

	mcam->hprio_count = hprio;
	mcam->hprio_end = hprio;
	mcam->lprio_count = lprio;
	mcam->lprio_start = mcam->bmap_entries - lprio;

	return 0;
}
1332 
/* Validate a requested high-priority MCAM zone size: must be between
 * 12% and 100% of the unreserved MCAM space, and resizing is only
 * allowed while no MCAM entries have been handed out.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink, u32 id,
							 union devlink_param_value val,
							 struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;

	if (val.vu8 < 12 || val.vu8 > 100) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam high zone percent must be between 12% to 100%");
		return -EINVAL;
	}

	/* A free count below the total means entries are already in use. */
	if (mcam->bmap_fcnt < mcam->bmap_entries) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam entries have already been assigned, can't resize");
		return -EPERM;
	}

	return 0;
}
1360 
/* Report the current maximum NIX LF count. */
static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
				   struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	ctx->val.vu16 = (u16)rvu_get_nixlf_count(rvu_dl->rvu);

	return 0;
}
1371 
/* Apply a new maximum NIX LF count.
 *
 * The MCAM resources are torn down first, the lf.max value of every NIX
 * block is updated, and the MCAM is then re-initialized against the NPC
 * block — the deinit/init bracketing suggests MCAM sizing depends on the
 * LF count (TODO confirm).  rvu_af_dl_nix_maxlf_validate() has already
 * ensured no MCAM entries are in use.
 */
static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id,
				   struct devlink_param_gset_ctx *ctx,
				   struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_block *block;
	int blkaddr = 0;

	npc_mcam_rsrcs_deinit(rvu);
	/* Walk every NIX block (NIX0, NIX1, ...) and set its new LF cap. */
	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		block->lf.max = ctx->val.vu16;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	npc_mcam_rsrcs_init(rvu, blkaddr);

	return 0;
}
1394 
/* Validate a requested maximum NIX LF count against the silicon limits
 * (NIX_AF_CONST2 of each NIX block) and refuse the change while any
 * MCAM entry is assigned.
 */
static int rvu_af_dl_nix_maxlf_validate(struct devlink *devlink, u32 id,
					union devlink_param_value val,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u16 max_nix0_lf, max_nix1_lf;
	struct npc_mcam *mcam;
	u64 cfg;

	cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
	max_nix0_lf = cfg & 0xFFF;
	cfg = rvu_read64(rvu, BLKADDR_NIX1, NIX_AF_CONST2);
	max_nix1_lf = cfg & 0xFFF;

	/* Do not allow user to modify maximum NIX LFs while mcam entries
	 * have already been assigned.
	 */
	mcam = &rvu->hw->mcam;
	if (mcam->bmap_fcnt < mcam->bmap_entries) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam entries have already been assigned, can't resize");
		return -EPERM;
	}

	if (max_nix0_lf && val.vu16 > max_nix0_lf) {
		NL_SET_ERR_MSG_MOD(extack,
				   "requested nixlf is greater than the max supported nix0_lf");
		/* was -EPERM; aligned with the identical nix1 check below,
		 * since an out-of-range value is -EINVAL by convention.
		 */
		return -EINVAL;
	}

	if (max_nix1_lf && val.vu16 > max_nix1_lf) {
		NL_SET_ERR_MSG_MOD(extack,
				   "requested nixlf is greater than the max supported nix1_lf");
		return -EINVAL;
	}

	return 0;
}
1434 
/* Devlink runtime parameters always registered for the AF. */
static const struct devlink_param rvu_af_dl_params[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
			     "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
			     "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_mcam_high_zone_percent_get,
			     rvu_af_dl_npc_mcam_high_zone_percent_set,
			     rvu_af_dl_npc_mcam_high_zone_percent_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
			     "nix_maxlf", DEVLINK_PARAM_TYPE_U16,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_nix_maxlf_get,
			     rvu_af_dl_nix_maxlf_set,
			     rvu_af_dl_nix_maxlf_validate),
};
1454 
/* Registered only when the silicon has an NPC exact-match table
 * (CN10K-B); see rvu_register_dl().
 */
static const struct devlink_param rvu_af_dl_param_exact_match[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_npc_exact_feature_get,
			     rvu_af_npc_exact_feature_disable,
			     rvu_af_npc_exact_feature_validate),
};
1463 
1464 /* Devlink switch mode */
/* Report the current eswitch mode (legacy or switchdev). */
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	*mode = rvu_dl->rvu->rswitch.mode;

	return 0;
}
1476 
/* Switch the AF between legacy and switchdev eswitch modes.
 * A no-op when the requested mode is already active; -EINVAL for any
 * other mode value.
 */
static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_switch *rswitch = &rvu->rswitch;

	if (mode != DEVLINK_ESWITCH_MODE_LEGACY &&
	    mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EINVAL;

	if (rswitch->mode == mode)
		return 0;

	rswitch->mode = mode;
	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		rvu_switch_enable(rvu);
	else
		rvu_switch_disable(rvu);

	return 0;
}
1502 
/* AF devlink ops: only the eswitch mode get/set callbacks are provided. */
static const struct devlink_ops rvu_devlink_ops = {
	.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
	.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
1507 
/* Allocate and register the AF devlink instance: health reporters, the
 * standard AF parameters and, on CN10K-B only, the exact-match parameter.
 *
 * Returns 0 on success or a negative errno; on failure everything created
 * so far is unwound through the goto-based cleanup at the bottom.
 */
int rvu_register_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
			   rvu->dev);
	if (!dl) {
		dev_warn(rvu->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	rvu_dl = devlink_priv(dl);
	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	err = rvu_health_reporters_create(rvu);
	if (err) {
		dev_err(rvu->dev,
			"devlink health reporter creation failed with error %d\n", err);
		goto err_dl_health;
	}

	err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
	if (err) {
		dev_err(rvu->dev,
			"devlink params register failed with error %d", err);
		goto err_dl_health;
	}

	/* Register exact match devlink only for CN10K-B */
	if (!rvu_npc_exact_has_match_table(rvu))
		goto done;

	err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
				      ARRAY_SIZE(rvu_af_dl_param_exact_match));
	if (err) {
		dev_err(rvu->dev,
			"devlink exact match params register failed with error %d", err);
		goto err_dl_exact_match;
	}

done:
	devlink_register(dl);
	return 0;

err_dl_exact_match:
	/* The base params were registered above; drop them first. */
	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

err_dl_health:
	/* Destroys whichever reporters were created, then frees the devlink. */
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
	return err;
}
1564 
rvu_unregister_dl(struct rvu * rvu)1565 void rvu_unregister_dl(struct rvu *rvu)
1566 {
1567 	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1568 	struct devlink *dl = rvu_dl->dl;
1569 
1570 	devlink_unregister(dl);
1571 
1572 	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
1573 
1574 	/* Unregister exact match devlink only for CN10K-B */
1575 	if (rvu_npc_exact_has_match_table(rvu))
1576 		devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
1577 					  ARRAY_SIZE(rvu_af_dl_param_exact_match));
1578 
1579 	rvu_health_reporters_destroy(rvu);
1580 	devlink_free(dl);
1581 }
1582