// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"

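/* Write a single instruction at the current AQ head, ring the doorbell and
 * busy-poll the result memory until hardware reports a completion code.
 * Called with aq->lock held. Returns 0 on NPA_AQ_COMP_GOOD, -EBUSY on
 * timeout or any other completion code.
 */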
static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct npa_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct npa_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct npa_aq_res_s *)aq->res->base;

	/* Get the current head pointer, where this instruction is appended */
	reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* Sync the instruction and result memory before ringing the doorbell */
	wmb();

	/* Ring the doorbell and wait for the result */
	rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
	while (result->compcode == NPA_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NPA_AQ_COMP_GOOD) {
		/* TODO: Replace this with a proper error code */
		if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
		    result->compcode == NPA_AQ_COMP_LOCKERR ||
		    result->compcode == NPA_AQ_COMP_CTX_POISON) {
			if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
				dev_err(rvu->dev,
					"%s: Not able to unlock cachelines\n", __func__);
		}

		return -EBUSY;
	}

	return 0;
}

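/* Validate an AQ request from a PF/VF, build the corresponding admin queue
 * instruction (INIT/WRITE/READ/LOCK/UNLOCK of an aura or pool context),
 * submit it and, for READ operations, copy the returned context into @rsp.
 * The per-block aq->lock serializes use of the shared result memory.
 */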
int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
			struct npa_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, npalf, rc = 0;
	struct npa_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
		return NPA_AF_ERR_AQ_ENQUEUE;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
		return NPA_AF_ERR_AQ_ENQUEUE;
	}

	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	memset(&inst, 0, sizeof(struct npa_aq_inst_s));
	inst.cindex = req->aura_id;
	inst.lf = npalf;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Enqueuing multiple instructions at once is not supported,
	 * so always use the first entry of the result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware returns the result of every instruction via the same
	 * aq->res->base, hence serialize submissions until the previous
	 * one is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NPA_AQ_INSTOP_WRITE:
		/* Copy context and write mask */
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			memcpy(mask, &req->aura_mask,
			       sizeof(struct npa_aura_s));
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else {
			memcpy(mask, &req->pool_mask,
			       sizeof(struct npa_pool_s));
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_INIT:
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
				rc = NPA_AF_ERR_AQ_FULL;
				break;
			}
			/* Set pool's context address */
			req->aura.pool_addr = pfvf->pool_ctx->iova +
			(req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else { /* POOL's context */
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_NOP:
	case NPA_AQ_INSTOP_READ:
	case NPA_AQ_INSTOP_LOCK:
	case NPA_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NPA_AF_ERR_AQ_FULL;
		break;
	}

	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = npa_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set aura bitmap if aura hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_AURA) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
			__set_bit(req->aura_id, pfvf->aura_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
			ena = (req->aura.ena & req->aura_mask.ena) |
				(test_bit(req->aura_id, pfvf->aura_bmap) &
				~req->aura_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->aura_bmap);
			else
				__clear_bit(req->aura_id, pfvf->aura_bmap);
		}
	}

	/* Set pool bitmap if pool hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
			__set_bit(req->aura_id, pfvf->pool_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
			ena = (req->pool.ena & req->pool_mask.ena) |
				(test_bit(req->aura_id, pfvf->pool_bmap) &
				~req->pool_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->pool_bmap);
			else
				__clear_bit(req->aura_id, pfvf->pool_bmap);
		}
	}
	spin_unlock(&aq->lock);

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NPA_AQ_INSTOP_READ) {
			if (req->ctype == NPA_AQ_CTYPE_AURA)
				memcpy(&rsp->aura, ctx,
				       sizeof(struct npa_aura_s));
			else
				memcpy(&rsp->pool, ctx,
				       sizeof(struct npa_pool_s));
		}
	}

	return 0;
}

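/* Walk the aura or pool bitmap of a PF/VF and issue a masked AQ WRITE that
 * clears the 'ena' bit of every enabled context, disabling it in hardware.
 * Returns the last error seen, so a single failure does not stop the loop.
 */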
static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct npa_aq_enq_req aq_req;
	unsigned long *bmap;
	int id, cnt = 0;
	int err = 0, rc;

	if (!pfvf->pool_ctx || !pfvf->aura_ctx)
		return NPA_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		aq_req.pool.ena = 0;
		aq_req.pool_mask.ena = 1;
		cnt = pfvf->pool_ctx->qsize;
		bmap = pfvf->pool_bmap;
	} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
		aq_req.aura.ena = 0;
		aq_req.aura_mask.ena = 1;
		aq_req.aura.bp_ena = 0;
		aq_req.aura_mask.bp_ena = 1;
		cnt = pfvf->aura_ctx->qsize;
		bmap = pfvf->aura_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NPA_AQ_INSTOP_WRITE;

	for (id = 0; id < cnt; id++) {
		if (!test_bit(id, bmap))
			continue;
		aq_req.aura_id = id;
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				(req->ctype == NPA_AQ_CTYPE_AURA) ?
				"Aura" : "Pool", id);
		}
	}

	return err;
}

#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
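/* When dynamic NDC caching is disabled, lock each newly initialized
 * aura/pool context into the NDC by issuing an AQ LOCK for the same entry.
 */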
static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
{
	struct npa_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NPA_AQ_INSTOP_INIT)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
	lock_ctx_req.aura_id = req->aura_id;
	err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
			req->hdr.pcifunc,
			(req->ctype == NPA_AQ_CTYPE_AURA) ?
			"Aura" : "Pool", req->aura_id);
	return err;
}

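/* Mailbox handler for NPA_AQ_ENQ: submit the request to the AQ and, in this
 * configuration, also lock the newly initialized context into the NDC.
 */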
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_npa_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = npa_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp)
{
	return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
#endif

int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return npa_lf_hwctx_disable(rvu, req);
}

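/* Free all HW context memory and bitmaps allocated for an NPA LF */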
static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->aura_bmap);
	pfvf->aura_bmap = NULL;

	qmem_free(rvu->dev, pfvf->aura_ctx);
	pfvf->aura_ctx = NULL;

	kfree(pfvf->pool_bmap);
	pfvf->pool_bmap = NULL;

	qmem_free(rvu->dev, pfvf->pool_ctx);
	pfvf->pool_ctx = NULL;

	qmem_free(rvu->dev, pfvf->npa_qints_ctx);
	pfvf->npa_qints_ctx = NULL;
}

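/* Mailbox handler for NPA_LF_ALLOC: reset the LF, allocate memory for the
 * aura, pool and QINT HW contexts, program their base addresses and caching
 * attributes, and report stack page parameters back to the requester.
 */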
int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
				  struct npa_lf_alloc_req *req,
				  struct npa_lf_alloc_rsp *rsp)
{
	int npalf, qints, hwctx_size, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (req->aura_sz > NPA_AURA_SZ_MAX ||
	    req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
		return NPA_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);

	/* Alloc memory for aura HW contexts */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
			 NPA_AURA_COUNT(req->aura_sz), hwctx_size);
	if (err)
		goto free_mem;

	pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->aura_bmap)
		goto free_mem;

	/* Alloc memory for pool HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->pool_bmap)
		goto free_mem;

	/* Get the number of queue interrupts supported */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	qints = (cfg >> 28) & 0xFFF;

	/* Alloc memory for Qints HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
	/* Clear way partition mask and set aura offset to '0' */
	cfg &= ~(BIT_ULL(34) - 1);
	/* Set aura size & enable caching of contexts */
	cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;

	rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);

	/* Configure aura HW context's base */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
		    (u64)pfvf->aura_ctx->iova);

	/* Enable caching of qints hw context */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
		    BIT_ULL(36) | req->way_mask << 20);
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
		    (u64)pfvf->npa_qints_ctx->iova);

	goto exit;

free_mem:
	npa_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set stack page info */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
	rsp->stack_pg_bytes = cfg & 0xFF;
	rsp->qints = (cfg >> 28) & 0xFFF;
	if (!is_rvu_otx2(rvu)) {
		cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
		rsp->cache_lines = (cfg >> 1) & 0x3F;
	}
	return rc;
}

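/* Mailbox handler for NPA_LF_FREE: reset the LF and release all of its
 * HW context memory.
 */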
int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int npalf, err;
	int blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

	npa_ctx_free(rvu, pfvf);

	return 0;
}

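/* One-time NPA admin queue setup: program AQ endianness, NDC caching policy
 * and CN10K batch DMA parameters, then allocate the instruction and result
 * queues and write their size and base address to hardware.
 */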
static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#else
	cfg &= ~BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
	cfg &= ~0x03DULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
	/* Disable caching of stack pages */
	cfg |= 0x10ULL;
#endif
	rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);

	/* For CN10K NPA BATCH DMA set 35 cache lines */
	if (!is_rvu_otx2(rvu)) {
		cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
		cfg &= ~0x7EULL;
		cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1);
		rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg);
	}
	/* Result structure can be followed by Aura/Pool context at
	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
			   ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

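/* Initialize the NPA block's admin queue if an NPA block is present;
 * absence of the block is not treated as an error.
 */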
int rvu_npa_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return 0;

	/* Initialize admin queue */
	return npa_aq_init(rvu, &hw->block[blkaddr]);
}

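/* Free the NPA admin queue memory allocated in rvu_npa_init() */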
void rvu_npa_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	rvu_aq_free(rvu, block->aq);
}

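/* Disable all pool and aura HW contexts owned by the given NPA LF and
 * free its context memory.
 */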
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;

	/* Disable all pools */
	ctx_req.hdr.pcifunc = pcifunc;
	ctx_req.ctype = NPA_AQ_CTYPE_POOL;
	npa_lf_hwctx_disable(rvu, &ctx_req);

	/* Disable all auras */
	ctx_req.ctype = NPA_AQ_CTYPE_AURA;
	npa_lf_hwctx_disable(rvu, &ctx_req);

	npa_ctx_free(rvu, pfvf);
}

/* Due to a hardware erratum, in some corner cases, AQ context lock
 * operations can result in an NDC way getting into an illegal state
 * of being locked while not valid.
 *
 * This function solves the problem by clearing the lock bit of the NDC block.
 * The operation needs to be done for each line of all the NDC banks.
 */
int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
{
	int bank, max_bank, line, max_line, err;
	u64 reg, ndc_af_const;

	/* Set the ENABLE bit(63) to '0' */
	reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
	rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));

	/* Poll until the BUSY bits(47:32) are set to '0' */
	err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true);
	if (err) {
		dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n");
		return err;
	}

	ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
	max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
	for (bank = 0; bank < max_bank; bank++) {
		for (line = 0; line < max_line; line++) {
			/* Check if 'cache line valid bit(63)' is not set
			 * but 'cache line lock bit(60)' is set and on
			 * success, reset the lock bit(60).
			 */
			reg = rvu_read64(rvu, blkaddr,
					 NDC_AF_BANKX_LINEX_METADATA(bank, line));
			if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
				rvu_write64(rvu, blkaddr,
					    NDC_AF_BANKX_LINEX_METADATA(bank, line),
					    reg & ~BIT_ULL(60));
			}
		}
	}

	return 0;
}
602