xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu.c (revision a77fb1ace44ed11874b9bf064208cad5fc0d94a5)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell.
5  *
6  */
7 
8 #include <linux/module.h>
9 #include <linux/interrupt.h>
10 #include <linux/delay.h>
11 #include <linux/irq.h>
12 #include <linux/pci.h>
13 #include <linux/sysfs.h>
14 
15 #include "cgx.h"
16 #include "rvu.h"
17 #include "rvu_reg.h"
18 #include "ptp.h"
19 #include "mcs.h"
20 
21 #include "rvu_trace.h"
22 #include "rvu_npc_hash.h"
23 #include "cn20k/reg.h"
24 #include "cn20k/api.h"
25 #include "cn20k/npc.h"
26 
27 #define DRV_NAME	"rvu_af"
28 #define DRV_STRING      "Marvell OcteonTX2 RVU Admin Function Driver"
29 
30 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
31 				struct rvu_block *block, int lf);
32 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
33 				  struct rvu_block *block, int lf);
34 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
35 
36 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
37 			 int type, int num,
38 			 void (mbox_handler)(struct work_struct *),
39 			 void (mbox_up_handler)(struct work_struct *));
40 static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq);
41 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq);
42 
43 /* Supported devices */
44 static const struct pci_device_id rvu_id_table[] = {
45 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
46 	{ 0, }  /* end of table */
47 };
48 
49 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
50 MODULE_DESCRIPTION(DRV_STRING);
51 MODULE_LICENSE("GPL v2");
52 MODULE_DEVICE_TABLE(pci, rvu_id_table);
53 
54 static char *mkex_profile; /* MKEX profile name */
55 module_param(mkex_profile, charp, 0000);
56 MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
57 
58 static char *kpu_profile; /* KPU profile name */
59 module_param(kpu_profile, charp, 0000);
60 MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
61 
62 static void rvu_setup_hw_capabilities(struct rvu *rvu)
63 {
64 	struct rvu_hwinfo *hw = rvu->hw;
65 
66 	hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
67 	hw->cap.nix_fixed_txschq_mapping = false;
68 	hw->cap.nix_shaping = true;
69 	hw->cap.nix_tx_link_bp = true;
70 	hw->cap.nix_rx_multicast = true;
71 	hw->cap.nix_shaper_toggle_wait = false;
72 	hw->cap.npc_hash_extract = false;
73 	hw->cap.npc_exact_match_enabled = false;
74 	hw->rvu = rvu;
75 
76 	if (is_rvu_pre_96xx_C0(rvu)) {
77 		hw->cap.nix_fixed_txschq_mapping = true;
78 		hw->cap.nix_txsch_per_cgx_lmac = 4;
79 		hw->cap.nix_txsch_per_lbk_lmac = 132;
80 		hw->cap.nix_txsch_per_sdp_lmac = 76;
81 		hw->cap.nix_shaping = false;
82 		hw->cap.nix_tx_link_bp = false;
83 		if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
84 			hw->cap.nix_rx_multicast = false;
85 	}
86 	if (!is_rvu_pre_96xx_C0(rvu))
87 		hw->cap.nix_shaper_toggle_wait = true;
88 
89 	if (!is_rvu_otx2(rvu))
90 		hw->cap.per_pf_mbox_regs = true;
91 
92 	if (is_rvu_npc_hash_extract_en(rvu))
93 		hw->cap.npc_hash_extract = true;
94 }
95 
96 /* Poll a RVU block's register 'offset', for a 'zero'
97  * or 'nonzero' at bits specified by 'mask'
98  */
99 int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
100 {
101 	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
102 	bool twice = false;
103 	void __iomem *reg;
104 	u64 reg_val;
105 
106 	reg = rvu->afreg_base + ((block << 28) | offset);
107 again:
108 	reg_val = readq(reg);
109 	if (zero && !(reg_val & mask))
110 		return 0;
111 	if (!zero && (reg_val & mask))
112 		return 0;
113 	if (time_before(jiffies, timeout)) {
114 		usleep_range(1, 5);
115 		goto again;
116 	}
117 	/* In scenarios where CPU is scheduled out before checking
118 	 * 'time_before' (above) and gets scheduled in such that
119 	 * jiffies are beyond timeout value, then check again if HW is
120 	 * done with the operation in the meantime.
121 	 */
122 	if (!twice) {
123 		twice = true;
124 		goto again;
125 	}
126 	return -EBUSY;
127 }
128 
129 int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
130 {
131 	int id;
132 
133 	if (!rsrc->bmap)
134 		return -EINVAL;
135 
136 	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
137 	if (id >= rsrc->max)
138 		return -ENOSPC;
139 
140 	__set_bit(id, rsrc->bmap);
141 
142 	return id;
143 }
144 
145 int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
146 {
147 	int start;
148 
149 	if (!rsrc->bmap)
150 		return -EINVAL;
151 
152 	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
153 	if (start >= rsrc->max)
154 		return -ENOSPC;
155 
156 	bitmap_set(rsrc->bmap, start, nrsrc);
157 	return start;
158 }
159 
160 void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
161 {
162 	if (!rsrc->bmap)
163 		return;
164 	if (start >= rsrc->max)
165 		return;
166 
167 	bitmap_clear(rsrc->bmap, start, nrsrc);
168 }
169 
170 bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
171 {
172 	int start;
173 
174 	if (!rsrc->bmap)
175 		return false;
176 
177 	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
178 	if (start >= rsrc->max)
179 		return false;
180 
181 	return true;
182 }
183 
184 void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
185 {
186 	if (!rsrc->bmap)
187 		return;
188 
189 	__clear_bit(id, rsrc->bmap);
190 }
191 
192 int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
193 {
194 	int used;
195 
196 	if (!rsrc->bmap)
197 		return 0;
198 
199 	used = bitmap_weight(rsrc->bmap, rsrc->max);
200 	return (rsrc->max - used);
201 }
202 
203 bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
204 {
205 	if (!rsrc->bmap)
206 		return false;
207 
208 	return !test_bit(id, rsrc->bmap);
209 }
210 
211 int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
212 {
213 	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
214 			     sizeof(long), GFP_KERNEL);
215 	if (!rsrc->bmap)
216 		return -ENOMEM;
217 	return 0;
218 }
219 
220 void rvu_free_bitmap(struct rsrc_bmap *rsrc)
221 {
222 	kfree(rsrc->bmap);
223 }
224 
225 /* Get block LF's HW index from a PF_FUNC's block slot number */
226 int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
227 {
228 	u16 match = 0;
229 	int lf;
230 
231 	mutex_lock(&rvu->rsrc_lock);
232 	for (lf = 0; lf < block->lf.max; lf++) {
233 		if (block->fn_map[lf] == pcifunc) {
234 			if (slot == match) {
235 				mutex_unlock(&rvu->rsrc_lock);
236 				return lf;
237 			}
238 			match++;
239 		}
240 	}
241 	mutex_unlock(&rvu->rsrc_lock);
242 	return -ENODEV;
243 }
244 
245 /* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
246  * Some silicon variants of OcteonTX2 supports
247  * multiple blocks of same type.
248  *
249  * @pcifunc has to be zero when no LF is yet attached.
250  *
251  * For a pcifunc if LFs are attached from multiple blocks of same type, then
252  * return blkaddr of first encountered block.
253  */
254 int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
255 {
256 	int devnum, blkaddr = -ENODEV;
257 	u64 cfg, reg;
258 	bool is_pf;
259 
260 	switch (blktype) {
261 	case BLKTYPE_NPC:
262 		blkaddr = BLKADDR_NPC;
263 		goto exit;
264 	case BLKTYPE_NPA:
265 		blkaddr = BLKADDR_NPA;
266 		goto exit;
267 	case BLKTYPE_NIX:
268 		/* For now assume NIX0 */
269 		if (!pcifunc) {
270 			blkaddr = BLKADDR_NIX0;
271 			goto exit;
272 		}
273 		break;
274 	case BLKTYPE_SSO:
275 		blkaddr = BLKADDR_SSO;
276 		goto exit;
277 	case BLKTYPE_SSOW:
278 		blkaddr = BLKADDR_SSOW;
279 		goto exit;
280 	case BLKTYPE_TIM:
281 		blkaddr = BLKADDR_TIM;
282 		goto exit;
283 	case BLKTYPE_CPT:
284 		/* For now assume CPT0 */
285 		if (!pcifunc) {
286 			blkaddr = BLKADDR_CPT0;
287 			goto exit;
288 		}
289 		break;
290 	}
291 
292 	/* Check if this is a RVU PF or VF */
293 	if (pcifunc & RVU_PFVF_FUNC_MASK) {
294 		is_pf = false;
295 		devnum = rvu_get_hwvf(rvu, pcifunc);
296 	} else {
297 		is_pf = true;
298 		devnum = rvu_get_pf(rvu->pdev, pcifunc);
299 	}
300 
301 	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
302 	 * 'BLKADDR_NIX1'.
303 	 */
304 	if (blktype == BLKTYPE_NIX) {
305 		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
306 			RVU_PRIV_HWVFX_NIXX_CFG(0);
307 		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
308 		if (cfg) {
309 			blkaddr = BLKADDR_NIX0;
310 			goto exit;
311 		}
312 
313 		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
314 			RVU_PRIV_HWVFX_NIXX_CFG(1);
315 		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
316 		if (cfg)
317 			blkaddr = BLKADDR_NIX1;
318 	}
319 
320 	if (blktype == BLKTYPE_CPT) {
321 		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
322 			RVU_PRIV_HWVFX_CPTX_CFG(0);
323 		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
324 		if (cfg) {
325 			blkaddr = BLKADDR_CPT0;
326 			goto exit;
327 		}
328 
329 		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
330 			RVU_PRIV_HWVFX_CPTX_CFG(1);
331 		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
332 		if (cfg)
333 			blkaddr = BLKADDR_CPT1;
334 	}
335 
336 exit:
337 	if (is_block_implemented(rvu->hw, blkaddr))
338 		return blkaddr;
339 	return -ENODEV;
340 }
341 
342 static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
343 				struct rvu_block *block, u16 pcifunc,
344 				u16 lf, bool attach)
345 {
346 	int devnum, num_lfs = 0;
347 	bool is_pf;
348 	u64 reg;
349 
350 	if (lf >= block->lf.max) {
351 		dev_err(&rvu->pdev->dev,
352 			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
353 			__func__, lf, block->name, block->lf.max);
354 		return;
355 	}
356 
357 	/* Check if this is for a RVU PF or VF */
358 	if (pcifunc & RVU_PFVF_FUNC_MASK) {
359 		is_pf = false;
360 		devnum = rvu_get_hwvf(rvu, pcifunc);
361 	} else {
362 		is_pf = true;
363 		devnum = rvu_get_pf(rvu->pdev, pcifunc);
364 	}
365 
366 	block->fn_map[lf] = attach ? pcifunc : 0;
367 
368 	switch (block->addr) {
369 	case BLKADDR_NPA:
370 		pfvf->npalf = attach ? true : false;
371 		num_lfs = pfvf->npalf;
372 		break;
373 	case BLKADDR_NIX0:
374 	case BLKADDR_NIX1:
375 		pfvf->nixlf = attach ? true : false;
376 		num_lfs = pfvf->nixlf;
377 		break;
378 	case BLKADDR_SSO:
379 		attach ? pfvf->sso++ : pfvf->sso--;
380 		num_lfs = pfvf->sso;
381 		break;
382 	case BLKADDR_SSOW:
383 		attach ? pfvf->ssow++ : pfvf->ssow--;
384 		num_lfs = pfvf->ssow;
385 		break;
386 	case BLKADDR_TIM:
387 		attach ? pfvf->timlfs++ : pfvf->timlfs--;
388 		num_lfs = pfvf->timlfs;
389 		break;
390 	case BLKADDR_CPT0:
391 		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
392 		num_lfs = pfvf->cptlfs;
393 		break;
394 	case BLKADDR_CPT1:
395 		attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
396 		num_lfs = pfvf->cpt1_lfs;
397 		break;
398 	}
399 
400 	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
401 	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
402 }
403 
404 void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
405 {
406 	u64 cfg;
407 
408 	/* Get numVFs attached to this PF and first HWVF */
409 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
410 	if (numvfs)
411 		*numvfs = (cfg >> 12) & 0xFF;
412 	if (hwvf)
413 		*hwvf = cfg & 0xFFF;
414 }
415 
416 int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
417 {
418 	int pf, func;
419 	u64 cfg;
420 
421 	pf = rvu_get_pf(rvu->pdev, pcifunc);
422 	func = pcifunc & RVU_PFVF_FUNC_MASK;
423 
424 	/* Get first HWVF attached to this PF */
425 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
426 
427 	return ((cfg & 0xFFF) + func - 1);
428 }
429 
430 struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
431 {
432 	/* Check if it is a PF or VF */
433 	if (pcifunc & RVU_PFVF_FUNC_MASK)
434 		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
435 	else
436 		return &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
437 }
438 
439 static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
440 {
441 	int pf, vf, nvfs;
442 	u64 cfg;
443 
444 	pf = rvu_get_pf(rvu->pdev, pcifunc);
445 	if (pf >= rvu->hw->total_pfs)
446 		return false;
447 
448 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
449 		return true;
450 
451 	/* Check if VF is within number of VFs attached to this PF */
452 	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
453 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
454 	nvfs = (cfg >> 12) & 0xFF;
455 	if (vf >= nvfs)
456 		return false;
457 
458 	return true;
459 }
460 
461 bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
462 {
463 	struct rvu_block *block;
464 
465 	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
466 		return false;
467 
468 	block = &hw->block[blkaddr];
469 	return block->implemented;
470 }
471 
472 static void rvu_check_block_implemented(struct rvu *rvu)
473 {
474 	struct rvu_hwinfo *hw = rvu->hw;
475 	struct rvu_block *block;
476 	int blkid;
477 	u64 cfg;
478 
479 	/* For each block check if 'implemented' bit is set */
480 	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
481 		block = &hw->block[blkid];
482 		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
483 		if (cfg & BIT_ULL(11))
484 			block->implemented = true;
485 	}
486 }
487 
488 static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
489 {
490 	rvu_write64(rvu, BLKADDR_RVUM,
491 		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
492 		    RVU_BLK_RVUM_REVID);
493 }
494 
495 static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
496 {
497 	rvu_write64(rvu, BLKADDR_RVUM,
498 		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
499 }
500 
501 int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
502 {
503 	int err;
504 
505 	if (!block->implemented)
506 		return 0;
507 
508 	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
509 	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
510 			   true);
511 	return err;
512 }
513 
514 static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
515 {
516 	struct rvu_block *block = &rvu->hw->block[blkaddr];
517 	int err;
518 
519 	if (!block->implemented)
520 		return;
521 
522 	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
523 	err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
524 	if (err) {
525 		dev_err(rvu->dev, "HW block:%d reset timeout retrying again\n", blkaddr);
526 		while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
527 			;
528 	}
529 }
530 
531 static void rvu_reset_all_blocks(struct rvu *rvu)
532 {
533 	/* Do a HW reset of all RVU blocks */
534 	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
535 	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
536 	rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
537 	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
538 	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
539 	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
540 	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
541 	rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
542 	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
543 	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
544 	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
545 	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
546 	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
547 }
548 
549 static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
550 {
551 	struct rvu_pfvf *pfvf;
552 	u64 cfg;
553 	int lf;
554 
555 	for (lf = 0; lf < block->lf.max; lf++) {
556 		cfg = rvu_read64(rvu, block->addr,
557 				 block->lfcfg_reg | (lf << block->lfshift));
558 		if (!(cfg & BIT_ULL(63)))
559 			continue;
560 
561 		/* Set this resource as being used */
562 		__set_bit(lf, block->lf.bmap);
563 
564 		/* Get, to whom this LF is attached */
565 		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
566 		rvu_update_rsrc_map(rvu, pfvf, block,
567 				    (cfg >> 8) & 0xFFFF, lf, true);
568 
569 		/* Set start MSIX vector for this LF within this PF/VF */
570 		rvu_set_msix_offset(rvu, pfvf, block, lf);
571 	}
572 }
573 
574 static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
575 {
576 	int min_vecs;
577 
578 	if (!vf)
579 		goto check_pf;
580 
581 	if (!nvecs) {
582 		dev_warn(rvu->dev,
583 			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
584 			 pf, vf - 1, nvecs);
585 	}
586 	return;
587 
588 check_pf:
589 	if (pf == 0)
590 		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
591 	else
592 		min_vecs = RVU_PF_INT_VEC_CNT;
593 
594 	if (!(nvecs < min_vecs))
595 		return;
596 	dev_warn(rvu->dev,
597 		 "PF%d is configured with too few vectors, %d, min is %d\n",
598 		 pf, nvecs, min_vecs);
599 }
600 
601 static int rvu_setup_msix_resources(struct rvu *rvu)
602 {
603 	struct altaf_intr_notify *altaf_intr_data;
604 	struct rvu_hwinfo *hw = rvu->hw;
605 	int pf, vf, numvfs, hwvf, err;
606 	int nvecs, offset, max_msix;
607 	struct rvu_pfvf *pfvf;
608 	u64 cfg, phy_addr;
609 	dma_addr_t iova;
610 
611 	for (pf = 0; pf < hw->total_pfs; pf++) {
612 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
613 		/* If PF is not enabled, nothing to do */
614 		if (!((cfg >> 20) & 0x01))
615 			continue;
616 
617 		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
618 
619 		pfvf = &rvu->pf[pf];
620 		/* Get num of MSIX vectors attached to this PF */
621 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
622 		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
623 		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
624 
625 		/* Alloc msix bitmap for this PF */
626 		err = rvu_alloc_bitmap(&pfvf->msix);
627 		if (err)
628 			return err;
629 
630 		/* Allocate memory for MSIX vector to RVU block LF mapping */
631 		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
632 						sizeof(u16), GFP_KERNEL);
633 		if (!pfvf->msix_lfmap)
634 			return -ENOMEM;
635 
636 		/* For PF0 (AF) firmware will set msix vector offsets for
637 		 * AF, block AF and PF0_INT vectors, so jump to VFs.
638 		 */
639 		if (!pf)
640 			goto setup_vfmsix;
641 
642 		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
643 		 * These are allocated on driver init and never freed,
644 		 * so no need to set 'msix_lfmap' for these.
645 		 */
646 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
647 		nvecs = (cfg >> 12) & 0xFF;
648 		cfg &= ~0x7FFULL;
649 		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
650 		rvu_write64(rvu, BLKADDR_RVUM,
651 			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
652 setup_vfmsix:
653 		/* Alloc msix bitmap for VFs */
654 		for (vf = 0; vf < numvfs; vf++) {
655 			pfvf =  &rvu->hwvf[hwvf + vf];
656 			/* Get num of MSIX vectors attached to this VF */
657 			cfg = rvu_read64(rvu, BLKADDR_RVUM,
658 					 RVU_PRIV_PFX_MSIX_CFG(pf));
659 			pfvf->msix.max = (cfg & 0xFFF) + 1;
660 			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
661 
662 			/* Alloc msix bitmap for this VF */
663 			err = rvu_alloc_bitmap(&pfvf->msix);
664 			if (err)
665 				return err;
666 
667 			pfvf->msix_lfmap =
668 				devm_kcalloc(rvu->dev, pfvf->msix.max,
669 					     sizeof(u16), GFP_KERNEL);
670 			if (!pfvf->msix_lfmap)
671 				return -ENOMEM;
672 
673 			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
674 			 * These are allocated on driver init and never freed,
675 			 * so no need to set 'msix_lfmap' for these.
676 			 */
677 			cfg = rvu_read64(rvu, BLKADDR_RVUM,
678 					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
679 			nvecs = (cfg >> 12) & 0xFF;
680 			cfg &= ~0x7FFULL;
681 			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
682 			rvu_write64(rvu, BLKADDR_RVUM,
683 				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
684 				    cfg | offset);
685 		}
686 	}
687 
688 	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
689 	 * create an IOMMU mapping for the physical address configured by
690 	 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
691 	 */
692 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
693 	max_msix = cfg & 0xFFFFF;
694 	if (rvu->fwdata && rvu->fwdata->msixtr_base)
695 		phy_addr = rvu->fwdata->msixtr_base;
696 	else
697 		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
698 
699 	iova = dma_map_resource(rvu->dev, phy_addr,
700 				max_msix * PCI_MSIX_ENTRY_SIZE,
701 				DMA_BIDIRECTIONAL, 0);
702 
703 	if (dma_mapping_error(rvu->dev, iova))
704 		return -ENOMEM;
705 
706 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
707 	rvu->msix_base_iova = iova;
708 	rvu->msixtr_base_phy = phy_addr;
709 
710 	if (is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))
711 		return 0;
712 
713 	if (!rvu->fwdata)
714 		goto fail;
715 
716 	altaf_intr_data = &rvu->fwdata->altaf_intr_info;
717 	if (altaf_intr_data->gint_paddr) {
718 		iova = dma_map_resource(rvu->dev, altaf_intr_data->gint_paddr,
719 					PCI_MSIX_ENTRY_SIZE,
720 					DMA_BIDIRECTIONAL, 0);
721 
722 		if (dma_mapping_error(rvu->dev, iova))
723 			goto fail;
724 
725 		altaf_intr_data->gint_iova_addr = iova;
726 	}
727 
728 	return 0;
729 
730 fail:
731 	dma_unmap_resource(rvu->dev, phy_addr, max_msix * PCI_MSIX_ENTRY_SIZE,
732 			   DMA_BIDIRECTIONAL, 0);
733 	return -EFAULT;
734 }
735 
736 static void rvu_reset_msix(struct rvu *rvu)
737 {
738 	/* Restore msixtr base register */
739 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
740 		    rvu->msixtr_base_phy);
741 }
742 
743 static void rvu_free_hw_resources(struct rvu *rvu)
744 {
745 	struct rvu_hwinfo *hw = rvu->hw;
746 	struct rvu_block *block;
747 	struct rvu_pfvf  *pfvf;
748 	int id, max_msix;
749 	u64 cfg;
750 
751 	rvu_npa_freemem(rvu);
752 	rvu_npc_freemem(rvu);
753 	rvu_nix_freemem(rvu);
754 
755 	/* Free block LF bitmaps */
756 	for (id = 0; id < BLK_COUNT; id++) {
757 		block = &hw->block[id];
758 		kfree(block->lf.bmap);
759 	}
760 
761 	/* Free MSIX bitmaps */
762 	for (id = 0; id < hw->total_pfs; id++) {
763 		pfvf = &rvu->pf[id];
764 		kfree(pfvf->msix.bmap);
765 	}
766 
767 	for (id = 0; id < hw->total_vfs; id++) {
768 		pfvf = &rvu->hwvf[id];
769 		kfree(pfvf->msix.bmap);
770 	}
771 
772 	/* Unmap MSIX vector base IOVA mapping */
773 	if (!rvu->msix_base_iova)
774 		return;
775 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
776 	max_msix = cfg & 0xFFFFF;
777 	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
778 			   max_msix * PCI_MSIX_ENTRY_SIZE,
779 			   DMA_BIDIRECTIONAL, 0);
780 
781 	rvu_reset_msix(rvu);
782 	mutex_destroy(&rvu->rsrc_lock);
783 
784 	/* Free the QINT/CINT memory */
785 	pfvf = &rvu->pf[RVU_AFPF];
786 	qmem_free(rvu->dev, pfvf->nix_qints_ctx);
787 	qmem_free(rvu->dev, pfvf->cq_ints_ctx);
788 }
789 
790 static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
791 {
792 	struct rvu_hwinfo *hw = rvu->hw;
793 	int pf, vf, numvfs, hwvf;
794 	struct rvu_pfvf *pfvf;
795 	u64 *mac;
796 
797 	for (pf = 0; pf < hw->total_pfs; pf++) {
798 		/* For PF0(AF), Assign MAC address to only VFs (LBKVFs) */
799 		if (!pf)
800 			goto lbkvf;
801 
802 		if (!is_pf_cgxmapped(rvu, pf))
803 			continue;
804 		/* Assign MAC address to PF */
805 		pfvf = &rvu->pf[pf];
806 		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
807 			mac = &rvu->fwdata->pf_macs[pf];
808 			if (*mac)
809 				u64_to_ether_addr(*mac, pfvf->mac_addr);
810 			else
811 				eth_random_addr(pfvf->mac_addr);
812 		} else {
813 			eth_random_addr(pfvf->mac_addr);
814 		}
815 		ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
816 
817 lbkvf:
818 		/* Assign MAC address to VFs*/
819 		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
820 		for (vf = 0; vf < numvfs; vf++, hwvf++) {
821 			pfvf = &rvu->hwvf[hwvf];
822 			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
823 				mac = &rvu->fwdata->vf_macs[hwvf];
824 				if (*mac)
825 					u64_to_ether_addr(*mac, pfvf->mac_addr);
826 				else
827 					eth_random_addr(pfvf->mac_addr);
828 			} else {
829 				eth_random_addr(pfvf->mac_addr);
830 			}
831 			ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
832 		}
833 	}
834 }
835 
836 static int rvu_fwdata_init(struct rvu *rvu)
837 {
838 	u64 fwdbase;
839 	int err;
840 
841 	/* Get firmware data base address */
842 	err = cgx_get_fwdata_base(&fwdbase);
843 	if (err)
844 		goto fail;
845 
846 	BUILD_BUG_ON(offsetof(struct rvu_fwdata, cgx_fw_data) > FWDATA_CGX_LMAC_OFFSET);
847 	rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
848 	if (!rvu->fwdata)
849 		goto fail;
850 	if (!is_rvu_fwdata_valid(rvu)) {
851 		dev_err(rvu->dev,
852 			"Mismatch in 'fwdata' struct btw kernel and firmware\n");
853 		iounmap(rvu->fwdata);
854 		rvu->fwdata = NULL;
855 		return -EINVAL;
856 	}
857 	return 0;
858 fail:
859 	dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
860 	return -EIO;
861 }
862 
863 static void rvu_fwdata_exit(struct rvu *rvu)
864 {
865 	if (rvu->fwdata)
866 		iounmap(rvu->fwdata);
867 }
868 
869 static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
870 {
871 	struct rvu_hwinfo *hw = rvu->hw;
872 	struct rvu_block *block;
873 	int blkid;
874 	u64 cfg;
875 
876 	/* Init NIX LF's bitmap */
877 	block = &hw->block[blkaddr];
878 	if (!block->implemented)
879 		return 0;
880 	blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
881 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
882 	block->lf.max = cfg & 0xFFF;
883 	block->addr = blkaddr;
884 	block->type = BLKTYPE_NIX;
885 	block->lfshift = 8;
886 	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
887 	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
888 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
889 	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
890 	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
891 	block->lfreset_reg = NIX_AF_LF_RST;
892 	block->rvu = rvu;
893 	sprintf(block->name, "NIX%d", blkid);
894 	rvu->nix_blkaddr[blkid] = blkaddr;
895 	return rvu_alloc_bitmap(&block->lf);
896 }
897 
898 static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
899 {
900 	struct rvu_hwinfo *hw = rvu->hw;
901 	struct rvu_block *block;
902 	int blkid;
903 	u64 cfg;
904 
905 	/* Init CPT LF's bitmap */
906 	block = &hw->block[blkaddr];
907 	if (!block->implemented)
908 		return 0;
909 	blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
910 	cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
911 	block->lf.max = cfg & 0xFF;
912 	block->addr = blkaddr;
913 	block->type = BLKTYPE_CPT;
914 	block->multislot = true;
915 	block->lfshift = 3;
916 	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
917 	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
918 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
919 	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
920 	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
921 	block->lfreset_reg = CPT_AF_LF_RST;
922 	block->rvu = rvu;
923 	sprintf(block->name, "CPT%d", blkid);
924 	return rvu_alloc_bitmap(&block->lf);
925 }
926 
927 static void rvu_get_lbk_bufsize(struct rvu *rvu)
928 {
929 	struct pci_dev *pdev = NULL;
930 	void __iomem *base;
931 	u64 lbk_const;
932 
933 	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
934 			      PCI_DEVID_OCTEONTX2_LBK, pdev);
935 	if (!pdev)
936 		return;
937 
938 	base = pci_ioremap_bar(pdev, 0);
939 	if (!base)
940 		goto err_put;
941 
942 	lbk_const = readq(base + LBK_CONST);
943 
944 	/* cache fifo size */
945 	rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);
946 
947 	iounmap(base);
948 err_put:
949 	pci_dev_put(pdev);
950 }
951 
952 static int rvu_setup_hw_resources(struct rvu *rvu)
953 {
954 	struct rvu_hwinfo *hw = rvu->hw;
955 	struct rvu_block *block;
956 	int blkid, err;
957 	u64 cfg;
958 
959 	/* Get HW supported max RVU PF & VF count */
960 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
961 	hw->total_pfs = (cfg >> 32) & 0xFF;
962 	hw->total_vfs = (cfg >> 20) & 0xFFF;
963 	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
964 
965 	if (!is_rvu_otx2(rvu))
966 		rvu_apr_block_cn10k_init(rvu);
967 
968 	/* Init NPA LF's bitmap */
969 	block = &hw->block[BLKADDR_NPA];
970 	if (!block->implemented)
971 		goto nix;
972 	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
973 	block->lf.max = (cfg >> 16) & 0xFFF;
974 	block->addr = BLKADDR_NPA;
975 	block->type = BLKTYPE_NPA;
976 	block->lfshift = 8;
977 	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
978 	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
979 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
980 	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
981 	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
982 	block->lfreset_reg = NPA_AF_LF_RST;
983 	block->rvu = rvu;
984 	sprintf(block->name, "NPA");
985 	err = rvu_alloc_bitmap(&block->lf);
986 	if (err) {
987 		dev_err(rvu->dev,
988 			"%s: Failed to allocate NPA LF bitmap\n", __func__);
989 		return err;
990 	}
991 
992 nix:
993 	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
994 	if (err) {
995 		dev_err(rvu->dev,
996 			"%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
997 		return err;
998 	}
999 
1000 	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
1001 	if (err) {
1002 		dev_err(rvu->dev,
1003 			"%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
1004 		return err;
1005 	}
1006 
1007 	/* Init SSO group's bitmap */
1008 	block = &hw->block[BLKADDR_SSO];
1009 	if (!block->implemented)
1010 		goto ssow;
1011 	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
1012 	block->lf.max = cfg & 0xFFFF;
1013 	block->addr = BLKADDR_SSO;
1014 	block->type = BLKTYPE_SSO;
1015 	block->multislot = true;
1016 	block->lfshift = 3;
1017 	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
1018 	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
1019 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
1020 	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
1021 	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
1022 	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
1023 	block->rvu = rvu;
1024 	sprintf(block->name, "SSO GROUP");
1025 	err = rvu_alloc_bitmap(&block->lf);
1026 	if (err) {
1027 		dev_err(rvu->dev,
1028 			"%s: Failed to allocate SSO LF bitmap\n", __func__);
1029 		return err;
1030 	}
1031 
1032 ssow:
1033 	/* Init SSO workslot's bitmap */
1034 	block = &hw->block[BLKADDR_SSOW];
1035 	if (!block->implemented)
1036 		goto tim;
1037 	block->lf.max = (cfg >> 56) & 0xFF;
1038 	block->addr = BLKADDR_SSOW;
1039 	block->type = BLKTYPE_SSOW;
1040 	block->multislot = true;
1041 	block->lfshift = 3;
1042 	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
1043 	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
1044 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
1045 	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
1046 	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
1047 	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
1048 	block->rvu = rvu;
1049 	sprintf(block->name, "SSOWS");
1050 	err = rvu_alloc_bitmap(&block->lf);
1051 	if (err) {
1052 		dev_err(rvu->dev,
1053 			"%s: Failed to allocate SSOW LF bitmap\n", __func__);
1054 		return err;
1055 	}
1056 
1057 tim:
1058 	/* Init TIM LF's bitmap */
1059 	block = &hw->block[BLKADDR_TIM];
1060 	if (!block->implemented)
1061 		goto cpt;
1062 	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
1063 	block->lf.max = cfg & 0xFFFF;
1064 	block->addr = BLKADDR_TIM;
1065 	block->type = BLKTYPE_TIM;
1066 	block->multislot = true;
1067 	block->lfshift = 3;
1068 	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
1069 	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
1070 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
1071 	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
1072 	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
1073 	block->lfreset_reg = TIM_AF_LF_RST;
1074 	block->rvu = rvu;
1075 	sprintf(block->name, "TIM");
1076 	err = rvu_alloc_bitmap(&block->lf);
1077 	if (err) {
1078 		dev_err(rvu->dev,
1079 			"%s: Failed to allocate TIM LF bitmap\n", __func__);
1080 		return err;
1081 	}
1082 
1083 cpt:
1084 	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
1085 	if (err) {
1086 		dev_err(rvu->dev,
1087 			"%s: Failed to allocate CPT0 LF bitmap\n", __func__);
1088 		return err;
1089 	}
1090 	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
1091 	if (err) {
1092 		dev_err(rvu->dev,
1093 			"%s: Failed to allocate CPT1 LF bitmap\n", __func__);
1094 		return err;
1095 	}
1096 
1097 	/* Allocate memory for PFVF data */
1098 	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
1099 			       sizeof(struct rvu_pfvf), GFP_KERNEL);
1100 	if (!rvu->pf) {
1101 		dev_err(rvu->dev,
1102 			"%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
1103 		return -ENOMEM;
1104 	}
1105 
1106 	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
1107 				 sizeof(struct rvu_pfvf), GFP_KERNEL);
1108 	if (!rvu->hwvf) {
1109 		dev_err(rvu->dev,
1110 			"%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
1111 		return -ENOMEM;
1112 	}
1113 
1114 	mutex_init(&rvu->rsrc_lock);
1115 
1116 	rvu_fwdata_init(rvu);
1117 
1118 	err = rvu_setup_msix_resources(rvu);
1119 	if (err) {
1120 		dev_err(rvu->dev,
1121 			"%s: Failed to setup MSIX resources\n", __func__);
1122 		return err;
1123 	}
1124 
1125 	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
1126 		block = &hw->block[blkid];
1127 		if (!block->lf.bmap)
1128 			continue;
1129 
1130 		/* Allocate memory for block LF/slot to pcifunc mapping info */
1131 		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
1132 					     sizeof(u16), GFP_KERNEL);
1133 		if (!block->fn_map) {
1134 			err = -ENOMEM;
1135 			goto msix_err;
1136 		}
1137 
1138 		/* Scan all blocks to check if low level firmware has
1139 		 * already provisioned any of the resources to a PF/VF.
1140 		 */
1141 		rvu_scan_block(rvu, block);
1142 	}
1143 
1144 	err = rvu_set_channels_base(rvu);
1145 	if (err)
1146 		goto msix_err;
1147 
1148 	err = rvu_npc_init(rvu);
1149 	if (err) {
1150 		dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
1151 		goto npc_err;
1152 	}
1153 
1154 	err = rvu_cgx_init(rvu);
1155 	if (err) {
1156 		dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
1157 		goto cgx_err;
1158 	}
1159 
1160 	err = rvu_npc_exact_init(rvu);
1161 	if (err) {
1162 		dev_err(rvu->dev, "failed to initialize exact match table\n");
1163 		return err;
1164 	}
1165 
1166 	/* Assign MACs for CGX mapped functions */
1167 	rvu_setup_pfvf_macaddress(rvu);
1168 
1169 	err = rvu_npa_init(rvu);
1170 	if (err) {
1171 		dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
1172 		goto npa_err;
1173 	}
1174 
1175 	rvu_get_lbk_bufsize(rvu);
1176 
1177 	err = rvu_nix_init(rvu);
1178 	if (err) {
1179 		dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
1180 		goto nix_err;
1181 	}
1182 
1183 	err = rvu_sdp_init(rvu);
1184 	if (err) {
1185 		dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
1186 		goto nix_err;
1187 	}
1188 
1189 	rvu_program_channels(rvu);
1190 	cgx_start_linkup(rvu);
1191 
1192 	rvu_block_bcast_xon(rvu, BLKADDR_NIX0);
1193 	rvu_block_bcast_xon(rvu, BLKADDR_NIX1);
1194 
1195 	err = rvu_mcs_init(rvu);
1196 	if (err) {
1197 		dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
1198 		goto nix_err;
1199 	}
1200 
1201 	err = rvu_cpt_init(rvu);
1202 	if (err) {
1203 		dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
1204 		goto mcs_err;
1205 	}
1206 
1207 	return 0;
1208 
1209 mcs_err:
1210 	rvu_mcs_exit(rvu);
1211 nix_err:
1212 	rvu_nix_freemem(rvu);
1213 npa_err:
1214 	rvu_npa_freemem(rvu);
1215 cgx_err:
1216 	rvu_cgx_exit(rvu);
1217 npc_err:
1218 	rvu_npc_freemem(rvu);
1219 	rvu_fwdata_exit(rvu);
1220 msix_err:
1221 	rvu_reset_msix(rvu);
1222 	return err;
1223 }
1224 
1225 /* NPA and NIX admin queue APIs */
1226 void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
1227 {
1228 	if (!aq)
1229 		return;
1230 
1231 	qmem_free(rvu->dev, aq->inst);
1232 	qmem_free(rvu->dev, aq->res);
1233 	devm_kfree(rvu->dev, aq);
1234 }
1235 
1236 int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
1237 		 int qsize, int inst_size, int res_size)
1238 {
1239 	struct admin_queue *aq;
1240 	int err;
1241 
1242 	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
1243 	if (!*ad_queue)
1244 		return -ENOMEM;
1245 	aq = *ad_queue;
1246 
1247 	/* Alloc memory for instructions i.e AQ */
1248 	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
1249 	if (err) {
1250 		devm_kfree(rvu->dev, aq);
1251 		return err;
1252 	}
1253 
1254 	/* Alloc memory for results */
1255 	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
1256 	if (err) {
1257 		rvu_aq_free(rvu, aq);
1258 		return err;
1259 	}
1260 
1261 	spin_lock_init(&aq->lock);
1262 	return 0;
1263 }
1264 
1265 int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
1266 			   struct ready_msg_rsp *rsp)
1267 {
1268 	if (rvu->fwdata) {
1269 		rsp->rclk_freq = rvu->fwdata->rclk;
1270 		rsp->sclk_freq = rvu->fwdata->sclk;
1271 	}
1272 	return 0;
1273 }
1274 
1275 /* Get current count of a RVU block's LF/slots
1276  * provisioned to a given RVU func.
1277  */
1278 u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
1279 {
1280 	switch (blkaddr) {
1281 	case BLKADDR_NPA:
1282 		return pfvf->npalf ? 1 : 0;
1283 	case BLKADDR_NIX0:
1284 	case BLKADDR_NIX1:
1285 		return pfvf->nixlf ? 1 : 0;
1286 	case BLKADDR_SSO:
1287 		return pfvf->sso;
1288 	case BLKADDR_SSOW:
1289 		return pfvf->ssow;
1290 	case BLKADDR_TIM:
1291 		return pfvf->timlfs;
1292 	case BLKADDR_CPT0:
1293 		return pfvf->cptlfs;
1294 	case BLKADDR_CPT1:
1295 		return pfvf->cpt1_lfs;
1296 	}
1297 	return 0;
1298 }
1299 
1300 /* Return true if LFs of block type are attached to pcifunc */
1301 static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
1302 {
1303 	switch (blktype) {
1304 	case BLKTYPE_NPA:
1305 		return pfvf->npalf ? 1 : 0;
1306 	case BLKTYPE_NIX:
1307 		return pfvf->nixlf ? 1 : 0;
1308 	case BLKTYPE_SSO:
1309 		return !!pfvf->sso;
1310 	case BLKTYPE_SSOW:
1311 		return !!pfvf->ssow;
1312 	case BLKTYPE_TIM:
1313 		return !!pfvf->timlfs;
1314 	case BLKTYPE_CPT:
1315 		return pfvf->cptlfs || pfvf->cpt1_lfs;
1316 	}
1317 
1318 	return false;
1319 }
1320 
1321 bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
1322 {
1323 	struct rvu_pfvf *pfvf;
1324 
1325 	if (!is_pf_func_valid(rvu, pcifunc))
1326 		return false;
1327 
1328 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1329 
1330 	/* Check if this PFFUNC has a LF of type blktype attached */
1331 	if (!is_blktype_attached(pfvf, blktype))
1332 		return false;
1333 
1334 	return true;
1335 }
1336 
1337 static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
1338 			   int pcifunc, int slot)
1339 {
1340 	u64 val;
1341 
1342 	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
1343 	rvu_write64(rvu, block->addr, block->lookup_reg, val);
1344 	/* Wait for the lookup to finish */
1345 	/* TODO: put some timeout here */
1346 	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
1347 		;
1348 
1349 	val = rvu_read64(rvu, block->addr, block->lookup_reg);
1350 
1351 	/* Check LF valid bit */
1352 	if (!(val & (1ULL << 12)))
1353 		return -1;
1354 
1355 	return (val & 0xFFF);
1356 }
1357 
1358 int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
1359 			      u16 global_slot, u16 *slot_in_block)
1360 {
1361 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1362 	int numlfs, total_lfs = 0, nr_blocks = 0;
1363 	int i, num_blkaddr[BLK_COUNT] = { 0 };
1364 	struct rvu_block *block;
1365 	int blkaddr;
1366 	u16 start_slot;
1367 
1368 	if (!is_blktype_attached(pfvf, blktype))
1369 		return -ENODEV;
1370 
1371 	/* Get all the block addresses from which LFs are attached to
1372 	 * the given pcifunc in num_blkaddr[].
1373 	 */
1374 	for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
1375 		block = &rvu->hw->block[blkaddr];
1376 		if (block->type != blktype)
1377 			continue;
1378 		if (!is_block_implemented(rvu->hw, blkaddr))
1379 			continue;
1380 
1381 		numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
1382 		if (numlfs) {
1383 			total_lfs += numlfs;
1384 			num_blkaddr[nr_blocks] = blkaddr;
1385 			nr_blocks++;
1386 		}
1387 	}
1388 
1389 	if (global_slot >= total_lfs)
1390 		return -ENODEV;
1391 
1392 	/* Based on the given global slot number retrieve the
1393 	 * correct block address out of all attached block
1394 	 * addresses and slot number in that block.
1395 	 */
1396 	total_lfs = 0;
1397 	blkaddr = -ENODEV;
1398 	for (i = 0; i < nr_blocks; i++) {
1399 		numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
1400 		total_lfs += numlfs;
1401 		if (global_slot < total_lfs) {
1402 			blkaddr = num_blkaddr[i];
1403 			start_slot = total_lfs - numlfs;
1404 			*slot_in_block = global_slot - start_slot;
1405 			break;
1406 		}
1407 	}
1408 
1409 	return blkaddr;
1410 }
1411 
1412 static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
1413 {
1414 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1415 	struct rvu_hwinfo *hw = rvu->hw;
1416 	struct rvu_block *block;
1417 	int slot, lf, num_lfs;
1418 	int blkaddr;
1419 
1420 	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
1421 	if (blkaddr < 0)
1422 		return;
1423 
1424 	block = &hw->block[blkaddr];
1425 
1426 	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1427 	if (!num_lfs)
1428 		return;
1429 
1430 	for (slot = 0; slot < num_lfs; slot++) {
1431 		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
1432 		if (lf < 0) /* This should never happen */
1433 			continue;
1434 
1435 		if (blktype == BLKTYPE_NIX) {
1436 			rvu_nix_reset_mac(pfvf, pcifunc);
1437 			rvu_npc_clear_ucast_entry(rvu, pcifunc, lf);
1438 		}
1439 		/* Disable the LF */
1440 		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
1441 			    (lf << block->lfshift), 0x00ULL);
1442 
1443 		/* Update SW maintained mapping info as well */
1444 		rvu_update_rsrc_map(rvu, pfvf, block,
1445 				    pcifunc, lf, false);
1446 
1447 		/* Free the resource */
1448 		rvu_free_rsrc(&block->lf, lf);
1449 
1450 		/* Clear MSIX vector offset for this LF */
1451 		rvu_clear_msix_offset(rvu, pfvf, block, lf);
1452 	}
1453 }
1454 
1455 static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
1456 			    u16 pcifunc)
1457 {
1458 	struct rvu_hwinfo *hw = rvu->hw;
1459 	bool detach_all = true;
1460 	struct rvu_block *block;
1461 	int blkid;
1462 
1463 	mutex_lock(&rvu->rsrc_lock);
1464 
1465 	/* Check for partial resource detach */
1466 	if (detach && detach->partial)
1467 		detach_all = false;
1468 
1469 	/* Check for RVU block's LFs attached to this func,
1470 	 * if so, detach them.
1471 	 */
1472 	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
1473 		block = &hw->block[blkid];
1474 		if (!block->lf.bmap)
1475 			continue;
1476 		if (!detach_all && detach) {
1477 			if (blkid == BLKADDR_NPA && !detach->npalf)
1478 				continue;
1479 			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
1480 				continue;
1481 			else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
1482 				continue;
1483 			else if ((blkid == BLKADDR_SSO) && !detach->sso)
1484 				continue;
1485 			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
1486 				continue;
1487 			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
1488 				continue;
1489 			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
1490 				continue;
1491 			else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
1492 				continue;
1493 		}
1494 
1495 		if (detach_all ||
1496 		    (detach && (blkid == BLKADDR_NIX0 ||
1497 				blkid == BLKADDR_NIX1) &&
1498 		     detach->nixlf))
1499 			npc_cn20k_dft_rules_free(rvu, pcifunc);
1500 
1501 		rvu_detach_block(rvu, pcifunc, block->type);
1502 	}
1503 
1504 	mutex_unlock(&rvu->rsrc_lock);
1505 	return 0;
1506 }
1507 
1508 int rvu_mbox_handler_detach_resources(struct rvu *rvu,
1509 				      struct rsrc_detach *detach,
1510 				      struct msg_rsp *rsp)
1511 {
1512 	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
1513 }
1514 
1515 int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
1516 {
1517 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1518 	int blkaddr = BLKADDR_NIX0, vf;
1519 	struct rvu_pfvf *pf;
1520 
1521 	pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1522 
1523 	/* All CGX mapped PFs are set with assigned NIX block during init */
1524 	if (is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) {
1525 		blkaddr = pf->nix_blkaddr;
1526 	} else if (is_lbk_vf(rvu, pcifunc)) {
1527 		vf = pcifunc - 1;
1528 		/* Assign NIX based on VF number. All even numbered VFs get
1529 		 * NIX0 and odd numbered gets NIX1
1530 		 */
1531 		blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
1532 		/* NIX1 is not present on all silicons */
1533 		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1534 			blkaddr = BLKADDR_NIX0;
1535 	}
1536 
1537 	/* if SDP1 then the blkaddr is NIX1 */
1538 	if (is_sdp_pfvf(rvu, pcifunc) && pf->sdp_info->node_id == 1)
1539 		blkaddr = BLKADDR_NIX1;
1540 
1541 	switch (blkaddr) {
1542 	case BLKADDR_NIX1:
1543 		pfvf->nix_blkaddr = BLKADDR_NIX1;
1544 		pfvf->nix_rx_intf = NIX_INTFX_RX(1);
1545 		pfvf->nix_tx_intf = NIX_INTFX_TX(1);
1546 		break;
1547 	case BLKADDR_NIX0:
1548 	default:
1549 		pfvf->nix_blkaddr = BLKADDR_NIX0;
1550 		pfvf->nix_rx_intf = NIX_INTFX_RX(0);
1551 		pfvf->nix_tx_intf = NIX_INTFX_TX(0);
1552 		break;
1553 	}
1554 
1555 	return pfvf->nix_blkaddr;
1556 }
1557 
1558 static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
1559 				  u16 pcifunc, struct rsrc_attach *attach)
1560 {
1561 	int blkaddr;
1562 
1563 	switch (blktype) {
1564 	case BLKTYPE_NIX:
1565 		blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
1566 		break;
1567 	case BLKTYPE_CPT:
1568 		if (attach->hdr.ver < RVU_MULTI_BLK_VER)
1569 			return rvu_get_blkaddr(rvu, blktype, 0);
1570 		blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
1571 			  BLKADDR_CPT0;
1572 		if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
1573 			return -ENODEV;
1574 		break;
1575 	default:
1576 		return rvu_get_blkaddr(rvu, blktype, 0);
1577 	}
1578 
1579 	if (is_block_implemented(rvu->hw, blkaddr))
1580 		return blkaddr;
1581 
1582 	return -ENODEV;
1583 }
1584 
1585 static int rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
1586 			    int num_lfs, struct rsrc_attach *attach)
1587 {
1588 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1589 	struct rvu_hwinfo *hw = rvu->hw;
1590 	struct rvu_block *block;
1591 	int slot, lf;
1592 	int blkaddr;
1593 	u64 cfg;
1594 
1595 	if (!num_lfs)
1596 		return -EINVAL;
1597 
1598 	blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
1599 	if (blkaddr < 0)
1600 		return -EFAULT;
1601 
1602 	block = &hw->block[blkaddr];
1603 	if (!block->lf.bmap)
1604 		return -ESRCH;
1605 
1606 	for (slot = 0; slot < num_lfs; slot++) {
1607 		/* Allocate the resource */
1608 		lf = rvu_alloc_rsrc(&block->lf);
1609 		if (lf < 0)
1610 			return -EFAULT;
1611 
1612 		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
1613 		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
1614 			    (lf << block->lfshift), cfg);
1615 		rvu_update_rsrc_map(rvu, pfvf, block,
1616 				    pcifunc, lf, true);
1617 
1618 		/* Set start MSIX vector for this LF within this PF/VF */
1619 		rvu_set_msix_offset(rvu, pfvf, block, lf);
1620 	}
1621 
1622 	return 0;
1623 }
1624 
1625 static int rvu_check_rsrc_availability(struct rvu *rvu,
1626 				       struct rsrc_attach *req, u16 pcifunc)
1627 {
1628 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1629 	int free_lfs, mappedlfs, blkaddr;
1630 	struct rvu_hwinfo *hw = rvu->hw;
1631 	struct rvu_block *block;
1632 
1633 	/* Only one NPA LF can be attached */
1634 	if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
1635 		block = &hw->block[BLKADDR_NPA];
1636 		free_lfs = rvu_rsrc_free_count(&block->lf);
1637 		if (!free_lfs)
1638 			goto fail;
1639 	} else if (req->npalf) {
1640 		dev_err(&rvu->pdev->dev,
1641 			"Func 0x%x: Invalid req, already has NPA\n",
1642 			 pcifunc);
1643 		return -EINVAL;
1644 	}
1645 
1646 	/* Only one NIX LF can be attached */
1647 	if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
1648 		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
1649 						 pcifunc, req);
1650 		if (blkaddr < 0)
1651 			return blkaddr;
1652 		block = &hw->block[blkaddr];
1653 		free_lfs = rvu_rsrc_free_count(&block->lf);
1654 		if (!free_lfs)
1655 			goto fail;
1656 	} else if (req->nixlf) {
1657 		dev_err(&rvu->pdev->dev,
1658 			"Func 0x%x: Invalid req, already has NIX\n",
1659 			pcifunc);
1660 		return -EINVAL;
1661 	}
1662 
1663 	if (req->sso) {
1664 		block = &hw->block[BLKADDR_SSO];
1665 		/* Is request within limits ? */
1666 		if (req->sso > block->lf.max) {
1667 			dev_err(&rvu->pdev->dev,
1668 				"Func 0x%x: Invalid SSO req, %d > max %d\n",
1669 				 pcifunc, req->sso, block->lf.max);
1670 			return -EINVAL;
1671 		}
1672 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1673 		free_lfs = rvu_rsrc_free_count(&block->lf);
1674 		/* Check if additional resources are available */
1675 		if (req->sso > mappedlfs &&
1676 		    ((req->sso - mappedlfs) > free_lfs))
1677 			goto fail;
1678 	}
1679 
1680 	if (req->ssow) {
1681 		block = &hw->block[BLKADDR_SSOW];
1682 		if (req->ssow > block->lf.max) {
1683 			dev_err(&rvu->pdev->dev,
1684 				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
1685 				 pcifunc, req->ssow, block->lf.max);
1686 			return -EINVAL;
1687 		}
1688 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1689 		free_lfs = rvu_rsrc_free_count(&block->lf);
1690 		if (req->ssow > mappedlfs &&
1691 		    ((req->ssow - mappedlfs) > free_lfs))
1692 			goto fail;
1693 	}
1694 
1695 	if (req->timlfs) {
1696 		block = &hw->block[BLKADDR_TIM];
1697 		if (req->timlfs > block->lf.max) {
1698 			dev_err(&rvu->pdev->dev,
1699 				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
1700 				 pcifunc, req->timlfs, block->lf.max);
1701 			return -EINVAL;
1702 		}
1703 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1704 		free_lfs = rvu_rsrc_free_count(&block->lf);
1705 		if (req->timlfs > mappedlfs &&
1706 		    ((req->timlfs - mappedlfs) > free_lfs))
1707 			goto fail;
1708 	}
1709 
1710 	if (req->cptlfs) {
1711 		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
1712 						 pcifunc, req);
1713 		if (blkaddr < 0)
1714 			return blkaddr;
1715 		block = &hw->block[blkaddr];
1716 		if (req->cptlfs > block->lf.max) {
1717 			dev_err(&rvu->pdev->dev,
1718 				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
1719 				 pcifunc, req->cptlfs, block->lf.max);
1720 			return -EINVAL;
1721 		}
1722 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1723 		free_lfs = rvu_rsrc_free_count(&block->lf);
1724 		if (req->cptlfs > mappedlfs &&
1725 		    ((req->cptlfs - mappedlfs) > free_lfs))
1726 			goto fail;
1727 	}
1728 
1729 	return 0;
1730 
1731 fail:
1732 	dev_info(rvu->dev, "Request for %s failed\n", block->name);
1733 	return -ENOSPC;
1734 }
1735 
1736 static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
1737 				       struct rsrc_attach *attach)
1738 {
1739 	int blkaddr, num_lfs;
1740 
1741 	blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
1742 					 attach->hdr.pcifunc, attach);
1743 	if (blkaddr < 0)
1744 		return false;
1745 
1746 	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
1747 					blkaddr);
1748 	/* Requester already has LFs from given block ? */
1749 	return !!num_lfs;
1750 }
1751 
1752 int rvu_mbox_handler_attach_resources(struct rvu *rvu,
1753 				      struct rsrc_attach *attach,
1754 				      struct msg_rsp *rsp)
1755 {
1756 	u16 pcifunc = attach->hdr.pcifunc;
1757 	int err;
1758 
1759 	/* If first request, detach all existing attached resources */
1760 	if (!attach->modify) {
1761 		err = rvu_detach_rsrcs(rvu, NULL, pcifunc);
1762 		if (err)
1763 			return err;
1764 	}
1765 
1766 	mutex_lock(&rvu->rsrc_lock);
1767 
1768 	/* Check if the request can be accommodated */
1769 	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
1770 	if (err)
1771 		goto fail1;
1772 
1773 	/* Now attach the requested resources */
1774 	if (attach->npalf) {
1775 		err = rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
1776 		if (err)
1777 			goto fail1;
1778 	}
1779 
1780 	if (attach->nixlf) {
1781 		err = rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
1782 		if (err)
1783 			goto fail2;
1784 
1785 		if (is_cn20k(rvu->pdev)) {
1786 			err = npc_cn20k_dft_rules_alloc(rvu, pcifunc);
1787 			if (err)
1788 				goto fail3;
1789 		}
1790 	}
1791 
1792 	if (attach->sso) {
1793 		/* RVU func doesn't know which exact LF or slot is attached
1794 		 * to it, it always sees as slot 0,1,2. So for a 'modify'
1795 		 * request, simply detach all existing attached LFs/slots
1796 		 * and attach a fresh.
1797 		 */
1798 		if (attach->modify)
1799 			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
1800 		err = rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
1801 				       attach->sso, attach);
1802 		if (err)
1803 			goto fail4;
1804 	}
1805 
1806 	if (attach->ssow) {
1807 		if (attach->modify)
1808 			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
1809 		err = rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
1810 				       attach->ssow, attach);
1811 		if (err)
1812 			goto fail5;
1813 	}
1814 
1815 	if (attach->timlfs) {
1816 		if (attach->modify)
1817 			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
1818 		err = rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
1819 				       attach->timlfs, attach);
1820 		if (err)
1821 			goto fail6;
1822 	}
1823 
1824 	if (attach->cptlfs) {
1825 		if (attach->modify &&
1826 		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
1827 			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
1828 		err = rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
1829 				       attach->cptlfs, attach);
1830 		if (err)
1831 			goto fail7;
1832 	}
1833 
1834 	mutex_unlock(&rvu->rsrc_lock);
1835 	return 0;
1836 
1837 fail7:
1838 	if (attach->timlfs)
1839 		rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
1840 
1841 fail6:
1842 	if (attach->ssow)
1843 		rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
1844 
1845 fail5:
1846 	if (attach->sso)
1847 		rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
1848 
1849 fail4:
1850 	if (is_cn20k(rvu->pdev))
1851 		npc_cn20k_dft_rules_free(rvu, pcifunc);
1852 
1853 fail3:
1854 	if (attach->nixlf)
1855 		rvu_detach_block(rvu, pcifunc, BLKTYPE_NIX);
1856 
1857 fail2:
1858 	if (attach->npalf)
1859 		rvu_detach_block(rvu, pcifunc, BLKTYPE_NPA);
1860 
1861 fail1:
1862 	mutex_unlock(&rvu->rsrc_lock);
1863 	return err;
1864 }
1865 
1866 static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1867 			       int blkaddr, int lf)
1868 {
1869 	u16 vec;
1870 
1871 	if (lf < 0)
1872 		return MSIX_VECTOR_INVALID;
1873 
1874 	for (vec = 0; vec < pfvf->msix.max; vec++) {
1875 		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
1876 			return vec;
1877 	}
1878 	return MSIX_VECTOR_INVALID;
1879 }
1880 
1881 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1882 				struct rvu_block *block, int lf)
1883 {
1884 	u16 nvecs, vec, offset;
1885 	u64 cfg;
1886 
1887 	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1888 			 (lf << block->lfshift));
1889 	nvecs = (cfg >> 12) & 0xFF;
1890 
1891 	/* Check and alloc MSIX vectors, must be contiguous */
1892 	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
1893 		return;
1894 
1895 	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
1896 
1897 	/* Config MSIX offset in LF */
1898 	rvu_write64(rvu, block->addr, block->msixcfg_reg |
1899 		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
1900 
1901 	/* Update the bitmap as well */
1902 	for (vec = 0; vec < nvecs; vec++)
1903 		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
1904 }
1905 
1906 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1907 				  struct rvu_block *block, int lf)
1908 {
1909 	u16 nvecs, vec, offset;
1910 	u64 cfg;
1911 
1912 	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1913 			 (lf << block->lfshift));
1914 	nvecs = (cfg >> 12) & 0xFF;
1915 
1916 	/* Clear MSIX offset in LF */
1917 	rvu_write64(rvu, block->addr, block->msixcfg_reg |
1918 		    (lf << block->lfshift), cfg & ~0x7FFULL);
1919 
1920 	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
1921 
1922 	/* Update the mapping */
1923 	for (vec = 0; vec < nvecs; vec++)
1924 		pfvf->msix_lfmap[offset + vec] = 0;
1925 
1926 	/* Free the same in MSIX bitmap */
1927 	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
1928 }
1929 
1930 int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
1931 				 struct msix_offset_rsp *rsp)
1932 {
1933 	struct rvu_hwinfo *hw = rvu->hw;
1934 	u16 pcifunc = req->hdr.pcifunc;
1935 	struct rvu_pfvf *pfvf;
1936 	int lf, slot, blkaddr;
1937 
1938 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1939 	if (!pfvf->msix.bmap)
1940 		return 0;
1941 
1942 	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
1943 	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
1944 	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
1945 
1946 	/* Get BLKADDR from which LFs are attached to pcifunc */
1947 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1948 	if (blkaddr < 0) {
1949 		rsp->nix_msixoff = MSIX_VECTOR_INVALID;
1950 	} else {
1951 		lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1952 		rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
1953 	}
1954 
1955 	rsp->sso = pfvf->sso;
1956 	for (slot = 0; slot < rsp->sso; slot++) {
1957 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
1958 		rsp->sso_msixoff[slot] =
1959 			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
1960 	}
1961 
1962 	rsp->ssow = pfvf->ssow;
1963 	for (slot = 0; slot < rsp->ssow; slot++) {
1964 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
1965 		rsp->ssow_msixoff[slot] =
1966 			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
1967 	}
1968 
1969 	rsp->timlfs = pfvf->timlfs;
1970 	for (slot = 0; slot < rsp->timlfs; slot++) {
1971 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
1972 		rsp->timlf_msixoff[slot] =
1973 			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
1974 	}
1975 
1976 	rsp->cptlfs = pfvf->cptlfs;
1977 	for (slot = 0; slot < rsp->cptlfs; slot++) {
1978 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
1979 		rsp->cptlf_msixoff[slot] =
1980 			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
1981 	}
1982 
1983 	rsp->cpt1_lfs = pfvf->cpt1_lfs;
1984 	for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
1985 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
1986 		rsp->cpt1_lf_msixoff[slot] =
1987 			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
1988 	}
1989 
1990 	return 0;
1991 }
1992 
1993 int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
1994 				   struct free_rsrcs_rsp *rsp)
1995 {
1996 	struct rvu_hwinfo *hw = rvu->hw;
1997 	struct rvu_block *block;
1998 	struct nix_txsch *txsch;
1999 	struct nix_hw *nix_hw;
2000 
2001 	mutex_lock(&rvu->rsrc_lock);
2002 
2003 	block = &hw->block[BLKADDR_NPA];
2004 	rsp->npa = rvu_rsrc_free_count(&block->lf);
2005 
2006 	block = &hw->block[BLKADDR_NIX0];
2007 	rsp->nix = rvu_rsrc_free_count(&block->lf);
2008 
2009 	block = &hw->block[BLKADDR_NIX1];
2010 	rsp->nix1 = rvu_rsrc_free_count(&block->lf);
2011 
2012 	block = &hw->block[BLKADDR_SSO];
2013 	rsp->sso = rvu_rsrc_free_count(&block->lf);
2014 
2015 	block = &hw->block[BLKADDR_SSOW];
2016 	rsp->ssow = rvu_rsrc_free_count(&block->lf);
2017 
2018 	block = &hw->block[BLKADDR_TIM];
2019 	rsp->tim = rvu_rsrc_free_count(&block->lf);
2020 
2021 	block = &hw->block[BLKADDR_CPT0];
2022 	rsp->cpt = rvu_rsrc_free_count(&block->lf);
2023 
2024 	block = &hw->block[BLKADDR_CPT1];
2025 	rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
2026 
2027 	if (rvu->hw->cap.nix_fixed_txschq_mapping) {
2028 		rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
2029 		rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
2030 		rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
2031 		rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
2032 		/* NIX1 */
2033 		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
2034 			goto out;
2035 		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
2036 		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
2037 		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
2038 		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
2039 	} else {
2040 		nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
2041 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2042 		rsp->schq[NIX_TXSCH_LVL_SMQ] =
2043 				rvu_rsrc_free_count(&txsch->schq);
2044 
2045 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
2046 		rsp->schq[NIX_TXSCH_LVL_TL4] =
2047 				rvu_rsrc_free_count(&txsch->schq);
2048 
2049 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
2050 		rsp->schq[NIX_TXSCH_LVL_TL3] =
2051 				rvu_rsrc_free_count(&txsch->schq);
2052 
2053 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
2054 		rsp->schq[NIX_TXSCH_LVL_TL2] =
2055 				rvu_rsrc_free_count(&txsch->schq);
2056 
2057 		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
2058 			goto out;
2059 
2060 		nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
2061 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2062 		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
2063 				rvu_rsrc_free_count(&txsch->schq);
2064 
2065 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
2066 		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
2067 				rvu_rsrc_free_count(&txsch->schq);
2068 
2069 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
2070 		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
2071 				rvu_rsrc_free_count(&txsch->schq);
2072 
2073 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
2074 		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
2075 				rvu_rsrc_free_count(&txsch->schq);
2076 	}
2077 
2078 	rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
2079 out:
2080 	rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
2081 	mutex_unlock(&rvu->rsrc_lock);
2082 
2083 	return 0;
2084 }
2085 
2086 int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
2087 			    struct msg_rsp *rsp)
2088 {
2089 	u16 pcifunc = req->hdr.pcifunc;
2090 	u16 vf, numvfs;
2091 	u64 cfg;
2092 
2093 	vf = pcifunc & RVU_PFVF_FUNC_MASK;
2094 	cfg = rvu_read64(rvu, BLKADDR_RVUM,
2095 			 RVU_PRIV_PFX_CFG(rvu_get_pf(rvu->pdev, pcifunc)));
2096 	numvfs = (cfg >> 12) & 0xFF;
2097 
2098 	if (vf && vf <= numvfs)
2099 		__rvu_flr_handler(rvu, pcifunc);
2100 	else
2101 		return RVU_INVALID_VF_ID;
2102 
2103 	return 0;
2104 }
2105 
2106 int rvu_ndc_sync(struct rvu *rvu, int lfblkaddr, int lfidx, u64 lfoffset)
2107 {
2108 	/* Sync cached info for this LF in NDC to LLC/DRAM */
2109 	rvu_write64(rvu, lfblkaddr, lfoffset, BIT_ULL(12) | lfidx);
2110 	return rvu_poll_reg(rvu, lfblkaddr, lfoffset, BIT_ULL(12), true);
2111 }
2112 
2113 int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
2114 				struct get_hw_cap_rsp *rsp)
2115 {
2116 	struct rvu_hwinfo *hw = rvu->hw;
2117 
2118 	rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
2119 	rsp->nix_shaping = hw->cap.nix_shaping;
2120 	rsp->npc_hash_extract = hw->cap.npc_hash_extract;
2121 
2122 	if (rvu->mcs_blk_cnt)
2123 		rsp->hw_caps = HW_CAP_MACSEC;
2124 
2125 	return 0;
2126 }
2127 
2128 int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
2129 				 struct msg_rsp *rsp)
2130 {
2131 	struct rvu_hwinfo *hw = rvu->hw;
2132 	u16 pcifunc = req->hdr.pcifunc;
2133 	struct rvu_pfvf *pfvf;
2134 	int blkaddr, nixlf;
2135 	u16 target;
2136 
2137 	/* Only PF can add VF permissions */
2138 	if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_lbk_vf(rvu, pcifunc))
2139 		return -EOPNOTSUPP;
2140 
2141 	target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
2142 	pfvf = rvu_get_pfvf(rvu, target);
2143 
2144 	if (req->flags & RESET_VF_PERM) {
2145 		pfvf->flags &= RVU_CLEAR_VF_PERM;
2146 	} else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
2147 		 (req->flags & VF_TRUSTED)) {
2148 		change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
2149 		/* disable multicast and promisc entries */
2150 		if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
2151 			blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
2152 			if (blkaddr < 0)
2153 				return 0;
2154 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2155 					   target, 0);
2156 			if (nixlf < 0)
2157 				return 0;
2158 			npc_enadis_default_mce_entry(rvu, target, nixlf,
2159 						     NIXLF_ALLMULTI_ENTRY,
2160 						     false);
2161 			npc_enadis_default_mce_entry(rvu, target, nixlf,
2162 						     NIXLF_PROMISC_ENTRY,
2163 						     false);
2164 		}
2165 	}
2166 
2167 	return 0;
2168 }
2169 
2170 int rvu_mbox_handler_ndc_sync_op(struct rvu *rvu,
2171 				 struct ndc_sync_op *req,
2172 				 struct msg_rsp *rsp)
2173 {
2174 	struct rvu_hwinfo *hw = rvu->hw;
2175 	u16 pcifunc = req->hdr.pcifunc;
2176 	int err, lfidx, lfblkaddr;
2177 
2178 	if (req->npa_lf_sync) {
2179 		/* Get NPA LF data */
2180 		lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
2181 		if (lfblkaddr < 0)
2182 			return NPA_AF_ERR_AF_LF_INVALID;
2183 
2184 		lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
2185 		if (lfidx < 0)
2186 			return NPA_AF_ERR_AF_LF_INVALID;
2187 
2188 		/* Sync NPA NDC */
2189 		err = rvu_ndc_sync(rvu, lfblkaddr,
2190 				   lfidx, NPA_AF_NDC_SYNC);
2191 		if (err)
2192 			dev_err(rvu->dev,
2193 				"NDC-NPA sync failed for LF %u\n", lfidx);
2194 	}
2195 
2196 	if (!req->nix_lf_tx_sync && !req->nix_lf_rx_sync)
2197 		return 0;
2198 
2199 	/* Get NIX LF data */
2200 	lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2201 	if (lfblkaddr < 0)
2202 		return NIX_AF_ERR_AF_LF_INVALID;
2203 
2204 	lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
2205 	if (lfidx < 0)
2206 		return NIX_AF_ERR_AF_LF_INVALID;
2207 
2208 	if (req->nix_lf_tx_sync) {
2209 		/* Sync NIX TX NDC */
2210 		err = rvu_ndc_sync(rvu, lfblkaddr,
2211 				   lfidx, NIX_AF_NDC_TX_SYNC);
2212 		if (err)
2213 			dev_err(rvu->dev,
2214 				"NDC-NIX-TX sync fail for LF %u\n", lfidx);
2215 	}
2216 
2217 	if (req->nix_lf_rx_sync) {
2218 		/* Sync NIX RX NDC */
2219 		err = rvu_ndc_sync(rvu, lfblkaddr,
2220 				   lfidx, NIX_AF_NDC_RX_SYNC);
2221 		if (err)
2222 			dev_err(rvu->dev,
2223 				"NDC-NIX-RX sync failed for LF %u\n", lfidx);
2224 	}
2225 
2226 	return 0;
2227 }
2228 
2229 static void rvu_notify_altaf(struct rvu *rvu, u16 pcifunc, u64 op)
2230 {
2231 	int pf, vf;
2232 
2233 	if (!rvu->fwdata)
2234 		return;
2235 
2236 	if (op == ALTAF_FLR) {
2237 		pf = rvu_get_pf(rvu->pdev, pcifunc);
2238 		set_bit(pf, rvu->fwdata->altaf_intr_info.flr_pf_bmap);
2239 		if (pcifunc & RVU_PFVF_FUNC_MASK) {
2240 			vf = pcifunc & RVU_PFVF_FUNC_MASK;
2241 			if (vf >= 128) {
2242 				WARN(1,
2243 				     "flr_vf_bmap size is 128 bits, vf=%u\n",
2244 				     vf);
2245 				return;
2246 			}
2247 
2248 			set_bit(vf, rvu->fwdata->altaf_intr_info.flr_vf_bmap);
2249 		}
2250 	}
2251 
2252 	rvu_write64(rvu, BLKADDR_NIX0, AF_BAR2_ALIASX(0, NIX_GINT_INT_W1S), op);
2253 	usleep_range(5000, 6000);
2254 }
2255 
2256 static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
2257 				struct mbox_msghdr *req)
2258 {
2259 	struct rvu *rvu = pci_get_drvdata(mbox->pdev);
2260 
2261 	/* Check if valid, if not reply with a invalid msg */
2262 	if (req->sig != OTX2_MBOX_REQ_SIG)
2263 		goto bad_message;
2264 
2265 	switch (req->id) {
2266 #define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
2267 	case _id: {							\
2268 		struct _rsp_type *rsp;					\
2269 		int err;						\
2270 									\
2271 		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
2272 			mbox, devid,					\
2273 			sizeof(struct _rsp_type));			\
2274 		/* some handlers should complete even if reply */	\
2275 		/* could not be allocated */				\
2276 		if (!rsp &&						\
2277 		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
2278 		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
2279 		    _id != MBOX_MSG_VF_FLR)				\
2280 			return -ENOMEM;					\
2281 		if (rsp) {						\
2282 			rsp->hdr.id = _id;				\
2283 			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
2284 			rsp->hdr.pcifunc = req->pcifunc;		\
2285 			rsp->hdr.rc = 0;				\
2286 		}							\
2287 									\
2288 		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
2289 						    (struct _req_type *)req, \
2290 						    rsp);		\
2291 		if (rsp && err)						\
2292 			rsp->hdr.rc = err;				\
2293 									\
2294 		trace_otx2_msg_process(mbox->pdev, _id, err, req->pcifunc); \
2295 		return rsp ? err : -ENOMEM;				\
2296 	}
2297 MBOX_MESSAGES
2298 #undef M
2299 
2300 bad_message:
2301 	default:
2302 		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
2303 		return -ENODEV;
2304 	}
2305 }
2306 
2307 static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
2308 {
2309 	struct rvu *rvu = mwork->rvu;
2310 	int offset, err, id, devid;
2311 	struct otx2_mbox_dev *mdev;
2312 	struct mbox_hdr *req_hdr;
2313 	struct mbox_msghdr *msg;
2314 	struct mbox_wq_info *mw;
2315 	struct otx2_mbox *mbox;
2316 
2317 	switch (type) {
2318 	case TYPE_AFPF:
2319 		mw = &rvu->afpf_wq_info;
2320 		break;
2321 	case TYPE_AFVF:
2322 		mw = &rvu->afvf_wq_info;
2323 		break;
2324 	default:
2325 		return;
2326 	}
2327 
2328 	devid = mwork - mw->mbox_wrk;
2329 	mbox = &mw->mbox;
2330 	mdev = &mbox->dev[devid];
2331 
2332 	/* Process received mbox messages */
2333 	req_hdr = mdev->mbase + mbox->rx_start;
2334 	if (mw->mbox_wrk[devid].num_msgs == 0)
2335 		return;
2336 
2337 	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
2338 
2339 	if (req_hdr->sig && rvu->altaf_ready &&
2340 	    !(is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))) {
2341 		req_hdr->opt_msg = mw->mbox_wrk[devid].num_msgs;
2342 		rvu_write64(rvu, BLKADDR_NIX0, RVU_AF_BAR2_SEL,
2343 			    RVU_AF_BAR2_PFID);
2344 		if (type == TYPE_AFPF)
2345 			rvu_write64(rvu, BLKADDR_NIX0,
2346 				    AF_BAR2_ALIASX(0, NIX_CINTX_INT_W1S(devid)),
2347 				    0x1);
2348 		else
2349 			rvu_write64(rvu, BLKADDR_NIX0,
2350 				    AF_BAR2_ALIASX(0, NIX_QINTX_CNT(devid)),
2351 				    0x1);
2352 		usleep_range(5000, 6000);
2353 		goto done;
2354 	}
2355 
2356 	for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
2357 		msg = mdev->mbase + offset;
2358 
2359 		/* Set which PF/VF sent this message based on mbox IRQ */
2360 		switch (type) {
2361 		case TYPE_AFPF:
2362 			msg->pcifunc &= rvu_pcifunc_pf_mask(rvu->pdev);
2363 			msg->pcifunc |= rvu_make_pcifunc(rvu->pdev, devid, 0);
2364 			break;
2365 		case TYPE_AFVF:
2366 			msg->pcifunc &=
2367 				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
2368 			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
2369 			break;
2370 		}
2371 
2372 		err = rvu_process_mbox_msg(mbox, devid, msg);
2373 		if (!err) {
2374 			offset = mbox->rx_start + msg->next_msgoff;
2375 			continue;
2376 		}
2377 
2378 		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
2379 			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
2380 				 err, otx2_mbox_id2name(msg->id),
2381 				 msg->id, rvu_get_pf(rvu->pdev, msg->pcifunc),
2382 				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2383 		else
2384 			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
2385 				 err, otx2_mbox_id2name(msg->id),
2386 				 msg->id, devid);
2387 	}
2388 done:
2389 	mw->mbox_wrk[devid].num_msgs = 0;
2390 
2391 	if (!is_cn20k(mbox->pdev) && poll)
2392 		otx2_mbox_wait_for_zero(mbox, devid);
2393 
2394 	/* Send mbox responses to VF/PF */
2395 	otx2_mbox_msg_send(mbox, devid);
2396 }
2397 
2398 static inline void rvu_afpf_mbox_handler(struct work_struct *work)
2399 {
2400 	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2401 	struct rvu *rvu = mwork->rvu;
2402 
2403 	mutex_lock(&rvu->mbox_lock);
2404 	__rvu_mbox_handler(mwork, TYPE_AFPF, true);
2405 	mutex_unlock(&rvu->mbox_lock);
2406 }
2407 
2408 static inline void rvu_afvf_mbox_handler(struct work_struct *work)
2409 {
2410 	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2411 
2412 	__rvu_mbox_handler(mwork, TYPE_AFVF, false);
2413 }
2414 
2415 static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
2416 {
2417 	struct rvu *rvu = mwork->rvu;
2418 	struct otx2_mbox_dev *mdev;
2419 	struct mbox_hdr *rsp_hdr;
2420 	struct mbox_msghdr *msg;
2421 	struct mbox_wq_info *mw;
2422 	struct otx2_mbox *mbox;
2423 	int offset, id, devid;
2424 
2425 	switch (type) {
2426 	case TYPE_AFPF:
2427 		mw = &rvu->afpf_wq_info;
2428 		break;
2429 	case TYPE_AFVF:
2430 		mw = &rvu->afvf_wq_info;
2431 		break;
2432 	default:
2433 		return;
2434 	}
2435 
2436 	devid = mwork - mw->mbox_wrk_up;
2437 	mbox = &mw->mbox_up;
2438 	mdev = &mbox->dev[devid];
2439 
2440 	rsp_hdr = mdev->mbase + mbox->rx_start;
2441 	if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
2442 		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
2443 		return;
2444 	}
2445 
2446 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
2447 
2448 	for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
2449 		msg = mdev->mbase + offset;
2450 
2451 		if (msg->id >= MBOX_MSG_MAX) {
2452 			dev_err(rvu->dev,
2453 				"Mbox msg with unknown ID 0x%x\n", msg->id);
2454 			goto end;
2455 		}
2456 
2457 		if (msg->sig != OTX2_MBOX_RSP_SIG) {
2458 			dev_err(rvu->dev,
2459 				"Mbox msg with wrong signature %x, ID 0x%x\n",
2460 				msg->sig, msg->id);
2461 			goto end;
2462 		}
2463 
2464 		switch (msg->id) {
2465 		case MBOX_MSG_CGX_LINK_EVENT:
2466 			break;
2467 		default:
2468 			if (msg->rc)
2469 				dev_err(rvu->dev,
2470 					"Mbox msg response has err %d, ID 0x%x\n",
2471 					msg->rc, msg->id);
2472 			break;
2473 		}
2474 end:
2475 		offset = mbox->rx_start + msg->next_msgoff;
2476 		mdev->msgs_acked++;
2477 	}
2478 	mw->mbox_wrk_up[devid].up_num_msgs = 0;
2479 
2480 	otx2_mbox_reset(mbox, devid);
2481 }
2482 
2483 static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
2484 {
2485 	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2486 
2487 	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
2488 }
2489 
2490 static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
2491 {
2492 	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2493 
2494 	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
2495 }
2496 
2497 static int rvu_get_mbox_regions(struct rvu *rvu, void __iomem **mbox_addr,
2498 				int num, int type, unsigned long *pf_bmap)
2499 {
2500 	struct rvu_hwinfo *hw = rvu->hw;
2501 	int region;
2502 	u64 bar4;
2503 
2504 	/* For cn20k platform AF mailbox region is allocated by software
2505 	 * and the corresponding IOVA is programmed in hardware unlike earlier
2506 	 * silicons where software uses the hardware region after ioremap.
2507 	 */
2508 	if (is_cn20k(rvu->pdev))
2509 		return cn20k_rvu_get_mbox_regions(rvu, (void *)mbox_addr,
2510 						  num, type, pf_bmap);
2511 
2512 	/* For cn10k platform VF mailbox regions of a PF follows after the
2513 	 * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from
2514 	 * RVU_PF_VF_BAR4_ADDR register.
2515 	 */
2516 	if (type == TYPE_AFVF) {
2517 		for (region = 0; region < num; region++) {
2518 			if (!test_bit(region, pf_bmap))
2519 				continue;
2520 
2521 			if (hw->cap.per_pf_mbox_regs) {
2522 				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2523 						  RVU_AF_PFX_BAR4_ADDR(0)) +
2524 						  MBOX_SIZE;
2525 				bar4 += region * MBOX_SIZE;
2526 			} else {
2527 				bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
2528 				bar4 += region * MBOX_SIZE;
2529 			}
2530 			mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE);
2531 			if (!mbox_addr[region])
2532 				goto error;
2533 		}
2534 		return 0;
2535 	}
2536 
2537 	/* For cn10k platform AF <-> PF mailbox region of a PF is read from per
2538 	 * PF registers. Whereas for Octeontx2 it is read from
2539 	 * RVU_AF_PF_BAR4_ADDR register.
2540 	 */
2541 	for (region = 0; region < num; region++) {
2542 		if (!test_bit(region, pf_bmap))
2543 			continue;
2544 
2545 		if (hw->cap.per_pf_mbox_regs) {
2546 			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2547 					  RVU_AF_PFX_BAR4_ADDR(region));
2548 		} else {
2549 			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2550 					  RVU_AF_PF_BAR4_ADDR);
2551 			bar4 += region * MBOX_SIZE;
2552 		}
2553 		mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE);
2554 		if (!mbox_addr[region])
2555 			goto error;
2556 	}
2557 	return 0;
2558 
2559 error:
2560 	while (region--)
2561 		iounmap(mbox_addr[region]);
2562 	return -ENOMEM;
2563 }
2564 
2565 static struct mbox_ops rvu_mbox_ops = {
2566 	.pf_intr_handler = rvu_mbox_pf_intr_handler,
2567 	.afvf_intr_handler = rvu_mbox_intr_handler,
2568 };
2569 
2570 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
2571 			 int type, int num,
2572 			 void (mbox_handler)(struct work_struct *),
2573 			 void (mbox_up_handler)(struct work_struct *))
2574 {
2575 	void __iomem **mbox_regions;
2576 	struct ng_rvu *ng_rvu_mbox;
2577 	int err, i, dir, dir_up;
2578 	void __iomem *reg_base;
2579 	struct rvu_work *mwork;
2580 	unsigned long *pf_bmap;
2581 	const char *name;
2582 	u64 cfg;
2583 
2584 	pf_bmap = bitmap_zalloc(num, GFP_KERNEL);
2585 	if (!pf_bmap)
2586 		return -ENOMEM;
2587 
2588 	ng_rvu_mbox = kzalloc_obj(*ng_rvu_mbox);
2589 	if (!ng_rvu_mbox) {
2590 		err = -ENOMEM;
2591 		goto free_bitmap;
2592 	}
2593 
2594 	/* RVU VFs */
2595 	if (type == TYPE_AFVF)
2596 		bitmap_set(pf_bmap, 0, num);
2597 
2598 	if (type == TYPE_AFPF) {
2599 		/* Mark enabled PFs in bitmap */
2600 		for (i = 0; i < num; i++) {
2601 			cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
2602 			if (cfg & BIT_ULL(20))
2603 				set_bit(i, pf_bmap);
2604 		}
2605 	}
2606 
2607 	rvu->ng_rvu = ng_rvu_mbox;
2608 
2609 	rvu->ng_rvu->rvu_mbox_ops = &rvu_mbox_ops;
2610 
2611 	err = cn20k_rvu_mbox_init(rvu, type, num);
2612 	if (err)
2613 		goto free_mem;
2614 
2615 	mutex_init(&rvu->mbox_lock);
2616 
2617 	mbox_regions = kcalloc(num, sizeof(void __iomem *), GFP_KERNEL);
2618 	if (!mbox_regions) {
2619 		err = -ENOMEM;
2620 		goto free_qmem;
2621 	}
2622 
2623 	switch (type) {
2624 	case TYPE_AFPF:
2625 		name = "rvu_afpf_mailbox";
2626 		dir = MBOX_DIR_AFPF;
2627 		dir_up = MBOX_DIR_AFPF_UP;
2628 		reg_base = rvu->afreg_base;
2629 		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap);
2630 		if (err)
2631 			goto free_regions;
2632 		break;
2633 	case TYPE_AFVF:
2634 		name = "rvu_afvf_mailbox";
2635 		dir = MBOX_DIR_PFVF;
2636 		dir_up = MBOX_DIR_PFVF_UP;
2637 		reg_base = rvu->pfreg_base;
2638 		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap);
2639 		if (err)
2640 			goto free_regions;
2641 		break;
2642 	default:
2643 		err = -EINVAL;
2644 		goto free_regions;
2645 	}
2646 
2647 	mw->mbox_wq = alloc_workqueue("%s",
2648 				      WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_PERCPU,
2649 				      num, name);
2650 	if (!mw->mbox_wq) {
2651 		err = -ENOMEM;
2652 		goto unmap_regions;
2653 	}
2654 
2655 	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
2656 				    sizeof(struct rvu_work), GFP_KERNEL);
2657 	if (!mw->mbox_wrk) {
2658 		err = -ENOMEM;
2659 		goto exit;
2660 	}
2661 
2662 	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
2663 				       sizeof(struct rvu_work), GFP_KERNEL);
2664 	if (!mw->mbox_wrk_up) {
2665 		err = -ENOMEM;
2666 		goto exit;
2667 	}
2668 
2669 	err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
2670 				     reg_base, dir, num, pf_bmap);
2671 	if (err)
2672 		goto exit;
2673 
2674 	err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
2675 				     reg_base, dir_up, num, pf_bmap);
2676 	if (err)
2677 		goto exit;
2678 
2679 	for (i = 0; i < num; i++) {
2680 		if (!test_bit(i, pf_bmap))
2681 			continue;
2682 
2683 		mwork = &mw->mbox_wrk[i];
2684 		mwork->rvu = rvu;
2685 		INIT_WORK(&mwork->work, mbox_handler);
2686 
2687 		mwork = &mw->mbox_wrk_up[i];
2688 		mwork->rvu = rvu;
2689 		INIT_WORK(&mwork->work, mbox_up_handler);
2690 	}
2691 
2692 	kfree(mbox_regions);
2693 	bitmap_free(pf_bmap);
2694 
2695 	return 0;
2696 
2697 exit:
2698 	destroy_workqueue(mw->mbox_wq);
2699 unmap_regions:
2700 	while (num--)
2701 		iounmap((void __iomem *)mbox_regions[num]);
2702 free_regions:
2703 	kfree(mbox_regions);
2704 free_qmem:
2705 	cn20k_free_mbox_memory(rvu);
2706 free_mem:
2707 	kfree(rvu->ng_rvu);
2708 free_bitmap:
2709 	bitmap_free(pf_bmap);
2710 	return err;
2711 }
2712 
2713 static void rvu_mbox_destroy(struct mbox_wq_info *mw)
2714 {
2715 	struct otx2_mbox *mbox = &mw->mbox;
2716 	struct otx2_mbox_dev *mdev;
2717 	int devid;
2718 
2719 	if (mw->mbox_wq) {
2720 		destroy_workqueue(mw->mbox_wq);
2721 		mw->mbox_wq = NULL;
2722 	}
2723 
2724 	for (devid = 0; devid < mbox->ndevs; devid++) {
2725 		mdev = &mbox->dev[devid];
2726 		if (mdev->hwbase)
2727 			iounmap((void __iomem *)mdev->hwbase);
2728 	}
2729 
2730 	otx2_mbox_destroy(&mw->mbox);
2731 	otx2_mbox_destroy(&mw->mbox_up);
2732 }
2733 
2734 void rvu_queue_work(struct mbox_wq_info *mw, int first,
2735 		    int mdevs, u64 intr)
2736 {
2737 	struct otx2_mbox_dev *mdev;
2738 	struct otx2_mbox *mbox;
2739 	struct mbox_hdr *hdr;
2740 	int i;
2741 
2742 	for (i = first; i < mdevs; i++) {
2743 		/* start from 0 */
2744 		if (!(intr & BIT_ULL(i - first)))
2745 			continue;
2746 
2747 		mbox = &mw->mbox;
2748 		mdev = &mbox->dev[i];
2749 		hdr = mdev->mbase + mbox->rx_start;
2750 
2751 		/*The hdr->num_msgs is set to zero immediately in the interrupt
2752 		 * handler to  ensure that it holds a correct value next time
2753 		 * when the interrupt handler is called.
2754 		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
2755 		 * pf>mbox.up_num_msgs holds the data for use in
2756 		 * pfaf_mbox_up_handler.
2757 		 */
2758 
2759 		if (hdr->num_msgs) {
2760 			mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
2761 			hdr->num_msgs = 0;
2762 			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
2763 		}
2764 		mbox = &mw->mbox_up;
2765 		mdev = &mbox->dev[i];
2766 		hdr = mdev->mbase + mbox->rx_start;
2767 		if (hdr->num_msgs) {
2768 			mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
2769 			hdr->num_msgs = 0;
2770 			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
2771 		}
2772 	}
2773 }
2774 
2775 static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq)
2776 {
2777 	struct rvu *rvu = (struct rvu *)rvu_irq;
2778 	u64 intr;
2779 
2780 	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
2781 	/* Clear interrupts */
2782 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
2783 	if (intr)
2784 		trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
2785 
2786 	/* Sync with mbox memory region */
2787 	rmb();
2788 
2789 	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
2790 
2791 	return IRQ_HANDLED;
2792 }
2793 
2794 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
2795 {
2796 	struct rvu *rvu = (struct rvu *)rvu_irq;
2797 	int vfs = rvu->vfs;
2798 	u64 intr;
2799 
2800 	/* Sync with mbox memory region */
2801 	rmb();
2802 
2803 	/* Handle VF interrupts */
2804 	if (vfs > 64) {
2805 		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
2806 		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
2807 
2808 		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
2809 		vfs = 64;
2810 	}
2811 
2812 	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
2813 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
2814 	if (intr)
2815 		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
2816 
2817 	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
2818 
2819 	return IRQ_HANDLED;
2820 }
2821 
2822 static void rvu_enable_mbox_intr(struct rvu *rvu)
2823 {
2824 	struct rvu_hwinfo *hw = rvu->hw;
2825 
2826 	if (is_cn20k(rvu->pdev)) {
2827 		cn20k_rvu_enable_mbox_intr(rvu);
2828 		return;
2829 	}
2830 
2831 	/* Clear spurious irqs, if any */
2832 	rvu_write64(rvu, BLKADDR_RVUM,
2833 		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
2834 
2835 	/* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */
2836 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
2837 		    INTR_MASK(hw->total_pfs) & ~1ULL);
2838 }
2839 
2840 static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
2841 {
2842 	struct rvu_block *block;
2843 	int slot, lf, num_lfs;
2844 	int err;
2845 
2846 	block = &rvu->hw->block[blkaddr];
2847 	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
2848 					block->addr);
2849 
2850 	if (block->addr == BLKADDR_TIM && rvu->altaf_ready) {
2851 		rvu_notify_altaf(rvu, pcifunc, ALTAF_FLR);
2852 		return;
2853 	}
2854 
2855 	if ((block->addr == BLKADDR_SSO || block->addr == BLKADDR_SSOW) &&
2856 	    rvu->altaf_ready)
2857 		return;
2858 
2859 	if (!num_lfs)
2860 		return;
2861 	for (slot = 0; slot < num_lfs; slot++) {
2862 		lf = rvu_get_lf(rvu, block, pcifunc, slot);
2863 		if (lf < 0)
2864 			continue;
2865 
2866 		/* Cleanup LF and reset it */
2867 		if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
2868 			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
2869 		else if (block->addr == BLKADDR_NPA)
2870 			rvu_npa_lf_teardown(rvu, pcifunc, lf);
2871 		else if ((block->addr == BLKADDR_CPT0) ||
2872 			 (block->addr == BLKADDR_CPT1))
2873 			rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
2874 					    slot);
2875 
2876 		err = rvu_lf_reset(rvu, block, lf);
2877 		if (err) {
2878 			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
2879 				block->addr, lf);
2880 		}
2881 	}
2882 }
2883 
2884 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
2885 {
2886 	if (rvu_npc_exact_has_match_table(rvu))
2887 		rvu_npc_exact_reset(rvu, pcifunc);
2888 
2889 	mutex_lock(&rvu->flr_lock);
2890 	/* Reset order should reflect inter-block dependencies:
2891 	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
2892 	 * 2. Flush and reset SSO/SSOW
2893 	 * 3. Cleanup pools (NPA)
2894 	 */
2895 
2896 	/* Free allocated BPIDs */
2897 	rvu_nix_flr_free_bpids(rvu, pcifunc);
2898 
2899 	/* Free multicast/mirror node associated with the 'pcifunc' */
2900 	rvu_nix_mcast_flr_free_entries(rvu, pcifunc);
2901 
2902 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
2903 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
2904 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
2905 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
2906 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
2907 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
2908 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
2909 	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
2910 	rvu_reset_lmt_map_tbl(rvu, pcifunc);
2911 	rvu_detach_rsrcs(rvu, NULL, pcifunc);
2912 	/* In scenarios where PF/VF drivers detach NIXLF without freeing MCAM
2913 	 * entries, check and free the MCAM entries explicitly to avoid leak.
2914 	 * Since LF is detached use LF number as -1.
2915 	 */
2916 	rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
2917 	rvu_mac_reset(rvu, pcifunc);
2918 
2919 	if (rvu->mcs_blk_cnt)
2920 		rvu_mcs_flr_handler(rvu, pcifunc);
2921 
2922 	mutex_unlock(&rvu->flr_lock);
2923 }
2924 
2925 static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
2926 {
2927 	int reg = 0;
2928 
2929 	/* pcifunc = 0(PF0) | (vf + 1) */
2930 	__rvu_flr_handler(rvu, vf + 1);
2931 
2932 	if (vf >= 64) {
2933 		reg = 1;
2934 		vf = vf - 64;
2935 	}
2936 
2937 	/* Signal FLR finish and enable IRQ */
2938 	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
2939 	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
2940 }
2941 
2942 static void rvu_flr_handler(struct work_struct *work)
2943 {
2944 	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
2945 	struct rvu *rvu = flrwork->rvu;
2946 	u16 pcifunc, numvfs, vf;
2947 	u64 cfg;
2948 	int pf;
2949 
2950 	pf = flrwork - rvu->flr_wrk;
2951 	if (pf >= rvu->hw->total_pfs) {
2952 		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
2953 		return;
2954 	}
2955 
2956 	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2957 	numvfs = (cfg >> 12) & 0xFF;
2958 	pcifunc  = rvu_make_pcifunc(rvu->pdev, pf, 0);
2959 
2960 	for (vf = 0; vf < numvfs; vf++)
2961 		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
2962 
2963 	__rvu_flr_handler(rvu, pcifunc);
2964 
2965 	/* Signal FLR finish */
2966 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
2967 
2968 	/* Enable interrupt */
2969 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,  BIT_ULL(pf));
2970 }
2971 
2972 static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
2973 {
2974 	int dev, vf, reg = 0;
2975 	u64 intr;
2976 
2977 	if (start_vf >= 64)
2978 		reg = 1;
2979 
2980 	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
2981 	if (!intr)
2982 		return;
2983 
2984 	for (vf = 0; vf < numvfs; vf++) {
2985 		if (!(intr & BIT_ULL(vf)))
2986 			continue;
2987 		/* Clear and disable the interrupt */
2988 		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
2989 		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
2990 
2991 		dev = vf + start_vf + rvu->hw->total_pfs;
2992 		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
2993 	}
2994 }
2995 
2996 static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
2997 {
2998 	struct rvu *rvu = (struct rvu *)rvu_irq;
2999 	u64 intr;
3000 	u8  pf;
3001 
3002 	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
3003 	if (!intr)
3004 		goto afvf_flr;
3005 
3006 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
3007 		if (intr & (1ULL << pf)) {
3008 			/* clear interrupt */
3009 			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
3010 				    BIT_ULL(pf));
3011 			/* Disable the interrupt */
3012 			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
3013 				    BIT_ULL(pf));
3014 			/* PF is already dead do only AF related operations */
3015 			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
3016 		}
3017 	}
3018 
3019 afvf_flr:
3020 	rvu_afvf_queue_flr_work(rvu, 0, 64);
3021 	if (rvu->vfs > 64)
3022 		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
3023 
3024 	return IRQ_HANDLED;
3025 }
3026 
3027 static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
3028 {
3029 	int vf;
3030 
3031 	/* Nothing to be done here other than clearing the
3032 	 * TRPEND bit.
3033 	 */
3034 	for (vf = 0; vf < 64; vf++) {
3035 		if (intr & (1ULL << vf)) {
3036 			/* clear the trpend due to ME(master enable) */
3037 			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
3038 			/* clear interrupt */
3039 			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
3040 		}
3041 	}
3042 }
3043 
3044 /* Handles ME interrupts from VFs of AF */
3045 static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
3046 {
3047 	struct rvu *rvu = (struct rvu *)rvu_irq;
3048 	int vfset;
3049 	u64 intr;
3050 
3051 	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
3052 
3053 	for (vfset = 0; vfset <= 1; vfset++) {
3054 		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
3055 		if (intr)
3056 			rvu_me_handle_vfset(rvu, vfset, intr);
3057 	}
3058 
3059 	return IRQ_HANDLED;
3060 }
3061 
3062 /* Handles ME interrupts from PFs */
3063 static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
3064 {
3065 	struct rvu *rvu = (struct rvu *)rvu_irq;
3066 	u64 intr;
3067 	u8  pf;
3068 
3069 	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
3070 
3071 	/* Nothing to be done here other than clearing the
3072 	 * TRPEND bit.
3073 	 */
3074 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
3075 		if (intr & (1ULL << pf)) {
3076 			/* clear the trpend due to ME(master enable) */
3077 			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
3078 				    BIT_ULL(pf));
3079 			/* clear interrupt */
3080 			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
3081 				    BIT_ULL(pf));
3082 		}
3083 	}
3084 
3085 	return IRQ_HANDLED;
3086 }
3087 
3088 static void rvu_unregister_interrupts(struct rvu *rvu)
3089 {
3090 	int irq;
3091 
3092 	rvu_cpt_unregister_interrupts(rvu);
3093 
3094 	if (!is_cn20k(rvu->pdev))
3095 		/* Disable the Mbox interrupt */
3096 		rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
3097 			    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
3098 	else
3099 		cn20k_rvu_unregister_interrupts(rvu);
3100 
3101 	/* Disable the PF FLR interrupt */
3102 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
3103 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
3104 
3105 	/* Disable the PF ME interrupt */
3106 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
3107 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
3108 
3109 	for (irq = 0; irq < rvu->num_vec; irq++) {
3110 		if (rvu->irq_allocated[irq]) {
3111 			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
3112 			rvu->irq_allocated[irq] = false;
3113 		}
3114 	}
3115 
3116 	pci_free_irq_vectors(rvu->pdev);
3117 	rvu->num_vec = 0;
3118 }
3119 
3120 static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
3121 {
3122 	struct rvu_pfvf *pfvf = &rvu->pf[0];
3123 	int offset;
3124 
3125 	pfvf = &rvu->pf[0];
3126 	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
3127 
3128 	/* Make sure there are enough MSIX vectors configured so that
3129 	 * VF interrupts can be handled. Offset equal to zero means
3130 	 * that PF vectors are not configured and overlapping AF vectors.
3131 	 */
3132 	if (is_cn20k(rvu->pdev))
3133 		return (pfvf->msix.max >= RVU_AF_CN20K_INT_VEC_CNT +
3134 			RVU_MBOX_PF_INT_VEC_CNT) && offset;
3135 
3136 	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
3137 	       offset;
3138 }
3139 
3140 static int rvu_register_interrupts(struct rvu *rvu)
3141 {
3142 	int i, ret, offset, pf_vec_start;
3143 
3144 	rvu->num_vec = pci_msix_vec_count(rvu->pdev);
3145 
3146 	rvu->irq_name = devm_kcalloc(rvu->dev, rvu->num_vec,
3147 				     NAME_SIZE, GFP_KERNEL);
3148 	if (!rvu->irq_name)
3149 		return -ENOMEM;
3150 
3151 	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
3152 					  sizeof(bool), GFP_KERNEL);
3153 	if (!rvu->irq_allocated)
3154 		return -ENOMEM;
3155 
3156 	/* Enable MSI-X */
3157 	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
3158 				    rvu->num_vec, PCI_IRQ_MSIX);
3159 	if (ret < 0) {
3160 		dev_err(rvu->dev,
3161 			"RVUAF: Request for %d msix vectors failed, ret %d\n",
3162 			rvu->num_vec, ret);
3163 		return ret;
3164 	}
3165 
3166 	if (!is_cn20k(rvu->pdev)) {
3167 		/* Register mailbox interrupt handler */
3168 		sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE],
3169 			"RVUAF Mbox");
3170 		ret = request_irq(pci_irq_vector
3171 				  (rvu->pdev, RVU_AF_INT_VEC_MBOX),
3172 				  rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0,
3173 				  &rvu->irq_name[RVU_AF_INT_VEC_MBOX *
3174 				  NAME_SIZE], rvu);
3175 		if (ret) {
3176 			dev_err(rvu->dev,
3177 				"RVUAF: IRQ registration failed for mbox\n");
3178 			goto fail;
3179 		}
3180 
3181 		rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
3182 	} else {
3183 		ret = cn20k_register_afpf_mbox_intr(rvu);
3184 		if (ret) {
3185 			dev_err(rvu->dev,
3186 				"RVUAF: IRQ registration failed for mbox\n");
3187 			goto fail;
3188 		}
3189 	}
3190 
3191 	/* Enable mailbox interrupts from all PFs */
3192 	rvu_enable_mbox_intr(rvu);
3193 
3194 	/* Register FLR interrupt handler */
3195 	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
3196 		"RVUAF FLR");
3197 	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
3198 			  rvu_flr_intr_handler, 0,
3199 			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
3200 			  rvu);
3201 	if (ret) {
3202 		dev_err(rvu->dev,
3203 			"RVUAF: IRQ registration failed for FLR\n");
3204 		goto fail;
3205 	}
3206 	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
3207 
3208 	/* Enable FLR interrupt for all PFs*/
3209 	rvu_write64(rvu, BLKADDR_RVUM,
3210 		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
3211 
3212 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
3213 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
3214 
3215 	/* Register ME interrupt handler */
3216 	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
3217 		"RVUAF ME");
3218 	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
3219 			  rvu_me_pf_intr_handler, 0,
3220 			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
3221 			  rvu);
3222 	if (ret) {
3223 		dev_err(rvu->dev,
3224 			"RVUAF: IRQ registration failed for ME\n");
3225 	}
3226 	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
3227 
3228 	/* Clear TRPEND bit for all PF */
3229 	rvu_write64(rvu, BLKADDR_RVUM,
3230 		    RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
3231 	/* Enable ME interrupt for all PFs*/
3232 	rvu_write64(rvu, BLKADDR_RVUM,
3233 		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
3234 
3235 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
3236 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
3237 
3238 	if (!rvu_afvf_msix_vectors_num_ok(rvu))
3239 		return 0;
3240 
3241 	/* Get PF MSIX vectors offset. */
3242 	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
3243 				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
3244 	if (!is_cn20k(rvu->pdev)) {
3245 		/* Register MBOX0 interrupt. */
3246 		offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
3247 		sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
3248 		ret = request_irq(pci_irq_vector(rvu->pdev, offset),
3249 				  rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
3250 				  &rvu->irq_name[offset * NAME_SIZE],
3251 				  rvu);
3252 		if (ret)
3253 			dev_err(rvu->dev,
3254 				"RVUAF: IRQ registration failed for Mbox0\n");
3255 
3256 		rvu->irq_allocated[offset] = true;
3257 
3258 		/* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
3259 		 * simply increment current offset by 1.
3260 		 */
3261 		offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
3262 		sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
3263 		ret = request_irq(pci_irq_vector(rvu->pdev, offset),
3264 				  rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
3265 				  &rvu->irq_name[offset * NAME_SIZE],
3266 				  rvu);
3267 		if (ret)
3268 			dev_err(rvu->dev,
3269 				"RVUAF: IRQ registration failed for Mbox1\n");
3270 
3271 		rvu->irq_allocated[offset] = true;
3272 	} else {
3273 		ret = cn20k_register_afvf_mbox_intr(rvu, pf_vec_start);
3274 		if (ret)
3275 			dev_err(rvu->dev,
3276 				"RVUAF: IRQ registration failed for Mbox\n");
3277 	}
3278 
3279 	/* Register FLR interrupt handler for AF's VFs */
3280 	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
3281 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
3282 	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
3283 			  rvu_flr_intr_handler, 0,
3284 			  &rvu->irq_name[offset * NAME_SIZE], rvu);
3285 	if (ret) {
3286 		dev_err(rvu->dev,
3287 			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
3288 		goto fail;
3289 	}
3290 	rvu->irq_allocated[offset] = true;
3291 
3292 	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
3293 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
3294 	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
3295 			  rvu_flr_intr_handler, 0,
3296 			  &rvu->irq_name[offset * NAME_SIZE], rvu);
3297 	if (ret) {
3298 		dev_err(rvu->dev,
3299 			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
3300 		goto fail;
3301 	}
3302 	rvu->irq_allocated[offset] = true;
3303 
3304 	/* Register ME interrupt handler for AF's VFs */
3305 	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
3306 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
3307 	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
3308 			  rvu_me_vf_intr_handler, 0,
3309 			  &rvu->irq_name[offset * NAME_SIZE], rvu);
3310 	if (ret) {
3311 		dev_err(rvu->dev,
3312 			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
3313 		goto fail;
3314 	}
3315 	rvu->irq_allocated[offset] = true;
3316 
3317 	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
3318 	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
3319 	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
3320 			  rvu_me_vf_intr_handler, 0,
3321 			  &rvu->irq_name[offset * NAME_SIZE], rvu);
3322 	if (ret) {
3323 		dev_err(rvu->dev,
3324 			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
3325 		goto fail;
3326 	}
3327 	rvu->irq_allocated[offset] = true;
3328 
3329 	ret = rvu_cpt_register_interrupts(rvu);
3330 	if (ret)
3331 		goto fail;
3332 
3333 	for (i = 0; i < rvu->num_vec; i++) {
3334 		if (strstr(&rvu->irq_name[i * NAME_SIZE], "Mbox") ||
3335 		    strstr(&rvu->irq_name[i * NAME_SIZE], "FLR"))
3336 			irq_set_affinity(pci_irq_vector(rvu->pdev, i),
3337 					 cpumask_of(0));
3338 	}
3339 
3340 	return 0;
3341 
3342 fail:
3343 	rvu_unregister_interrupts(rvu);
3344 	return ret;
3345 }
3346 
3347 static void rvu_flr_wq_destroy(struct rvu *rvu)
3348 {
3349 	if (rvu->flr_wq) {
3350 		destroy_workqueue(rvu->flr_wq);
3351 		rvu->flr_wq = NULL;
3352 	}
3353 }
3354 
3355 static int rvu_flr_init(struct rvu *rvu)
3356 {
3357 	int dev, num_devs;
3358 	u64 cfg;
3359 	int pf;
3360 
3361 	/* Enable FLR for all PFs*/
3362 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
3363 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3364 		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
3365 			    cfg | BIT_ULL(22));
3366 	}
3367 
3368 	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
3369 				      WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
3370 	if (!rvu->flr_wq)
3371 		return -ENOMEM;
3372 
3373 	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
3374 	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
3375 				    sizeof(struct rvu_work), GFP_KERNEL);
3376 	if (!rvu->flr_wrk) {
3377 		destroy_workqueue(rvu->flr_wq);
3378 		return -ENOMEM;
3379 	}
3380 
3381 	for (dev = 0; dev < num_devs; dev++) {
3382 		rvu->flr_wrk[dev].rvu = rvu;
3383 		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
3384 	}
3385 
3386 	mutex_init(&rvu->flr_lock);
3387 
3388 	return 0;
3389 }
3390 
3391 static void rvu_disable_afvf_intr(struct rvu *rvu)
3392 {
3393 	int vfs = rvu->vfs;
3394 
3395 	if (is_cn20k(rvu->pdev))
3396 		return cn20k_rvu_disable_afvf_intr(rvu, vfs);
3397 
3398 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
3399 	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
3400 	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
3401 	if (vfs <= 64)
3402 		return;
3403 
3404 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
3405 		      INTR_MASK(vfs - 64));
3406 	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
3407 	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
3408 }
3409 
3410 static void rvu_enable_afvf_intr(struct rvu *rvu)
3411 {
3412 	int vfs = rvu->vfs;
3413 
3414 	if (is_cn20k(rvu->pdev))
3415 		return cn20k_rvu_enable_afvf_intr(rvu, vfs);
3416 
3417 	/* Clear any pending interrupts and enable AF VF interrupts for
3418 	 * the first 64 VFs.
3419 	 */
3420 	/* Mbox */
3421 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
3422 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
3423 
3424 	/* FLR */
3425 	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
3426 	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
3427 	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
3428 
3429 	/* Same for remaining VFs, if any. */
3430 	if (vfs <= 64)
3431 		return;
3432 
3433 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
3434 	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
3435 		      INTR_MASK(vfs - 64));
3436 
3437 	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
3438 	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
3439 	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
3440 }
3441 
3442 int rvu_get_num_lbk_chans(void)
3443 {
3444 	struct pci_dev *pdev;
3445 	void __iomem *base;
3446 	int ret = -EIO;
3447 
3448 	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
3449 			      NULL);
3450 	if (!pdev)
3451 		goto err;
3452 
3453 	base = pci_ioremap_bar(pdev, 0);
3454 	if (!base)
3455 		goto err_put;
3456 
3457 	/* Read number of available LBK channels from LBK(0)_CONST register. */
3458 	ret = (readq(base + 0x10) >> 32) & 0xffff;
3459 	iounmap(base);
3460 err_put:
3461 	pci_dev_put(pdev);
3462 err:
3463 	return ret;
3464 }
3465 
3466 static int rvu_enable_sriov(struct rvu *rvu)
3467 {
3468 	struct pci_dev *pdev = rvu->pdev;
3469 	int err, chans, vfs;
3470 	int pos = 0;
3471 
3472 	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
3473 		dev_warn(&pdev->dev,
3474 			 "Skipping SRIOV enablement since not enough IRQs are available\n");
3475 		return 0;
3476 	}
3477 
3478 	/* Get RVU VFs device id */
3479 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
3480 	if (!pos)
3481 		return 0;
3482 	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &rvu->vf_devid);
3483 
3484 	chans = rvu_get_num_lbk_chans();
3485 	if (chans < 0)
3486 		return chans;
3487 
3488 	vfs = pci_sriov_get_totalvfs(pdev);
3489 
3490 	/* Limit VFs in case we have more VFs than LBK channels available. */
3491 	if (vfs > chans)
3492 		vfs = chans;
3493 
3494 	if (!vfs)
3495 		return 0;
3496 
3497 	/* LBK channel number 63 is used for switching packets between
3498 	 * CGX mapped VFs. Hence limit LBK pairs till 62 only.
3499 	 */
3500 	if (vfs > 62)
3501 		vfs = 62;
3502 
3503 	/* Save VFs number for reference in VF interrupts handlers.
3504 	 * Since interrupts might start arriving during SRIOV enablement
3505 	 * ordinary API cannot be used to get number of enabled VFs.
3506 	 */
3507 	rvu->vfs = vfs;
3508 
3509 	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
3510 			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
3511 	if (err)
3512 		return err;
3513 
3514 	rvu_enable_afvf_intr(rvu);
3515 	/* Make sure IRQs are enabled before SRIOV. */
3516 	mb();
3517 
3518 	err = pci_enable_sriov(pdev, vfs);
3519 	if (err) {
3520 		rvu_disable_afvf_intr(rvu);
3521 		rvu_mbox_destroy(&rvu->afvf_wq_info);
3522 		return err;
3523 	}
3524 
3525 	return 0;
3526 }
3527 
3528 static void rvu_disable_sriov(struct rvu *rvu)
3529 {
3530 	rvu_disable_afvf_intr(rvu);
3531 	rvu_mbox_destroy(&rvu->afvf_wq_info);
3532 	pci_disable_sriov(rvu->pdev);
3533 }
3534 
3535 static void rvu_update_module_params(struct rvu *rvu)
3536 {
3537 	const char *default_pfl_name = "default";
3538 
3539 	strscpy(rvu->mkex_pfl_name,
3540 		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
3541 	strscpy(rvu->kpu_pfl_name,
3542 		kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
3543 }
3544 
3545 static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3546 {
3547 	struct device *dev = &pdev->dev;
3548 	struct rvu *rvu;
3549 	int    err;
3550 
3551 	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
3552 	if (!rvu)
3553 		return -ENOMEM;
3554 
3555 	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
3556 	if (!rvu->hw) {
3557 		devm_kfree(dev, rvu);
3558 		return -ENOMEM;
3559 	}
3560 
3561 	pci_set_drvdata(pdev, rvu);
3562 	rvu->pdev = pdev;
3563 	rvu->dev = &pdev->dev;
3564 
3565 	err = pci_enable_device(pdev);
3566 	if (err) {
3567 		dev_err(dev, "Failed to enable PCI device\n");
3568 		goto err_freemem;
3569 	}
3570 
3571 	err = pci_request_regions(pdev, DRV_NAME);
3572 	if (err) {
3573 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
3574 		goto err_disable_device;
3575 	}
3576 
3577 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
3578 	if (err) {
3579 		dev_err(dev, "DMA mask config failed, abort\n");
3580 		goto err_release_regions;
3581 	}
3582 
3583 	pci_set_master(pdev);
3584 
3585 	rvu->ptp = ptp_get();
3586 	if (IS_ERR(rvu->ptp)) {
3587 		err = PTR_ERR(rvu->ptp);
3588 		if (err)
3589 			goto err_release_regions;
3590 		rvu->ptp = NULL;
3591 	}
3592 
3593 	/* Map Admin function CSRs */
3594 	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
3595 	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
3596 	if (!rvu->afreg_base || !rvu->pfreg_base) {
3597 		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
3598 		err = -ENOMEM;
3599 		goto err_put_ptp;
3600 	}
3601 
3602 	/* Store module params in rvu structure */
3603 	rvu_update_module_params(rvu);
3604 
3605 	/* Check which blocks the HW supports */
3606 	rvu_check_block_implemented(rvu);
3607 
3608 	rvu_reset_all_blocks(rvu);
3609 
3610 	rvu_setup_hw_capabilities(rvu);
3611 
3612 	err = rvu_setup_hw_resources(rvu);
3613 	if (err)
3614 		goto err_put_ptp;
3615 
3616 	/* Init mailbox btw AF and PFs */
3617 	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
3618 			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
3619 			    rvu_afpf_mbox_up_handler);
3620 	if (err) {
3621 		dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
3622 		goto err_hwsetup;
3623 	}
3624 
3625 	err = rvu_flr_init(rvu);
3626 	if (err) {
3627 		dev_err(dev, "%s: Failed to initialize flr\n", __func__);
3628 		goto err_mbox;
3629 	}
3630 
3631 	err = rvu_register_interrupts(rvu);
3632 	if (err) {
3633 		dev_err(dev, "%s: Failed to register interrupts\n", __func__);
3634 		goto err_flr;
3635 	}
3636 
3637 	err = rvu_register_dl(rvu);
3638 	if (err) {
3639 		dev_err(dev, "%s: Failed to register devlink\n", __func__);
3640 		goto err_irq;
3641 	}
3642 
3643 	rvu_setup_rvum_blk_revid(rvu);
3644 
3645 	/* Enable AF's VFs (if any) */
3646 	err = rvu_enable_sriov(rvu);
3647 	if (err) {
3648 		dev_err(dev, "%s: Failed to enable sriov\n", __func__);
3649 		goto err_dl;
3650 	}
3651 
3652 	/* Initialize debugfs */
3653 	rvu_dbg_init(rvu);
3654 
3655 	mutex_init(&rvu->rswitch.switch_lock);
3656 
3657 	if (rvu->fwdata)
3658 		ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
3659 			  rvu->fwdata->ptp_ext_tstamp);
3660 
3661 	/* Alloc CINT and QINT memory */
3662 	rvu_alloc_cint_qint_mem(rvu, &rvu->pf[RVU_AFPF], BLKADDR_NIX0,
3663 				(rvu->hw->block[BLKADDR_NIX0].lf.max));
3664 	return 0;
3665 err_dl:
3666 	rvu_unregister_dl(rvu);
3667 err_irq:
3668 	rvu_unregister_interrupts(rvu);
3669 err_flr:
3670 	rvu_flr_wq_destroy(rvu);
3671 err_mbox:
3672 	rvu_mbox_destroy(&rvu->afpf_wq_info);
3673 err_hwsetup:
3674 	rvu_cgx_exit(rvu);
3675 	rvu_fwdata_exit(rvu);
3676 	rvu_mcs_exit(rvu);
3677 	rvu_reset_all_blocks(rvu);
3678 	rvu_free_hw_resources(rvu);
3679 	rvu_clear_rvum_blk_revid(rvu);
3680 err_put_ptp:
3681 	ptp_put(rvu->ptp);
3682 err_release_regions:
3683 	pci_release_regions(pdev);
3684 err_disable_device:
3685 	pci_disable_device(pdev);
3686 err_freemem:
3687 	pci_set_drvdata(pdev, NULL);
3688 	devm_kfree(&pdev->dev, rvu->hw);
3689 	devm_kfree(dev, rvu);
3690 	return err;
3691 }
3692 
3693 static void rvu_remove(struct pci_dev *pdev)
3694 {
3695 	struct rvu *rvu = pci_get_drvdata(pdev);
3696 
3697 	rvu_dbg_exit(rvu);
3698 	rvu_unregister_dl(rvu);
3699 	rvu_unregister_interrupts(rvu);
3700 	rvu_flr_wq_destroy(rvu);
3701 	rvu_cgx_exit(rvu);
3702 	rvu_fwdata_exit(rvu);
3703 	rvu_mcs_exit(rvu);
3704 	rvu_mbox_destroy(&rvu->afpf_wq_info);
3705 	rvu_disable_sriov(rvu);
3706 	rvu_reset_all_blocks(rvu);
3707 	rvu_free_hw_resources(rvu);
3708 	rvu_clear_rvum_blk_revid(rvu);
3709 	ptp_put(rvu->ptp);
3710 	pci_release_regions(pdev);
3711 	pci_disable_device(pdev);
3712 	pci_set_drvdata(pdev, NULL);
3713 
3714 	devm_kfree(&pdev->dev, rvu->hw);
3715 	if (is_cn20k(rvu->pdev))
3716 		cn20k_free_mbox_memory(rvu);
3717 	kfree(rvu->ng_rvu);
3718 	devm_kfree(&pdev->dev, rvu);
3719 }
3720 
3721 static void rvu_shutdown(struct pci_dev *pdev)
3722 {
3723 	struct rvu *rvu = pci_get_drvdata(pdev);
3724 
3725 	if (!rvu)
3726 		return;
3727 
3728 	rvu_clear_rvum_blk_revid(rvu);
3729 }
3730 
3731 static struct pci_driver rvu_driver = {
3732 	.name = DRV_NAME,
3733 	.id_table = rvu_id_table,
3734 	.probe = rvu_probe,
3735 	.remove = rvu_remove,
3736 	.shutdown = rvu_shutdown,
3737 };
3738 
3739 static int __init rvu_init_module(void)
3740 {
3741 	int err;
3742 
3743 	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
3744 
3745 	err = pci_register_driver(&cgx_driver);
3746 	if (err < 0)
3747 		return err;
3748 
3749 	err = pci_register_driver(&ptp_driver);
3750 	if (err < 0)
3751 		goto ptp_err;
3752 
3753 	err = pci_register_driver(&mcs_driver);
3754 	if (err < 0)
3755 		goto mcs_err;
3756 
3757 	err =  pci_register_driver(&rvu_driver);
3758 	if (err < 0)
3759 		goto rvu_err;
3760 
3761 	return 0;
3762 rvu_err:
3763 	pci_unregister_driver(&mcs_driver);
3764 mcs_err:
3765 	pci_unregister_driver(&ptp_driver);
3766 ptp_err:
3767 	pci_unregister_driver(&cgx_driver);
3768 
3769 	return err;
3770 }
3771 
3772 static void __exit rvu_cleanup_module(void)
3773 {
3774 	pci_unregister_driver(&rvu_driver);
3775 	pci_unregister_driver(&mcs_driver);
3776 	pci_unregister_driver(&ptp_driver);
3777 	pci_unregister_driver(&cgx_driver);
3778 }
3779 
3780 module_init(rvu_init_module);
3781 module_exit(rvu_cleanup_module);
3782