xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c (revision be239684b18e1cdcafcf8c7face4a2f562c745ad)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell.
5  *
6  */
7 
8 #include <linux/module.h>
9 #include <linux/pci.h>
10 
11 #include "rvu_struct.h"
12 #include "rvu_reg.h"
13 #include "rvu.h"
14 #include "npc.h"
15 #include "mcs.h"
16 #include "cgx.h"
17 #include "lmac_common.h"
18 #include "rvu_npc_hash.h"
19 
20 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
21 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
22 			    int type, int chan_id);
23 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
24 			       int type, bool add);
25 static int nix_setup_ipolicers(struct rvu *rvu,
26 			       struct nix_hw *nix_hw, int blkaddr);
27 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
28 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
29 			       struct nix_hw *nix_hw, u16 pcifunc);
30 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
31 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
32 				     u32 leaf_prof);
33 static const char *nix_get_ctx_name(int ctype);
34 
35 enum mc_tbl_sz {
36 	MC_TBL_SZ_256,
37 	MC_TBL_SZ_512,
38 	MC_TBL_SZ_1K,
39 	MC_TBL_SZ_2K,
40 	MC_TBL_SZ_4K,
41 	MC_TBL_SZ_8K,
42 	MC_TBL_SZ_16K,
43 	MC_TBL_SZ_32K,
44 	MC_TBL_SZ_64K,
45 };
46 
47 enum mc_buf_cnt {
48 	MC_BUF_CNT_8,
49 	MC_BUF_CNT_16,
50 	MC_BUF_CNT_32,
51 	MC_BUF_CNT_64,
52 	MC_BUF_CNT_128,
53 	MC_BUF_CNT_256,
54 	MC_BUF_CNT_512,
55 	MC_BUF_CNT_1024,
56 	MC_BUF_CNT_2048,
57 };
58 
59 enum nix_makr_fmt_indexes {
60 	NIX_MARK_CFG_IP_DSCP_RED,
61 	NIX_MARK_CFG_IP_DSCP_YELLOW,
62 	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
63 	NIX_MARK_CFG_IP_ECN_RED,
64 	NIX_MARK_CFG_IP_ECN_YELLOW,
65 	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
66 	NIX_MARK_CFG_VLAN_DEI_RED,
67 	NIX_MARK_CFG_VLAN_DEI_YELLOW,
68 	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
69 	NIX_MARK_CFG_MAX,
70 };
71 
72 /* For now considering MC resources needed for broadcast
73  * pkt replication only. i.e 256 HWVFs + 12 PFs.
74  */
75 #define MC_TBL_SIZE	MC_TBL_SZ_2K
76 #define MC_BUF_CNT	MC_BUF_CNT_1024
77 
78 #define MC_TX_MAX	2048
79 
80 struct mce {
81 	struct hlist_node	node;
82 	u32			rq_rss_index;
83 	u16			pcifunc;
84 	u16			channel;
85 	u8			dest_type;
86 	u8			is_active;
87 	u8			reserved[2];
88 };
89 
90 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
91 {
92 	int i = 0;
93 
94 	/*If blkaddr is 0, return the first nix block address*/
95 	if (blkaddr == 0)
96 		return rvu->nix_blkaddr[blkaddr];
97 
98 	while (i + 1 < MAX_NIX_BLKS) {
99 		if (rvu->nix_blkaddr[i] == blkaddr)
100 			return rvu->nix_blkaddr[i + 1];
101 		i++;
102 	}
103 
104 	return 0;
105 }
106 
107 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
108 {
109 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
110 	int blkaddr;
111 
112 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
113 	if (!pfvf->nixlf || blkaddr < 0)
114 		return false;
115 	return true;
116 }
117 
118 int rvu_get_nixlf_count(struct rvu *rvu)
119 {
120 	int blkaddr = 0, max = 0;
121 	struct rvu_block *block;
122 
123 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
124 	while (blkaddr) {
125 		block = &rvu->hw->block[blkaddr];
126 		max += block->lf.max;
127 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
128 	}
129 	return max;
130 }
131 
132 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
133 {
134 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
135 	struct rvu_hwinfo *hw = rvu->hw;
136 	int blkaddr;
137 
138 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
139 	if (!pfvf->nixlf || blkaddr < 0)
140 		return NIX_AF_ERR_AF_LF_INVALID;
141 
142 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
143 	if (*nixlf < 0)
144 		return NIX_AF_ERR_AF_LF_INVALID;
145 
146 	if (nix_blkaddr)
147 		*nix_blkaddr = blkaddr;
148 
149 	return 0;
150 }
151 
152 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
153 			struct nix_hw **nix_hw, int *blkaddr)
154 {
155 	struct rvu_pfvf *pfvf;
156 
157 	pfvf = rvu_get_pfvf(rvu, pcifunc);
158 	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
159 	if (!pfvf->nixlf || *blkaddr < 0)
160 		return NIX_AF_ERR_AF_LF_INVALID;
161 
162 	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
163 	if (!*nix_hw)
164 		return NIX_AF_ERR_INVALID_NIXBLK;
165 	return 0;
166 }
167 
168 static void nix_mce_list_init(struct nix_mce_list *list, int max)
169 {
170 	INIT_HLIST_HEAD(&list->head);
171 	list->count = 0;
172 	list->max = max;
173 }
174 
175 static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir)
176 {
177 	struct rsrc_bmap *mce_counter;
178 	int idx;
179 
180 	if (!mcast)
181 		return -EINVAL;
182 
183 	mce_counter = &mcast->mce_counter[dir];
184 	if (!rvu_rsrc_check_contig(mce_counter, count))
185 		return -ENOSPC;
186 
187 	idx = rvu_alloc_rsrc_contig(mce_counter, count);
188 	return idx;
189 }
190 
191 static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir)
192 {
193 	struct rsrc_bmap *mce_counter;
194 
195 	if (!mcast)
196 		return;
197 
198 	mce_counter = &mcast->mce_counter[dir];
199 	rvu_free_rsrc_contig(mce_counter, count, start);
200 }
201 
202 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
203 {
204 	int nix_blkaddr = 0, i = 0;
205 	struct rvu *rvu = hw->rvu;
206 
207 	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
208 	while (nix_blkaddr) {
209 		if (blkaddr == nix_blkaddr && hw->nix)
210 			return &hw->nix[i];
211 		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
212 		i++;
213 	}
214 	return NULL;
215 }
216 
217 int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
218 {
219 	if (hw->cap.nix_multiple_dwrr_mtu)
220 		return NIX_AF_DWRR_MTUX(smq_link_type);
221 
222 	if (smq_link_type == SMQ_LINK_TYPE_SDP)
223 		return NIX_AF_DWRR_SDP_MTU;
224 
225 	/* Here it's same reg for RPM and LBK */
226 	return NIX_AF_DWRR_RPM_MTU;
227 }
228 
229 u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
230 {
231 	dwrr_mtu &= 0x1FULL;
232 
233 	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
234 	 * Value of 4 is reserved for MTU value of 9728 bytes.
235 	 * Value of 5 is reserved for MTU value of 10240 bytes.
236 	 */
237 	switch (dwrr_mtu) {
238 	case 4:
239 		return 9728;
240 	case 5:
241 		return 10240;
242 	default:
243 		return BIT_ULL(dwrr_mtu);
244 	}
245 
246 	return 0;
247 }
248 
249 u32 convert_bytes_to_dwrr_mtu(u32 bytes)
250 {
251 	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
252 	 * Value of 4 is reserved for MTU value of 9728 bytes.
253 	 * Value of 5 is reserved for MTU value of 10240 bytes.
254 	 */
255 	if (bytes > BIT_ULL(16))
256 		return 0;
257 
258 	switch (bytes) {
259 	case 9728:
260 		return 4;
261 	case 10240:
262 		return 5;
263 	default:
264 		return ilog2(bytes);
265 	}
266 
267 	return 0;
268 }
269 
270 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
271 {
272 	int err;
273 
274 	/* Sync all in flight RX packets to LLC/DRAM */
275 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
276 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
277 	if (err)
278 		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
279 
280 	/* SW_SYNC ensures all existing transactions are finished and pkts
281 	 * are written to LLC/DRAM, queues should be teared down after
282 	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
283 	 * an existing transaction might end after SW_SYNC operation. To
284 	 * ensure operation is fully done, do the SW_SYNC twice.
285 	 */
286 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
287 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
288 	if (err)
289 		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
290 }
291 
292 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
293 			    int lvl, u16 pcifunc, u16 schq)
294 {
295 	struct rvu_hwinfo *hw = rvu->hw;
296 	struct nix_txsch *txsch;
297 	struct nix_hw *nix_hw;
298 	u16 map_func;
299 
300 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
301 	if (!nix_hw)
302 		return false;
303 
304 	txsch = &nix_hw->txsch[lvl];
305 	/* Check out of bounds */
306 	if (schq >= txsch->schq.max)
307 		return false;
308 
309 	mutex_lock(&rvu->rsrc_lock);
310 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
311 	mutex_unlock(&rvu->rsrc_lock);
312 
313 	/* TLs aggegating traffic are shared across PF and VFs */
314 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
315 		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
316 			return false;
317 		else
318 			return true;
319 	}
320 
321 	if (map_func != pcifunc)
322 		return false;
323 
324 	return true;
325 }
326 
327 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
328 			      struct nix_lf_alloc_rsp *rsp, bool loop)
329 {
330 	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
331 	u16 req_chan_base, req_chan_end, req_chan_cnt;
332 	struct rvu_hwinfo *hw = rvu->hw;
333 	struct sdp_node_info *sdp_info;
334 	int pkind, pf, vf, lbkid, vfid;
335 	u8 cgx_id, lmac_id;
336 	bool from_vf;
337 	int err;
338 
339 	pf = rvu_get_pf(pcifunc);
340 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
341 	    type != NIX_INTF_TYPE_SDP)
342 		return 0;
343 
344 	switch (type) {
345 	case NIX_INTF_TYPE_CGX:
346 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
347 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
348 
349 		pkind = rvu_npc_get_pkind(rvu, pf);
350 		if (pkind < 0) {
351 			dev_err(rvu->dev,
352 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
353 			return -EINVAL;
354 		}
355 		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
356 		pfvf->tx_chan_base = pfvf->rx_chan_base;
357 		pfvf->rx_chan_cnt = 1;
358 		pfvf->tx_chan_cnt = 1;
359 		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
360 
361 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
362 		rvu_npc_set_pkind(rvu, pkind, pfvf);
363 
364 		break;
365 	case NIX_INTF_TYPE_LBK:
366 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
367 
368 		/* If NIX1 block is present on the silicon then NIXes are
369 		 * assigned alternatively for lbk interfaces. NIX0 should
370 		 * send packets on lbk link 1 channels and NIX1 should send
371 		 * on lbk link 0 channels for the communication between
372 		 * NIX0 and NIX1.
373 		 */
374 		lbkid = 0;
375 		if (rvu->hw->lbk_links > 1)
376 			lbkid = vf & 0x1 ? 0 : 1;
377 
378 		/* By default NIX0 is configured to send packet on lbk link 1
379 		 * (which corresponds to LBK1), same packet will receive on
380 		 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
381 		 * (which corresponds to LBK2) packet will receive on NIX0 lbk
382 		 * link 1.
383 		 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0
384 		 * transmits and receives on lbk link 0, whick corresponds
385 		 * to LBK1 block, back to back connectivity between NIX and
386 		 * LBK can be achieved (which is similar to 96xx)
387 		 *
388 		 *			RX		TX
389 		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
390 		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
391 		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
392 		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
393 		 */
394 		if (loop)
395 			lbkid = !lbkid;
396 
397 		/* Note that AF's VFs work in pairs and talk over consecutive
398 		 * loopback channels.Therefore if odd number of AF VFs are
399 		 * enabled then the last VF remains with no pair.
400 		 */
401 		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
402 		pfvf->tx_chan_base = vf & 0x1 ?
403 					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
404 					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
405 		pfvf->rx_chan_cnt = 1;
406 		pfvf->tx_chan_cnt = 1;
407 		rsp->tx_link = hw->cgx_links + lbkid;
408 		pfvf->lbkid = lbkid;
409 		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
410 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
411 					      pfvf->rx_chan_base,
412 					      pfvf->rx_chan_cnt);
413 
414 		break;
415 	case NIX_INTF_TYPE_SDP:
416 		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
417 		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
418 		sdp_info = parent_pf->sdp_info;
419 		if (!sdp_info) {
420 			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
421 			return -EINVAL;
422 		}
423 		if (from_vf) {
424 			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
425 				sdp_info->num_pf_rings;
426 			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
427 			for (vfid = 0; vfid < vf; vfid++)
428 				req_chan_base += sdp_info->vf_rings[vfid];
429 			req_chan_cnt = sdp_info->vf_rings[vf];
430 			req_chan_end = req_chan_base + req_chan_cnt - 1;
431 			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
432 			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
433 				dev_err(rvu->dev,
434 					"PF_Func 0x%x: Invalid channel base and count\n",
435 					pcifunc);
436 				return -EINVAL;
437 			}
438 		} else {
439 			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
440 			req_chan_cnt = sdp_info->num_pf_rings;
441 		}
442 
443 		pfvf->rx_chan_base = req_chan_base;
444 		pfvf->rx_chan_cnt = req_chan_cnt;
445 		pfvf->tx_chan_base = pfvf->rx_chan_base;
446 		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
447 
448 		rsp->tx_link = hw->cgx_links + hw->lbk_links;
449 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
450 					      pfvf->rx_chan_base,
451 					      pfvf->rx_chan_cnt);
452 		break;
453 	}
454 
455 	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
456 	 * RVU PF/VF's MAC address.
457 	 */
458 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
459 				    pfvf->rx_chan_base, pfvf->mac_addr);
460 
461 	/* Add this PF_FUNC to bcast pkt replication list */
462 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
463 	if (err) {
464 		dev_err(rvu->dev,
465 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
466 			pcifunc);
467 		return err;
468 	}
469 	/* Install MCAM rule matching Ethernet broadcast mac address */
470 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
471 					  nixlf, pfvf->rx_chan_base);
472 
473 	pfvf->maxlen = NIC_HW_MIN_FRS;
474 	pfvf->minlen = NIC_HW_MIN_FRS;
475 
476 	return 0;
477 }
478 
479 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
480 {
481 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
482 	int err;
483 
484 	pfvf->maxlen = 0;
485 	pfvf->minlen = 0;
486 
487 	/* Remove this PF_FUNC from bcast pkt replication list */
488 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
489 	if (err) {
490 		dev_err(rvu->dev,
491 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
492 			pcifunc);
493 	}
494 
495 	/* Free and disable any MCAM entries used by this NIX LF */
496 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
497 
498 	/* Disable DMAC filters used */
499 	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
500 }
501 
502 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
503 				    struct nix_bp_cfg_req *req,
504 				    struct msg_rsp *rsp)
505 {
506 	u16 pcifunc = req->hdr.pcifunc;
507 	struct rvu_pfvf *pfvf;
508 	int blkaddr, pf, type;
509 	u16 chan_base, chan;
510 	u64 cfg;
511 
512 	pf = rvu_get_pf(pcifunc);
513 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
514 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
515 		return 0;
516 
517 	pfvf = rvu_get_pfvf(rvu, pcifunc);
518 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
519 
520 	chan_base = pfvf->rx_chan_base + req->chan_base;
521 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
522 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
523 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
524 			    cfg & ~BIT_ULL(16));
525 	}
526 	return 0;
527 }
528 
529 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
530 			    int type, int chan_id)
531 {
532 	int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
533 	u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
534 	struct rvu_hwinfo *hw = rvu->hw;
535 	struct rvu_pfvf *pfvf;
536 	u8 cgx_id, lmac_id;
537 	u64 cfg;
538 
539 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
540 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
541 	lmac_chan_cnt = cfg & 0xFF;
542 
543 	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
544 	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
545 
546 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
547 	sdp_chan_cnt = cfg & 0xFFF;
548 	sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
549 
550 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
551 
552 	/* Backpressure IDs range division
553 	 * CGX channles are mapped to (0 - 191) BPIDs
554 	 * LBK channles are mapped to (192 - 255) BPIDs
555 	 * SDP channles are mapped to (256 - 511) BPIDs
556 	 *
557 	 * Lmac channles and bpids mapped as follows
558 	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
559 	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
560 	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
561 	 */
562 	switch (type) {
563 	case NIX_INTF_TYPE_CGX:
564 		if ((req->chan_base + req->chan_cnt) > 16)
565 			return -EINVAL;
566 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
567 		/* Assign bpid based on cgx, lmac and chan id */
568 		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
569 			(lmac_id * lmac_chan_cnt) + req->chan_base;
570 
571 		if (req->bpid_per_chan)
572 			bpid += chan_id;
573 		if (bpid > cgx_bpid_cnt)
574 			return -EINVAL;
575 		break;
576 
577 	case NIX_INTF_TYPE_LBK:
578 		if ((req->chan_base + req->chan_cnt) > 63)
579 			return -EINVAL;
580 		bpid = cgx_bpid_cnt + req->chan_base;
581 		if (req->bpid_per_chan)
582 			bpid += chan_id;
583 		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
584 			return -EINVAL;
585 		break;
586 	case NIX_INTF_TYPE_SDP:
587 		if ((req->chan_base + req->chan_cnt) > 255)
588 			return -EINVAL;
589 
590 		bpid = sdp_bpid_cnt + req->chan_base;
591 		if (req->bpid_per_chan)
592 			bpid += chan_id;
593 
594 		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
595 			return -EINVAL;
596 		break;
597 	default:
598 		return -EINVAL;
599 	}
600 	return bpid;
601 }
602 
603 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
604 				   struct nix_bp_cfg_req *req,
605 				   struct nix_bp_cfg_rsp *rsp)
606 {
607 	int blkaddr, pf, type, chan_id = 0;
608 	u16 pcifunc = req->hdr.pcifunc;
609 	struct rvu_pfvf *pfvf;
610 	u16 chan_base, chan;
611 	s16 bpid, bpid_base;
612 	u64 cfg;
613 
614 	pf = rvu_get_pf(pcifunc);
615 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
616 	if (is_sdp_pfvf(pcifunc))
617 		type = NIX_INTF_TYPE_SDP;
618 
619 	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
620 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
621 	    type != NIX_INTF_TYPE_SDP)
622 		return 0;
623 
624 	pfvf = rvu_get_pfvf(rvu, pcifunc);
625 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
626 
627 	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
628 	chan_base = pfvf->rx_chan_base + req->chan_base;
629 	bpid = bpid_base;
630 
631 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
632 		if (bpid < 0) {
633 			dev_warn(rvu->dev, "Fail to enable backpressure\n");
634 			return -EINVAL;
635 		}
636 
637 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
638 		cfg &= ~GENMASK_ULL(8, 0);
639 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
640 			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
641 		chan_id++;
642 		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
643 	}
644 
645 	for (chan = 0; chan < req->chan_cnt; chan++) {
646 		/* Map channel and bpid assign to it */
647 		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
648 					(bpid_base & 0x3FF);
649 		if (req->bpid_per_chan)
650 			bpid_base++;
651 	}
652 	rsp->chan_cnt = req->chan_cnt;
653 
654 	return 0;
655 }
656 
657 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
658 				 u64 format, bool v4, u64 *fidx)
659 {
660 	struct nix_lso_format field = {0};
661 
662 	/* IP's Length field */
663 	field.layer = NIX_TXLAYER_OL3;
664 	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
665 	field.offset = v4 ? 2 : 4;
666 	field.sizem1 = 1; /* i.e 2 bytes */
667 	field.alg = NIX_LSOALG_ADD_PAYLEN;
668 	rvu_write64(rvu, blkaddr,
669 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
670 		    *(u64 *)&field);
671 
672 	/* No ID field in IPv6 header */
673 	if (!v4)
674 		return;
675 
676 	/* IP's ID field */
677 	field.layer = NIX_TXLAYER_OL3;
678 	field.offset = 4;
679 	field.sizem1 = 1; /* i.e 2 bytes */
680 	field.alg = NIX_LSOALG_ADD_SEGNUM;
681 	rvu_write64(rvu, blkaddr,
682 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
683 		    *(u64 *)&field);
684 }
685 
686 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
687 				 u64 format, u64 *fidx)
688 {
689 	struct nix_lso_format field = {0};
690 
691 	/* TCP's sequence number field */
692 	field.layer = NIX_TXLAYER_OL4;
693 	field.offset = 4;
694 	field.sizem1 = 3; /* i.e 4 bytes */
695 	field.alg = NIX_LSOALG_ADD_OFFSET;
696 	rvu_write64(rvu, blkaddr,
697 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
698 		    *(u64 *)&field);
699 
700 	/* TCP's flags field */
701 	field.layer = NIX_TXLAYER_OL4;
702 	field.offset = 12;
703 	field.sizem1 = 1; /* 2 bytes */
704 	field.alg = NIX_LSOALG_TCP_FLAGS;
705 	rvu_write64(rvu, blkaddr,
706 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
707 		    *(u64 *)&field);
708 }
709 
710 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
711 {
712 	u64 cfg, idx, fidx = 0;
713 
714 	/* Get max HW supported format indices */
715 	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
716 	nix_hw->lso.total = cfg;
717 
718 	/* Enable LSO */
719 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
720 	/* For TSO, set first and middle segment flags to
721 	 * mask out PSH, RST & FIN flags in TCP packet
722 	 */
723 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
724 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
725 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
726 
727 	/* Setup default static LSO formats
728 	 *
729 	 * Configure format fields for TCPv4 segmentation offload
730 	 */
731 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
732 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
733 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
734 
735 	/* Set rest of the fields to NOP */
736 	for (; fidx < 8; fidx++) {
737 		rvu_write64(rvu, blkaddr,
738 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
739 	}
740 	nix_hw->lso.in_use++;
741 
742 	/* Configure format fields for TCPv6 segmentation offload */
743 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
744 	fidx = 0;
745 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
746 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
747 
748 	/* Set rest of the fields to NOP */
749 	for (; fidx < 8; fidx++) {
750 		rvu_write64(rvu, blkaddr,
751 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
752 	}
753 	nix_hw->lso.in_use++;
754 }
755 
756 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
757 {
758 	kfree(pfvf->rq_bmap);
759 	kfree(pfvf->sq_bmap);
760 	kfree(pfvf->cq_bmap);
761 	if (pfvf->rq_ctx)
762 		qmem_free(rvu->dev, pfvf->rq_ctx);
763 	if (pfvf->sq_ctx)
764 		qmem_free(rvu->dev, pfvf->sq_ctx);
765 	if (pfvf->cq_ctx)
766 		qmem_free(rvu->dev, pfvf->cq_ctx);
767 	if (pfvf->rss_ctx)
768 		qmem_free(rvu->dev, pfvf->rss_ctx);
769 	if (pfvf->nix_qints_ctx)
770 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
771 	if (pfvf->cq_ints_ctx)
772 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
773 
774 	pfvf->rq_bmap = NULL;
775 	pfvf->cq_bmap = NULL;
776 	pfvf->sq_bmap = NULL;
777 	pfvf->rq_ctx = NULL;
778 	pfvf->sq_ctx = NULL;
779 	pfvf->cq_ctx = NULL;
780 	pfvf->rss_ctx = NULL;
781 	pfvf->nix_qints_ctx = NULL;
782 	pfvf->cq_ints_ctx = NULL;
783 }
784 
785 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
786 			      struct rvu_pfvf *pfvf, int nixlf,
787 			      int rss_sz, int rss_grps, int hwctx_size,
788 			      u64 way_mask, bool tag_lsb_as_adder)
789 {
790 	int err, grp, num_indices;
791 	u64 val;
792 
793 	/* RSS is not requested for this NIXLF */
794 	if (!rss_sz)
795 		return 0;
796 	num_indices = rss_sz * rss_grps;
797 
798 	/* Alloc NIX RSS HW context memory and config the base */
799 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
800 	if (err)
801 		return err;
802 
803 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
804 		    (u64)pfvf->rss_ctx->iova);
805 
806 	/* Config full RSS table size, enable RSS and caching */
807 	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
808 			ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
809 
810 	if (tag_lsb_as_adder)
811 		val |= BIT_ULL(5);
812 
813 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
814 	/* Config RSS group offset and sizes */
815 	for (grp = 0; grp < rss_grps; grp++)
816 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
817 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
818 	return 0;
819 }
820 
821 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
822 			       struct nix_aq_inst_s *inst)
823 {
824 	struct admin_queue *aq = block->aq;
825 	struct nix_aq_res_s *result;
826 	int timeout = 1000;
827 	u64 reg, head;
828 	int ret;
829 
830 	result = (struct nix_aq_res_s *)aq->res->base;
831 
832 	/* Get current head pointer where to append this instruction */
833 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
834 	head = (reg >> 4) & AQ_PTR_MASK;
835 
836 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
837 	       (void *)inst, aq->inst->entry_sz);
838 	memset(result, 0, sizeof(*result));
839 	/* sync into memory */
840 	wmb();
841 
842 	/* Ring the doorbell and wait for result */
843 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
844 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
845 		cpu_relax();
846 		udelay(1);
847 		timeout--;
848 		if (!timeout)
849 			return -EBUSY;
850 	}
851 
852 	if (result->compcode != NIX_AQ_COMP_GOOD) {
853 		/* TODO: Replace this with some error code */
854 		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
855 		    result->compcode == NIX_AQ_COMP_LOCKERR ||
856 		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
857 			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
858 			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
859 			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
860 			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
861 			if (ret)
862 				dev_err(rvu->dev,
863 					"%s: Not able to unlock cachelines\n", __func__);
864 		}
865 
866 		return -EBUSY;
867 	}
868 
869 	return 0;
870 }
871 
872 static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
873 			       u16 *smq, u16 *smq_mask)
874 {
875 	struct nix_cn10k_aq_enq_req *aq_req;
876 
877 	if (!is_rvu_otx2(rvu)) {
878 		aq_req = (struct nix_cn10k_aq_enq_req *)req;
879 		*smq = aq_req->sq.smq;
880 		*smq_mask = aq_req->sq_mask.smq;
881 	} else {
882 		*smq = req->sq.smq;
883 		*smq_mask = req->sq_mask.smq;
884 	}
885 }
886 
887 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
888 				   struct nix_aq_enq_req *req,
889 				   struct nix_aq_enq_rsp *rsp)
890 {
891 	struct rvu_hwinfo *hw = rvu->hw;
892 	u16 pcifunc = req->hdr.pcifunc;
893 	int nixlf, blkaddr, rc = 0;
894 	struct nix_aq_inst_s inst;
895 	struct rvu_block *block;
896 	struct admin_queue *aq;
897 	struct rvu_pfvf *pfvf;
898 	u16 smq, smq_mask;
899 	void *ctx, *mask;
900 	bool ena;
901 	u64 cfg;
902 
903 	blkaddr = nix_hw->blkaddr;
904 	block = &hw->block[blkaddr];
905 	aq = block->aq;
906 	if (!aq) {
907 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
908 		return NIX_AF_ERR_AQ_ENQUEUE;
909 	}
910 
911 	pfvf = rvu_get_pfvf(rvu, pcifunc);
912 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
913 
914 	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
915 	 * operations done by AF itself.
916 	 */
917 	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
918 	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
919 		if (!pfvf->nixlf || nixlf < 0)
920 			return NIX_AF_ERR_AF_LF_INVALID;
921 	}
922 
923 	switch (req->ctype) {
924 	case NIX_AQ_CTYPE_RQ:
925 		/* Check if index exceeds max no of queues */
926 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
927 			rc = NIX_AF_ERR_AQ_ENQUEUE;
928 		break;
929 	case NIX_AQ_CTYPE_SQ:
930 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
931 			rc = NIX_AF_ERR_AQ_ENQUEUE;
932 		break;
933 	case NIX_AQ_CTYPE_CQ:
934 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
935 			rc = NIX_AF_ERR_AQ_ENQUEUE;
936 		break;
937 	case NIX_AQ_CTYPE_RSS:
938 		/* Check if RSS is enabled and qidx is within range */
939 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
940 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
941 		    (req->qidx >= (256UL << (cfg & 0xF))))
942 			rc = NIX_AF_ERR_AQ_ENQUEUE;
943 		break;
944 	case NIX_AQ_CTYPE_MCE:
945 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
946 
947 		/* Check if index exceeds MCE list length */
948 		if (!nix_hw->mcast.mce_ctx ||
949 		    (req->qidx >= (256UL << (cfg & 0xF))))
950 			rc = NIX_AF_ERR_AQ_ENQUEUE;
951 
952 		/* Adding multicast lists for requests from PF/VFs is not
953 		 * yet supported, so ignore this.
954 		 */
955 		if (rsp)
956 			rc = NIX_AF_ERR_AQ_ENQUEUE;
957 		break;
958 	case NIX_AQ_CTYPE_BANDPROF:
959 		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
960 					nix_hw, pcifunc))
961 			rc = NIX_AF_ERR_INVALID_BANDPROF;
962 		break;
963 	default:
964 		rc = NIX_AF_ERR_AQ_ENQUEUE;
965 	}
966 
967 	if (rc)
968 		return rc;
969 
970 	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
971 	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
972 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
973 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
974 	     (req->op == NIX_AQ_INSTOP_WRITE &&
975 	      req->sq_mask.ena && req->sq.ena && smq_mask))) {
976 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
977 				     pcifunc, smq))
978 			return NIX_AF_ERR_AQ_ENQUEUE;
979 	}
980 
981 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
982 	inst.lf = nixlf;
983 	inst.cindex = req->qidx;
984 	inst.ctype = req->ctype;
985 	inst.op = req->op;
986 	/* Currently we are not supporting enqueuing multiple instructions,
987 	 * so always choose first entry in result memory.
988 	 */
989 	inst.res_addr = (u64)aq->res->iova;
990 
991 	/* Hardware uses same aq->res->base for updating result of
992 	 * previous instruction hence wait here till it is done.
993 	 */
994 	spin_lock(&aq->lock);
995 
996 	/* Clean result + context memory */
997 	memset(aq->res->base, 0, aq->res->entry_sz);
998 	/* Context needs to be written at RES_ADDR + 128 */
999 	ctx = aq->res->base + 128;
1000 	/* Mask needs to be written at RES_ADDR + 256 */
1001 	mask = aq->res->base + 256;
1002 
1003 	switch (req->op) {
1004 	case NIX_AQ_INSTOP_WRITE:
1005 		if (req->ctype == NIX_AQ_CTYPE_RQ)
1006 			memcpy(mask, &req->rq_mask,
1007 			       sizeof(struct nix_rq_ctx_s));
1008 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
1009 			memcpy(mask, &req->sq_mask,
1010 			       sizeof(struct nix_sq_ctx_s));
1011 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
1012 			memcpy(mask, &req->cq_mask,
1013 			       sizeof(struct nix_cq_ctx_s));
1014 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
1015 			memcpy(mask, &req->rss_mask,
1016 			       sizeof(struct nix_rsse_s));
1017 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
1018 			memcpy(mask, &req->mce_mask,
1019 			       sizeof(struct nix_rx_mce_s));
1020 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1021 			memcpy(mask, &req->prof_mask,
1022 			       sizeof(struct nix_bandprof_s));
1023 		fallthrough;
1024 	case NIX_AQ_INSTOP_INIT:
1025 		if (req->ctype == NIX_AQ_CTYPE_RQ)
1026 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
1027 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
1028 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
1029 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
1030 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
1031 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
1032 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
1033 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
1034 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
1035 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1036 			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
1037 		break;
1038 	case NIX_AQ_INSTOP_NOP:
1039 	case NIX_AQ_INSTOP_READ:
1040 	case NIX_AQ_INSTOP_LOCK:
1041 	case NIX_AQ_INSTOP_UNLOCK:
1042 		break;
1043 	default:
1044 		rc = NIX_AF_ERR_AQ_ENQUEUE;
1045 		spin_unlock(&aq->lock);
1046 		return rc;
1047 	}
1048 
1049 	/* Submit the instruction to AQ */
1050 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
1051 	if (rc) {
1052 		spin_unlock(&aq->lock);
1053 		return rc;
1054 	}
1055 
1056 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
1057 	if (req->op == NIX_AQ_INSTOP_INIT) {
1058 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
1059 			__set_bit(req->qidx, pfvf->rq_bmap);
1060 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
1061 			__set_bit(req->qidx, pfvf->sq_bmap);
1062 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
1063 			__set_bit(req->qidx, pfvf->cq_bmap);
1064 	}
1065 
1066 	if (req->op == NIX_AQ_INSTOP_WRITE) {
1067 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
1068 			ena = (req->rq.ena & req->rq_mask.ena) |
1069 				(test_bit(req->qidx, pfvf->rq_bmap) &
1070 				~req->rq_mask.ena);
1071 			if (ena)
1072 				__set_bit(req->qidx, pfvf->rq_bmap);
1073 			else
1074 				__clear_bit(req->qidx, pfvf->rq_bmap);
1075 		}
1076 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
1077 			ena = (req->rq.ena & req->sq_mask.ena) |
1078 				(test_bit(req->qidx, pfvf->sq_bmap) &
1079 				~req->sq_mask.ena);
1080 			if (ena)
1081 				__set_bit(req->qidx, pfvf->sq_bmap);
1082 			else
1083 				__clear_bit(req->qidx, pfvf->sq_bmap);
1084 		}
1085 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
1086 			ena = (req->rq.ena & req->cq_mask.ena) |
1087 				(test_bit(req->qidx, pfvf->cq_bmap) &
1088 				~req->cq_mask.ena);
1089 			if (ena)
1090 				__set_bit(req->qidx, pfvf->cq_bmap);
1091 			else
1092 				__clear_bit(req->qidx, pfvf->cq_bmap);
1093 		}
1094 	}
1095 
1096 	if (rsp) {
1097 		/* Copy read context into mailbox */
1098 		if (req->op == NIX_AQ_INSTOP_READ) {
1099 			if (req->ctype == NIX_AQ_CTYPE_RQ)
1100 				memcpy(&rsp->rq, ctx,
1101 				       sizeof(struct nix_rq_ctx_s));
1102 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
1103 				memcpy(&rsp->sq, ctx,
1104 				       sizeof(struct nix_sq_ctx_s));
1105 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
1106 				memcpy(&rsp->cq, ctx,
1107 				       sizeof(struct nix_cq_ctx_s));
1108 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
1109 				memcpy(&rsp->rss, ctx,
1110 				       sizeof(struct nix_rsse_s));
1111 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
1112 				memcpy(&rsp->mce, ctx,
1113 				       sizeof(struct nix_rx_mce_s));
1114 			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1115 				memcpy(&rsp->prof, ctx,
1116 				       sizeof(struct nix_bandprof_s));
1117 		}
1118 	}
1119 
1120 	spin_unlock(&aq->lock);
1121 	return 0;
1122 }
1123 
1124 static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
1125 				 struct nix_aq_enq_req *req, u8 ctype)
1126 {
1127 	struct nix_cn10k_aq_enq_req aq_req;
1128 	struct nix_cn10k_aq_enq_rsp aq_rsp;
1129 	int rc, word;
1130 
1131 	if (req->ctype != NIX_AQ_CTYPE_CQ)
1132 		return 0;
1133 
1134 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
1135 				 req->hdr.pcifunc, ctype, req->qidx);
1136 	if (rc) {
1137 		dev_err(rvu->dev,
1138 			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
1139 			__func__, nix_get_ctx_name(ctype), req->qidx,
1140 			req->hdr.pcifunc);
1141 		return rc;
1142 	}
1143 
1144 	/* Make copy of original context & mask which are required
1145 	 * for resubmission
1146 	 */
1147 	memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
1148 	memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
1149 
1150 	/* exclude fields which HW can update */
1151 	aq_req.cq_mask.cq_err       = 0;
1152 	aq_req.cq_mask.wrptr        = 0;
1153 	aq_req.cq_mask.tail         = 0;
1154 	aq_req.cq_mask.head	    = 0;
1155 	aq_req.cq_mask.avg_level    = 0;
1156 	aq_req.cq_mask.update_time  = 0;
1157 	aq_req.cq_mask.substream    = 0;
1158 
1159 	/* Context mask (cq_mask) holds mask value of fields which
1160 	 * are changed in AQ WRITE operation.
1161 	 * for example cq.drop = 0xa;
1162 	 *	       cq_mask.drop = 0xff;
1163 	 * Below logic performs '&' between cq and cq_mask so that non
1164 	 * updated fields are masked out for request and response
1165 	 * comparison
1166 	 */
1167 	for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
1168 	     word++) {
1169 		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
1170 			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1171 		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
1172 			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1173 	}
1174 
1175 	if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
1176 		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
1177 
1178 	return 0;
1179 }
1180 
1181 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
1182 			       struct nix_aq_enq_rsp *rsp)
1183 {
1184 	struct nix_hw *nix_hw;
1185 	int err, retries = 5;
1186 	int blkaddr;
1187 
1188 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
1189 	if (blkaddr < 0)
1190 		return NIX_AF_ERR_AF_LF_INVALID;
1191 
1192 	nix_hw =  get_nix_hw(rvu->hw, blkaddr);
1193 	if (!nix_hw)
1194 		return NIX_AF_ERR_INVALID_NIXBLK;
1195 
1196 retry:
1197 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
1198 
1199 	/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
1200 	 * As a work around perfrom CQ context read after each AQ write. If AQ
1201 	 * read shows AQ write is not updated perform AQ write again.
1202 	 */
1203 	if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
1204 		err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
1205 		if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
1206 			if (retries--)
1207 				goto retry;
1208 			else
1209 				return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
1210 		}
1211 	}
1212 
1213 	return err;
1214 }
1215 
1216 static const char *nix_get_ctx_name(int ctype)
1217 {
1218 	switch (ctype) {
1219 	case NIX_AQ_CTYPE_CQ:
1220 		return "CQ";
1221 	case NIX_AQ_CTYPE_SQ:
1222 		return "SQ";
1223 	case NIX_AQ_CTYPE_RQ:
1224 		return "RQ";
1225 	case NIX_AQ_CTYPE_RSS:
1226 		return "RSS";
1227 	}
1228 	return "";
1229 }
1230 
1231 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
1232 {
1233 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1234 	struct nix_aq_enq_req aq_req;
1235 	unsigned long *bmap;
1236 	int qidx, q_cnt = 0;
1237 	int err = 0, rc;
1238 
1239 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
1240 		return NIX_AF_ERR_AQ_ENQUEUE;
1241 
1242 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1243 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
1244 
1245 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
1246 		aq_req.cq.ena = 0;
1247 		aq_req.cq_mask.ena = 1;
1248 		aq_req.cq.bp_ena = 0;
1249 		aq_req.cq_mask.bp_ena = 1;
1250 		q_cnt = pfvf->cq_ctx->qsize;
1251 		bmap = pfvf->cq_bmap;
1252 	}
1253 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
1254 		aq_req.sq.ena = 0;
1255 		aq_req.sq_mask.ena = 1;
1256 		q_cnt = pfvf->sq_ctx->qsize;
1257 		bmap = pfvf->sq_bmap;
1258 	}
1259 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
1260 		aq_req.rq.ena = 0;
1261 		aq_req.rq_mask.ena = 1;
1262 		q_cnt = pfvf->rq_ctx->qsize;
1263 		bmap = pfvf->rq_bmap;
1264 	}
1265 
1266 	aq_req.ctype = req->ctype;
1267 	aq_req.op = NIX_AQ_INSTOP_WRITE;
1268 
1269 	for (qidx = 0; qidx < q_cnt; qidx++) {
1270 		if (!test_bit(qidx, bmap))
1271 			continue;
1272 		aq_req.qidx = qidx;
1273 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1274 		if (rc) {
1275 			err = rc;
1276 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1277 				nix_get_ctx_name(req->ctype), qidx);
1278 		}
1279 	}
1280 
1281 	return err;
1282 }
1283 
1284 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1285 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1286 {
1287 	struct nix_aq_enq_req lock_ctx_req;
1288 	int err;
1289 
1290 	if (req->op != NIX_AQ_INSTOP_INIT)
1291 		return 0;
1292 
1293 	if (req->ctype == NIX_AQ_CTYPE_MCE ||
1294 	    req->ctype == NIX_AQ_CTYPE_DYNO)
1295 		return 0;
1296 
1297 	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1298 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1299 	lock_ctx_req.ctype = req->ctype;
1300 	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1301 	lock_ctx_req.qidx = req->qidx;
1302 	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1303 	if (err)
1304 		dev_err(rvu->dev,
1305 			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1306 			req->hdr.pcifunc,
1307 			nix_get_ctx_name(req->ctype), req->qidx);
1308 	return err;
1309 }
1310 
1311 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1312 				struct nix_aq_enq_req *req,
1313 				struct nix_aq_enq_rsp *rsp)
1314 {
1315 	int err;
1316 
1317 	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1318 	if (!err)
1319 		err = nix_lf_hwctx_lockdown(rvu, req);
1320 	return err;
1321 }
1322 #else
1323 
1324 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1325 				struct nix_aq_enq_req *req,
1326 				struct nix_aq_enq_rsp *rsp)
1327 {
1328 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
1329 }
1330 #endif
1331 /* CN10K mbox handler */
1332 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1333 				      struct nix_cn10k_aq_enq_req *req,
1334 				      struct nix_cn10k_aq_enq_rsp *rsp)
1335 {
1336 	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1337 				  (struct nix_aq_enq_rsp *)rsp);
1338 }
1339 
1340 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1341 				       struct hwctx_disable_req *req,
1342 				       struct msg_rsp *rsp)
1343 {
1344 	return nix_lf_hwctx_disable(rvu, req);
1345 }
1346 
1347 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1348 				  struct nix_lf_alloc_req *req,
1349 				  struct nix_lf_alloc_rsp *rsp)
1350 {
1351 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
1352 	struct rvu_hwinfo *hw = rvu->hw;
1353 	u16 pcifunc = req->hdr.pcifunc;
1354 	struct rvu_block *block;
1355 	struct rvu_pfvf *pfvf;
1356 	u64 cfg, ctx_cfg;
1357 	int blkaddr;
1358 
1359 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1360 		return NIX_AF_ERR_PARAM;
1361 
1362 	if (req->way_mask)
1363 		req->way_mask &= 0xFFFF;
1364 
1365 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1366 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1367 	if (!pfvf->nixlf || blkaddr < 0)
1368 		return NIX_AF_ERR_AF_LF_INVALID;
1369 
1370 	block = &hw->block[blkaddr];
1371 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1372 	if (nixlf < 0)
1373 		return NIX_AF_ERR_AF_LF_INVALID;
1374 
1375 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1376 	if (req->npa_func) {
1377 		/* If default, use 'this' NIXLF's PFFUNC */
1378 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1379 			req->npa_func = pcifunc;
1380 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1381 			return NIX_AF_INVAL_NPA_PF_FUNC;
1382 	}
1383 
1384 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1385 	if (req->sso_func) {
1386 		/* If default, use 'this' NIXLF's PFFUNC */
1387 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1388 			req->sso_func = pcifunc;
1389 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1390 			return NIX_AF_INVAL_SSO_PF_FUNC;
1391 	}
1392 
1393 	/* If RSS is being enabled, check if requested config is valid.
1394 	 * RSS table size should be power of two, otherwise
1395 	 * RSS_GRP::OFFSET + adder might go beyond that group or
1396 	 * won't be able to use entire table.
1397 	 */
1398 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1399 			    !is_power_of_2(req->rss_sz)))
1400 		return NIX_AF_ERR_RSS_SIZE_INVALID;
1401 
1402 	if (req->rss_sz &&
1403 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1404 		return NIX_AF_ERR_RSS_GRPS_INVALID;
1405 
1406 	/* Reset this NIX LF */
1407 	err = rvu_lf_reset(rvu, block, nixlf);
1408 	if (err) {
1409 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1410 			block->addr - BLKADDR_NIX0, nixlf);
1411 		return NIX_AF_ERR_LF_RESET;
1412 	}
1413 
1414 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1415 
1416 	/* Alloc NIX RQ HW context memory and config the base */
1417 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1418 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1419 	if (err)
1420 		goto free_mem;
1421 
1422 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1423 	if (!pfvf->rq_bmap)
1424 		goto free_mem;
1425 
1426 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1427 		    (u64)pfvf->rq_ctx->iova);
1428 
1429 	/* Set caching and queue count in HW */
1430 	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1431 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1432 
1433 	/* Alloc NIX SQ HW context memory and config the base */
1434 	hwctx_size = 1UL << (ctx_cfg & 0xF);
1435 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1436 	if (err)
1437 		goto free_mem;
1438 
1439 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1440 	if (!pfvf->sq_bmap)
1441 		goto free_mem;
1442 
1443 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1444 		    (u64)pfvf->sq_ctx->iova);
1445 
1446 	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1447 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1448 
1449 	/* Alloc NIX CQ HW context memory and config the base */
1450 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1451 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1452 	if (err)
1453 		goto free_mem;
1454 
1455 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1456 	if (!pfvf->cq_bmap)
1457 		goto free_mem;
1458 
1459 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1460 		    (u64)pfvf->cq_ctx->iova);
1461 
1462 	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1463 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1464 
1465 	/* Initialize receive side scaling (RSS) */
1466 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1467 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1468 				 req->rss_grps, hwctx_size, req->way_mask,
1469 				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
1470 	if (err)
1471 		goto free_mem;
1472 
1473 	/* Alloc memory for CQINT's HW contexts */
1474 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1475 	qints = (cfg >> 24) & 0xFFF;
1476 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1477 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1478 	if (err)
1479 		goto free_mem;
1480 
1481 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1482 		    (u64)pfvf->cq_ints_ctx->iova);
1483 
1484 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1485 		    BIT_ULL(36) | req->way_mask << 20);
1486 
1487 	/* Alloc memory for QINT's HW contexts */
1488 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1489 	qints = (cfg >> 12) & 0xFFF;
1490 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1491 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1492 	if (err)
1493 		goto free_mem;
1494 
1495 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1496 		    (u64)pfvf->nix_qints_ctx->iova);
1497 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1498 		    BIT_ULL(36) | req->way_mask << 20);
1499 
1500 	/* Setup VLANX TPID's.
1501 	 * Use VLAN1 for 802.1Q
1502 	 * and VLAN0 for 802.1AD.
1503 	 */
1504 	cfg = (0x8100ULL << 16) | 0x88A8ULL;
1505 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1506 
1507 	/* Enable LMTST for this NIX LF */
1508 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1509 
1510 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1511 	if (req->npa_func)
1512 		cfg = req->npa_func;
1513 	if (req->sso_func)
1514 		cfg |= (u64)req->sso_func << 16;
1515 
1516 	cfg |= (u64)req->xqe_sz << 33;
1517 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1518 
1519 	/* Config Rx pkt length, csum checks and apad  enable / disable */
1520 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1521 
1522 	/* Configure pkind for TX parse config */
1523 	cfg = NPC_TX_DEF_PKIND;
1524 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1525 
1526 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1527 	if (is_sdp_pfvf(pcifunc))
1528 		intf = NIX_INTF_TYPE_SDP;
1529 
1530 	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
1531 				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
1532 	if (err)
1533 		goto free_mem;
1534 
1535 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
1536 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1537 
1538 	/* Configure RX VTAG Type 7 (strip) for vf vlan */
1539 	rvu_write64(rvu, blkaddr,
1540 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1541 		    VTAGSIZE_T4 | VTAG_STRIP);
1542 
1543 	goto exit;
1544 
1545 free_mem:
1546 	nix_ctx_free(rvu, pfvf);
1547 	rc = -ENOMEM;
1548 
1549 exit:
1550 	/* Set macaddr of this PF/VF */
1551 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1552 
1553 	/* set SQB size info */
1554 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1555 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1556 	rsp->rx_chan_base = pfvf->rx_chan_base;
1557 	rsp->tx_chan_base = pfvf->tx_chan_base;
1558 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1559 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1560 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1561 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1562 	/* Get HW supported stat count */
1563 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1564 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1565 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1566 	/* Get count of CQ IRQs and error IRQs supported per LF */
1567 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1568 	rsp->qints = ((cfg >> 12) & 0xFFF);
1569 	rsp->cints = ((cfg >> 24) & 0xFFF);
1570 	rsp->cgx_links = hw->cgx_links;
1571 	rsp->lbk_links = hw->lbk_links;
1572 	rsp->sdp_links = hw->sdp_links;
1573 
1574 	return rc;
1575 }
1576 
1577 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1578 				 struct msg_rsp *rsp)
1579 {
1580 	struct rvu_hwinfo *hw = rvu->hw;
1581 	u16 pcifunc = req->hdr.pcifunc;
1582 	struct rvu_block *block;
1583 	int blkaddr, nixlf, err;
1584 	struct rvu_pfvf *pfvf;
1585 
1586 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1587 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1588 	if (!pfvf->nixlf || blkaddr < 0)
1589 		return NIX_AF_ERR_AF_LF_INVALID;
1590 
1591 	block = &hw->block[blkaddr];
1592 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1593 	if (nixlf < 0)
1594 		return NIX_AF_ERR_AF_LF_INVALID;
1595 
1596 	if (req->flags & NIX_LF_DISABLE_FLOWS)
1597 		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1598 	else
1599 		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1600 
1601 	/* Free any tx vtag def entries used by this NIX LF */
1602 	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1603 		nix_free_tx_vtag_entries(rvu, pcifunc);
1604 
1605 	nix_interface_deinit(rvu, pcifunc, nixlf);
1606 
1607 	/* Reset this NIX LF */
1608 	err = rvu_lf_reset(rvu, block, nixlf);
1609 	if (err) {
1610 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1611 			block->addr - BLKADDR_NIX0, nixlf);
1612 		return NIX_AF_ERR_LF_RESET;
1613 	}
1614 
1615 	nix_ctx_free(rvu, pfvf);
1616 
1617 	return 0;
1618 }
1619 
1620 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1621 					 struct nix_mark_format_cfg  *req,
1622 					 struct nix_mark_format_cfg_rsp *rsp)
1623 {
1624 	u16 pcifunc = req->hdr.pcifunc;
1625 	struct nix_hw *nix_hw;
1626 	struct rvu_pfvf *pfvf;
1627 	int blkaddr, rc;
1628 	u32 cfg;
1629 
1630 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1631 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1632 	if (!pfvf->nixlf || blkaddr < 0)
1633 		return NIX_AF_ERR_AF_LF_INVALID;
1634 
1635 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1636 	if (!nix_hw)
1637 		return NIX_AF_ERR_INVALID_NIXBLK;
1638 
1639 	cfg = (((u32)req->offset & 0x7) << 16) |
1640 	      (((u32)req->y_mask & 0xF) << 12) |
1641 	      (((u32)req->y_val & 0xF) << 8) |
1642 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1643 
1644 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1645 	if (rc < 0) {
1646 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1647 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1648 		return NIX_AF_ERR_MARK_CFG_FAIL;
1649 	}
1650 
1651 	rsp->mark_format_idx = rc;
1652 	return 0;
1653 }
1654 
1655 /* Handle shaper update specially for few revisions */
1656 static bool
1657 handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
1658 			    int lvl, u64 reg, u64 regval)
1659 {
1660 	u64 regbase, oldval, sw_xoff = 0;
1661 	u64 dbgval, md_debug0 = 0;
1662 	unsigned long poll_tmo;
1663 	bool rate_reg = 0;
1664 	u32 schq;
1665 
1666 	regbase = reg & 0xFFFF;
1667 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1668 
1669 	/* Check for rate register */
1670 	switch (lvl) {
1671 	case NIX_TXSCH_LVL_TL1:
1672 		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
1673 		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
1674 
1675 		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
1676 		break;
1677 	case NIX_TXSCH_LVL_TL2:
1678 		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
1679 		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
1680 
1681 		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
1682 			    regbase == NIX_AF_TL2X_PIR(0));
1683 		break;
1684 	case NIX_TXSCH_LVL_TL3:
1685 		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
1686 		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
1687 
1688 		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
1689 			    regbase == NIX_AF_TL3X_PIR(0));
1690 		break;
1691 	case NIX_TXSCH_LVL_TL4:
1692 		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
1693 		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
1694 
1695 		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
1696 			    regbase == NIX_AF_TL4X_PIR(0));
1697 		break;
1698 	case NIX_TXSCH_LVL_MDQ:
1699 		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
1700 		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
1701 			    regbase == NIX_AF_MDQX_PIR(0));
1702 		break;
1703 	}
1704 
1705 	if (!rate_reg)
1706 		return false;
1707 
1708 	/* Nothing special to do when state is not toggled */
1709 	oldval = rvu_read64(rvu, blkaddr, reg);
1710 	if ((oldval & 0x1) == (regval & 0x1)) {
1711 		rvu_write64(rvu, blkaddr, reg, regval);
1712 		return true;
1713 	}
1714 
1715 	/* PIR/CIR disable */
1716 	if (!(regval & 0x1)) {
1717 		rvu_write64(rvu, blkaddr, sw_xoff, 1);
1718 		rvu_write64(rvu, blkaddr, reg, 0);
1719 		udelay(4);
1720 		rvu_write64(rvu, blkaddr, sw_xoff, 0);
1721 		return true;
1722 	}
1723 
1724 	/* PIR/CIR enable */
1725 	rvu_write64(rvu, blkaddr, sw_xoff, 1);
1726 	if (md_debug0) {
1727 		poll_tmo = jiffies + usecs_to_jiffies(10000);
1728 		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
1729 		do {
1730 			if (time_after(jiffies, poll_tmo)) {
1731 				dev_err(rvu->dev,
1732 					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
1733 					nixlf, schq, lvl);
1734 				goto exit;
1735 			}
1736 			usleep_range(1, 5);
1737 			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
1738 		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
1739 	}
1740 	rvu_write64(rvu, blkaddr, reg, regval);
1741 exit:
1742 	rvu_write64(rvu, blkaddr, sw_xoff, 0);
1743 	return true;
1744 }
1745 
1746 static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
1747 				  int lvl, int schq)
1748 {
1749 	u64 tlx_parent = 0, tlx_schedule = 0;
1750 
1751 	switch (lvl) {
1752 	case NIX_TXSCH_LVL_TL2:
1753 		tlx_parent   = NIX_AF_TL2X_PARENT(schq);
1754 		tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
1755 		break;
1756 	case NIX_TXSCH_LVL_TL3:
1757 		tlx_parent   = NIX_AF_TL3X_PARENT(schq);
1758 		tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
1759 		break;
1760 	case NIX_TXSCH_LVL_TL4:
1761 		tlx_parent   = NIX_AF_TL4X_PARENT(schq);
1762 		tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
1763 		break;
1764 	case NIX_TXSCH_LVL_MDQ:
1765 		/* no need to reset SMQ_CFG as HW clears this CSR
1766 		 * on SMQ flush
1767 		 */
1768 		tlx_parent   = NIX_AF_MDQX_PARENT(schq);
1769 		tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
1770 		break;
1771 	default:
1772 		return;
1773 	}
1774 
1775 	if (tlx_parent)
1776 		rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
1777 
1778 	if (tlx_schedule)
1779 		rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
1780 }
1781 
1782 /* Disable shaping of pkts by a scheduler queue
1783  * at a given scheduler level.
1784  */
1785 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1786 				 int nixlf, int lvl, int schq)
1787 {
1788 	struct rvu_hwinfo *hw = rvu->hw;
1789 	u64  cir_reg = 0, pir_reg = 0;
1790 	u64  cfg;
1791 
1792 	switch (lvl) {
1793 	case NIX_TXSCH_LVL_TL1:
1794 		cir_reg = NIX_AF_TL1X_CIR(schq);
1795 		pir_reg = 0; /* PIR not available at TL1 */
1796 		break;
1797 	case NIX_TXSCH_LVL_TL2:
1798 		cir_reg = NIX_AF_TL2X_CIR(schq);
1799 		pir_reg = NIX_AF_TL2X_PIR(schq);
1800 		break;
1801 	case NIX_TXSCH_LVL_TL3:
1802 		cir_reg = NIX_AF_TL3X_CIR(schq);
1803 		pir_reg = NIX_AF_TL3X_PIR(schq);
1804 		break;
1805 	case NIX_TXSCH_LVL_TL4:
1806 		cir_reg = NIX_AF_TL4X_CIR(schq);
1807 		pir_reg = NIX_AF_TL4X_PIR(schq);
1808 		break;
1809 	case NIX_TXSCH_LVL_MDQ:
1810 		cir_reg = NIX_AF_MDQX_CIR(schq);
1811 		pir_reg = NIX_AF_MDQX_PIR(schq);
1812 		break;
1813 	}
1814 
1815 	/* Shaper state toggle needs wait/poll */
1816 	if (hw->cap.nix_shaper_toggle_wait) {
1817 		if (cir_reg)
1818 			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1819 						    lvl, cir_reg, 0);
1820 		if (pir_reg)
1821 			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1822 						    lvl, pir_reg, 0);
1823 		return;
1824 	}
1825 
1826 	if (!cir_reg)
1827 		return;
1828 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1829 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1830 
1831 	if (!pir_reg)
1832 		return;
1833 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1834 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1835 }
1836 
1837 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1838 				 int lvl, int schq)
1839 {
1840 	struct rvu_hwinfo *hw = rvu->hw;
1841 	int link_level;
1842 	int link;
1843 
1844 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
1845 		return;
1846 
1847 	/* Reset TL4's SDP link config */
1848 	if (lvl == NIX_TXSCH_LVL_TL4)
1849 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1850 
1851 	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1852 			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1853 	if (lvl != link_level)
1854 		return;
1855 
1856 	/* Reset TL2's CGX or LBK link config */
1857 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1858 		rvu_write64(rvu, blkaddr,
1859 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1860 }
1861 
1862 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
1863 			      int lvl, int schq)
1864 {
1865 	struct rvu_hwinfo *hw = rvu->hw;
1866 	u64 reg;
1867 
1868 	/* Skip this if shaping is not supported */
1869 	if (!hw->cap.nix_shaping)
1870 		return;
1871 
1872 	/* Clear level specific SW_XOFF */
1873 	switch (lvl) {
1874 	case NIX_TXSCH_LVL_TL1:
1875 		reg = NIX_AF_TL1X_SW_XOFF(schq);
1876 		break;
1877 	case NIX_TXSCH_LVL_TL2:
1878 		reg = NIX_AF_TL2X_SW_XOFF(schq);
1879 		break;
1880 	case NIX_TXSCH_LVL_TL3:
1881 		reg = NIX_AF_TL3X_SW_XOFF(schq);
1882 		break;
1883 	case NIX_TXSCH_LVL_TL4:
1884 		reg = NIX_AF_TL4X_SW_XOFF(schq);
1885 		break;
1886 	case NIX_TXSCH_LVL_MDQ:
1887 		reg = NIX_AF_MDQX_SW_XOFF(schq);
1888 		break;
1889 	default:
1890 		return;
1891 	}
1892 
1893 	rvu_write64(rvu, blkaddr, reg, 0x0);
1894 }
1895 
1896 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1897 {
1898 	struct rvu_hwinfo *hw = rvu->hw;
1899 	int pf = rvu_get_pf(pcifunc);
1900 	u8 cgx_id = 0, lmac_id = 0;
1901 
1902 	if (is_afvf(pcifunc)) {/* LBK links */
1903 		return hw->cgx_links;
1904 	} else if (is_pf_cgxmapped(rvu, pf)) {
1905 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1906 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1907 	}
1908 
1909 	/* SDP link */
1910 	return hw->cgx_links + hw->lbk_links;
1911 }
1912 
1913 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1914 				 int link, int *start, int *end)
1915 {
1916 	struct rvu_hwinfo *hw = rvu->hw;
1917 	int pf = rvu_get_pf(pcifunc);
1918 
1919 	if (is_afvf(pcifunc)) { /* LBK links */
1920 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1921 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1922 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1923 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1924 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1925 	} else { /* SDP link */
1926 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1927 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1928 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1929 	}
1930 }
1931 
1932 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1933 				      struct nix_hw *nix_hw,
1934 				      struct nix_txsch_alloc_req *req)
1935 {
1936 	struct rvu_hwinfo *hw = rvu->hw;
1937 	int schq, req_schq, free_cnt;
1938 	struct nix_txsch *txsch;
1939 	int link, start, end;
1940 
1941 	txsch = &nix_hw->txsch[lvl];
1942 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
1943 
1944 	if (!req_schq)
1945 		return 0;
1946 
1947 	link = nix_get_tx_link(rvu, pcifunc);
1948 
1949 	/* For traffic aggregating scheduler level, one queue is enough */
1950 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1951 		if (req_schq != 1)
1952 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
1953 		return 0;
1954 	}
1955 
1956 	/* Get free SCHQ count and check if request can be accomodated */
1957 	if (hw->cap.nix_fixed_txschq_mapping) {
1958 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1959 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1960 		if (end <= txsch->schq.max && schq < end &&
1961 		    !test_bit(schq, txsch->schq.bmap))
1962 			free_cnt = 1;
1963 		else
1964 			free_cnt = 0;
1965 	} else {
1966 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
1967 	}
1968 
1969 	if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
1970 	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
1971 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1972 
1973 	/* If contiguous queues are needed, check for availability */
1974 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1975 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1976 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1977 
1978 	return 0;
1979 }
1980 
1981 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1982 			    struct nix_txsch_alloc_rsp *rsp,
1983 			    int lvl, int start, int end)
1984 {
1985 	struct rvu_hwinfo *hw = rvu->hw;
1986 	u16 pcifunc = rsp->hdr.pcifunc;
1987 	int idx, schq;
1988 
1989 	/* For traffic aggregating levels, queue alloc is based
1990 	 * on transmit link to which PF_FUNC is mapped to.
1991 	 */
1992 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1993 		/* A single TL queue is allocated */
1994 		if (rsp->schq_contig[lvl]) {
1995 			rsp->schq_contig[lvl] = 1;
1996 			rsp->schq_contig_list[lvl][0] = start;
1997 		}
1998 
1999 		/* Both contig and non-contig reqs doesn't make sense here */
2000 		if (rsp->schq_contig[lvl])
2001 			rsp->schq[lvl] = 0;
2002 
2003 		if (rsp->schq[lvl]) {
2004 			rsp->schq[lvl] = 1;
2005 			rsp->schq_list[lvl][0] = start;
2006 		}
2007 		return;
2008 	}
2009 
2010 	/* Adjust the queue request count if HW supports
2011 	 * only one queue per level configuration.
2012 	 */
2013 	if (hw->cap.nix_fixed_txschq_mapping) {
2014 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
2015 		schq = start + idx;
2016 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
2017 			rsp->schq_contig[lvl] = 0;
2018 			rsp->schq[lvl] = 0;
2019 			return;
2020 		}
2021 
2022 		if (rsp->schq_contig[lvl]) {
2023 			rsp->schq_contig[lvl] = 1;
2024 			set_bit(schq, txsch->schq.bmap);
2025 			rsp->schq_contig_list[lvl][0] = schq;
2026 			rsp->schq[lvl] = 0;
2027 		} else if (rsp->schq[lvl]) {
2028 			rsp->schq[lvl] = 1;
2029 			set_bit(schq, txsch->schq.bmap);
2030 			rsp->schq_list[lvl][0] = schq;
2031 		}
2032 		return;
2033 	}
2034 
2035 	/* Allocate contiguous queue indices requesty first */
2036 	if (rsp->schq_contig[lvl]) {
2037 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
2038 						  txsch->schq.max, start,
2039 						  rsp->schq_contig[lvl], 0);
2040 		if (schq >= end)
2041 			rsp->schq_contig[lvl] = 0;
2042 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
2043 			set_bit(schq, txsch->schq.bmap);
2044 			rsp->schq_contig_list[lvl][idx] = schq;
2045 			schq++;
2046 		}
2047 	}
2048 
2049 	/* Allocate non-contiguous queue indices */
2050 	if (rsp->schq[lvl]) {
2051 		idx = 0;
2052 		for (schq = start; schq < end; schq++) {
2053 			if (!test_bit(schq, txsch->schq.bmap)) {
2054 				set_bit(schq, txsch->schq.bmap);
2055 				rsp->schq_list[lvl][idx++] = schq;
2056 			}
2057 			if (idx == rsp->schq[lvl])
2058 				break;
2059 		}
2060 		/* Update how many were allocated */
2061 		rsp->schq[lvl] = idx;
2062 	}
2063 }
2064 
2065 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
2066 				     struct nix_txsch_alloc_req *req,
2067 				     struct nix_txsch_alloc_rsp *rsp)
2068 {
2069 	struct rvu_hwinfo *hw = rvu->hw;
2070 	u16 pcifunc = req->hdr.pcifunc;
2071 	int link, blkaddr, rc = 0;
2072 	int lvl, idx, start, end;
2073 	struct nix_txsch *txsch;
2074 	struct nix_hw *nix_hw;
2075 	u32 *pfvf_map;
2076 	int nixlf;
2077 	u16 schq;
2078 
2079 	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2080 	if (rc)
2081 		return rc;
2082 
2083 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2084 	if (!nix_hw)
2085 		return NIX_AF_ERR_INVALID_NIXBLK;
2086 
2087 	mutex_lock(&rvu->rsrc_lock);
2088 
2089 	/* Check if request is valid as per HW capabilities
2090 	 * and can be accomodated.
2091 	 */
2092 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2093 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
2094 		if (rc)
2095 			goto err;
2096 	}
2097 
2098 	/* Allocate requested Tx scheduler queues */
2099 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2100 		txsch = &nix_hw->txsch[lvl];
2101 		pfvf_map = txsch->pfvf_map;
2102 
2103 		if (!req->schq[lvl] && !req->schq_contig[lvl])
2104 			continue;
2105 
2106 		rsp->schq[lvl] = req->schq[lvl];
2107 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
2108 
2109 		link = nix_get_tx_link(rvu, pcifunc);
2110 
2111 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2112 			start = link;
2113 			end = link;
2114 		} else if (hw->cap.nix_fixed_txschq_mapping) {
2115 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
2116 		} else {
2117 			start = 0;
2118 			end = txsch->schq.max;
2119 		}
2120 
2121 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
2122 
2123 		/* Reset queue config */
2124 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
2125 			schq = rsp->schq_contig_list[lvl][idx];
2126 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2127 			    NIX_TXSCHQ_CFG_DONE))
2128 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2129 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2130 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2131 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2132 		}
2133 
2134 		for (idx = 0; idx < req->schq[lvl]; idx++) {
2135 			schq = rsp->schq_list[lvl][idx];
2136 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2137 			    NIX_TXSCHQ_CFG_DONE))
2138 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2139 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2140 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2141 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2142 		}
2143 	}
2144 
2145 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
2146 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
2147 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
2148 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2149 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2150 	goto exit;
2151 err:
2152 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
2153 exit:
2154 	mutex_unlock(&rvu->rsrc_lock);
2155 	return rc;
2156 }
2157 
2158 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
2159 				   struct nix_smq_flush_ctx *smq_flush_ctx)
2160 {
2161 	struct nix_smq_tree_ctx *smq_tree_ctx;
2162 	u64 parent_off, regval;
2163 	u16 schq;
2164 	int lvl;
2165 
2166 	smq_flush_ctx->smq = smq;
2167 
2168 	schq = smq;
2169 	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2170 		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2171 		if (lvl == NIX_TXSCH_LVL_TL1) {
2172 			smq_flush_ctx->tl1_schq = schq;
2173 			smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
2174 			smq_tree_ctx->pir_off = 0;
2175 			smq_tree_ctx->pir_val = 0;
2176 			parent_off = 0;
2177 		} else if (lvl == NIX_TXSCH_LVL_TL2) {
2178 			smq_flush_ctx->tl2_schq = schq;
2179 			smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
2180 			smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
2181 			parent_off = NIX_AF_TL2X_PARENT(schq);
2182 		} else if (lvl == NIX_TXSCH_LVL_TL3) {
2183 			smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
2184 			smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
2185 			parent_off = NIX_AF_TL3X_PARENT(schq);
2186 		} else if (lvl == NIX_TXSCH_LVL_TL4) {
2187 			smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
2188 			smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
2189 			parent_off = NIX_AF_TL4X_PARENT(schq);
2190 		} else if (lvl == NIX_TXSCH_LVL_MDQ) {
2191 			smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
2192 			smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
2193 			parent_off = NIX_AF_MDQX_PARENT(schq);
2194 		}
2195 		/* save cir/pir register values */
2196 		smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
2197 		if (smq_tree_ctx->pir_off)
2198 			smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
2199 
2200 		/* get parent txsch node */
2201 		if (parent_off) {
2202 			regval = rvu_read64(rvu, blkaddr, parent_off);
2203 			schq = (regval >> 16) & 0x1FF;
2204 		}
2205 	}
2206 }
2207 
2208 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
2209 				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2210 {
2211 	struct nix_txsch *txsch;
2212 	struct nix_hw *nix_hw;
2213 	u64 regoff;
2214 	int tl2;
2215 
2216 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2217 	if (!nix_hw)
2218 		return;
2219 
2220 	/* loop through all TL2s with matching PF_FUNC */
2221 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
2222 	for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
2223 		/* skip the smq(flush) TL2 */
2224 		if (tl2 == smq_flush_ctx->tl2_schq)
2225 			continue;
2226 		/* skip unused TL2s */
2227 		if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
2228 			continue;
2229 		/* skip if PF_FUNC doesn't match */
2230 		if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
2231 		    (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
2232 				    ~RVU_PFVF_FUNC_MASK)))
2233 			continue;
2234 		/* enable/disable XOFF */
2235 		regoff = NIX_AF_TL2X_SW_XOFF(tl2);
2236 		if (enable)
2237 			rvu_write64(rvu, blkaddr, regoff, 0x1);
2238 		else
2239 			rvu_write64(rvu, blkaddr, regoff, 0x0);
2240 	}
2241 }
2242 
2243 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
2244 				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2245 {
2246 	u64 cir_off, pir_off, cir_val, pir_val;
2247 	struct nix_smq_tree_ctx *smq_tree_ctx;
2248 	int lvl;
2249 
2250 	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2251 		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2252 		cir_off = smq_tree_ctx->cir_off;
2253 		cir_val = smq_tree_ctx->cir_val;
2254 		pir_off = smq_tree_ctx->pir_off;
2255 		pir_val = smq_tree_ctx->pir_val;
2256 
2257 		if (enable) {
2258 			rvu_write64(rvu, blkaddr, cir_off, cir_val);
2259 			if (lvl != NIX_TXSCH_LVL_TL1)
2260 				rvu_write64(rvu, blkaddr, pir_off, pir_val);
2261 		} else {
2262 			rvu_write64(rvu, blkaddr, cir_off, 0x0);
2263 			if (lvl != NIX_TXSCH_LVL_TL1)
2264 				rvu_write64(rvu, blkaddr, pir_off, 0x0);
2265 		}
2266 	}
2267 }
2268 
2269 static int nix_smq_flush(struct rvu *rvu, int blkaddr,
2270 			 int smq, u16 pcifunc, int nixlf)
2271 {
2272 	struct nix_smq_flush_ctx *smq_flush_ctx;
2273 	int pf = rvu_get_pf(pcifunc);
2274 	u8 cgx_id = 0, lmac_id = 0;
2275 	int err, restore_tx_en = 0;
2276 	u64 cfg;
2277 
2278 	if (!is_rvu_otx2(rvu)) {
2279 		/* Skip SMQ flush if pkt count is zero */
2280 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
2281 		if (!cfg)
2282 			return 0;
2283 	}
2284 
2285 	/* enable cgx tx if disabled */
2286 	if (is_pf_cgxmapped(rvu, pf)) {
2287 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2288 		restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
2289 						   lmac_id, true);
2290 	}
2291 
2292 	/* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
2293 	smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
2294 	if (!smq_flush_ctx)
2295 		return -ENOMEM;
2296 	nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
2297 	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
2298 	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
2299 
2300 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2301 	/* Do SMQ flush and set enqueue xoff */
2302 	cfg |= BIT_ULL(50) | BIT_ULL(49);
2303 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2304 
2305 	/* Disable backpressure from physical link,
2306 	 * otherwise SMQ flush may stall.
2307 	 */
2308 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
2309 
2310 	/* Wait for flush to complete */
2311 	err = rvu_poll_reg(rvu, blkaddr,
2312 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
2313 	if (err)
2314 		dev_info(rvu->dev,
2315 			 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
2316 			 nixlf, smq);
2317 
2318 	/* clear XOFF on TL2s */
2319 	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
2320 	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
2321 	kfree(smq_flush_ctx);
2322 
2323 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
2324 	/* restore cgx tx state */
2325 	if (restore_tx_en)
2326 		rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
2327 	return err;
2328 }
2329 
2330 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
2331 {
2332 	int blkaddr, nixlf, lvl, schq, err;
2333 	struct rvu_hwinfo *hw = rvu->hw;
2334 	struct nix_txsch *txsch;
2335 	struct nix_hw *nix_hw;
2336 	u16 map_func;
2337 
2338 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2339 	if (blkaddr < 0)
2340 		return NIX_AF_ERR_AF_LF_INVALID;
2341 
2342 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2343 	if (!nix_hw)
2344 		return NIX_AF_ERR_INVALID_NIXBLK;
2345 
2346 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2347 	if (nixlf < 0)
2348 		return NIX_AF_ERR_AF_LF_INVALID;
2349 
2350 	/* Disable TL2/3 queue links and all XOFF's before SMQ flush*/
2351 	mutex_lock(&rvu->rsrc_lock);
2352 	for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2353 		txsch = &nix_hw->txsch[lvl];
2354 
2355 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
2356 			continue;
2357 
2358 		for (schq = 0; schq < txsch->schq.max; schq++) {
2359 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2360 				continue;
2361 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2362 			nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2363 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2364 		}
2365 	}
2366 	nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
2367 			  nix_get_tx_link(rvu, pcifunc));
2368 
2369 	/* On PF cleanup, clear cfg done flag as
2370 	 * PF would have changed default config.
2371 	 */
2372 	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
2373 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
2374 		schq = nix_get_tx_link(rvu, pcifunc);
2375 		/* Do not clear pcifunc in txsch->pfvf_map[schq] because
2376 		 * VF might be using this TL1 queue
2377 		 */
2378 		map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
2379 		txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
2380 	}
2381 
2382 	/* Flush SMQs */
2383 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2384 	for (schq = 0; schq < txsch->schq.max; schq++) {
2385 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2386 			continue;
2387 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2388 	}
2389 
2390 	/* Now free scheduler queues to free pool */
2391 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2392 		 /* TLs above aggregation level are shared across all PF
2393 		  * and it's VFs, hence skip freeing them.
2394 		  */
2395 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
2396 			continue;
2397 
2398 		txsch = &nix_hw->txsch[lvl];
2399 		for (schq = 0; schq < txsch->schq.max; schq++) {
2400 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2401 				continue;
2402 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2403 			rvu_free_rsrc(&txsch->schq, schq);
2404 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2405 		}
2406 	}
2407 	mutex_unlock(&rvu->rsrc_lock);
2408 
2409 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
2410 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
2411 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
2412 	if (err)
2413 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
2414 
2415 	return 0;
2416 }
2417 
2418 static int nix_txschq_free_one(struct rvu *rvu,
2419 			       struct nix_txsch_free_req *req)
2420 {
2421 	struct rvu_hwinfo *hw = rvu->hw;
2422 	u16 pcifunc = req->hdr.pcifunc;
2423 	int lvl, schq, nixlf, blkaddr;
2424 	struct nix_txsch *txsch;
2425 	struct nix_hw *nix_hw;
2426 	u32 *pfvf_map;
2427 	int rc;
2428 
2429 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2430 	if (blkaddr < 0)
2431 		return NIX_AF_ERR_AF_LF_INVALID;
2432 
2433 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2434 	if (!nix_hw)
2435 		return NIX_AF_ERR_INVALID_NIXBLK;
2436 
2437 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2438 	if (nixlf < 0)
2439 		return NIX_AF_ERR_AF_LF_INVALID;
2440 
2441 	lvl = req->schq_lvl;
2442 	schq = req->schq;
2443 	txsch = &nix_hw->txsch[lvl];
2444 
2445 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
2446 		return 0;
2447 
2448 	pfvf_map = txsch->pfvf_map;
2449 	mutex_lock(&rvu->rsrc_lock);
2450 
2451 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
2452 		rc = NIX_AF_ERR_TLX_INVALID;
2453 		goto err;
2454 	}
2455 
2456 	/* Clear SW_XOFF of this resource only.
2457 	 * For SMQ level, all path XOFF's
2458 	 * need to be made clear by user
2459 	 */
2460 	nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2461 
2462 	nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2463 	nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2464 
2465 	/* Flush if it is a SMQ. Onus of disabling
2466 	 * TL2/3 queue links before SMQ flush is on user
2467 	 */
2468 	if (lvl == NIX_TXSCH_LVL_SMQ &&
2469 	    nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
2470 		rc = NIX_AF_SMQ_FLUSH_FAILED;
2471 		goto err;
2472 	}
2473 
2474 	nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2475 
2476 	/* Free the resource */
2477 	rvu_free_rsrc(&txsch->schq, schq);
2478 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2479 	mutex_unlock(&rvu->rsrc_lock);
2480 	return 0;
2481 err:
2482 	mutex_unlock(&rvu->rsrc_lock);
2483 	return rc;
2484 }
2485 
2486 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
2487 				    struct nix_txsch_free_req *req,
2488 				    struct msg_rsp *rsp)
2489 {
2490 	if (req->flags & TXSCHQ_FREE_ALL)
2491 		return nix_txschq_free(rvu, req->hdr.pcifunc);
2492 	else
2493 		return nix_txschq_free_one(rvu, req);
2494 }
2495 
2496 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
2497 				      int lvl, u64 reg, u64 regval)
2498 {
2499 	u64 regbase = reg & 0xFFFF;
2500 	u16 schq, parent;
2501 
2502 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
2503 		return false;
2504 
2505 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2506 	/* Check if this schq belongs to this PF/VF or not */
2507 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
2508 		return false;
2509 
2510 	parent = (regval >> 16) & 0x1FF;
2511 	/* Validate MDQ's TL4 parent */
2512 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
2513 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
2514 		return false;
2515 
2516 	/* Validate TL4's TL3 parent */
2517 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
2518 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
2519 		return false;
2520 
2521 	/* Validate TL3's TL2 parent */
2522 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
2523 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
2524 		return false;
2525 
2526 	/* Validate TL2's TL1 parent */
2527 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
2528 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
2529 		return false;
2530 
2531 	return true;
2532 }
2533 
2534 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
2535 {
2536 	u64 regbase;
2537 
2538 	if (hw->cap.nix_shaping)
2539 		return true;
2540 
2541 	/* If shaping and coloring is not supported, then
2542 	 * *_CIR and *_PIR registers should not be configured.
2543 	 */
2544 	regbase = reg & 0xFFFF;
2545 
2546 	switch (lvl) {
2547 	case NIX_TXSCH_LVL_TL1:
2548 		if (regbase == NIX_AF_TL1X_CIR(0))
2549 			return false;
2550 		break;
2551 	case NIX_TXSCH_LVL_TL2:
2552 		if (regbase == NIX_AF_TL2X_CIR(0) ||
2553 		    regbase == NIX_AF_TL2X_PIR(0))
2554 			return false;
2555 		break;
2556 	case NIX_TXSCH_LVL_TL3:
2557 		if (regbase == NIX_AF_TL3X_CIR(0) ||
2558 		    regbase == NIX_AF_TL3X_PIR(0))
2559 			return false;
2560 		break;
2561 	case NIX_TXSCH_LVL_TL4:
2562 		if (regbase == NIX_AF_TL4X_CIR(0) ||
2563 		    regbase == NIX_AF_TL4X_PIR(0))
2564 			return false;
2565 		break;
2566 	case NIX_TXSCH_LVL_MDQ:
2567 		if (regbase == NIX_AF_MDQX_CIR(0) ||
2568 		    regbase == NIX_AF_MDQX_PIR(0))
2569 			return false;
2570 		break;
2571 	}
2572 	return true;
2573 }
2574 
2575 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
2576 				u16 pcifunc, int blkaddr)
2577 {
2578 	u32 *pfvf_map;
2579 	int schq;
2580 
2581 	schq = nix_get_tx_link(rvu, pcifunc);
2582 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
2583 	/* Skip if PF has already done the config */
2584 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
2585 		return;
2586 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
2587 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
2588 
2589 	/* On OcteonTx2 the config was in bytes and newer silcons
2590 	 * it's changed to weight.
2591 	 */
2592 	if (!rvu->hw->cap.nix_common_dwrr_mtu)
2593 		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2594 			    TXSCH_TL1_DFLT_RR_QTM);
2595 	else
2596 		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2597 			    CN10K_MAX_DWRR_WEIGHT);
2598 
2599 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
2600 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
2601 }
2602 
2603 /* Register offset - [15:0]
2604  * Scheduler Queue number - [25:16]
2605  */
2606 #define NIX_TX_SCHQ_MASK	GENMASK_ULL(25, 0)
2607 
2608 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
2609 			       int blkaddr, struct nix_txschq_config *req,
2610 			       struct nix_txschq_config *rsp)
2611 {
2612 	u16 pcifunc = req->hdr.pcifunc;
2613 	int idx, schq;
2614 	u64 reg;
2615 
2616 	for (idx = 0; idx < req->num_regs; idx++) {
2617 		reg = req->reg[idx];
2618 		reg &= NIX_TX_SCHQ_MASK;
2619 		schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2620 		if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
2621 		    !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
2622 			return NIX_AF_INVAL_TXSCHQ_CFG;
2623 		rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
2624 	}
2625 	rsp->lvl = req->lvl;
2626 	rsp->num_regs = req->num_regs;
2627 	return 0;
2628 }
2629 
2630 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
2631 			struct nix_txsch *txsch, bool enable)
2632 {
2633 	struct rvu_hwinfo *hw = rvu->hw;
2634 	int lbk_link_start, lbk_links;
2635 	u8 pf = rvu_get_pf(pcifunc);
2636 	int schq;
2637 	u64 cfg;
2638 
2639 	if (!is_pf_cgxmapped(rvu, pf))
2640 		return;
2641 
2642 	cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
2643 	lbk_link_start = hw->cgx_links;
2644 
2645 	for (schq = 0; schq < txsch->schq.max; schq++) {
2646 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2647 			continue;
2648 		/* Enable all LBK links with channel 63 by default so that
2649 		 * packets can be sent to LBK with a NPC TX MCAM rule
2650 		 */
2651 		lbk_links = hw->lbk_links;
2652 		while (lbk_links--)
2653 			rvu_write64(rvu, blkaddr,
2654 				    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
2655 							      lbk_link_start +
2656 							      lbk_links), cfg);
2657 	}
2658 }
2659 
2660 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
2661 				    struct nix_txschq_config *req,
2662 				    struct nix_txschq_config *rsp)
2663 {
2664 	u64 reg, val, regval, schq_regbase, val_mask;
2665 	struct rvu_hwinfo *hw = rvu->hw;
2666 	u16 pcifunc = req->hdr.pcifunc;
2667 	struct nix_txsch *txsch;
2668 	struct nix_hw *nix_hw;
2669 	int blkaddr, idx, err;
2670 	int nixlf, schq;
2671 	u32 *pfvf_map;
2672 
2673 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
2674 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
2675 		return NIX_AF_INVAL_TXSCHQ_CFG;
2676 
2677 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2678 	if (err)
2679 		return err;
2680 
2681 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2682 	if (!nix_hw)
2683 		return NIX_AF_ERR_INVALID_NIXBLK;
2684 
2685 	if (req->read)
2686 		return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
2687 
2688 	txsch = &nix_hw->txsch[req->lvl];
2689 	pfvf_map = txsch->pfvf_map;
2690 
2691 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2692 	    pcifunc & RVU_PFVF_FUNC_MASK) {
2693 		mutex_lock(&rvu->rsrc_lock);
2694 		if (req->lvl == NIX_TXSCH_LVL_TL1)
2695 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2696 		mutex_unlock(&rvu->rsrc_lock);
2697 		return 0;
2698 	}
2699 
2700 	for (idx = 0; idx < req->num_regs; idx++) {
2701 		reg = req->reg[idx];
2702 		reg &= NIX_TX_SCHQ_MASK;
2703 		regval = req->regval[idx];
2704 		schq_regbase = reg & 0xFFFF;
2705 		val_mask = req->regval_mask[idx];
2706 
2707 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2708 					       txsch->lvl, reg, regval))
2709 			return NIX_AF_INVAL_TXSCHQ_CFG;
2710 
2711 		/* Check if shaping and coloring is supported */
2712 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2713 			continue;
2714 
2715 		val = rvu_read64(rvu, blkaddr, reg);
2716 		regval = (val & val_mask) | (regval & ~val_mask);
2717 
2718 		/* Handle shaping state toggle specially */
2719 		if (hw->cap.nix_shaper_toggle_wait &&
2720 		    handle_txschq_shaper_update(rvu, blkaddr, nixlf,
2721 						req->lvl, reg, regval))
2722 			continue;
2723 
2724 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2725 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2726 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2727 					   pcifunc, 0);
2728 			regval &= ~(0x7FULL << 24);
2729 			regval |= ((u64)nixlf << 24);
2730 		}
2731 
2732 		/* Clear 'BP_ENA' config, if it's not allowed */
2733 		if (!hw->cap.nix_tx_link_bp) {
2734 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2735 			    (schq_regbase & 0xFF00) ==
2736 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2737 				regval &= ~BIT_ULL(13);
2738 		}
2739 
2740 		/* Mark config as done for TL1 by PF */
2741 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2742 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2743 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2744 			mutex_lock(&rvu->rsrc_lock);
2745 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2746 							NIX_TXSCHQ_CFG_DONE);
2747 			mutex_unlock(&rvu->rsrc_lock);
2748 		}
2749 
2750 		/* SMQ flush is special hence split register writes such
2751 		 * that flush first and write rest of the bits later.
2752 		 */
2753 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2754 		    (regval & BIT_ULL(49))) {
2755 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2756 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2757 			regval &= ~BIT_ULL(49);
2758 		}
2759 		rvu_write64(rvu, blkaddr, reg, regval);
2760 	}
2761 
2762 	return 0;
2763 }
2764 
2765 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2766 			   struct nix_vtag_config *req)
2767 {
2768 	u64 regval = req->vtag_size;
2769 
2770 	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2771 	    req->vtag_size > VTAGSIZE_T8)
2772 		return -EINVAL;
2773 
2774 	/* RX VTAG Type 7 reserved for vf vlan */
2775 	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2776 		return NIX_AF_ERR_RX_VTAG_INUSE;
2777 
2778 	if (req->rx.capture_vtag)
2779 		regval |= BIT_ULL(5);
2780 	if (req->rx.strip_vtag)
2781 		regval |= BIT_ULL(4);
2782 
2783 	rvu_write64(rvu, blkaddr,
2784 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2785 	return 0;
2786 }
2787 
2788 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2789 			    u16 pcifunc, int index)
2790 {
2791 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2792 	struct nix_txvlan *vlan;
2793 
2794 	if (!nix_hw)
2795 		return NIX_AF_ERR_INVALID_NIXBLK;
2796 
2797 	vlan = &nix_hw->txvlan;
2798 	if (vlan->entry2pfvf_map[index] != pcifunc)
2799 		return NIX_AF_ERR_PARAM;
2800 
2801 	rvu_write64(rvu, blkaddr,
2802 		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2803 	rvu_write64(rvu, blkaddr,
2804 		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2805 
2806 	vlan->entry2pfvf_map[index] = 0;
2807 	rvu_free_rsrc(&vlan->rsrc, index);
2808 
2809 	return 0;
2810 }
2811 
2812 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2813 {
2814 	struct nix_txvlan *vlan;
2815 	struct nix_hw *nix_hw;
2816 	int index, blkaddr;
2817 
2818 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2819 	if (blkaddr < 0)
2820 		return;
2821 
2822 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2823 	if (!nix_hw)
2824 		return;
2825 
2826 	vlan = &nix_hw->txvlan;
2827 
2828 	mutex_lock(&vlan->rsrc_lock);
2829 	/* Scan all the entries and free the ones mapped to 'pcifunc' */
2830 	for (index = 0; index < vlan->rsrc.max; index++) {
2831 		if (vlan->entry2pfvf_map[index] == pcifunc)
2832 			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2833 	}
2834 	mutex_unlock(&vlan->rsrc_lock);
2835 }
2836 
2837 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2838 			     u64 vtag, u8 size)
2839 {
2840 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2841 	struct nix_txvlan *vlan;
2842 	u64 regval;
2843 	int index;
2844 
2845 	if (!nix_hw)
2846 		return NIX_AF_ERR_INVALID_NIXBLK;
2847 
2848 	vlan = &nix_hw->txvlan;
2849 
2850 	mutex_lock(&vlan->rsrc_lock);
2851 
2852 	index = rvu_alloc_rsrc(&vlan->rsrc);
2853 	if (index < 0) {
2854 		mutex_unlock(&vlan->rsrc_lock);
2855 		return index;
2856 	}
2857 
2858 	mutex_unlock(&vlan->rsrc_lock);
2859 
2860 	regval = size ? vtag : vtag << 32;
2861 
2862 	rvu_write64(rvu, blkaddr,
2863 		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2864 	rvu_write64(rvu, blkaddr,
2865 		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2866 
2867 	return index;
2868 }
2869 
2870 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2871 			     struct nix_vtag_config *req)
2872 {
2873 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2874 	u16 pcifunc = req->hdr.pcifunc;
2875 	int idx0 = req->tx.vtag0_idx;
2876 	int idx1 = req->tx.vtag1_idx;
2877 	struct nix_txvlan *vlan;
2878 	int err = 0;
2879 
2880 	if (!nix_hw)
2881 		return NIX_AF_ERR_INVALID_NIXBLK;
2882 
2883 	vlan = &nix_hw->txvlan;
2884 	if (req->tx.free_vtag0 && req->tx.free_vtag1)
2885 		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2886 		    vlan->entry2pfvf_map[idx1] != pcifunc)
2887 			return NIX_AF_ERR_PARAM;
2888 
2889 	mutex_lock(&vlan->rsrc_lock);
2890 
2891 	if (req->tx.free_vtag0) {
2892 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2893 		if (err)
2894 			goto exit;
2895 	}
2896 
2897 	if (req->tx.free_vtag1)
2898 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2899 
2900 exit:
2901 	mutex_unlock(&vlan->rsrc_lock);
2902 	return err;
2903 }
2904 
2905 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2906 			   struct nix_vtag_config *req,
2907 			   struct nix_vtag_config_rsp *rsp)
2908 {
2909 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2910 	struct nix_txvlan *vlan;
2911 	u16 pcifunc = req->hdr.pcifunc;
2912 
2913 	if (!nix_hw)
2914 		return NIX_AF_ERR_INVALID_NIXBLK;
2915 
2916 	vlan = &nix_hw->txvlan;
2917 	if (req->tx.cfg_vtag0) {
2918 		rsp->vtag0_idx =
2919 			nix_tx_vtag_alloc(rvu, blkaddr,
2920 					  req->tx.vtag0, req->vtag_size);
2921 
2922 		if (rsp->vtag0_idx < 0)
2923 			return NIX_AF_ERR_TX_VTAG_NOSPC;
2924 
2925 		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2926 	}
2927 
2928 	if (req->tx.cfg_vtag1) {
2929 		rsp->vtag1_idx =
2930 			nix_tx_vtag_alloc(rvu, blkaddr,
2931 					  req->tx.vtag1, req->vtag_size);
2932 
2933 		if (rsp->vtag1_idx < 0)
2934 			goto err_free;
2935 
2936 		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2937 	}
2938 
2939 	return 0;
2940 
2941 err_free:
2942 	if (req->tx.cfg_vtag0)
2943 		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2944 
2945 	return NIX_AF_ERR_TX_VTAG_NOSPC;
2946 }
2947 
2948 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2949 				  struct nix_vtag_config *req,
2950 				  struct nix_vtag_config_rsp *rsp)
2951 {
2952 	u16 pcifunc = req->hdr.pcifunc;
2953 	int blkaddr, nixlf, err;
2954 
2955 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2956 	if (err)
2957 		return err;
2958 
2959 	if (req->cfg_type) {
2960 		/* rx vtag configuration */
2961 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2962 		if (err)
2963 			return NIX_AF_ERR_PARAM;
2964 	} else {
2965 		/* tx vtag configuration */
2966 		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2967 		    (req->tx.free_vtag0 || req->tx.free_vtag1))
2968 			return NIX_AF_ERR_PARAM;
2969 
2970 		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2971 			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2972 
2973 		if (req->tx.free_vtag0 || req->tx.free_vtag1)
2974 			return nix_tx_vtag_decfg(rvu, blkaddr, req);
2975 	}
2976 
2977 	return 0;
2978 }
2979 
2980 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2981 			     int mce, u8 op, u16 pcifunc, int next,
2982 			     int index, u8 mce_op, bool eol)
2983 {
2984 	struct nix_aq_enq_req aq_req;
2985 	int err;
2986 
2987 	aq_req.hdr.pcifunc = 0;
2988 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
2989 	aq_req.op = op;
2990 	aq_req.qidx = mce;
2991 
2992 	/* Use RSS with RSS index 0 */
2993 	aq_req.mce.op = mce_op;
2994 	aq_req.mce.index = index;
2995 	aq_req.mce.eol = eol;
2996 	aq_req.mce.pf_func = pcifunc;
2997 	aq_req.mce.next = next;
2998 
2999 	/* All fields valid */
3000 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
3001 
3002 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
3003 	if (err) {
3004 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
3005 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
3006 		return err;
3007 	}
3008 	return 0;
3009 }
3010 
3011 static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list)
3012 {
3013 	struct hlist_node *tmp;
3014 	struct mce *mce;
3015 
3016 	/* Scan through the current list */
3017 	hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
3018 		hlist_del(&mce->node);
3019 		kfree(mce);
3020 	}
3021 
3022 	mce_list->count = 0;
3023 	mce_list->max = 0;
3024 }
3025 
3026 static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem)
3027 {
3028 	return elem->mce_start_index + elem->mcast_mce_list.count - 1;
3029 }
3030 
3031 static int nix_update_ingress_mce_list_hw(struct rvu *rvu,
3032 					  struct nix_hw *nix_hw,
3033 					  struct nix_mcast_grp_elem *elem)
3034 {
3035 	int idx, last_idx, next_idx, err;
3036 	struct nix_mce_list *mce_list;
3037 	struct mce *mce, *prev_mce;
3038 
3039 	mce_list = &elem->mcast_mce_list;
3040 	idx = elem->mce_start_index;
3041 	last_idx = nix_get_last_mce_list_index(elem);
3042 	hlist_for_each_entry(mce, &mce_list->head, node) {
3043 		if (idx > last_idx)
3044 			break;
3045 
3046 		if (!mce->is_active) {
3047 			if (idx == elem->mce_start_index) {
3048 				idx++;
3049 				prev_mce = mce;
3050 				elem->mce_start_index = idx;
3051 				continue;
3052 			} else if (idx == last_idx) {
3053 				err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE,
3054 							prev_mce->pcifunc, next_idx,
3055 							prev_mce->rq_rss_index,
3056 							prev_mce->dest_type,
3057 							false);
3058 				if (err)
3059 					return err;
3060 
3061 				break;
3062 			}
3063 		}
3064 
3065 		next_idx = idx + 1;
3066 		/* EOL should be set in last MCE */
3067 		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
3068 					mce->pcifunc, next_idx,
3069 					mce->rq_rss_index, mce->dest_type,
3070 					(next_idx > last_idx) ? true : false);
3071 		if (err)
3072 			return err;
3073 
3074 		idx++;
3075 		prev_mce = mce;
3076 	}
3077 
3078 	return 0;
3079 }
3080 
3081 static void nix_update_egress_mce_list_hw(struct rvu *rvu,
3082 					  struct nix_hw *nix_hw,
3083 					  struct nix_mcast_grp_elem *elem)
3084 {
3085 	struct nix_mce_list *mce_list;
3086 	int idx, last_idx, next_idx;
3087 	struct mce *mce, *prev_mce;
3088 	u64 regval;
3089 	u8 eol;
3090 
3091 	mce_list = &elem->mcast_mce_list;
3092 	idx = elem->mce_start_index;
3093 	last_idx = nix_get_last_mce_list_index(elem);
3094 	hlist_for_each_entry(mce, &mce_list->head, node) {
3095 		if (idx > last_idx)
3096 			break;
3097 
3098 		if (!mce->is_active) {
3099 			if (idx == elem->mce_start_index) {
3100 				idx++;
3101 				prev_mce = mce;
3102 				elem->mce_start_index = idx;
3103 				continue;
3104 			} else if (idx == last_idx) {
3105 				regval = (next_idx << 16) | (1 << 12) | prev_mce->channel;
3106 				rvu_write64(rvu, nix_hw->blkaddr,
3107 					    NIX_AF_TX_MCASTX(idx - 1),
3108 					    regval);
3109 				break;
3110 			}
3111 		}
3112 
3113 		eol = 0;
3114 		next_idx = idx + 1;
3115 		/* EOL should be set in last MCE */
3116 		if (next_idx > last_idx)
3117 			eol = 1;
3118 
3119 		regval = (next_idx << 16) | (eol << 12) | mce->channel;
3120 		rvu_write64(rvu, nix_hw->blkaddr,
3121 			    NIX_AF_TX_MCASTX(idx),
3122 			    regval);
3123 		idx++;
3124 		prev_mce = mce;
3125 	}
3126 }
3127 
3128 static int nix_del_mce_list_entry(struct rvu *rvu,
3129 				  struct nix_hw *nix_hw,
3130 				  struct nix_mcast_grp_elem *elem,
3131 				  struct nix_mcast_grp_update_req *req)
3132 {
3133 	u32 num_entry = req->num_mce_entry;
3134 	struct nix_mce_list *mce_list;
3135 	struct mce *mce;
3136 	bool is_found;
3137 	int i;
3138 
3139 	mce_list = &elem->mcast_mce_list;
3140 	for (i = 0; i < num_entry; i++) {
3141 		is_found = false;
3142 		hlist_for_each_entry(mce, &mce_list->head, node) {
3143 			/* If already exists, then delete */
3144 			if (mce->pcifunc == req->pcifunc[i]) {
3145 				hlist_del(&mce->node);
3146 				kfree(mce);
3147 				mce_list->count--;
3148 				is_found = true;
3149 				break;
3150 			}
3151 		}
3152 
3153 		if (!is_found)
3154 			return NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
3155 	}
3156 
3157 	mce_list->max = mce_list->count;
3158 	/* Dump the updated list to HW */
3159 	if (elem->dir == NIX_MCAST_INGRESS)
3160 		return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
3161 
3162 	nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
3163 	return 0;
3164 }
3165 
3166 static int nix_add_mce_list_entry(struct rvu *rvu,
3167 				  struct nix_hw *nix_hw,
3168 				  struct nix_mcast_grp_elem *elem,
3169 				  struct nix_mcast_grp_update_req *req)
3170 {
3171 	u32 num_entry = req->num_mce_entry;
3172 	struct nix_mce_list *mce_list;
3173 	struct hlist_node *tmp;
3174 	struct mce *mce;
3175 	int i;
3176 
3177 	mce_list = &elem->mcast_mce_list;
3178 	for (i = 0; i < num_entry; i++) {
3179 		mce = kzalloc(sizeof(*mce), GFP_KERNEL);
3180 		if (!mce)
3181 			goto free_mce;
3182 
3183 		mce->pcifunc = req->pcifunc[i];
3184 		mce->channel = req->channel[i];
3185 		mce->rq_rss_index = req->rq_rss_index[i];
3186 		mce->dest_type = req->dest_type[i];
3187 		mce->is_active = 1;
3188 		hlist_add_head(&mce->node, &mce_list->head);
3189 		mce_list->count++;
3190 	}
3191 
3192 	mce_list->max += num_entry;
3193 
3194 	/* Dump the updated list to HW */
3195 	if (elem->dir == NIX_MCAST_INGRESS)
3196 		return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
3197 
3198 	nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
3199 	return 0;
3200 
3201 free_mce:
3202 	hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
3203 		hlist_del(&mce->node);
3204 		kfree(mce);
3205 		mce_list->count--;
3206 	}
3207 
3208 	return -ENOMEM;
3209 }
3210 
3211 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
3212 				     u16 pcifunc, bool add)
3213 {
3214 	struct mce *mce, *tail = NULL;
3215 	bool delete = false;
3216 
3217 	/* Scan through the current list */
3218 	hlist_for_each_entry(mce, &mce_list->head, node) {
3219 		/* If already exists, then delete */
3220 		if (mce->pcifunc == pcifunc && !add) {
3221 			delete = true;
3222 			break;
3223 		} else if (mce->pcifunc == pcifunc && add) {
3224 			/* entry already exists */
3225 			return 0;
3226 		}
3227 		tail = mce;
3228 	}
3229 
3230 	if (delete) {
3231 		hlist_del(&mce->node);
3232 		kfree(mce);
3233 		mce_list->count--;
3234 		return 0;
3235 	}
3236 
3237 	if (!add)
3238 		return 0;
3239 
3240 	/* Add a new one to the list, at the tail */
3241 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
3242 	if (!mce)
3243 		return -ENOMEM;
3244 	mce->pcifunc = pcifunc;
3245 	if (!tail)
3246 		hlist_add_head(&mce->node, &mce_list->head);
3247 	else
3248 		hlist_add_behind(&mce->node, &tail->node);
3249 	mce_list->count++;
3250 	return 0;
3251 }
3252 
3253 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
3254 			struct nix_mce_list *mce_list,
3255 			int mce_idx, int mcam_index, bool add)
3256 {
3257 	int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
3258 	struct npc_mcam *mcam = &rvu->hw->mcam;
3259 	struct nix_mcast *mcast;
3260 	struct nix_hw *nix_hw;
3261 	struct mce *mce;
3262 
3263 	if (!mce_list)
3264 		return -EINVAL;
3265 
3266 	/* Get this PF/VF func's MCE index */
3267 	idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
3268 
3269 	if (idx > (mce_idx + mce_list->max)) {
3270 		dev_err(rvu->dev,
3271 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
3272 			__func__, idx, mce_list->max,
3273 			pcifunc >> RVU_PFVF_PF_SHIFT);
3274 		return -EINVAL;
3275 	}
3276 
3277 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
3278 	if (err)
3279 		return err;
3280 
3281 	mcast = &nix_hw->mcast;
3282 	mutex_lock(&mcast->mce_lock);
3283 
3284 	err = nix_update_mce_list_entry(mce_list, pcifunc, add);
3285 	if (err)
3286 		goto end;
3287 
3288 	/* Disable MCAM entry in NPC */
3289 	if (!mce_list->count) {
3290 		npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3291 		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
3292 		goto end;
3293 	}
3294 
3295 	/* Dump the updated list to HW */
3296 	idx = mce_idx;
3297 	last_idx = idx + mce_list->count - 1;
3298 	hlist_for_each_entry(mce, &mce_list->head, node) {
3299 		if (idx > last_idx)
3300 			break;
3301 
3302 		next_idx = idx + 1;
3303 		/* EOL should be set in last MCE */
3304 		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
3305 					mce->pcifunc, next_idx,
3306 					0, 1,
3307 					(next_idx > last_idx) ? true : false);
3308 		if (err)
3309 			goto end;
3310 		idx++;
3311 	}
3312 
3313 end:
3314 	mutex_unlock(&mcast->mce_lock);
3315 	return err;
3316 }
3317 
3318 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
3319 		      struct nix_mce_list **mce_list, int *mce_idx)
3320 {
3321 	struct rvu_hwinfo *hw = rvu->hw;
3322 	struct rvu_pfvf *pfvf;
3323 
3324 	if (!hw->cap.nix_rx_multicast ||
3325 	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
3326 		*mce_list = NULL;
3327 		*mce_idx = 0;
3328 		return;
3329 	}
3330 
3331 	/* Get this PF/VF func's MCE index */
3332 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
3333 
3334 	if (type == NIXLF_BCAST_ENTRY) {
3335 		*mce_list = &pfvf->bcast_mce_list;
3336 		*mce_idx = pfvf->bcast_mce_idx;
3337 	} else if (type == NIXLF_ALLMULTI_ENTRY) {
3338 		*mce_list = &pfvf->mcast_mce_list;
3339 		*mce_idx = pfvf->mcast_mce_idx;
3340 	} else if (type == NIXLF_PROMISC_ENTRY) {
3341 		*mce_list = &pfvf->promisc_mce_list;
3342 		*mce_idx = pfvf->promisc_mce_idx;
3343 	}  else {
3344 		*mce_list = NULL;
3345 		*mce_idx = 0;
3346 	}
3347 }
3348 
3349 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
3350 			       int type, bool add)
3351 {
3352 	int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
3353 	struct npc_mcam *mcam = &rvu->hw->mcam;
3354 	struct rvu_hwinfo *hw = rvu->hw;
3355 	struct nix_mce_list *mce_list;
3356 	int pf;
3357 
3358 	/* skip multicast pkt replication for AF's VFs & SDP links */
3359 	if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
3360 		return 0;
3361 
3362 	if (!hw->cap.nix_rx_multicast)
3363 		return 0;
3364 
3365 	pf = rvu_get_pf(pcifunc);
3366 	if (!is_pf_cgxmapped(rvu, pf))
3367 		return 0;
3368 
3369 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3370 	if (blkaddr < 0)
3371 		return -EINVAL;
3372 
3373 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
3374 	if (nixlf < 0)
3375 		return -EINVAL;
3376 
3377 	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
3378 
3379 	mcam_index = npc_get_nixlf_mcam_index(mcam,
3380 					      pcifunc & ~RVU_PFVF_FUNC_MASK,
3381 					      nixlf, type);
3382 	err = nix_update_mce_list(rvu, pcifunc, mce_list,
3383 				  mce_idx, mcam_index, add);
3384 	return err;
3385 }
3386 
3387 static void nix_setup_mcast_grp(struct nix_hw *nix_hw)
3388 {
3389 	struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp;
3390 
3391 	INIT_LIST_HEAD(&mcast_grp->mcast_grp_head);
3392 	mutex_init(&mcast_grp->mcast_grp_lock);
3393 	mcast_grp->next_grp_index = 1;
3394 	mcast_grp->count = 0;
3395 }
3396 
3397 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
3398 {
3399 	struct nix_mcast *mcast = &nix_hw->mcast;
3400 	int err, pf, numvfs, idx;
3401 	struct rvu_pfvf *pfvf;
3402 	u16 pcifunc;
3403 	u64 cfg;
3404 
3405 	/* Skip PF0 (i.e AF) */
3406 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
3407 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3408 		/* If PF is not enabled, nothing to do */
3409 		if (!((cfg >> 20) & 0x01))
3410 			continue;
3411 		/* Get numVFs attached to this PF */
3412 		numvfs = (cfg >> 12) & 0xFF;
3413 
3414 		pfvf = &rvu->pf[pf];
3415 
3416 		/* This NIX0/1 block mapped to PF ? */
3417 		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
3418 			continue;
3419 
3420 		/* save start idx of broadcast mce list */
3421 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3422 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
3423 
3424 		/* save start idx of multicast mce list */
3425 		pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3426 		nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
3427 
3428 		/* save the start idx of promisc mce list */
3429 		pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3430 		nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
3431 
3432 		for (idx = 0; idx < (numvfs + 1); idx++) {
3433 			/* idx-0 is for PF, followed by VFs */
3434 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
3435 			pcifunc |= idx;
3436 			/* Add dummy entries now, so that we don't have to check
3437 			 * for whether AQ_OP should be INIT/WRITE later on.
3438 			 * Will be updated when a NIXLF is attached/detached to
3439 			 * these PF/VFs.
3440 			 */
3441 			err = nix_blk_setup_mce(rvu, nix_hw,
3442 						pfvf->bcast_mce_idx + idx,
3443 						NIX_AQ_INSTOP_INIT,
3444 						pcifunc, 0, 0, 1, true);
3445 			if (err)
3446 				return err;
3447 
3448 			/* add dummy entries to multicast mce list */
3449 			err = nix_blk_setup_mce(rvu, nix_hw,
3450 						pfvf->mcast_mce_idx + idx,
3451 						NIX_AQ_INSTOP_INIT,
3452 						pcifunc, 0, 0, 1, true);
3453 			if (err)
3454 				return err;
3455 
3456 			/* add dummy entries to promisc mce list */
3457 			err = nix_blk_setup_mce(rvu, nix_hw,
3458 						pfvf->promisc_mce_idx + idx,
3459 						NIX_AQ_INSTOP_INIT,
3460 						pcifunc, 0, 0, 1, true);
3461 			if (err)
3462 				return err;
3463 		}
3464 	}
3465 	return 0;
3466 }
3467 
3468 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3469 {
3470 	struct nix_mcast *mcast = &nix_hw->mcast;
3471 	struct rvu_hwinfo *hw = rvu->hw;
3472 	int err, size;
3473 
3474 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
3475 	size = BIT_ULL(size);
3476 
3477 	/* Allocate bitmap for rx mce entries */
3478 	mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE;
3479 	err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3480 	if (err)
3481 		return -ENOMEM;
3482 
3483 	/* Allocate bitmap for tx mce entries */
3484 	mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX;
3485 	err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3486 	if (err) {
3487 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3488 		return -ENOMEM;
3489 	}
3490 
3491 	/* Alloc memory for multicast/mirror replication entries */
3492 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
3493 			 mcast->mce_counter[NIX_MCAST_INGRESS].max, size);
3494 	if (err) {
3495 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3496 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3497 		return -ENOMEM;
3498 	}
3499 
3500 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
3501 		    (u64)mcast->mce_ctx->iova);
3502 
3503 	/* Set max list length equal to max no of VFs per PF  + PF itself */
3504 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
3505 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
3506 
3507 	/* Alloc memory for multicast replication buffers */
3508 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
3509 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
3510 			 (8UL << MC_BUF_CNT), size);
3511 	if (err) {
3512 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3513 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3514 		return -ENOMEM;
3515 	}
3516 
3517 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
3518 		    (u64)mcast->mcast_buf->iova);
3519 
3520 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
3521 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
3522 
3523 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
3524 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
3525 		    BIT_ULL(20) | MC_BUF_CNT);
3526 
3527 	mutex_init(&mcast->mce_lock);
3528 
3529 	nix_setup_mcast_grp(nix_hw);
3530 
3531 	return nix_setup_mce_tables(rvu, nix_hw);
3532 }
3533 
3534 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
3535 {
3536 	struct nix_txvlan *vlan = &nix_hw->txvlan;
3537 	int err;
3538 
3539 	/* Allocate resource bimap for tx vtag def registers*/
3540 	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
3541 	err = rvu_alloc_bitmap(&vlan->rsrc);
3542 	if (err)
3543 		return -ENOMEM;
3544 
3545 	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
3546 	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
3547 					    sizeof(u16), GFP_KERNEL);
3548 	if (!vlan->entry2pfvf_map)
3549 		goto free_mem;
3550 
3551 	mutex_init(&vlan->rsrc_lock);
3552 	return 0;
3553 
3554 free_mem:
3555 	kfree(vlan->rsrc.bmap);
3556 	return -ENOMEM;
3557 }
3558 
3559 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3560 {
3561 	struct nix_txsch *txsch;
3562 	int err, lvl, schq;
3563 	u64 cfg, reg;
3564 
3565 	/* Get scheduler queue count of each type and alloc
3566 	 * bitmap for each for alloc/free/attach operations.
3567 	 */
3568 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3569 		txsch = &nix_hw->txsch[lvl];
3570 		txsch->lvl = lvl;
3571 		switch (lvl) {
3572 		case NIX_TXSCH_LVL_SMQ:
3573 			reg = NIX_AF_MDQ_CONST;
3574 			break;
3575 		case NIX_TXSCH_LVL_TL4:
3576 			reg = NIX_AF_TL4_CONST;
3577 			break;
3578 		case NIX_TXSCH_LVL_TL3:
3579 			reg = NIX_AF_TL3_CONST;
3580 			break;
3581 		case NIX_TXSCH_LVL_TL2:
3582 			reg = NIX_AF_TL2_CONST;
3583 			break;
3584 		case NIX_TXSCH_LVL_TL1:
3585 			reg = NIX_AF_TL1_CONST;
3586 			break;
3587 		}
3588 		cfg = rvu_read64(rvu, blkaddr, reg);
3589 		txsch->schq.max = cfg & 0xFFFF;
3590 		err = rvu_alloc_bitmap(&txsch->schq);
3591 		if (err)
3592 			return err;
3593 
3594 		/* Allocate memory for scheduler queues to
3595 		 * PF/VF pcifunc mapping info.
3596 		 */
3597 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
3598 					       sizeof(u32), GFP_KERNEL);
3599 		if (!txsch->pfvf_map)
3600 			return -ENOMEM;
3601 		for (schq = 0; schq < txsch->schq.max; schq++)
3602 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
3603 	}
3604 
3605 	/* Setup a default value of 8192 as DWRR MTU */
3606 	if (rvu->hw->cap.nix_common_dwrr_mtu ||
3607 	    rvu->hw->cap.nix_multiple_dwrr_mtu) {
3608 		rvu_write64(rvu, blkaddr,
3609 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
3610 			    convert_bytes_to_dwrr_mtu(8192));
3611 		rvu_write64(rvu, blkaddr,
3612 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
3613 			    convert_bytes_to_dwrr_mtu(8192));
3614 		rvu_write64(rvu, blkaddr,
3615 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
3616 			    convert_bytes_to_dwrr_mtu(8192));
3617 	}
3618 
3619 	return 0;
3620 }
3621 
3622 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
3623 				int blkaddr, u32 cfg)
3624 {
3625 	int fmt_idx;
3626 
3627 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
3628 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
3629 			return fmt_idx;
3630 	}
3631 	if (fmt_idx >= nix_hw->mark_format.total)
3632 		return -ERANGE;
3633 
3634 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
3635 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
3636 	nix_hw->mark_format.in_use++;
3637 	return fmt_idx;
3638 }
3639 
3640 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
3641 				    int blkaddr)
3642 {
3643 	u64 cfgs[] = {
3644 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
3645 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
3646 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
3647 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
3648 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
3649 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
3650 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
3651 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
3652 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
3653 	};
3654 	int i, rc;
3655 	u64 total;
3656 
3657 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
3658 	nix_hw->mark_format.total = (u8)total;
3659 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
3660 					       GFP_KERNEL);
3661 	if (!nix_hw->mark_format.cfg)
3662 		return -ENOMEM;
3663 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
3664 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
3665 		if (rc < 0)
3666 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
3667 				i, rc);
3668 	}
3669 
3670 	return 0;
3671 }
3672 
3673 static void rvu_get_lbk_link_max_frs(struct rvu *rvu,  u16 *max_mtu)
3674 {
3675 	/* CN10K supports LBK FIFO size 72 KB */
3676 	if (rvu->hw->lbk_bufsize == 0x12000)
3677 		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
3678 	else
3679 		*max_mtu = NIC_HW_MAX_FRS;
3680 }
3681 
3682 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3683 {
3684 	int fifo_size = rvu_cgx_get_fifolen(rvu);
3685 
3686 	/* RPM supports FIFO len 128 KB and RPM2 supports double the
3687 	 * FIFO len to accommodate 8 LMACS
3688 	 */
3689 	if (fifo_size == 0x20000 || fifo_size == 0x40000)
3690 		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
3691 	else
3692 		*max_mtu = NIC_HW_MAX_FRS;
3693 }
3694 
3695 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
3696 				     struct nix_hw_info *rsp)
3697 {
3698 	u16 pcifunc = req->hdr.pcifunc;
3699 	u64 dwrr_mtu;
3700 	int blkaddr;
3701 
3702 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3703 	if (blkaddr < 0)
3704 		return NIX_AF_ERR_AF_LF_INVALID;
3705 
3706 	if (is_afvf(pcifunc))
3707 		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
3708 	else
3709 		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
3710 
3711 	rsp->min_mtu = NIC_HW_MIN_FRS;
3712 
3713 	if (!rvu->hw->cap.nix_common_dwrr_mtu &&
3714 	    !rvu->hw->cap.nix_multiple_dwrr_mtu) {
3715 		/* Return '1' on OTx2 */
3716 		rsp->rpm_dwrr_mtu = 1;
3717 		rsp->sdp_dwrr_mtu = 1;
3718 		rsp->lbk_dwrr_mtu = 1;
3719 		return 0;
3720 	}
3721 
3722 	/* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
3723 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3724 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
3725 	rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3726 
3727 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3728 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
3729 	rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3730 
3731 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3732 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
3733 	rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3734 
3735 	return 0;
3736 }
3737 
3738 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
3739 				   struct msg_rsp *rsp)
3740 {
3741 	u16 pcifunc = req->hdr.pcifunc;
3742 	int i, nixlf, blkaddr, err;
3743 	u64 stats;
3744 
3745 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3746 	if (err)
3747 		return err;
3748 
3749 	/* Get stats count supported by HW */
3750 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
3751 
3752 	/* Reset tx stats */
3753 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
3754 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
3755 
3756 	/* Reset rx stats */
3757 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
3758 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
3759 
3760 	return 0;
3761 }
3762 
3763 /* Returns the ALG index to be set into NPC_RX_ACTION */
3764 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
3765 {
3766 	int i;
3767 
3768 	/* Scan over exiting algo entries to find a match */
3769 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
3770 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
3771 			return i;
3772 
3773 	return -ERANGE;
3774 }
3775 
3776 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
3777 {
3778 	int idx, nr_field, key_off, field_marker, keyoff_marker;
3779 	int max_key_off, max_bit_pos, group_member;
3780 	struct nix_rx_flowkey_alg *field;
3781 	struct nix_rx_flowkey_alg tmp;
3782 	u32 key_type, valid_key;
3783 	u32 l3_l4_src_dst;
3784 	int l4_key_offset = 0;
3785 
3786 	if (!alg)
3787 		return -EINVAL;
3788 
3789 #define FIELDS_PER_ALG  5
3790 #define MAX_KEY_OFF	40
3791 	/* Clear all fields */
3792 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
3793 
3794 	/* Each of the 32 possible flow key algorithm definitions should
3795 	 * fall into above incremental config (except ALG0). Otherwise a
3796 	 * single NPC MCAM entry is not sufficient for supporting RSS.
3797 	 *
3798 	 * If a different definition or combination needed then NPC MCAM
3799 	 * has to be programmed to filter such pkts and it's action should
3800 	 * point to this definition to calculate flowtag or hash.
3801 	 *
3802 	 * The `for loop` goes over _all_ protocol field and the following
3803 	 * variables depicts the state machine forward progress logic.
3804 	 *
3805 	 * keyoff_marker - Enabled when hash byte length needs to be accounted
3806 	 * in field->key_offset update.
3807 	 * field_marker - Enabled when a new field needs to be selected.
3808 	 * group_member - Enabled when protocol is part of a group.
3809 	 */
3810 
3811 	/* Last 4 bits (31:28) are reserved to specify SRC, DST
3812 	 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST,
3813 	 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
3814 	 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
3815 	 */
3816 	l3_l4_src_dst = flow_cfg;
3817 	/* Reset these 4 bits, so that these won't be part of key */
3818 	flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
3819 
3820 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
3821 	nr_field = 0; key_off = 0; field_marker = 1;
3822 	field = &tmp; max_bit_pos = fls(flow_cfg);
3823 	for (idx = 0;
3824 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
3825 	     key_off < MAX_KEY_OFF; idx++) {
3826 		key_type = BIT(idx);
3827 		valid_key = flow_cfg & key_type;
3828 		/* Found a field marker, reset the field values */
3829 		if (field_marker)
3830 			memset(&tmp, 0, sizeof(tmp));
3831 
3832 		field_marker = true;
3833 		keyoff_marker = true;
3834 		switch (key_type) {
3835 		case NIX_FLOW_KEY_TYPE_PORT:
3836 			field->sel_chan = true;
3837 			/* This should be set to 1, when SEL_CHAN is set */
3838 			field->bytesm1 = 1;
3839 			break;
3840 		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
3841 			field->lid = NPC_LID_LC;
3842 			field->hdr_offset = 9; /* offset */
3843 			field->bytesm1 = 0; /* 1 byte */
3844 			field->ltype_match = NPC_LT_LC_IP;
3845 			field->ltype_mask = 0xF;
3846 			break;
3847 		case NIX_FLOW_KEY_TYPE_IPV4:
3848 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
3849 			field->lid = NPC_LID_LC;
3850 			field->ltype_match = NPC_LT_LC_IP;
3851 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
3852 				field->lid = NPC_LID_LG;
3853 				field->ltype_match = NPC_LT_LG_TU_IP;
3854 			}
3855 			field->hdr_offset = 12; /* SIP offset */
3856 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
3857 
3858 			/* Only SIP */
3859 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
3860 				field->bytesm1 = 3; /* SIP, 4 bytes */
3861 
3862 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
3863 				/* Both SIP + DIP */
3864 				if (field->bytesm1 == 3) {
3865 					field->bytesm1 = 7; /* SIP + DIP, 8B */
3866 				} else {
3867 					/* Only DIP */
3868 					field->hdr_offset = 16; /* DIP off */
3869 					field->bytesm1 = 3; /* DIP, 4 bytes */
3870 				}
3871 			}
3872 
3873 			field->ltype_mask = 0xF; /* Match only IPv4 */
3874 			keyoff_marker = false;
3875 			break;
3876 		case NIX_FLOW_KEY_TYPE_IPV6:
3877 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
3878 			field->lid = NPC_LID_LC;
3879 			field->ltype_match = NPC_LT_LC_IP6;
3880 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
3881 				field->lid = NPC_LID_LG;
3882 				field->ltype_match = NPC_LT_LG_TU_IP6;
3883 			}
3884 			field->hdr_offset = 8; /* SIP offset */
3885 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
3886 
3887 			/* Only SIP */
3888 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
3889 				field->bytesm1 = 15; /* SIP, 16 bytes */
3890 
3891 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
3892 				/* Both SIP + DIP */
3893 				if (field->bytesm1 == 15) {
3894 					/* SIP + DIP, 32 bytes */
3895 					field->bytesm1 = 31;
3896 				} else {
3897 					/* Only DIP */
3898 					field->hdr_offset = 24; /* DIP off */
3899 					field->bytesm1 = 15; /* DIP,16 bytes */
3900 				}
3901 			}
3902 			field->ltype_mask = 0xF; /* Match only IPv6 */
3903 			break;
3904 		case NIX_FLOW_KEY_TYPE_TCP:
3905 		case NIX_FLOW_KEY_TYPE_UDP:
3906 		case NIX_FLOW_KEY_TYPE_SCTP:
3907 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
3908 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
3909 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
3910 			field->lid = NPC_LID_LD;
3911 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
3912 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
3913 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
3914 				field->lid = NPC_LID_LH;
3915 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
3916 
3917 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
3918 				field->bytesm1 = 1; /* SRC, 2 bytes */
3919 
3920 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
3921 				/* Both SRC + DST */
3922 				if (field->bytesm1 == 1) {
3923 					/* SRC + DST, 4 bytes */
3924 					field->bytesm1 = 3;
3925 				} else {
3926 					/* Only DIP */
3927 					field->hdr_offset = 2; /* DST off */
3928 					field->bytesm1 = 1; /* DST, 2 bytes */
3929 				}
3930 			}
3931 
3932 			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
3933 			 * so no need to change the ltype_match, just change
3934 			 * the lid for inner protocols
3935 			 */
3936 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
3937 				     (int)NPC_LT_LH_TU_TCP);
3938 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
3939 				     (int)NPC_LT_LH_TU_UDP);
3940 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
3941 				     (int)NPC_LT_LH_TU_SCTP);
3942 
3943 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
3944 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
3945 			    valid_key) {
3946 				field->ltype_match |= NPC_LT_LD_TCP;
3947 				group_member = true;
3948 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
3949 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
3950 				   valid_key) {
3951 				field->ltype_match |= NPC_LT_LD_UDP;
3952 				group_member = true;
3953 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3954 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
3955 				   valid_key) {
3956 				field->ltype_match |= NPC_LT_LD_SCTP;
3957 				group_member = true;
3958 			}
3959 			field->ltype_mask = ~field->ltype_match;
3960 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3961 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
3962 				/* Handle the case where any of the group item
3963 				 * is enabled in the group but not the final one
3964 				 */
3965 				if (group_member) {
3966 					valid_key = true;
3967 					group_member = false;
3968 				}
3969 			} else {
3970 				field_marker = false;
3971 				keyoff_marker = false;
3972 			}
3973 
3974 			/* TCP/UDP/SCTP and ESP/AH falls at same offset so
3975 			 * remember the TCP key offset of 40 byte hash key.
3976 			 */
3977 			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
3978 				l4_key_offset = key_off;
3979 			break;
3980 		case NIX_FLOW_KEY_TYPE_NVGRE:
3981 			field->lid = NPC_LID_LD;
3982 			field->hdr_offset = 4; /* VSID offset */
3983 			field->bytesm1 = 2;
3984 			field->ltype_match = NPC_LT_LD_NVGRE;
3985 			field->ltype_mask = 0xF;
3986 			break;
3987 		case NIX_FLOW_KEY_TYPE_VXLAN:
3988 		case NIX_FLOW_KEY_TYPE_GENEVE:
3989 			field->lid = NPC_LID_LE;
3990 			field->bytesm1 = 2;
3991 			field->hdr_offset = 4;
3992 			field->ltype_mask = 0xF;
3993 			field_marker = false;
3994 			keyoff_marker = false;
3995 
3996 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
3997 				field->ltype_match |= NPC_LT_LE_VXLAN;
3998 				group_member = true;
3999 			}
4000 
4001 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
4002 				field->ltype_match |= NPC_LT_LE_GENEVE;
4003 				group_member = true;
4004 			}
4005 
4006 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
4007 				if (group_member) {
4008 					field->ltype_mask = ~field->ltype_match;
4009 					field_marker = true;
4010 					keyoff_marker = true;
4011 					valid_key = true;
4012 					group_member = false;
4013 				}
4014 			}
4015 			break;
4016 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
4017 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
4018 			field->lid = NPC_LID_LA;
4019 			field->ltype_match = NPC_LT_LA_ETHER;
4020 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
4021 				field->lid = NPC_LID_LF;
4022 				field->ltype_match = NPC_LT_LF_TU_ETHER;
4023 			}
4024 			field->hdr_offset = 0;
4025 			field->bytesm1 = 5; /* DMAC 6 Byte */
4026 			field->ltype_mask = 0xF;
4027 			break;
4028 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
4029 			field->lid = NPC_LID_LC;
4030 			field->hdr_offset = 40; /* IPV6 hdr */
4031 			field->bytesm1 = 0; /* 1 Byte ext hdr*/
4032 			field->ltype_match = NPC_LT_LC_IP6_EXT;
4033 			field->ltype_mask = 0xF;
4034 			break;
4035 		case NIX_FLOW_KEY_TYPE_GTPU:
4036 			field->lid = NPC_LID_LE;
4037 			field->hdr_offset = 4;
4038 			field->bytesm1 = 3; /* 4 bytes TID*/
4039 			field->ltype_match = NPC_LT_LE_GTPU;
4040 			field->ltype_mask = 0xF;
4041 			break;
4042 		case NIX_FLOW_KEY_TYPE_VLAN:
4043 			field->lid = NPC_LID_LB;
4044 			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
4045 			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
4046 			field->ltype_match = NPC_LT_LB_CTAG;
4047 			field->ltype_mask = 0xF;
4048 			field->fn_mask = 1; /* Mask out the first nibble */
4049 			break;
4050 		case NIX_FLOW_KEY_TYPE_AH:
4051 		case NIX_FLOW_KEY_TYPE_ESP:
4052 			field->hdr_offset = 0;
4053 			field->bytesm1 = 7; /* SPI + sequence number */
4054 			field->ltype_mask = 0xF;
4055 			field->lid = NPC_LID_LE;
4056 			field->ltype_match = NPC_LT_LE_ESP;
4057 			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
4058 				field->lid = NPC_LID_LD;
4059 				field->ltype_match = NPC_LT_LD_AH;
4060 				field->hdr_offset = 4;
4061 				keyoff_marker = false;
4062 			}
4063 			break;
4064 		}
4065 		field->ena = 1;
4066 
4067 		/* Found a valid flow key type */
4068 		if (valid_key) {
4069 			/* Use the key offset of TCP/UDP/SCTP fields
4070 			 * for ESP/AH fields.
4071 			 */
4072 			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
4073 			    key_type == NIX_FLOW_KEY_TYPE_AH)
4074 				key_off = l4_key_offset;
4075 			field->key_offset = key_off;
4076 			memcpy(&alg[nr_field], field, sizeof(*field));
4077 			max_key_off = max(max_key_off, field->bytesm1 + 1);
4078 
4079 			/* Found a field marker, get the next field */
4080 			if (field_marker)
4081 				nr_field++;
4082 		}
4083 
4084 		/* Found a keyoff marker, update the new key_off */
4085 		if (keyoff_marker) {
4086 			key_off += max_key_off;
4087 			max_key_off = 0;
4088 		}
4089 	}
4090 	/* Processed all the flow key types */
4091 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
4092 		return 0;
4093 	else
4094 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
4095 }
4096 
4097 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
4098 {
4099 	u64 field[FIELDS_PER_ALG];
4100 	struct nix_hw *hw;
4101 	int fid, rc;
4102 
4103 	hw = get_nix_hw(rvu->hw, blkaddr);
4104 	if (!hw)
4105 		return NIX_AF_ERR_INVALID_NIXBLK;
4106 
4107 	/* No room to add new flow hash algoritham */
4108 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
4109 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
4110 
4111 	/* Generate algo fields for the given flow_cfg */
4112 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
4113 	if (rc)
4114 		return rc;
4115 
4116 	/* Update ALGX_FIELDX register with generated fields */
4117 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
4118 		rvu_write64(rvu, blkaddr,
4119 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
4120 							   fid), field[fid]);
4121 
4122 	/* Store the flow_cfg for futher lookup */
4123 	rc = hw->flowkey.in_use;
4124 	hw->flowkey.flowkey[rc] = flow_cfg;
4125 	hw->flowkey.in_use++;
4126 
4127 	return rc;
4128 }
4129 
4130 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
4131 					 struct nix_rss_flowkey_cfg *req,
4132 					 struct nix_rss_flowkey_cfg_rsp *rsp)
4133 {
4134 	u16 pcifunc = req->hdr.pcifunc;
4135 	int alg_idx, nixlf, blkaddr;
4136 	struct nix_hw *nix_hw;
4137 	int err;
4138 
4139 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
4140 	if (err)
4141 		return err;
4142 
4143 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4144 	if (!nix_hw)
4145 		return NIX_AF_ERR_INVALID_NIXBLK;
4146 
4147 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
4148 	/* Failed to get algo index from the exiting list, reserve new  */
4149 	if (alg_idx < 0) {
4150 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
4151 						  req->flowkey_cfg);
4152 		if (alg_idx < 0)
4153 			return alg_idx;
4154 	}
4155 	rsp->alg_idx = alg_idx;
4156 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
4157 				       alg_idx, req->mcam_index);
4158 	return 0;
4159 }
4160 
4161 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
4162 {
4163 	u32 flowkey_cfg, minkey_cfg;
4164 	int alg, fid, rc;
4165 
4166 	/* Disable all flow key algx fieldx */
4167 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
4168 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
4169 			rvu_write64(rvu, blkaddr,
4170 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
4171 				    0);
4172 	}
4173 
4174 	/* IPv4/IPv6 SIP/DIPs */
4175 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
4176 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4177 	if (rc < 0)
4178 		return rc;
4179 
4180 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4181 	minkey_cfg = flowkey_cfg;
4182 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
4183 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4184 	if (rc < 0)
4185 		return rc;
4186 
4187 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4188 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
4189 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4190 	if (rc < 0)
4191 		return rc;
4192 
4193 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4194 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
4195 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4196 	if (rc < 0)
4197 		return rc;
4198 
4199 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
4200 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4201 			NIX_FLOW_KEY_TYPE_UDP;
4202 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4203 	if (rc < 0)
4204 		return rc;
4205 
4206 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4207 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4208 			NIX_FLOW_KEY_TYPE_SCTP;
4209 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4210 	if (rc < 0)
4211 		return rc;
4212 
4213 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4214 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
4215 			NIX_FLOW_KEY_TYPE_SCTP;
4216 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4217 	if (rc < 0)
4218 		return rc;
4219 
4220 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4221 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4222 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
4223 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4224 	if (rc < 0)
4225 		return rc;
4226 
4227 	return 0;
4228 }
4229 
4230 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
4231 				      struct nix_set_mac_addr *req,
4232 				      struct msg_rsp *rsp)
4233 {
4234 	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
4235 	u16 pcifunc = req->hdr.pcifunc;
4236 	int blkaddr, nixlf, err;
4237 	struct rvu_pfvf *pfvf;
4238 
4239 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
4240 	if (err)
4241 		return err;
4242 
4243 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4244 
4245 	/* untrusted VF can't overwrite admin(PF) changes */
4246 	if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4247 	    (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
4248 		dev_warn(rvu->dev,
4249 			 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
4250 		return -EPERM;
4251 	}
4252 
4253 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
4254 
4255 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
4256 				    pfvf->rx_chan_base, req->mac_addr);
4257 
4258 	if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
4259 		ether_addr_copy(pfvf->default_mac, req->mac_addr);
4260 
4261 	rvu_switch_update_rules(rvu, pcifunc);
4262 
4263 	return 0;
4264 }
4265 
4266 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
4267 				      struct msg_req *req,
4268 				      struct nix_get_mac_addr_rsp *rsp)
4269 {
4270 	u16 pcifunc = req->hdr.pcifunc;
4271 	struct rvu_pfvf *pfvf;
4272 
4273 	if (!is_nixlf_attached(rvu, pcifunc))
4274 		return NIX_AF_ERR_AF_LF_INVALID;
4275 
4276 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4277 
4278 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
4279 
4280 	return 0;
4281 }
4282 
4283 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
4284 				     struct msg_rsp *rsp)
4285 {
4286 	bool allmulti, promisc, nix_rx_multicast;
4287 	u16 pcifunc = req->hdr.pcifunc;
4288 	struct rvu_pfvf *pfvf;
4289 	int nixlf, err;
4290 
4291 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4292 	promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
4293 	allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
4294 	pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
4295 
4296 	nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
4297 
4298 	if (is_vf(pcifunc) && !nix_rx_multicast &&
4299 	    (promisc || allmulti)) {
4300 		dev_warn_ratelimited(rvu->dev,
4301 				     "VF promisc/multicast not supported\n");
4302 		return 0;
4303 	}
4304 
4305 	/* untrusted VF can't configure promisc/allmulti */
4306 	if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4307 	    (promisc || allmulti))
4308 		return 0;
4309 
4310 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4311 	if (err)
4312 		return err;
4313 
4314 	if (nix_rx_multicast) {
4315 		/* add/del this PF_FUNC to/from mcast pkt replication list */
4316 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
4317 					  allmulti);
4318 		if (err) {
4319 			dev_err(rvu->dev,
4320 				"Failed to update pcifunc 0x%x to multicast list\n",
4321 				pcifunc);
4322 			return err;
4323 		}
4324 
4325 		/* add/del this PF_FUNC to/from promisc pkt replication list */
4326 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
4327 					  promisc);
4328 		if (err) {
4329 			dev_err(rvu->dev,
4330 				"Failed to update pcifunc 0x%x to promisc list\n",
4331 				pcifunc);
4332 			return err;
4333 		}
4334 	}
4335 
4336 	/* install/uninstall allmulti entry */
4337 	if (allmulti) {
4338 		rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
4339 					       pfvf->rx_chan_base);
4340 	} else {
4341 		if (!nix_rx_multicast)
4342 			rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
4343 	}
4344 
4345 	/* install/uninstall promisc entry */
4346 	if (promisc)
4347 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
4348 					      pfvf->rx_chan_base,
4349 					      pfvf->rx_chan_cnt);
4350 	else
4351 		if (!nix_rx_multicast)
4352 			rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
4353 
4354 	return 0;
4355 }
4356 
4357 static void nix_find_link_frs(struct rvu *rvu,
4358 			      struct nix_frs_cfg *req, u16 pcifunc)
4359 {
4360 	int pf = rvu_get_pf(pcifunc);
4361 	struct rvu_pfvf *pfvf;
4362 	int maxlen, minlen;
4363 	int numvfs, hwvf;
4364 	int vf;
4365 
4366 	/* Update with requester's min/max lengths */
4367 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4368 	pfvf->maxlen = req->maxlen;
4369 	if (req->update_minlen)
4370 		pfvf->minlen = req->minlen;
4371 
4372 	maxlen = req->maxlen;
4373 	minlen = req->update_minlen ? req->minlen : 0;
4374 
4375 	/* Get this PF's numVFs and starting hwvf */
4376 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
4377 
4378 	/* For each VF, compare requested max/minlen */
4379 	for (vf = 0; vf < numvfs; vf++) {
4380 		pfvf =  &rvu->hwvf[hwvf + vf];
4381 		if (pfvf->maxlen > maxlen)
4382 			maxlen = pfvf->maxlen;
4383 		if (req->update_minlen &&
4384 		    pfvf->minlen && pfvf->minlen < minlen)
4385 			minlen = pfvf->minlen;
4386 	}
4387 
4388 	/* Compare requested max/minlen with PF's max/minlen */
4389 	pfvf = &rvu->pf[pf];
4390 	if (pfvf->maxlen > maxlen)
4391 		maxlen = pfvf->maxlen;
4392 	if (req->update_minlen &&
4393 	    pfvf->minlen && pfvf->minlen < minlen)
4394 		minlen = pfvf->minlen;
4395 
4396 	/* Update the request with max/min PF's and it's VF's max/min */
4397 	req->maxlen = maxlen;
4398 	if (req->update_minlen)
4399 		req->minlen = minlen;
4400 }
4401 
4402 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
4403 				    struct msg_rsp *rsp)
4404 {
4405 	struct rvu_hwinfo *hw = rvu->hw;
4406 	u16 pcifunc = req->hdr.pcifunc;
4407 	int pf = rvu_get_pf(pcifunc);
4408 	int blkaddr, link = -1;
4409 	struct nix_hw *nix_hw;
4410 	struct rvu_pfvf *pfvf;
4411 	u8 cgx = 0, lmac = 0;
4412 	u16 max_mtu;
4413 	u64 cfg;
4414 
4415 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4416 	if (blkaddr < 0)
4417 		return NIX_AF_ERR_AF_LF_INVALID;
4418 
4419 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4420 	if (!nix_hw)
4421 		return NIX_AF_ERR_INVALID_NIXBLK;
4422 
4423 	if (is_afvf(pcifunc))
4424 		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
4425 	else
4426 		rvu_get_lmac_link_max_frs(rvu, &max_mtu);
4427 
4428 	if (!req->sdp_link && req->maxlen > max_mtu)
4429 		return NIX_AF_ERR_FRS_INVALID;
4430 
4431 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
4432 		return NIX_AF_ERR_FRS_INVALID;
4433 
4434 	/* Check if config is for SDP link */
4435 	if (req->sdp_link) {
4436 		if (!hw->sdp_links)
4437 			return NIX_AF_ERR_RX_LINK_INVALID;
4438 		link = hw->cgx_links + hw->lbk_links;
4439 		goto linkcfg;
4440 	}
4441 
4442 	/* Check if the request is from CGX mapped RVU PF */
4443 	if (is_pf_cgxmapped(rvu, pf)) {
4444 		/* Get CGX and LMAC to which this PF is mapped and find link */
4445 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
4446 		link = (cgx * hw->lmac_per_cgx) + lmac;
4447 	} else if (pf == 0) {
4448 		/* For VFs of PF0 ingress is LBK port, so config LBK link */
4449 		pfvf = rvu_get_pfvf(rvu, pcifunc);
4450 		link = hw->cgx_links + pfvf->lbkid;
4451 	}
4452 
4453 	if (link < 0)
4454 		return NIX_AF_ERR_RX_LINK_INVALID;
4455 
4456 linkcfg:
4457 	nix_find_link_frs(rvu, req, pcifunc);
4458 
4459 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
4460 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
4461 	if (req->update_minlen)
4462 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
4463 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
4464 
4465 	return 0;
4466 }
4467 
4468 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
4469 				    struct msg_rsp *rsp)
4470 {
4471 	int nixlf, blkaddr, err;
4472 	u64 cfg;
4473 
4474 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
4475 	if (err)
4476 		return err;
4477 
4478 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
4479 	/* Set the interface configuration */
4480 	if (req->len_verify & BIT(0))
4481 		cfg |= BIT_ULL(41);
4482 	else
4483 		cfg &= ~BIT_ULL(41);
4484 
4485 	if (req->len_verify & BIT(1))
4486 		cfg |= BIT_ULL(40);
4487 	else
4488 		cfg &= ~BIT_ULL(40);
4489 
4490 	if (req->len_verify & NIX_RX_DROP_RE)
4491 		cfg |= BIT_ULL(32);
4492 	else
4493 		cfg &= ~BIT_ULL(32);
4494 
4495 	if (req->csum_verify & BIT(0))
4496 		cfg |= BIT_ULL(37);
4497 	else
4498 		cfg &= ~BIT_ULL(37);
4499 
4500 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
4501 
4502 	return 0;
4503 }
4504 
4505 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
4506 {
4507 	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
4508 }
4509 
4510 static void nix_link_config(struct rvu *rvu, int blkaddr,
4511 			    struct nix_hw *nix_hw)
4512 {
4513 	struct rvu_hwinfo *hw = rvu->hw;
4514 	int cgx, lmac_cnt, slink, link;
4515 	u16 lbk_max_frs, lmac_max_frs;
4516 	unsigned long lmac_bmap;
4517 	u64 tx_credits, cfg;
4518 	u64 lmac_fifo_len;
4519 	int iter;
4520 
4521 	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
4522 	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
4523 
4524 	/* Set default min/max packet lengths allowed on NIX Rx links.
4525 	 *
4526 	 * With HW reset minlen value of 60byte, HW will treat ARP pkts
4527 	 * as undersize and report them to SW as error pkts, hence
4528 	 * setting it to 40 bytes.
4529 	 */
4530 	for (link = 0; link < hw->cgx_links; link++) {
4531 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4532 				((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
4533 	}
4534 
4535 	for (link = hw->cgx_links; link < hw->lbk_links; link++) {
4536 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4537 			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
4538 	}
4539 	if (hw->sdp_links) {
4540 		link = hw->cgx_links + hw->lbk_links;
4541 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4542 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
4543 	}
4544 
4545 	/* Get MCS external bypass status for CN10K-B */
4546 	if (mcs_get_blkcnt() == 1) {
4547 		/* Adjust for 2 credits when external bypass is disabled */
4548 		nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
4549 	}
4550 
4551 	/* Set credits for Tx links assuming max packet length allowed.
4552 	 * This will be reconfigured based on MTU set for PF/VF.
4553 	 */
4554 	for (cgx = 0; cgx < hw->cgx; cgx++) {
4555 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
4556 		/* Skip when cgx is not available or lmac cnt is zero */
4557 		if (lmac_cnt <= 0)
4558 			continue;
4559 		slink = cgx * hw->lmac_per_cgx;
4560 
4561 		/* Get LMAC id's from bitmap */
4562 		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
4563 		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
4564 			lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
4565 			if (!lmac_fifo_len) {
4566 				dev_err(rvu->dev,
4567 					"%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4568 					__func__, cgx, iter);
4569 				continue;
4570 			}
4571 			tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
4572 			/* Enable credits and set credit pkt count to max allowed */
4573 			cfg =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4574 			cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
4575 
4576 			link = iter + slink;
4577 			nix_hw->tx_credits[link] = tx_credits;
4578 			rvu_write64(rvu, blkaddr,
4579 				    NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4580 		}
4581 	}
4582 
4583 	/* Set Tx credits for LBK link */
4584 	slink = hw->cgx_links;
4585 	for (link = slink; link < (slink + hw->lbk_links); link++) {
4586 		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
4587 		nix_hw->tx_credits[link] = tx_credits;
4588 		/* Enable credits and set credit pkt count to max allowed */
4589 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4590 		rvu_write64(rvu, blkaddr,
4591 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
4592 	}
4593 }
4594 
4595 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
4596 {
4597 	int idx, err;
4598 	u64 status;
4599 
4600 	/* Start X2P bus calibration */
4601 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4602 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
4603 	/* Wait for calibration to complete */
4604 	err = rvu_poll_reg(rvu, blkaddr,
4605 			   NIX_AF_STATUS, BIT_ULL(10), false);
4606 	if (err) {
4607 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
4608 		return err;
4609 	}
4610 
4611 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
4612 	/* Check if CGX devices are ready */
4613 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
4614 		/* Skip when cgx port is not available */
4615 		if (!rvu_cgx_pdata(idx, rvu) ||
4616 		    (status & (BIT_ULL(16 + idx))))
4617 			continue;
4618 		dev_err(rvu->dev,
4619 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
4620 		err = -EBUSY;
4621 	}
4622 
4623 	/* Check if LBK is ready */
4624 	if (!(status & BIT_ULL(19))) {
4625 		dev_err(rvu->dev,
4626 			"LBK didn't respond to NIX X2P calibration\n");
4627 		err = -EBUSY;
4628 	}
4629 
4630 	/* Clear 'calibrate_x2p' bit */
4631 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4632 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
4633 	if (err || (status & 0x3FFULL))
4634 		dev_err(rvu->dev,
4635 			"NIX X2P calibration failed, status 0x%llx\n", status);
4636 	if (err)
4637 		return err;
4638 	return 0;
4639 }
4640 
4641 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
4642 {
4643 	u64 cfg;
4644 	int err;
4645 
4646 	/* Set admin queue endianness */
4647 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
4648 #ifdef __BIG_ENDIAN
4649 	cfg |= BIT_ULL(8);
4650 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4651 #else
4652 	cfg &= ~BIT_ULL(8);
4653 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4654 #endif
4655 
4656 	/* Do not bypass NDC cache */
4657 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
4658 	cfg &= ~0x3FFEULL;
4659 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
4660 	/* Disable caching of SQB aka SQEs */
4661 	cfg |= 0x04ULL;
4662 #endif
4663 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
4664 
4665 	/* Result structure can be followed by RQ/SQ/CQ context at
4666 	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
4667 	 * operation type. Alloc sufficient result memory for all operations.
4668 	 */
4669 	err = rvu_aq_alloc(rvu, &block->aq,
4670 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
4671 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
4672 	if (err)
4673 		return err;
4674 
4675 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
4676 	rvu_write64(rvu, block->addr,
4677 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
4678 	return 0;
4679 }
4680 
4681 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
4682 {
4683 	struct rvu_hwinfo *hw = rvu->hw;
4684 	u64 hw_const;
4685 
4686 	hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
4687 
4688 	/* On OcteonTx2 DWRR quantum is directly configured into each of
4689 	 * the transmit scheduler queues. And PF/VF drivers were free to
4690 	 * config any value upto 2^24.
4691 	 * On CN10K, HW is modified, the quantum configuration at scheduler
4692 	 * queues is in terms of weight. And SW needs to setup a base DWRR MTU
4693 	 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
4694 	 * 'DWRR MTU * weight' to get the quantum.
4695 	 *
4696 	 * Check if HW uses a common MTU for all DWRR quantum configs.
4697 	 * On OcteonTx2 this register field is '0'.
4698 	 */
4699 	if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
4700 		hw->cap.nix_common_dwrr_mtu = true;
4701 
4702 	if (hw_const & BIT_ULL(61))
4703 		hw->cap.nix_multiple_dwrr_mtu = true;
4704 }
4705 
4706 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
4707 {
4708 	const struct npc_lt_def_cfg *ltdefs;
4709 	struct rvu_hwinfo *hw = rvu->hw;
4710 	int blkaddr = nix_hw->blkaddr;
4711 	struct rvu_block *block;
4712 	int err;
4713 	u64 cfg;
4714 
4715 	block = &hw->block[blkaddr];
4716 
4717 	if (is_rvu_96xx_B0(rvu)) {
4718 		/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
4719 		 * internal state when conditional clocks are turned off.
4720 		 * Hence enable them.
4721 		 */
4722 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4723 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
4724 
4725 		/* Set chan/link to backpressure TL3 instead of TL2 */
4726 		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
4727 
4728 		/* Disable SQ manager's sticky mode operation (set TM6 = 0)
4729 		 * This sticky mode is known to cause SQ stalls when multiple
4730 		 * SQs are mapped to same SMQ and transmitting pkts at a time.
4731 		 */
4732 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
4733 		cfg &= ~BIT_ULL(15);
4734 		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
4735 	}
4736 
4737 	ltdefs = rvu->kpu.lt_def;
4738 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
4739 	err = nix_calibrate_x2p(rvu, blkaddr);
4740 	if (err)
4741 		return err;
4742 
4743 	/* Setup capabilities of the NIX block */
4744 	rvu_nix_setup_capabilities(rvu, blkaddr);
4745 
4746 	/* Initialize admin queue */
4747 	err = nix_aq_init(rvu, block);
4748 	if (err)
4749 		return err;
4750 
4751 	/* Restore CINT timer delay to HW reset values */
4752 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
4753 
4754 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
4755 
4756 	/* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */
4757 	cfg |= 1ULL;
4758 	if (!is_rvu_otx2(rvu))
4759 		cfg |= NIX_PTP_1STEP_EN;
4760 
4761 	rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
4762 
4763 	if (!is_rvu_otx2(rvu))
4764 		rvu_nix_block_cn10k_init(rvu, nix_hw);
4765 
4766 	if (is_block_implemented(hw, blkaddr)) {
4767 		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
4768 		if (err)
4769 			return err;
4770 
4771 		err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
4772 		if (err)
4773 			return err;
4774 
4775 		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
4776 		if (err)
4777 			return err;
4778 
4779 		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
4780 		if (err)
4781 			return err;
4782 
4783 		err = nix_setup_txvlan(rvu, nix_hw);
4784 		if (err)
4785 			return err;
4786 
4787 		/* Configure segmentation offload formats */
4788 		nix_setup_lso(rvu, nix_hw, blkaddr);
4789 
4790 		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
4791 		 * This helps HW protocol checker to identify headers
4792 		 * and validate length and checksums.
4793 		 */
4794 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
4795 			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
4796 			    ltdefs->rx_ol2.ltype_mask);
4797 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
4798 			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
4799 			    ltdefs->rx_oip4.ltype_mask);
4800 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
4801 			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
4802 			    ltdefs->rx_iip4.ltype_mask);
4803 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
4804 			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
4805 			    ltdefs->rx_oip6.ltype_mask);
4806 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
4807 			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
4808 			    ltdefs->rx_iip6.ltype_mask);
4809 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
4810 			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
4811 			    ltdefs->rx_otcp.ltype_mask);
4812 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
4813 			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
4814 			    ltdefs->rx_itcp.ltype_mask);
4815 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
4816 			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
4817 			    ltdefs->rx_oudp.ltype_mask);
4818 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
4819 			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
4820 			    ltdefs->rx_iudp.ltype_mask);
4821 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
4822 			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
4823 			    ltdefs->rx_osctp.ltype_mask);
4824 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
4825 			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
4826 			    ltdefs->rx_isctp.ltype_mask);
4827 
4828 		if (!is_rvu_otx2(rvu)) {
4829 			/* Enable APAD calculation for other protocols
4830 			 * matching APAD0 and APAD1 lt def registers.
4831 			 */
4832 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
4833 				    (ltdefs->rx_apad0.valid << 11) |
4834 				    (ltdefs->rx_apad0.lid << 8) |
4835 				    (ltdefs->rx_apad0.ltype_match << 4) |
4836 				    ltdefs->rx_apad0.ltype_mask);
4837 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
4838 				    (ltdefs->rx_apad1.valid << 11) |
4839 				    (ltdefs->rx_apad1.lid << 8) |
4840 				    (ltdefs->rx_apad1.ltype_match << 4) |
4841 				    ltdefs->rx_apad1.ltype_mask);
4842 
4843 			/* Receive ethertype defination register defines layer
4844 			 * information in NPC_RESULT_S to identify the Ethertype
4845 			 * location in L2 header. Used for Ethertype overwriting
4846 			 * in inline IPsec flow.
4847 			 */
4848 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
4849 				    (ltdefs->rx_et[0].offset << 12) |
4850 				    (ltdefs->rx_et[0].valid << 11) |
4851 				    (ltdefs->rx_et[0].lid << 8) |
4852 				    (ltdefs->rx_et[0].ltype_match << 4) |
4853 				    ltdefs->rx_et[0].ltype_mask);
4854 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
4855 				    (ltdefs->rx_et[1].offset << 12) |
4856 				    (ltdefs->rx_et[1].valid << 11) |
4857 				    (ltdefs->rx_et[1].lid << 8) |
4858 				    (ltdefs->rx_et[1].ltype_match << 4) |
4859 				    ltdefs->rx_et[1].ltype_mask);
4860 		}
4861 
4862 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
4863 		if (err)
4864 			return err;
4865 
4866 		nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
4867 					     sizeof(u64), GFP_KERNEL);
4868 		if (!nix_hw->tx_credits)
4869 			return -ENOMEM;
4870 
4871 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
4872 		nix_link_config(rvu, blkaddr, nix_hw);
4873 
4874 		/* Enable Channel backpressure */
4875 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
4876 	}
4877 	return 0;
4878 }
4879 
4880 int rvu_nix_init(struct rvu *rvu)
4881 {
4882 	struct rvu_hwinfo *hw = rvu->hw;
4883 	struct nix_hw *nix_hw;
4884 	int blkaddr = 0, err;
4885 	int i = 0;
4886 
4887 	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
4888 			       GFP_KERNEL);
4889 	if (!hw->nix)
4890 		return -ENOMEM;
4891 
4892 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4893 	while (blkaddr) {
4894 		nix_hw = &hw->nix[i];
4895 		nix_hw->rvu = rvu;
4896 		nix_hw->blkaddr = blkaddr;
4897 		err = rvu_nix_block_init(rvu, nix_hw);
4898 		if (err)
4899 			return err;
4900 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4901 		i++;
4902 	}
4903 
4904 	return 0;
4905 }
4906 
4907 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
4908 				  struct rvu_block *block)
4909 {
4910 	struct nix_txsch *txsch;
4911 	struct nix_mcast *mcast;
4912 	struct nix_txvlan *vlan;
4913 	struct nix_hw *nix_hw;
4914 	int lvl;
4915 
4916 	rvu_aq_free(rvu, block->aq);
4917 
4918 	if (is_block_implemented(rvu->hw, blkaddr)) {
4919 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
4920 		if (!nix_hw)
4921 			return;
4922 
4923 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
4924 			txsch = &nix_hw->txsch[lvl];
4925 			kfree(txsch->schq.bmap);
4926 		}
4927 
4928 		kfree(nix_hw->tx_credits);
4929 
4930 		nix_ipolicer_freemem(rvu, nix_hw);
4931 
4932 		vlan = &nix_hw->txvlan;
4933 		kfree(vlan->rsrc.bmap);
4934 		mutex_destroy(&vlan->rsrc_lock);
4935 
4936 		mcast = &nix_hw->mcast;
4937 		qmem_free(rvu->dev, mcast->mce_ctx);
4938 		qmem_free(rvu->dev, mcast->mcast_buf);
4939 		mutex_destroy(&mcast->mce_lock);
4940 	}
4941 }
4942 
4943 void rvu_nix_freemem(struct rvu *rvu)
4944 {
4945 	struct rvu_hwinfo *hw = rvu->hw;
4946 	struct rvu_block *block;
4947 	int blkaddr = 0;
4948 
4949 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4950 	while (blkaddr) {
4951 		block = &hw->block[blkaddr];
4952 		rvu_nix_block_freemem(rvu, blkaddr, block);
4953 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4954 	}
4955 }
4956 
4957 static void nix_mcast_update_action(struct rvu *rvu,
4958 				    struct nix_mcast_grp_elem *elem)
4959 {
4960 	struct npc_mcam *mcam = &rvu->hw->mcam;
4961 	struct nix_rx_action rx_action = { 0 };
4962 	struct nix_tx_action tx_action = { 0 };
4963 	int npc_blkaddr;
4964 
4965 	npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
4966 	if (elem->dir == NIX_MCAST_INGRESS) {
4967 		*(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam,
4968 							 npc_blkaddr,
4969 							 elem->mcam_index);
4970 		rx_action.index = elem->mce_start_index;
4971 		npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
4972 				    *(u64 *)&rx_action);
4973 	} else {
4974 		*(u64 *)&tx_action = npc_get_mcam_action(rvu, mcam,
4975 							 npc_blkaddr,
4976 							 elem->mcam_index);
4977 		tx_action.index = elem->mce_start_index;
4978 		npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
4979 				    *(u64 *)&tx_action);
4980 	}
4981 }
4982 
4983 static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active)
4984 {
4985 	struct nix_mcast_grp_elem *elem;
4986 	struct nix_mcast_grp *mcast_grp;
4987 	struct nix_hw *nix_hw;
4988 	int blkaddr;
4989 
4990 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4991 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4992 	if (!nix_hw)
4993 		return;
4994 
4995 	mcast_grp = &nix_hw->mcast_grp;
4996 
4997 	mutex_lock(&mcast_grp->mcast_grp_lock);
4998 	list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) {
4999 		struct nix_mce_list *mce_list;
5000 		struct mce *mce;
5001 
5002 		/* Iterate the group elements and disable the element which
5003 		 * received the disable request.
5004 		 */
5005 		mce_list = &elem->mcast_mce_list;
5006 		hlist_for_each_entry(mce, &mce_list->head, node) {
5007 			if (mce->pcifunc == pcifunc) {
5008 				mce->is_active = is_active;
5009 				break;
5010 			}
5011 		}
5012 
5013 		/* Dump the updated list to HW */
5014 		if (elem->dir == NIX_MCAST_INGRESS)
5015 			nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
5016 		else
5017 			nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
5018 
5019 		/* Update the multicast index in NPC rule */
5020 		nix_mcast_update_action(rvu, elem);
5021 	}
5022 	mutex_unlock(&mcast_grp->mcast_grp_lock);
5023 }
5024 
5025 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
5026 				     struct msg_rsp *rsp)
5027 {
5028 	u16 pcifunc = req->hdr.pcifunc;
5029 	struct rvu_pfvf *pfvf;
5030 	int nixlf, err;
5031 
5032 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
5033 	if (err)
5034 		return err;
5035 
5036 	/* Enable the interface if it is in any multicast list */
5037 	nix_mcast_update_mce_entry(rvu, pcifunc, 1);
5038 
5039 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
5040 
5041 	npc_mcam_enable_flows(rvu, pcifunc);
5042 
5043 	pfvf = rvu_get_pfvf(rvu, pcifunc);
5044 	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
5045 
5046 	rvu_switch_update_rules(rvu, pcifunc);
5047 
5048 	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
5049 }
5050 
5051 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
5052 				    struct msg_rsp *rsp)
5053 {
5054 	u16 pcifunc = req->hdr.pcifunc;
5055 	struct rvu_pfvf *pfvf;
5056 	int nixlf, err;
5057 
5058 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
5059 	if (err)
5060 		return err;
5061 
5062 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
5063 	/* Disable the interface if it is in any multicast list */
5064 	nix_mcast_update_mce_entry(rvu, pcifunc, 0);
5065 
5066 
5067 	pfvf = rvu_get_pfvf(rvu, pcifunc);
5068 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
5069 
5070 	err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
5071 	if (err)
5072 		return err;
5073 
5074 	rvu_cgx_tx_enable(rvu, pcifunc, true);
5075 
5076 	return 0;
5077 }
5078 
5079 #define RX_SA_BASE  GENMASK_ULL(52, 7)
5080 
5081 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
5082 {
5083 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
5084 	struct hwctx_disable_req ctx_req;
5085 	int pf = rvu_get_pf(pcifunc);
5086 	struct mac_ops *mac_ops;
5087 	u8 cgx_id, lmac_id;
5088 	u64 sa_base;
5089 	void *cgxd;
5090 	int err;
5091 
5092 	ctx_req.hdr.pcifunc = pcifunc;
5093 
5094 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
5095 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
5096 	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
5097 	nix_interface_deinit(rvu, pcifunc, nixlf);
5098 	nix_rx_sync(rvu, blkaddr);
5099 	nix_txschq_free(rvu, pcifunc);
5100 
5101 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
5102 
5103 	rvu_cgx_start_stop_io(rvu, pcifunc, false);
5104 
5105 	if (pfvf->sq_ctx) {
5106 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
5107 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
5108 		if (err)
5109 			dev_err(rvu->dev, "SQ ctx disable failed\n");
5110 	}
5111 
5112 	if (pfvf->rq_ctx) {
5113 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
5114 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
5115 		if (err)
5116 			dev_err(rvu->dev, "RQ ctx disable failed\n");
5117 	}
5118 
5119 	if (pfvf->cq_ctx) {
5120 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
5121 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
5122 		if (err)
5123 			dev_err(rvu->dev, "CQ ctx disable failed\n");
5124 	}
5125 
5126 	/* reset HW config done for Switch headers */
5127 	rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
5128 			       (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
5129 
5130 	/* Disabling CGX and NPC config done for PTP */
5131 	if (pfvf->hw_rx_tstamp_en) {
5132 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
5133 		cgxd = rvu_cgx_pdata(cgx_id, rvu);
5134 		mac_ops = get_mac_ops(cgxd);
5135 		mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
5136 		/* Undo NPC config done for PTP */
5137 		if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
5138 			dev_err(rvu->dev, "NPC config for PTP failed\n");
5139 		pfvf->hw_rx_tstamp_en = false;
5140 	}
5141 
5142 	/* reset priority flow control config */
5143 	rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
5144 
5145 	/* reset 802.3x flow control config */
5146 	rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
5147 
5148 	nix_ctx_free(rvu, pfvf);
5149 
5150 	nix_free_all_bandprof(rvu, pcifunc);
5151 
5152 	sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
5153 	if (FIELD_GET(RX_SA_BASE, sa_base)) {
5154 		err = rvu_cpt_ctx_flush(rvu, pcifunc);
5155 		if (err)
5156 			dev_err(rvu->dev,
5157 				"CPT ctx flush failed with error: %d\n", err);
5158 	}
5159 }
5160 
5161 #define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)
5162 
5163 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
5164 {
5165 	struct rvu_hwinfo *hw = rvu->hw;
5166 	struct rvu_block *block;
5167 	int blkaddr, pf;
5168 	int nixlf;
5169 	u64 cfg;
5170 
5171 	pf = rvu_get_pf(pcifunc);
5172 	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
5173 		return 0;
5174 
5175 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5176 	if (blkaddr < 0)
5177 		return NIX_AF_ERR_AF_LF_INVALID;
5178 
5179 	block = &hw->block[blkaddr];
5180 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
5181 	if (nixlf < 0)
5182 		return NIX_AF_ERR_AF_LF_INVALID;
5183 
5184 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
5185 
5186 	if (enable)
5187 		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
5188 	else
5189 		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
5190 
5191 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
5192 
5193 	return 0;
5194 }
5195 
5196 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
5197 					  struct msg_rsp *rsp)
5198 {
5199 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
5200 }
5201 
5202 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
5203 					   struct msg_rsp *rsp)
5204 {
5205 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
5206 }
5207 
5208 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
5209 					struct nix_lso_format_cfg *req,
5210 					struct nix_lso_format_cfg_rsp *rsp)
5211 {
5212 	u16 pcifunc = req->hdr.pcifunc;
5213 	struct nix_hw *nix_hw;
5214 	struct rvu_pfvf *pfvf;
5215 	int blkaddr, idx, f;
5216 	u64 reg;
5217 
5218 	pfvf = rvu_get_pfvf(rvu, pcifunc);
5219 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5220 	if (!pfvf->nixlf || blkaddr < 0)
5221 		return NIX_AF_ERR_AF_LF_INVALID;
5222 
5223 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
5224 	if (!nix_hw)
5225 		return NIX_AF_ERR_INVALID_NIXBLK;
5226 
5227 	/* Find existing matching LSO format, if any */
5228 	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
5229 		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
5230 			reg = rvu_read64(rvu, blkaddr,
5231 					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
5232 			if (req->fields[f] != (reg & req->field_mask))
5233 				break;
5234 		}
5235 
5236 		if (f == NIX_LSO_FIELD_MAX)
5237 			break;
5238 	}
5239 
5240 	if (idx < nix_hw->lso.in_use) {
5241 		/* Match found */
5242 		rsp->lso_format_idx = idx;
5243 		return 0;
5244 	}
5245 
5246 	if (nix_hw->lso.in_use == nix_hw->lso.total)
5247 		return NIX_AF_ERR_LSO_CFG_FAIL;
5248 
5249 	rsp->lso_format_idx = nix_hw->lso.in_use++;
5250 
5251 	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
5252 		rvu_write64(rvu, blkaddr,
5253 			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
5254 			    req->fields[f]);
5255 
5256 	return 0;
5257 }
5258 
5259 #define IPSEC_GEN_CFG_EGRP    GENMASK_ULL(50, 48)
5260 #define IPSEC_GEN_CFG_OPCODE  GENMASK_ULL(47, 32)
5261 #define IPSEC_GEN_CFG_PARAM1  GENMASK_ULL(31, 16)
5262 #define IPSEC_GEN_CFG_PARAM2  GENMASK_ULL(15, 0)
5263 
5264 #define CPT_INST_QSEL_BLOCK   GENMASK_ULL(28, 24)
5265 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
5266 #define CPT_INST_QSEL_SLOT    GENMASK_ULL(7, 0)
5267 
5268 #define CPT_INST_CREDIT_TH    GENMASK_ULL(53, 32)
5269 #define CPT_INST_CREDIT_BPID  GENMASK_ULL(30, 22)
5270 #define CPT_INST_CREDIT_CNT   GENMASK_ULL(21, 0)
5271 
5272 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
5273 				 int blkaddr)
5274 {
5275 	u8 cpt_idx, cpt_blkaddr;
5276 	u64 val;
5277 
5278 	cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
5279 	if (req->enable) {
5280 		val = 0;
5281 		/* Enable context prefetching */
5282 		if (!is_rvu_otx2(rvu))
5283 			val |= BIT_ULL(51);
5284 
5285 		/* Set OPCODE and EGRP */
5286 		val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
5287 		val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
5288 		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
5289 		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
5290 
5291 		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
5292 
5293 		/* Set CPT queue for inline IPSec */
5294 		val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
5295 		val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
5296 				  req->inst_qsel.cpt_pf_func);
5297 
5298 		if (!is_rvu_otx2(rvu)) {
5299 			cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
5300 						       BLKADDR_CPT1;
5301 			val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
5302 		}
5303 
5304 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5305 			    val);
5306 
5307 		/* Set CPT credit */
5308 		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5309 		if ((val & 0x3FFFFF) != 0x3FFFFF)
5310 			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5311 				    0x3FFFFF - val);
5312 
5313 		val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
5314 		val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
5315 		val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
5316 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
5317 	} else {
5318 		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
5319 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5320 			    0x0);
5321 		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5322 		if ((val & 0x3FFFFF) != 0x3FFFFF)
5323 			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5324 				    0x3FFFFF - val);
5325 	}
5326 }
5327 
5328 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
5329 					  struct nix_inline_ipsec_cfg *req,
5330 					  struct msg_rsp *rsp)
5331 {
5332 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5333 		return 0;
5334 
5335 	nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
5336 	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
5337 		nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
5338 
5339 	return 0;
5340 }
5341 
5342 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
5343 					       struct msg_req *req,
5344 					       struct nix_inline_ipsec_cfg *rsp)
5345 
5346 {
5347 	u64 val;
5348 
5349 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5350 		return 0;
5351 
5352 	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
5353 	rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
5354 	rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
5355 	rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
5356 	rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
5357 
5358 	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
5359 	rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
5360 	rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
5361 	rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
5362 
5363 	return 0;
5364 }
5365 
5366 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
5367 					     struct nix_inline_ipsec_lf_cfg *req,
5368 					     struct msg_rsp *rsp)
5369 {
5370 	int lf, blkaddr, err;
5371 	u64 val;
5372 
5373 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5374 		return 0;
5375 
5376 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
5377 	if (err)
5378 		return err;
5379 
5380 	if (req->enable) {
5381 		/* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
5382 		val = (u64)req->ipsec_cfg0.tt << 44 |
5383 		      (u64)req->ipsec_cfg0.tag_const << 20 |
5384 		      (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
5385 		      req->ipsec_cfg0.lenm1_max;
5386 
5387 		if (blkaddr == BLKADDR_NIX1)
5388 			val |= BIT_ULL(46);
5389 
5390 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
5391 
5392 		/* Set SA_IDX_W and SA_IDX_MAX */
5393 		val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
5394 		      req->ipsec_cfg1.sa_idx_max;
5395 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
5396 
5397 		/* Set SA base address */
5398 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5399 			    req->sa_base_addr);
5400 	} else {
5401 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
5402 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
5403 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5404 			    0x0);
5405 	}
5406 
5407 	return 0;
5408 }
5409 
5410 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
5411 {
5412 	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
5413 
5414 	/* overwrite vf mac address with default_mac */
5415 	if (from_vf)
5416 		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
5417 }
5418 
5419 /* NIX ingress policers or bandwidth profiles APIs */
5420 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
5421 {
5422 	struct npc_lt_def_cfg defs, *ltdefs;
5423 
5424 	ltdefs = &defs;
5425 	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
5426 
5427 	/* Extract PCP and DEI fields from outer VLAN from byte offset
5428 	 * 2 from the start of LB_PTR (ie TAG).
5429 	 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
5430 	 * fields are considered when 'Tunnel enable' is set in profile.
5431 	 */
5432 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
5433 		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
5434 		    (ltdefs->ovlan.ltype_match << 4) |
5435 		    ltdefs->ovlan.ltype_mask);
5436 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
5437 		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
5438 		    (ltdefs->ivlan.ltype_match << 4) |
5439 		    ltdefs->ivlan.ltype_mask);
5440 
5441 	/* DSCP field in outer and tunneled IPv4 packets */
5442 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
5443 		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
5444 		    (ltdefs->rx_oip4.ltype_match << 4) |
5445 		    ltdefs->rx_oip4.ltype_mask);
5446 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
5447 		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
5448 		    (ltdefs->rx_iip4.ltype_match << 4) |
5449 		    ltdefs->rx_iip4.ltype_mask);
5450 
5451 	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
5452 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
5453 		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
5454 		    (ltdefs->rx_oip6.ltype_match << 4) |
5455 		    ltdefs->rx_oip6.ltype_mask);
5456 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
5457 		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
5458 		    (ltdefs->rx_iip6.ltype_match << 4) |
5459 		    ltdefs->rx_iip6.ltype_mask);
5460 }
5461 
5462 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
5463 				    int layer, int prof_idx)
5464 {
5465 	struct nix_cn10k_aq_enq_req aq_req;
5466 	int rc;
5467 
5468 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5469 
5470 	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
5471 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5472 	aq_req.op = NIX_AQ_INSTOP_INIT;
5473 
5474 	/* Context is all zeros, submit to AQ */
5475 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5476 				     (struct nix_aq_enq_req *)&aq_req, NULL);
5477 	if (rc)
5478 		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
5479 			layer, prof_idx);
5480 	return rc;
5481 }
5482 
5483 static int nix_setup_ipolicers(struct rvu *rvu,
5484 			       struct nix_hw *nix_hw, int blkaddr)
5485 {
5486 	struct rvu_hwinfo *hw = rvu->hw;
5487 	struct nix_ipolicer *ipolicer;
5488 	int err, layer, prof_idx;
5489 	u64 cfg;
5490 
5491 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
5492 	if (!(cfg & BIT_ULL(61))) {
5493 		hw->cap.ipolicer = false;
5494 		return 0;
5495 	}
5496 
5497 	hw->cap.ipolicer = true;
5498 	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
5499 					sizeof(*ipolicer), GFP_KERNEL);
5500 	if (!nix_hw->ipolicer)
5501 		return -ENOMEM;
5502 
5503 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
5504 
5505 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5506 		ipolicer = &nix_hw->ipolicer[layer];
5507 		switch (layer) {
5508 		case BAND_PROF_LEAF_LAYER:
5509 			ipolicer->band_prof.max = cfg & 0XFFFF;
5510 			break;
5511 		case BAND_PROF_MID_LAYER:
5512 			ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
5513 			break;
5514 		case BAND_PROF_TOP_LAYER:
5515 			ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
5516 			break;
5517 		}
5518 
5519 		if (!ipolicer->band_prof.max)
5520 			continue;
5521 
5522 		err = rvu_alloc_bitmap(&ipolicer->band_prof);
5523 		if (err)
5524 			return err;
5525 
5526 		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
5527 						  ipolicer->band_prof.max,
5528 						  sizeof(u16), GFP_KERNEL);
5529 		if (!ipolicer->pfvf_map)
5530 			return -ENOMEM;
5531 
5532 		ipolicer->match_id = devm_kcalloc(rvu->dev,
5533 						  ipolicer->band_prof.max,
5534 						  sizeof(u16), GFP_KERNEL);
5535 		if (!ipolicer->match_id)
5536 			return -ENOMEM;
5537 
5538 		for (prof_idx = 0;
5539 		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
5540 			/* Set AF as current owner for INIT ops to succeed */
5541 			ipolicer->pfvf_map[prof_idx] = 0x00;
5542 
5543 			/* There is no enable bit in the profile context,
5544 			 * so no context disable. So let's INIT them here
5545 			 * so that PF/VF later on have to just do WRITE to
5546 			 * setup policer rates and config.
5547 			 */
5548 			err = nix_init_policer_context(rvu, nix_hw,
5549 						       layer, prof_idx);
5550 			if (err)
5551 				return err;
5552 		}
5553 
5554 		/* Allocate memory for maintaining ref_counts for MID level
5555 		 * profiles, this will be needed for leaf layer profiles'
5556 		 * aggregation.
5557 		 */
5558 		if (layer != BAND_PROF_MID_LAYER)
5559 			continue;
5560 
5561 		ipolicer->ref_count = devm_kcalloc(rvu->dev,
5562 						   ipolicer->band_prof.max,
5563 						   sizeof(u16), GFP_KERNEL);
5564 		if (!ipolicer->ref_count)
5565 			return -ENOMEM;
5566 	}
5567 
5568 	/* Set policer timeunit to 2us ie  (19 + 1) * 100 nsec = 2us */
5569 	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
5570 
5571 	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
5572 
5573 	return 0;
5574 }
5575 
5576 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
5577 {
5578 	struct nix_ipolicer *ipolicer;
5579 	int layer;
5580 
5581 	if (!rvu->hw->cap.ipolicer)
5582 		return;
5583 
5584 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5585 		ipolicer = &nix_hw->ipolicer[layer];
5586 
5587 		if (!ipolicer->band_prof.max)
5588 			continue;
5589 
5590 		kfree(ipolicer->band_prof.bmap);
5591 	}
5592 }
5593 
5594 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
5595 			       struct nix_hw *nix_hw, u16 pcifunc)
5596 {
5597 	struct nix_ipolicer *ipolicer;
5598 	int layer, hi_layer, prof_idx;
5599 
5600 	/* Bits [15:14] in profile index represent layer */
5601 	layer = (req->qidx >> 14) & 0x03;
5602 	prof_idx = req->qidx & 0x3FFF;
5603 
5604 	ipolicer = &nix_hw->ipolicer[layer];
5605 	if (prof_idx >= ipolicer->band_prof.max)
5606 		return -EINVAL;
5607 
5608 	/* Check if the profile is allocated to the requesting PCIFUNC or not
5609 	 * with the exception of AF. AF is allowed to read and update contexts.
5610 	 */
5611 	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
5612 		return -EINVAL;
5613 
5614 	/* If this profile is linked to higher layer profile then check
5615 	 * if that profile is also allocated to the requesting PCIFUNC
5616 	 * or not.
5617 	 */
5618 	if (!req->prof.hl_en)
5619 		return 0;
5620 
5621 	/* Leaf layer profile can link only to mid layer and
5622 	 * mid layer to top layer.
5623 	 */
5624 	if (layer == BAND_PROF_LEAF_LAYER)
5625 		hi_layer = BAND_PROF_MID_LAYER;
5626 	else if (layer == BAND_PROF_MID_LAYER)
5627 		hi_layer = BAND_PROF_TOP_LAYER;
5628 	else
5629 		return -EINVAL;
5630 
5631 	ipolicer = &nix_hw->ipolicer[hi_layer];
5632 	prof_idx = req->prof.band_prof_id;
5633 	if (prof_idx >= ipolicer->band_prof.max ||
5634 	    ipolicer->pfvf_map[prof_idx] != pcifunc)
5635 		return -EINVAL;
5636 
5637 	return 0;
5638 }
5639 
5640 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
5641 					struct nix_bandprof_alloc_req *req,
5642 					struct nix_bandprof_alloc_rsp *rsp)
5643 {
5644 	int blkaddr, layer, prof, idx, err;
5645 	u16 pcifunc = req->hdr.pcifunc;
5646 	struct nix_ipolicer *ipolicer;
5647 	struct nix_hw *nix_hw;
5648 
5649 	if (!rvu->hw->cap.ipolicer)
5650 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5651 
5652 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5653 	if (err)
5654 		return err;
5655 
5656 	mutex_lock(&rvu->rsrc_lock);
5657 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5658 		if (layer == BAND_PROF_INVAL_LAYER)
5659 			continue;
5660 		if (!req->prof_count[layer])
5661 			continue;
5662 
5663 		ipolicer = &nix_hw->ipolicer[layer];
5664 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
5665 			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
5666 			if (idx == MAX_BANDPROF_PER_PFFUNC)
5667 				break;
5668 
5669 			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5670 			if (prof < 0)
5671 				break;
5672 			rsp->prof_count[layer]++;
5673 			rsp->prof_idx[layer][idx] = prof;
5674 			ipolicer->pfvf_map[prof] = pcifunc;
5675 		}
5676 	}
5677 	mutex_unlock(&rvu->rsrc_lock);
5678 	return 0;
5679 }
5680 
5681 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
5682 {
5683 	int blkaddr, layer, prof_idx, err;
5684 	struct nix_ipolicer *ipolicer;
5685 	struct nix_hw *nix_hw;
5686 
5687 	if (!rvu->hw->cap.ipolicer)
5688 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5689 
5690 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5691 	if (err)
5692 		return err;
5693 
5694 	mutex_lock(&rvu->rsrc_lock);
5695 	/* Free all the profiles allocated to the PCIFUNC */
5696 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5697 		if (layer == BAND_PROF_INVAL_LAYER)
5698 			continue;
5699 		ipolicer = &nix_hw->ipolicer[layer];
5700 
5701 		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
5702 			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
5703 				continue;
5704 
5705 			/* Clear ratelimit aggregation, if any */
5706 			if (layer == BAND_PROF_LEAF_LAYER &&
5707 			    ipolicer->match_id[prof_idx])
5708 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5709 
5710 			ipolicer->pfvf_map[prof_idx] = 0x00;
5711 			ipolicer->match_id[prof_idx] = 0;
5712 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5713 		}
5714 	}
5715 	mutex_unlock(&rvu->rsrc_lock);
5716 	return 0;
5717 }
5718 
5719 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
5720 				       struct nix_bandprof_free_req *req,
5721 				       struct msg_rsp *rsp)
5722 {
5723 	int blkaddr, layer, prof_idx, idx, err;
5724 	u16 pcifunc = req->hdr.pcifunc;
5725 	struct nix_ipolicer *ipolicer;
5726 	struct nix_hw *nix_hw;
5727 
5728 	if (req->free_all)
5729 		return nix_free_all_bandprof(rvu, pcifunc);
5730 
5731 	if (!rvu->hw->cap.ipolicer)
5732 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5733 
5734 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5735 	if (err)
5736 		return err;
5737 
5738 	mutex_lock(&rvu->rsrc_lock);
5739 	/* Free the requested profile indices */
5740 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5741 		if (layer == BAND_PROF_INVAL_LAYER)
5742 			continue;
5743 		if (!req->prof_count[layer])
5744 			continue;
5745 
5746 		ipolicer = &nix_hw->ipolicer[layer];
5747 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
5748 			if (idx == MAX_BANDPROF_PER_PFFUNC)
5749 				break;
5750 			prof_idx = req->prof_idx[layer][idx];
5751 			if (prof_idx >= ipolicer->band_prof.max ||
5752 			    ipolicer->pfvf_map[prof_idx] != pcifunc)
5753 				continue;
5754 
5755 			/* Clear ratelimit aggregation, if any */
5756 			if (layer == BAND_PROF_LEAF_LAYER &&
5757 			    ipolicer->match_id[prof_idx])
5758 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5759 
5760 			ipolicer->pfvf_map[prof_idx] = 0x00;
5761 			ipolicer->match_id[prof_idx] = 0;
5762 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5763 		}
5764 	}
5765 	mutex_unlock(&rvu->rsrc_lock);
5766 	return 0;
5767 }
5768 
5769 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
5770 			struct nix_cn10k_aq_enq_req *aq_req,
5771 			struct nix_cn10k_aq_enq_rsp *aq_rsp,
5772 			u16 pcifunc, u8 ctype, u32 qidx)
5773 {
5774 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5775 	aq_req->hdr.pcifunc = pcifunc;
5776 	aq_req->ctype = ctype;
5777 	aq_req->op = NIX_AQ_INSTOP_READ;
5778 	aq_req->qidx = qidx;
5779 
5780 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5781 				       (struct nix_aq_enq_req *)aq_req,
5782 				       (struct nix_aq_enq_rsp *)aq_rsp);
5783 }
5784 
5785 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
5786 					  struct nix_hw *nix_hw,
5787 					  struct nix_cn10k_aq_enq_req *aq_req,
5788 					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
5789 					  u32 leaf_prof, u16 mid_prof)
5790 {
5791 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5792 	aq_req->hdr.pcifunc = 0x00;
5793 	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
5794 	aq_req->op = NIX_AQ_INSTOP_WRITE;
5795 	aq_req->qidx = leaf_prof;
5796 
5797 	aq_req->prof.band_prof_id = mid_prof;
5798 	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
5799 	aq_req->prof.hl_en = 1;
5800 	aq_req->prof_mask.hl_en = 1;
5801 
5802 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5803 				       (struct nix_aq_enq_req *)aq_req,
5804 				       (struct nix_aq_enq_rsp *)aq_rsp);
5805 }
5806 
5807 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
5808 				 u16 rq_idx, u16 match_id)
5809 {
5810 	int leaf_prof, mid_prof, leaf_match;
5811 	struct nix_cn10k_aq_enq_req aq_req;
5812 	struct nix_cn10k_aq_enq_rsp aq_rsp;
5813 	struct nix_ipolicer *ipolicer;
5814 	struct nix_hw *nix_hw;
5815 	int blkaddr, idx, rc;
5816 
5817 	if (!rvu->hw->cap.ipolicer)
5818 		return 0;
5819 
5820 	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5821 	if (rc)
5822 		return rc;
5823 
5824 	/* Fetch the RQ's context to see if policing is enabled */
5825 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
5826 				 NIX_AQ_CTYPE_RQ, rq_idx);
5827 	if (rc) {
5828 		dev_err(rvu->dev,
5829 			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
5830 			__func__, rq_idx, pcifunc);
5831 		return rc;
5832 	}
5833 
5834 	if (!aq_rsp.rq.policer_ena)
5835 		return 0;
5836 
5837 	/* Get the bandwidth profile ID mapped to this RQ */
5838 	leaf_prof = aq_rsp.rq.band_prof_id;
5839 
5840 	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
5841 	ipolicer->match_id[leaf_prof] = match_id;
5842 
5843 	/* Check if any other leaf profile is marked with same match_id */
5844 	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
5845 		if (idx == leaf_prof)
5846 			continue;
5847 		if (ipolicer->match_id[idx] != match_id)
5848 			continue;
5849 
5850 		leaf_match = idx;
5851 		break;
5852 	}
5853 
5854 	if (idx == ipolicer->band_prof.max)
5855 		return 0;
5856 
5857 	/* Fetch the matching profile's context to check if it's already
5858 	 * mapped to a mid level profile.
5859 	 */
5860 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5861 				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
5862 	if (rc) {
5863 		dev_err(rvu->dev,
5864 			"%s: Failed to fetch context of leaf profile %d\n",
5865 			__func__, leaf_match);
5866 		return rc;
5867 	}
5868 
5869 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
5870 	if (aq_rsp.prof.hl_en) {
5871 		/* Get Mid layer prof index and map leaf_prof index
5872 		 * also such that flows that are being steered
5873 		 * to different RQs and marked with same match_id
5874 		 * are rate limited in a aggregate fashion
5875 		 */
5876 		mid_prof = aq_rsp.prof.band_prof_id;
5877 		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5878 						    &aq_req, &aq_rsp,
5879 						    leaf_prof, mid_prof);
5880 		if (rc) {
5881 			dev_err(rvu->dev,
5882 				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5883 				__func__, leaf_prof, mid_prof);
5884 			goto exit;
5885 		}
5886 
5887 		mutex_lock(&rvu->rsrc_lock);
5888 		ipolicer->ref_count[mid_prof]++;
5889 		mutex_unlock(&rvu->rsrc_lock);
5890 		goto exit;
5891 	}
5892 
5893 	/* Allocate a mid layer profile and
5894 	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
5895 	 */
5896 	mutex_lock(&rvu->rsrc_lock);
5897 	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5898 	if (mid_prof < 0) {
5899 		dev_err(rvu->dev,
5900 			"%s: Unable to allocate mid layer profile\n", __func__);
5901 		mutex_unlock(&rvu->rsrc_lock);
5902 		goto exit;
5903 	}
5904 	mutex_unlock(&rvu->rsrc_lock);
5905 	ipolicer->pfvf_map[mid_prof] = 0x00;
5906 	ipolicer->ref_count[mid_prof] = 0;
5907 
5908 	/* Initialize mid layer profile same as 'leaf_prof' */
5909 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5910 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
5911 	if (rc) {
5912 		dev_err(rvu->dev,
5913 			"%s: Failed to fetch context of leaf profile %d\n",
5914 			__func__, leaf_prof);
5915 		goto exit;
5916 	}
5917 
5918 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5919 	aq_req.hdr.pcifunc = 0x00;
5920 	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
5921 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5922 	aq_req.op = NIX_AQ_INSTOP_WRITE;
5923 	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
5924 	memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
5925 	/* Clear higher layer enable bit in the mid profile, just in case */
5926 	aq_req.prof.hl_en = 0;
5927 	aq_req.prof_mask.hl_en = 1;
5928 
5929 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5930 				     (struct nix_aq_enq_req *)&aq_req, NULL);
5931 	if (rc) {
5932 		dev_err(rvu->dev,
5933 			"%s: Failed to INIT context of mid layer profile %d\n",
5934 			__func__, mid_prof);
5935 		goto exit;
5936 	}
5937 
5938 	/* Map both leaf profiles to this mid layer profile */
5939 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5940 					    &aq_req, &aq_rsp,
5941 					    leaf_prof, mid_prof);
5942 	if (rc) {
5943 		dev_err(rvu->dev,
5944 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5945 			__func__, leaf_prof, mid_prof);
5946 		goto exit;
5947 	}
5948 
5949 	mutex_lock(&rvu->rsrc_lock);
5950 	ipolicer->ref_count[mid_prof]++;
5951 	mutex_unlock(&rvu->rsrc_lock);
5952 
5953 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5954 					    &aq_req, &aq_rsp,
5955 					    leaf_match, mid_prof);
5956 	if (rc) {
5957 		dev_err(rvu->dev,
5958 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5959 			__func__, leaf_match, mid_prof);
5960 		ipolicer->ref_count[mid_prof]--;
5961 		goto exit;
5962 	}
5963 
5964 	mutex_lock(&rvu->rsrc_lock);
5965 	ipolicer->ref_count[mid_prof]++;
5966 	mutex_unlock(&rvu->rsrc_lock);
5967 
5968 exit:
5969 	return rc;
5970 }
5971 
5972 /* Called with mutex rsrc_lock */
5973 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
5974 				     u32 leaf_prof)
5975 {
5976 	struct nix_cn10k_aq_enq_req aq_req;
5977 	struct nix_cn10k_aq_enq_rsp aq_rsp;
5978 	struct nix_ipolicer *ipolicer;
5979 	u16 mid_prof;
5980 	int rc;
5981 
5982 	mutex_unlock(&rvu->rsrc_lock);
5983 
5984 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5985 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
5986 
5987 	mutex_lock(&rvu->rsrc_lock);
5988 	if (rc) {
5989 		dev_err(rvu->dev,
5990 			"%s: Failed to fetch context of leaf profile %d\n",
5991 			__func__, leaf_prof);
5992 		return;
5993 	}
5994 
5995 	if (!aq_rsp.prof.hl_en)
5996 		return;
5997 
5998 	mid_prof = aq_rsp.prof.band_prof_id;
5999 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
6000 	ipolicer->ref_count[mid_prof]--;
6001 	/* If ref_count is zero, free mid layer profile */
6002 	if (!ipolicer->ref_count[mid_prof]) {
6003 		ipolicer->pfvf_map[mid_prof] = 0x00;
6004 		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
6005 	}
6006 }
6007 
6008 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
6009 					     struct nix_bandprof_get_hwinfo_rsp *rsp)
6010 {
6011 	struct nix_ipolicer *ipolicer;
6012 	int blkaddr, layer, err;
6013 	struct nix_hw *nix_hw;
6014 	u64 tu;
6015 
6016 	if (!rvu->hw->cap.ipolicer)
6017 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
6018 
6019 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6020 	if (err)
6021 		return err;
6022 
6023 	/* Return number of bandwidth profiles free at each layer */
6024 	mutex_lock(&rvu->rsrc_lock);
6025 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
6026 		if (layer == BAND_PROF_INVAL_LAYER)
6027 			continue;
6028 
6029 		ipolicer = &nix_hw->ipolicer[layer];
6030 		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
6031 	}
6032 	mutex_unlock(&rvu->rsrc_lock);
6033 
6034 	/* Set the policer timeunit in nanosec */
6035 	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
6036 	rsp->policer_timeunit = (tu + 1) * 100;
6037 
6038 	return 0;
6039 }
6040 
6041 static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp,
6042 							      u32 mcast_grp_idx)
6043 {
6044 	struct nix_mcast_grp_elem *iter;
6045 	bool is_found = false;
6046 
6047 	list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
6048 		if (iter->mcast_grp_idx == mcast_grp_idx) {
6049 			is_found = true;
6050 			break;
6051 		}
6052 	}
6053 
6054 	if (is_found)
6055 		return iter;
6056 
6057 	return NULL;
6058 }
6059 
6060 int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
6061 {
6062 	struct nix_mcast_grp_elem *elem;
6063 	struct nix_mcast_grp *mcast_grp;
6064 	struct nix_hw *nix_hw;
6065 	int blkaddr, ret;
6066 
6067 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6068 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
6069 	if (!nix_hw)
6070 		return NIX_AF_ERR_INVALID_NIXBLK;
6071 
6072 	mcast_grp = &nix_hw->mcast_grp;
6073 	mutex_lock(&mcast_grp->mcast_grp_lock);
6074 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
6075 	if (!elem)
6076 		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6077 	else
6078 		ret = elem->mce_start_index;
6079 
6080 	mutex_unlock(&mcast_grp->mcast_grp_lock);
6081 	return ret;
6082 }
6083 
6084 void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
6085 {
6086 	struct nix_mcast_grp_destroy_req dreq = { 0 };
6087 	struct nix_mcast_grp_update_req ureq = { 0 };
6088 	struct nix_mcast_grp_update_rsp ursp = { 0 };
6089 	struct nix_mcast_grp_elem *elem, *tmp;
6090 	struct nix_mcast_grp *mcast_grp;
6091 	struct nix_hw *nix_hw;
6092 	int blkaddr;
6093 
6094 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6095 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
6096 	if (!nix_hw)
6097 		return;
6098 
6099 	mcast_grp = &nix_hw->mcast_grp;
6100 
6101 	mutex_lock(&mcast_grp->mcast_grp_lock);
6102 	list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) {
6103 		struct nix_mce_list *mce_list;
6104 		struct hlist_node *tmp;
6105 		struct mce *mce;
6106 
6107 		/* If the pcifunc which created the multicast/mirror
6108 		 * group received an FLR, then delete the entire group.
6109 		 */
6110 		if (elem->pcifunc == pcifunc) {
6111 			/* Delete group */
6112 			dreq.hdr.pcifunc = elem->pcifunc;
6113 			dreq.mcast_grp_idx = elem->mcast_grp_idx;
6114 			dreq.is_af = 1;
6115 			rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
6116 			continue;
6117 		}
6118 
6119 		/* Iterate the group elements and delete the element which
6120 		 * received the FLR.
6121 		 */
6122 		mce_list = &elem->mcast_mce_list;
6123 		hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
6124 			if (mce->pcifunc == pcifunc) {
6125 				ureq.hdr.pcifunc = pcifunc;
6126 				ureq.num_mce_entry = 1;
6127 				ureq.mcast_grp_idx = elem->mcast_grp_idx;
6128 				ureq.op = NIX_MCAST_OP_DEL_ENTRY;
6129 				ureq.pcifunc[0] = pcifunc;
6130 				ureq.is_af = 1;
6131 				rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp);
6132 				break;
6133 			}
6134 		}
6135 	}
6136 	mutex_unlock(&mcast_grp->mcast_grp_lock);
6137 }
6138 
6139 int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
6140 				    u32 mcast_grp_idx, u16 mcam_index)
6141 {
6142 	struct nix_mcast_grp_elem *elem;
6143 	struct nix_mcast_grp *mcast_grp;
6144 	struct nix_hw *nix_hw;
6145 	int blkaddr, ret = 0;
6146 
6147 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6148 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
6149 	if (!nix_hw)
6150 		return NIX_AF_ERR_INVALID_NIXBLK;
6151 
6152 	mcast_grp = &nix_hw->mcast_grp;
6153 	mutex_lock(&mcast_grp->mcast_grp_lock);
6154 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
6155 	if (!elem)
6156 		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6157 	else
6158 		elem->mcam_index = mcam_index;
6159 
6160 	mutex_unlock(&mcast_grp->mcast_grp_lock);
6161 	return ret;
6162 }
6163 
6164 int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
6165 					  struct nix_mcast_grp_create_req *req,
6166 					  struct nix_mcast_grp_create_rsp *rsp)
6167 {
6168 	struct nix_mcast_grp_elem *elem;
6169 	struct nix_mcast_grp *mcast_grp;
6170 	struct nix_hw *nix_hw;
6171 	int blkaddr, err;
6172 
6173 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6174 	if (err)
6175 		return err;
6176 
6177 	mcast_grp = &nix_hw->mcast_grp;
6178 	elem = kzalloc(sizeof(*elem), GFP_KERNEL);
6179 	if (!elem)
6180 		return -ENOMEM;
6181 
6182 	INIT_HLIST_HEAD(&elem->mcast_mce_list.head);
6183 	elem->mcam_index = -1;
6184 	elem->mce_start_index = -1;
6185 	elem->pcifunc = req->hdr.pcifunc;
6186 	elem->dir = req->dir;
6187 	elem->mcast_grp_idx = mcast_grp->next_grp_index++;
6188 
6189 	mutex_lock(&mcast_grp->mcast_grp_lock);
6190 	list_add_tail(&elem->list, &mcast_grp->mcast_grp_head);
6191 	mcast_grp->count++;
6192 	mutex_unlock(&mcast_grp->mcast_grp_lock);
6193 
6194 	rsp->mcast_grp_idx = elem->mcast_grp_idx;
6195 	return 0;
6196 }
6197 
6198 int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
6199 					   struct nix_mcast_grp_destroy_req *req,
6200 					   struct msg_rsp *rsp)
6201 {
6202 	struct npc_delete_flow_req uninstall_req = { 0 };
6203 	struct npc_delete_flow_rsp uninstall_rsp = { 0 };
6204 	struct nix_mcast_grp_elem *elem;
6205 	struct nix_mcast_grp *mcast_grp;
6206 	int blkaddr, err, ret = 0;
6207 	struct nix_mcast *mcast;
6208 	struct nix_hw *nix_hw;
6209 
6210 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6211 	if (err)
6212 		return err;
6213 
6214 	mcast_grp = &nix_hw->mcast_grp;
6215 
6216 	/* If AF is requesting for the deletion,
6217 	 * then AF is already taking the lock
6218 	 */
6219 	if (!req->is_af)
6220 		mutex_lock(&mcast_grp->mcast_grp_lock);
6221 
6222 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
6223 	if (!elem) {
6224 		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6225 		goto unlock_grp;
6226 	}
6227 
6228 	/* If no mce entries are associated with the group
6229 	 * then just remove it from the global list.
6230 	 */
6231 	if (!elem->mcast_mce_list.count)
6232 		goto delete_grp;
6233 
6234 	/* Delete the associated mcam entry and
6235 	 * remove all mce entries from the group
6236 	 */
6237 	mcast = &nix_hw->mcast;
6238 	mutex_lock(&mcast->mce_lock);
6239 	if (elem->mcam_index != -1) {
6240 		uninstall_req.hdr.pcifunc = req->hdr.pcifunc;
6241 		uninstall_req.entry = elem->mcam_index;
6242 		rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
6243 	}
6244 
6245 	nix_free_mce_list(mcast, elem->mcast_mce_list.count,
6246 			  elem->mce_start_index, elem->dir);
6247 	nix_delete_mcast_mce_list(&elem->mcast_mce_list);
6248 	mutex_unlock(&mcast->mce_lock);
6249 
6250 delete_grp:
6251 	list_del(&elem->list);
6252 	kfree(elem);
6253 	mcast_grp->count--;
6254 
6255 unlock_grp:
6256 	if (!req->is_af)
6257 		mutex_unlock(&mcast_grp->mcast_grp_lock);
6258 
6259 	return ret;
6260 }
6261 
6262 int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
6263 					  struct nix_mcast_grp_update_req *req,
6264 					  struct nix_mcast_grp_update_rsp *rsp)
6265 {
6266 	struct nix_mcast_grp_destroy_req dreq = { 0 };
6267 	struct npc_mcam *mcam = &rvu->hw->mcam;
6268 	struct nix_mcast_grp_elem *elem;
6269 	struct nix_mcast_grp *mcast_grp;
6270 	int blkaddr, err, npc_blkaddr;
6271 	u16 prev_count, new_count;
6272 	struct nix_mcast *mcast;
6273 	struct nix_hw *nix_hw;
6274 	int i, ret;
6275 
6276 	if (!req->num_mce_entry)
6277 		return 0;
6278 
6279 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6280 	if (err)
6281 		return err;
6282 
6283 	mcast_grp = &nix_hw->mcast_grp;
6284 
6285 	/* If AF is requesting for the updation,
6286 	 * then AF is already taking the lock
6287 	 */
6288 	if (!req->is_af)
6289 		mutex_lock(&mcast_grp->mcast_grp_lock);
6290 
6291 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
6292 	if (!elem) {
6293 		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6294 		goto unlock_grp;
6295 	}
6296 
6297 	/* If any pcifunc matches the group's pcifunc, then we can
6298 	 * delete the entire group.
6299 	 */
6300 	if (req->op == NIX_MCAST_OP_DEL_ENTRY) {
6301 		for (i = 0; i < req->num_mce_entry; i++) {
6302 			if (elem->pcifunc == req->pcifunc[i]) {
6303 				/* Delete group */
6304 				dreq.hdr.pcifunc = elem->pcifunc;
6305 				dreq.mcast_grp_idx = elem->mcast_grp_idx;
6306 				dreq.is_af = 1;
6307 				rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
6308 				ret = 0;
6309 				goto unlock_grp;
6310 			}
6311 		}
6312 	}
6313 
6314 	mcast = &nix_hw->mcast;
6315 	mutex_lock(&mcast->mce_lock);
6316 	npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
6317 	if (elem->mcam_index != -1)
6318 		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);
6319 
6320 	prev_count = elem->mcast_mce_list.count;
6321 	if (req->op == NIX_MCAST_OP_ADD_ENTRY) {
6322 		new_count = prev_count + req->num_mce_entry;
6323 		if (prev_count)
6324 			nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
6325 
6326 		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
6327 
6328 		/* It is possible not to get contiguous memory */
6329 		if (elem->mce_start_index < 0) {
6330 			if (elem->mcam_index != -1) {
6331 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6332 						      elem->mcam_index, true);
6333 				ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
6334 				goto unlock_mce;
6335 			}
6336 		}
6337 
6338 		ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req);
6339 		if (ret) {
6340 			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
6341 			if (prev_count)
6342 				elem->mce_start_index = nix_alloc_mce_list(mcast,
6343 									   prev_count,
6344 									   elem->dir);
6345 
6346 			if (elem->mcam_index != -1)
6347 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6348 						      elem->mcam_index, true);
6349 
6350 			goto unlock_mce;
6351 		}
6352 	} else {
6353 		if (!prev_count || prev_count < req->num_mce_entry) {
6354 			if (elem->mcam_index != -1)
6355 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6356 						      elem->mcam_index, true);
6357 			ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
6358 			goto unlock_mce;
6359 		}
6360 
6361 		nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
6362 		new_count = prev_count - req->num_mce_entry;
6363 		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
6364 		ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
6365 		if (ret) {
6366 			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
6367 			elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
6368 			if (elem->mcam_index != -1)
6369 				npc_enable_mcam_entry(rvu, mcam,
6370 						      npc_blkaddr,
6371 						      elem->mcam_index,
6372 						      true);
6373 
6374 			goto unlock_mce;
6375 		}
6376 	}
6377 
6378 	if (elem->mcam_index == -1) {
6379 		rsp->mce_start_index = elem->mce_start_index;
6380 		ret = 0;
6381 		goto unlock_mce;
6382 	}
6383 
6384 	nix_mcast_update_action(rvu, elem);
6385 	npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
6386 	rsp->mce_start_index = elem->mce_start_index;
6387 	ret = 0;
6388 
6389 unlock_mce:
6390 	mutex_unlock(&mcast->mce_lock);
6391 
6392 unlock_grp:
6393 	if (!req->is_af)
6394 		mutex_unlock(&mcast_grp->mcast_grp_lock);
6395 
6396 	return ret;
6397 }
6398