xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c (revision 3494bec0f6ac8ac06e0ad7c35933db345b2c5a83)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19 
20 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
21 
22 enum mc_tbl_sz {
23 	MC_TBL_SZ_256,
24 	MC_TBL_SZ_512,
25 	MC_TBL_SZ_1K,
26 	MC_TBL_SZ_2K,
27 	MC_TBL_SZ_4K,
28 	MC_TBL_SZ_8K,
29 	MC_TBL_SZ_16K,
30 	MC_TBL_SZ_32K,
31 	MC_TBL_SZ_64K,
32 };
33 
34 enum mc_buf_cnt {
35 	MC_BUF_CNT_8,
36 	MC_BUF_CNT_16,
37 	MC_BUF_CNT_32,
38 	MC_BUF_CNT_64,
39 	MC_BUF_CNT_128,
40 	MC_BUF_CNT_256,
41 	MC_BUF_CNT_512,
42 	MC_BUF_CNT_1024,
43 	MC_BUF_CNT_2048,
44 };
45 
46 enum nix_makr_fmt_indexes {
47 	NIX_MARK_CFG_IP_DSCP_RED,
48 	NIX_MARK_CFG_IP_DSCP_YELLOW,
49 	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
50 	NIX_MARK_CFG_IP_ECN_RED,
51 	NIX_MARK_CFG_IP_ECN_YELLOW,
52 	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
53 	NIX_MARK_CFG_VLAN_DEI_RED,
54 	NIX_MARK_CFG_VLAN_DEI_YELLOW,
55 	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
56 	NIX_MARK_CFG_MAX,
57 };
58 
59 /* For now, consider only the MC resources needed for broadcast
60  * pkt replication, i.e. 256 HWVFs + 12 PFs.
61  */
62 #define MC_TBL_SIZE	MC_TBL_SZ_512
63 #define MC_BUF_CNT	MC_BUF_CNT_128
64 
65 struct mce {
66 	struct hlist_node	node;
67 	u16			pcifunc;
68 };
69 
70 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
71 {
72 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
73 	int blkaddr;
74 
75 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
76 	if (!pfvf->nixlf || blkaddr < 0)
77 		return false;
78 	return true;
79 }
80 
81 int rvu_get_nixlf_count(struct rvu *rvu)
82 {
83 	struct rvu_block *block;
84 	int blkaddr;
85 
86 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
87 	if (blkaddr < 0)
88 		return 0;
89 	block = &rvu->hw->block[blkaddr];
90 	return block->lf.max;
91 }
92 
93 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
94 {
95 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
96 	struct rvu_hwinfo *hw = rvu->hw;
97 	int blkaddr;
98 
99 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
100 	if (!pfvf->nixlf || blkaddr < 0)
101 		return NIX_AF_ERR_AF_LF_INVALID;
102 
103 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
104 	if (*nixlf < 0)
105 		return NIX_AF_ERR_AF_LF_INVALID;
106 
107 	if (nix_blkaddr)
108 		*nix_blkaddr = blkaddr;
109 
110 	return 0;
111 }
112 
113 static void nix_mce_list_init(struct nix_mce_list *list, int max)
114 {
115 	INIT_HLIST_HEAD(&list->head);
116 	list->count = 0;
117 	list->max = max;
118 }
119 
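/* MCE entries are handed out with a simple bump allocator; there is no
 * corresponding free path in this file, so indices are carved out once
 * and kept.
 */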
120 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
121 {
122 	int idx;
123 
124 	if (!mcast)
125 		return 0;
126 
127 	idx = mcast->next_free_mce;
128 	mcast->next_free_mce += count;
129 	return idx;
130 }
131 
132 static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
133 {
134 	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
135 		return hw->nix0;
136 
137 	return NULL;
138 }
139 
140 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
141 {
142 	int err;
143 
144 	/* Sync all in-flight RX packets to LLC/DRAM */
145 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
146 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
147 	if (err)
148 		dev_err(rvu->dev, "NIX RX software sync failed\n");
149 }
150 
151 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
152 			    int lvl, u16 pcifunc, u16 schq)
153 {
154 	struct rvu_hwinfo *hw = rvu->hw;
155 	struct nix_txsch *txsch;
156 	struct nix_hw *nix_hw;
157 	u16 map_func;
158 
159 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
160 	if (!nix_hw)
161 		return false;
162 
163 	txsch = &nix_hw->txsch[lvl];
164 	/* Check out of bounds */
165 	if (schq >= txsch->schq.max)
166 		return false;
167 
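	/* pfvf_map[] packs both the owning PF_FUNC and config flags for each
	 * schq (see TXSCH_MAP_FUNC/TXSCH_MAP_FLAGS); read it under rsrc_lock.
	 */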
168 	mutex_lock(&rvu->rsrc_lock);
169 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
170 	mutex_unlock(&rvu->rsrc_lock);
171 
172 	/* TLs aggregating traffic are shared across a PF and its VFs */
173 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
174 		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
175 			return false;
176 		else
177 			return true;
178 	}
179 
180 	if (map_func != pcifunc)
181 		return false;
182 
183 	return true;
184 }
185 
186 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
187 {
188 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
189 	u8 cgx_id, lmac_id;
190 	int pkind, pf, vf;
191 	int err;
192 
193 	pf = rvu_get_pf(pcifunc);
194 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
195 		return 0;
196 
197 	switch (type) {
198 	case NIX_INTF_TYPE_CGX:
199 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
200 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
201 
202 		pkind = rvu_npc_get_pkind(rvu, pf);
203 		if (pkind < 0) {
204 			dev_err(rvu->dev,
205 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
206 			return -EINVAL;
207 		}
208 		pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
209 		pfvf->tx_chan_base = pfvf->rx_chan_base;
210 		pfvf->rx_chan_cnt = 1;
211 		pfvf->tx_chan_cnt = 1;
212 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
213 		rvu_npc_set_pkind(rvu, pkind, pfvf);
214 		break;
215 	case NIX_INTF_TYPE_LBK:
216 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
217 
218 		/* Note that AF's VFs work in pairs and talk over consecutive
219 		 * loopback channels. Therefore, if an odd number of AF VFs is
220 		 * enabled, the last VF is left without a pair.
221 		 */
222 		pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
223 		pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
224 						NIX_CHAN_LBK_CHX(0, vf + 1);
225 		pfvf->rx_chan_cnt = 1;
226 		pfvf->tx_chan_cnt = 1;
227 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
228 					      pfvf->rx_chan_base, false);
229 		break;
230 	}
231 
232 	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
233 	 * RVU PF/VF's MAC address.
234 	 */
235 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
236 				    pfvf->rx_chan_base, pfvf->mac_addr);
237 
238 	/* Add this PF_FUNC to bcast pkt replication list */
239 	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
240 	if (err) {
241 		dev_err(rvu->dev,
242 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
243 			pcifunc);
244 		return err;
245 	}
246 
247 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
248 					  nixlf, pfvf->rx_chan_base);
249 	pfvf->maxlen = NIC_HW_MIN_FRS;
250 	pfvf->minlen = NIC_HW_MIN_FRS;
251 
252 	return 0;
253 }
254 
255 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
256 {
257 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
258 	int err;
259 
260 	pfvf->maxlen = 0;
261 	pfvf->minlen = 0;
262 	pfvf->rxvlan = false;
263 
264 	/* Remove this PF_FUNC from bcast pkt replication list */
265 	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
266 	if (err) {
267 		dev_err(rvu->dev,
268 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
269 			pcifunc);
270 	}
271 
272 	/* Free and disable any MCAM entries used by this NIX LF */
273 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
274 }
275 
276 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
277 				 u64 format, bool v4, u64 *fidx)
278 {
279 	struct nix_lso_format field = {0};
280 
281 	/* IP's Length field */
282 	field.layer = NIX_TXLAYER_OL3;
283 	/* In IPv4 the length field is at byte offset 2; in IPv6 it's at offset 4 */
284 	field.offset = v4 ? 2 : 4;
285 	field.sizem1 = 1; /* i.e 2 bytes */
286 	field.alg = NIX_LSOALG_ADD_PAYLEN;
287 	rvu_write64(rvu, blkaddr,
288 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
289 		    *(u64 *)&field);
290 
291 	/* No ID field in IPv6 header */
292 	if (!v4)
293 		return;
294 
295 	/* IP's ID field */
296 	field.layer = NIX_TXLAYER_OL3;
297 	field.offset = 4;
298 	field.sizem1 = 1; /* i.e 2 bytes */
299 	field.alg = NIX_LSOALG_ADD_SEGNUM;
300 	rvu_write64(rvu, blkaddr,
301 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
302 		    *(u64 *)&field);
303 }
304 
305 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
306 				 u64 format, u64 *fidx)
307 {
308 	struct nix_lso_format field = {0};
309 
310 	/* TCP's sequence number field */
311 	field.layer = NIX_TXLAYER_OL4;
312 	field.offset = 4;
313 	field.sizem1 = 3; /* i.e 4 bytes */
314 	field.alg = NIX_LSOALG_ADD_OFFSET;
315 	rvu_write64(rvu, blkaddr,
316 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
317 		    *(u64 *)&field);
318 
319 	/* TCP's flags field */
320 	field.layer = NIX_TXLAYER_OL4;
321 	field.offset = 12;
322 	field.sizem1 = 1; /* 2 bytes */
323 	field.alg = NIX_LSOALG_TCP_FLAGS;
324 	rvu_write64(rvu, blkaddr,
325 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
326 		    *(u64 *)&field);
327 }
328 
329 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
330 {
331 	u64 cfg, idx, fidx = 0;
332 
333 	/* Get max HW supported format indices */
334 	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
335 	nix_hw->lso.total = cfg;
336 
337 	/* Enable LSO */
338 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
339 	/* For TSO, set first and middle segment flags to
340 	 * mask out PSH, RST & FIN flags in TCP packet
341 	 */
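	/* i.e. in the 16-bit data-offset/flags word keep everything except
	 * FIN (bit 0), RST (bit 2) and PSH (bit 3); bit 63 below enables LSO.
	 */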
342 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
343 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
344 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
345 
346 	/* Setup default static LSO formats
347 	 *
348 	 * Configure format fields for TCPv4 segmentation offload
349 	 */
350 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
351 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
352 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
353 
354 	/* Set rest of the fields to NOP */
355 	for (; fidx < 8; fidx++) {
356 		rvu_write64(rvu, blkaddr,
357 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
358 	}
359 	nix_hw->lso.in_use++;
360 
361 	/* Configure format fields for TCPv6 segmentation offload */
362 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
363 	fidx = 0;
364 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
365 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
366 
367 	/* Set rest of the fields to NOP */
368 	for (; fidx < 8; fidx++) {
369 		rvu_write64(rvu, blkaddr,
370 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
371 	}
372 	nix_hw->lso.in_use++;
373 }
374 
375 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
376 {
377 	kfree(pfvf->rq_bmap);
378 	kfree(pfvf->sq_bmap);
379 	kfree(pfvf->cq_bmap);
380 	if (pfvf->rq_ctx)
381 		qmem_free(rvu->dev, pfvf->rq_ctx);
382 	if (pfvf->sq_ctx)
383 		qmem_free(rvu->dev, pfvf->sq_ctx);
384 	if (pfvf->cq_ctx)
385 		qmem_free(rvu->dev, pfvf->cq_ctx);
386 	if (pfvf->rss_ctx)
387 		qmem_free(rvu->dev, pfvf->rss_ctx);
388 	if (pfvf->nix_qints_ctx)
389 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
390 	if (pfvf->cq_ints_ctx)
391 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
392 
393 	pfvf->rq_bmap = NULL;
394 	pfvf->cq_bmap = NULL;
395 	pfvf->sq_bmap = NULL;
396 	pfvf->rq_ctx = NULL;
397 	pfvf->sq_ctx = NULL;
398 	pfvf->cq_ctx = NULL;
399 	pfvf->rss_ctx = NULL;
400 	pfvf->nix_qints_ctx = NULL;
401 	pfvf->cq_ints_ctx = NULL;
402 }
403 
404 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
405 			      struct rvu_pfvf *pfvf, int nixlf,
406 			      int rss_sz, int rss_grps, int hwctx_size,
407 			      u64 way_mask)
408 {
409 	int err, grp, num_indices;
410 
411 	/* RSS is not requested for this NIXLF */
412 	if (!rss_sz)
413 		return 0;
414 	num_indices = rss_sz * rss_grps;
415 
416 	/* Alloc NIX RSS HW context memory and config the base */
417 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
418 	if (err)
419 		return err;
420 
421 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
422 		    (u64)pfvf->rss_ctx->iova);
423 
424 	/* Config full RSS table size, enable RSS and caching */
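	/* BIT(4) enables RSS (checked again in rvu_nix_aq_enq_inst), BIT(36)
	 * enables caching, and way_mask at bit 20 presumably selects the NDC
	 * cache ways to use.
	 */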
425 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
426 		    BIT_ULL(36) | BIT_ULL(4) |
427 		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
428 		    way_mask << 20);
429 	/* Config RSS group offset and sizes */
430 	for (grp = 0; grp < rss_grps; grp++)
431 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
432 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
433 	return 0;
434 }
435 
436 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
437 			       struct nix_aq_inst_s *inst)
438 {
439 	struct admin_queue *aq = block->aq;
440 	struct nix_aq_res_s *result;
441 	int timeout = 1000;
442 	u64 reg, head;
443 
444 	result = (struct nix_aq_res_s *)aq->res->base;
445 
446 	/* Get current head pointer where to append this instruction */
447 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
448 	head = (reg >> 4) & AQ_PTR_MASK;
449 
450 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
451 	       (void *)inst, aq->inst->entry_sz);
452 	memset(result, 0, sizeof(*result));
453 	/* sync into memory */
454 	wmb();
455 
456 	/* Ring the doorbell and wait for result */
457 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
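	/* Poll for completion; with udelay(1) and 1000 iterations this is
	 * roughly a 1ms timeout.
	 */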
458 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
459 		cpu_relax();
460 		udelay(1);
461 		timeout--;
462 		if (!timeout)
463 			return -EBUSY;
464 	}
465 
466 	if (result->compcode != NIX_AQ_COMP_GOOD)
467 		/* TODO: Replace this with some error code */
468 		return -EBUSY;
469 
470 	return 0;
471 }
472 
473 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
474 			       struct nix_aq_enq_rsp *rsp)
475 {
476 	struct rvu_hwinfo *hw = rvu->hw;
477 	u16 pcifunc = req->hdr.pcifunc;
478 	int nixlf, blkaddr, rc = 0;
479 	struct nix_aq_inst_s inst;
480 	struct rvu_block *block;
481 	struct admin_queue *aq;
482 	struct rvu_pfvf *pfvf;
483 	void *ctx, *mask;
484 	bool ena;
485 	u64 cfg;
486 
487 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
488 	if (blkaddr < 0)
489 		return NIX_AF_ERR_AF_LF_INVALID;
490 
491 	block = &hw->block[blkaddr];
492 	aq = block->aq;
493 	if (!aq) {
494 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
495 		return NIX_AF_ERR_AQ_ENQUEUE;
496 	}
497 
498 	pfvf = rvu_get_pfvf(rvu, pcifunc);
499 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
500 
501 	/* Skip NIXLF check for broadcast MCE entry init */
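	/* (AF-internal MCE inits call this with rsp == NULL and may run
	 * before any NIXLF is attached, e.g. from nix_setup_mce().)
	 */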
502 	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
503 		if (!pfvf->nixlf || nixlf < 0)
504 			return NIX_AF_ERR_AF_LF_INVALID;
505 	}
506 
507 	switch (req->ctype) {
508 	case NIX_AQ_CTYPE_RQ:
509 		/* Check if index exceeds max no of queues */
510 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
511 			rc = NIX_AF_ERR_AQ_ENQUEUE;
512 		break;
513 	case NIX_AQ_CTYPE_SQ:
514 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
515 			rc = NIX_AF_ERR_AQ_ENQUEUE;
516 		break;
517 	case NIX_AQ_CTYPE_CQ:
518 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
519 			rc = NIX_AF_ERR_AQ_ENQUEUE;
520 		break;
521 	case NIX_AQ_CTYPE_RSS:
522 		/* Check if RSS is enabled and qidx is within range */
523 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
524 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
525 		    (req->qidx >= (256UL << (cfg & 0xF))))
526 			rc = NIX_AF_ERR_AQ_ENQUEUE;
527 		break;
528 	case NIX_AQ_CTYPE_MCE:
529 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
530 		/* Check if index exceeds MCE list length */
531 		if (!hw->nix0->mcast.mce_ctx ||
532 		    (req->qidx >= (256UL << (cfg & 0xF))))
533 			rc = NIX_AF_ERR_AQ_ENQUEUE;
534 
535 		/* Adding multicast lists for requests from PF/VFs is not
536 		 * yet supported, so reject such requests.
537 		 */
538 		if (rsp)
539 			rc = NIX_AF_ERR_AQ_ENQUEUE;
540 		break;
541 	default:
542 		rc = NIX_AF_ERR_AQ_ENQUEUE;
543 	}
544 
545 	if (rc)
546 		return rc;
547 
548 	/* Check if the SMQ pointed to by the SQ belongs to this PF/VF */
549 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
550 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
551 	     (req->op == NIX_AQ_INSTOP_WRITE &&
552 	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
553 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
554 				     pcifunc, req->sq.smq))
555 			return NIX_AF_ERR_AQ_ENQUEUE;
556 	}
557 
558 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
559 	inst.lf = nixlf;
560 	inst.cindex = req->qidx;
561 	inst.ctype = req->ctype;
562 	inst.op = req->op;
563 	/* Enqueuing multiple instructions is not supported yet,
564 	 * so always use the first entry in the result memory.
565 	 */
566 	inst.res_addr = (u64)aq->res->iova;
567 
568 	/* Clean result + context memory */
569 	memset(aq->res->base, 0, aq->res->entry_sz);
570 	/* Context needs to be written at RES_ADDR + 128 */
571 	ctx = aq->res->base + 128;
572 	/* Mask needs to be written at RES_ADDR + 256 */
573 	mask = aq->res->base + 256;
574 
575 	switch (req->op) {
576 	case NIX_AQ_INSTOP_WRITE:
577 		if (req->ctype == NIX_AQ_CTYPE_RQ)
578 			memcpy(mask, &req->rq_mask,
579 			       sizeof(struct nix_rq_ctx_s));
580 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
581 			memcpy(mask, &req->sq_mask,
582 			       sizeof(struct nix_sq_ctx_s));
583 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
584 			memcpy(mask, &req->cq_mask,
585 			       sizeof(struct nix_cq_ctx_s));
586 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
587 			memcpy(mask, &req->rss_mask,
588 			       sizeof(struct nix_rsse_s));
589 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
590 			memcpy(mask, &req->mce_mask,
591 			       sizeof(struct nix_rx_mce_s));
592 		/* Fall through */
593 	case NIX_AQ_INSTOP_INIT:
594 		if (req->ctype == NIX_AQ_CTYPE_RQ)
595 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
596 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
597 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
598 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
599 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
600 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
601 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
602 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
603 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
604 		break;
605 	case NIX_AQ_INSTOP_NOP:
606 	case NIX_AQ_INSTOP_READ:
607 	case NIX_AQ_INSTOP_LOCK:
608 	case NIX_AQ_INSTOP_UNLOCK:
609 		break;
610 	default:
611 		rc = NIX_AF_ERR_AQ_ENQUEUE;
612 		return rc;
613 	}
614 
615 	spin_lock(&aq->lock);
616 
617 	/* Submit the instruction to AQ */
618 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
619 	if (rc) {
620 		spin_unlock(&aq->lock);
621 		return rc;
622 	}
623 
624 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
625 	if (req->op == NIX_AQ_INSTOP_INIT) {
626 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
627 			__set_bit(req->qidx, pfvf->rq_bmap);
628 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
629 			__set_bit(req->qidx, pfvf->sq_bmap);
630 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
631 			__set_bit(req->qidx, pfvf->cq_bmap);
632 	}
633 
634 	if (req->op == NIX_AQ_INSTOP_WRITE) {
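		/* Mirror the HW ENA state into the SW bitmaps: take the written
		 * value where the mask bit is set, else keep the previous
		 * bitmap state.
		 */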
635 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
636 			ena = (req->rq.ena & req->rq_mask.ena) |
637 				(test_bit(req->qidx, pfvf->rq_bmap) &
638 				~req->rq_mask.ena);
639 			if (ena)
640 				__set_bit(req->qidx, pfvf->rq_bmap);
641 			else
642 				__clear_bit(req->qidx, pfvf->rq_bmap);
643 		}
644 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
645 			ena = (req->sq.ena & req->sq_mask.ena) |
646 				(test_bit(req->qidx, pfvf->sq_bmap) &
647 				~req->sq_mask.ena);
648 			if (ena)
649 				__set_bit(req->qidx, pfvf->sq_bmap);
650 			else
651 				__clear_bit(req->qidx, pfvf->sq_bmap);
652 		}
653 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
654 			ena = (req->cq.ena & req->cq_mask.ena) |
655 				(test_bit(req->qidx, pfvf->cq_bmap) &
656 				~req->cq_mask.ena);
657 			if (ena)
658 				__set_bit(req->qidx, pfvf->cq_bmap);
659 			else
660 				__clear_bit(req->qidx, pfvf->cq_bmap);
661 		}
662 	}
663 
664 	if (rsp) {
665 		/* Copy read context into mailbox */
666 		if (req->op == NIX_AQ_INSTOP_READ) {
667 			if (req->ctype == NIX_AQ_CTYPE_RQ)
668 				memcpy(&rsp->rq, ctx,
669 				       sizeof(struct nix_rq_ctx_s));
670 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
671 				memcpy(&rsp->sq, ctx,
672 				       sizeof(struct nix_sq_ctx_s));
673 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
674 				memcpy(&rsp->cq, ctx,
675 				       sizeof(struct nix_cq_ctx_s));
676 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
677 				memcpy(&rsp->rss, ctx,
678 				       sizeof(struct nix_rsse_s));
679 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
680 				memcpy(&rsp->mce, ctx,
681 				       sizeof(struct nix_rx_mce_s));
682 		}
683 	}
684 
685 	spin_unlock(&aq->lock);
686 	return 0;
687 }
688 
689 static const char *nix_get_ctx_name(int ctype)
690 {
691 	switch (ctype) {
692 	case NIX_AQ_CTYPE_CQ:
693 		return "CQ";
694 	case NIX_AQ_CTYPE_SQ:
695 		return "SQ";
696 	case NIX_AQ_CTYPE_RQ:
697 		return "RQ";
698 	case NIX_AQ_CTYPE_RSS:
699 		return "RSS";
700 	}
701 	return "";
702 }
703 
704 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
705 {
706 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
707 	struct nix_aq_enq_req aq_req;
708 	unsigned long *bmap;
709 	int qidx, q_cnt = 0;
710 	int err = 0, rc;
711 
712 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
713 		return NIX_AF_ERR_AQ_ENQUEUE;
714 
715 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
716 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
717 
718 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
719 		aq_req.cq.ena = 0;
720 		aq_req.cq_mask.ena = 1;
721 		q_cnt = pfvf->cq_ctx->qsize;
722 		bmap = pfvf->cq_bmap;
723 	}
724 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
725 		aq_req.sq.ena = 0;
726 		aq_req.sq_mask.ena = 1;
727 		q_cnt = pfvf->sq_ctx->qsize;
728 		bmap = pfvf->sq_bmap;
729 	}
730 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
731 		aq_req.rq.ena = 0;
732 		aq_req.rq_mask.ena = 1;
733 		q_cnt = pfvf->rq_ctx->qsize;
734 		bmap = pfvf->rq_bmap;
735 	}
736 
737 	aq_req.ctype = req->ctype;
738 	aq_req.op = NIX_AQ_INSTOP_WRITE;
739 
740 	for (qidx = 0; qidx < q_cnt; qidx++) {
741 		if (!test_bit(qidx, bmap))
742 			continue;
743 		aq_req.qidx = qidx;
744 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
745 		if (rc) {
746 			err = rc;
747 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
748 				nix_get_ctx_name(req->ctype), qidx);
749 		}
750 	}
751 
752 	return err;
753 }
754 
755 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
756 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
757 {
758 	struct nix_aq_enq_req lock_ctx_req;
759 	int err;
760 
761 	if (req->op != NIX_AQ_INSTOP_INIT)
762 		return 0;
763 
764 	if (req->ctype == NIX_AQ_CTYPE_MCE ||
765 	    req->ctype == NIX_AQ_CTYPE_DYNO)
766 		return 0;
767 
768 	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
769 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
770 	lock_ctx_req.ctype = req->ctype;
771 	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
772 	lock_ctx_req.qidx = req->qidx;
773 	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
774 	if (err)
775 		dev_err(rvu->dev,
776 			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
777 			req->hdr.pcifunc,
778 			nix_get_ctx_name(req->ctype), req->qidx);
779 	return err;
780 }
781 
782 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
783 				struct nix_aq_enq_req *req,
784 				struct nix_aq_enq_rsp *rsp)
785 {
786 	int err;
787 
788 	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
789 	if (!err)
790 		err = nix_lf_hwctx_lockdown(rvu, req);
791 	return err;
792 }
793 #else
794 
795 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
796 				struct nix_aq_enq_req *req,
797 				struct nix_aq_enq_rsp *rsp)
798 {
799 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
800 }
801 #endif
802 
803 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
804 				       struct hwctx_disable_req *req,
805 				       struct msg_rsp *rsp)
806 {
807 	return nix_lf_hwctx_disable(rvu, req);
808 }
809 
810 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
811 				  struct nix_lf_alloc_req *req,
812 				  struct nix_lf_alloc_rsp *rsp)
813 {
814 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
815 	struct rvu_hwinfo *hw = rvu->hw;
816 	u16 pcifunc = req->hdr.pcifunc;
817 	struct rvu_block *block;
818 	struct rvu_pfvf *pfvf;
819 	u64 cfg, ctx_cfg;
820 	int blkaddr;
821 
822 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
823 		return NIX_AF_ERR_PARAM;
824 
825 	if (req->way_mask)
826 		req->way_mask &= 0xFFFF;
827 
828 	pfvf = rvu_get_pfvf(rvu, pcifunc);
829 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
830 	if (!pfvf->nixlf || blkaddr < 0)
831 		return NIX_AF_ERR_AF_LF_INVALID;
832 
833 	block = &hw->block[blkaddr];
834 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
835 	if (nixlf < 0)
836 		return NIX_AF_ERR_AF_LF_INVALID;
837 
838 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
839 	if (req->npa_func) {
840 		/* If default, use 'this' NIXLF's PFFUNC */
841 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
842 			req->npa_func = pcifunc;
843 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
844 			return NIX_AF_INVAL_NPA_PF_FUNC;
845 	}
846 
847 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
848 	if (req->sso_func) {
849 		/* If default, use 'this' NIXLF's PFFUNC */
850 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
851 			req->sso_func = pcifunc;
852 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
853 			return NIX_AF_INVAL_SSO_PF_FUNC;
854 	}
855 
856 	/* If RSS is being enabled, check if the requested config is valid.
857 	 * RSS table size should be a power of two, otherwise
858 	 * RSS_GRP::OFFSET + adder might go beyond that group, or the
859 	 * entire table can't be used.
860 	 */
861 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
862 			    !is_power_of_2(req->rss_sz)))
863 		return NIX_AF_ERR_RSS_SIZE_INVALID;
864 
865 	if (req->rss_sz &&
866 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
867 		return NIX_AF_ERR_RSS_GRPS_INVALID;
868 
869 	/* Reset this NIX LF */
870 	err = rvu_lf_reset(rvu, block, nixlf);
871 	if (err) {
872 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
873 			block->addr - BLKADDR_NIX0, nixlf);
874 		return NIX_AF_ERR_LF_RESET;
875 	}
876 
877 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
878 
879 	/* Alloc NIX RQ HW context memory and config the base */
880 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
881 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
882 	if (err)
883 		goto free_mem;
884 
885 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
886 	if (!pfvf->rq_bmap)
887 		goto free_mem;
888 
889 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
890 		    (u64)pfvf->rq_ctx->iova);
891 
892 	/* Set caching and queue count in HW */
893 	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
894 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
895 
896 	/* Alloc NIX SQ HW context memory and config the base */
897 	hwctx_size = 1UL << (ctx_cfg & 0xF);
898 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
899 	if (err)
900 		goto free_mem;
901 
902 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
903 	if (!pfvf->sq_bmap)
904 		goto free_mem;
905 
906 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
907 		    (u64)pfvf->sq_ctx->iova);
908 
909 	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
910 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
911 
912 	/* Alloc NIX CQ HW context memory and config the base */
913 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
914 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
915 	if (err)
916 		goto free_mem;
917 
918 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
919 	if (!pfvf->cq_bmap)
920 		goto free_mem;
921 
922 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
923 		    (u64)pfvf->cq_ctx->iova);
924 
925 	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
926 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
927 
928 	/* Initialize receive side scaling (RSS) */
929 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
930 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
931 				 req->rss_grps, hwctx_size, req->way_mask);
932 	if (err)
933 		goto free_mem;
934 
935 	/* Alloc memory for CQINT's HW contexts */
936 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
937 	qints = (cfg >> 24) & 0xFFF;
938 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
939 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
940 	if (err)
941 		goto free_mem;
942 
943 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
944 		    (u64)pfvf->cq_ints_ctx->iova);
945 
946 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
947 		    BIT_ULL(36) | req->way_mask << 20);
948 
949 	/* Alloc memory for QINT's HW contexts */
950 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
951 	qints = (cfg >> 12) & 0xFFF;
952 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
953 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
954 	if (err)
955 		goto free_mem;
956 
957 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
958 		    (u64)pfvf->nix_qints_ctx->iova);
959 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
960 		    BIT_ULL(36) | req->way_mask << 20);
961 
962 	/* Set up VLANX TPIDs:
963 	 * use VLAN1 for 802.1Q
964 	 * and VLAN0 for 802.1AD.
965 	 */
966 	cfg = (0x8100ULL << 16) | 0x88A8ULL;
967 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
968 
969 	/* Enable LMTST for this NIX LF */
970 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
971 
972 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	cfg = 0; /* don't carry over the TPID value written above */
973 	if (req->npa_func)
974 		cfg = req->npa_func;
975 	if (req->sso_func)
976 		cfg |= (u64)req->sso_func << 16;
977 
978 	cfg |= (u64)req->xqe_sz << 33;
979 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
980 
981 	/* Config Rx pkt length, csum checks and apad enable/disable */
982 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
983 
984 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
985 	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
986 	if (err)
987 		goto free_mem;
988 
989 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
990 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
991 
992 	goto exit;
993 
994 free_mem:
995 	nix_ctx_free(rvu, pfvf);
996 	rc = -ENOMEM;
997 
998 exit:
999 	/* Set macaddr of this PF/VF */
1000 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1001 
1002 	/* set SQB size info */
1003 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1004 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1005 	rsp->rx_chan_base = pfvf->rx_chan_base;
1006 	rsp->tx_chan_base = pfvf->tx_chan_base;
1007 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1008 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1009 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1010 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1011 	/* Get HW supported stat count */
1012 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1013 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1014 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1015 	/* Get count of CQ IRQs and error IRQs supported per LF */
1016 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1017 	rsp->qints = ((cfg >> 12) & 0xFFF);
1018 	rsp->cints = ((cfg >> 24) & 0xFFF);
1019 	return rc;
1020 }
1021 
1022 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
1023 				 struct msg_rsp *rsp)
1024 {
1025 	struct rvu_hwinfo *hw = rvu->hw;
1026 	u16 pcifunc = req->hdr.pcifunc;
1027 	struct rvu_block *block;
1028 	int blkaddr, nixlf, err;
1029 	struct rvu_pfvf *pfvf;
1030 
1031 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1032 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1033 	if (!pfvf->nixlf || blkaddr < 0)
1034 		return NIX_AF_ERR_AF_LF_INVALID;
1035 
1036 	block = &hw->block[blkaddr];
1037 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1038 	if (nixlf < 0)
1039 		return NIX_AF_ERR_AF_LF_INVALID;
1040 
1041 	nix_interface_deinit(rvu, pcifunc, nixlf);
1042 
1043 	/* Reset this NIX LF */
1044 	err = rvu_lf_reset(rvu, block, nixlf);
1045 	if (err) {
1046 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1047 			block->addr - BLKADDR_NIX0, nixlf);
1048 		return NIX_AF_ERR_LF_RESET;
1049 	}
1050 
1051 	nix_ctx_free(rvu, pfvf);
1052 
1053 	return 0;
1054 }
1055 
1056 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1057 					 struct nix_mark_format_cfg  *req,
1058 					 struct nix_mark_format_cfg_rsp *rsp)
1059 {
1060 	u16 pcifunc = req->hdr.pcifunc;
1061 	struct nix_hw *nix_hw;
1062 	struct rvu_pfvf *pfvf;
1063 	int blkaddr, rc;
1064 	u32 cfg;
1065 
1066 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1067 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1068 	if (!pfvf->nixlf || blkaddr < 0)
1069 		return NIX_AF_ERR_AF_LF_INVALID;
1070 
1071 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1072 	if (!nix_hw)
1073 		return -EINVAL;
1074 
1075 	cfg = (((u32)req->offset & 0x7) << 16) |
1076 	      (((u32)req->y_mask & 0xF) << 12) |
1077 	      (((u32)req->y_val & 0xF) << 8) |
1078 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1079 
1080 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1081 	if (rc < 0) {
1082 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1083 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1084 		return NIX_AF_ERR_MARK_CFG_FAIL;
1085 	}
1086 
1087 	rsp->mark_format_idx = rc;
1088 	return 0;
1089 }
1090 
1091 /* Disable shaping of pkts by a scheduler queue
1092  * at a given scheduler level.
1093  */
1094 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1095 				 int lvl, int schq)
1096 {
1097 	u64  cir_reg = 0, pir_reg = 0;
1098 	u64  cfg;
1099 
1100 	switch (lvl) {
1101 	case NIX_TXSCH_LVL_TL1:
1102 		cir_reg = NIX_AF_TL1X_CIR(schq);
1103 		pir_reg = 0; /* PIR not available at TL1 */
1104 		break;
1105 	case NIX_TXSCH_LVL_TL2:
1106 		cir_reg = NIX_AF_TL2X_CIR(schq);
1107 		pir_reg = NIX_AF_TL2X_PIR(schq);
1108 		break;
1109 	case NIX_TXSCH_LVL_TL3:
1110 		cir_reg = NIX_AF_TL3X_CIR(schq);
1111 		pir_reg = NIX_AF_TL3X_PIR(schq);
1112 		break;
1113 	case NIX_TXSCH_LVL_TL4:
1114 		cir_reg = NIX_AF_TL4X_CIR(schq);
1115 		pir_reg = NIX_AF_TL4X_PIR(schq);
1116 		break;
1117 	}
1118 
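	/* Bit 0 of the CIR/PIR registers is the enable bit; clearing it
	 * disables the committed/peak rate shapers for this queue.
	 */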
1119 	if (!cir_reg)
1120 		return;
1121 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1122 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1123 
1124 	if (!pir_reg)
1125 		return;
1126 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1127 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1128 }
1129 
1130 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1131 				 int lvl, int schq)
1132 {
1133 	struct rvu_hwinfo *hw = rvu->hw;
1134 	int link;
1135 
1136 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
1137 		return;
1138 
1139 	/* Reset TL4's SDP link config */
1140 	if (lvl == NIX_TXSCH_LVL_TL4)
1141 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1142 
1143 	if (lvl != NIX_TXSCH_LVL_TL2)
1144 		return;
1145 
1146 	/* Reset TL2's CGX or LBK link config */
1147 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1148 		rvu_write64(rvu, blkaddr,
1149 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1150 }
1151 
1152 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1153 {
1154 	struct rvu_hwinfo *hw = rvu->hw;
1155 	int pf = rvu_get_pf(pcifunc);
1156 	u8 cgx_id = 0, lmac_id = 0;
1157 
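	/* TX link numbering: CGX LMAC links come first, then the LBK link(s)
	 * used by AF VFs, and finally the SDP link.
	 */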
1158 	if (is_afvf(pcifunc)) { /* LBK links */
1159 		return hw->cgx_links;
1160 	} else if (is_pf_cgxmapped(rvu, pf)) {
1161 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1162 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1163 	}
1164 
1165 	/* SDP link */
1166 	return hw->cgx_links + hw->lbk_links;
1167 }
1168 
1169 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1170 				 int link, int *start, int *end)
1171 {
1172 	struct rvu_hwinfo *hw = rvu->hw;
1173 	int pf = rvu_get_pf(pcifunc);
1174 
1175 	if (is_afvf(pcifunc)) { /* LBK links */
1176 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1177 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1178 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1179 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1180 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1181 	} else { /* SDP link */
1182 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1183 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1184 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1185 	}
1186 }
1187 
1188 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1189 				      struct nix_hw *nix_hw,
1190 				      struct nix_txsch_alloc_req *req)
1191 {
1192 	struct rvu_hwinfo *hw = rvu->hw;
1193 	int schq, req_schq, free_cnt;
1194 	struct nix_txsch *txsch;
1195 	int link, start, end;
1196 
1197 	txsch = &nix_hw->txsch[lvl];
1198 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
1199 
1200 	if (!req_schq)
1201 		return 0;
1202 
1203 	link = nix_get_tx_link(rvu, pcifunc);
1204 
1205 	/* For traffic aggregating scheduler level, one queue is enough */
1206 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1207 		if (req_schq != 1)
1208 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
1209 		return 0;
1210 	}
1211 
1212 	/* Get free SCHQ count and check if request can be accommodated */
1213 	if (hw->cap.nix_fixed_txschq_mapping) {
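		/* In fixed-mapping mode each PF_FUNC gets at most one queue per
		 * level, at a fixed offset (its FUNC number) within the range
		 * of the link it is mapped to.
		 */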
1214 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1215 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1216 		if (end <= txsch->schq.max && schq < end &&
1217 		    !test_bit(schq, txsch->schq.bmap))
1218 			free_cnt = 1;
1219 		else
1220 			free_cnt = 0;
1221 	} else {
1222 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
1223 	}
1224 
1225 	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1226 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1227 
1228 	/* If contiguous queues are needed, check for availability */
1229 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1230 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1231 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1232 
1233 	return 0;
1234 }
1235 
1236 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1237 			    struct nix_txsch_alloc_rsp *rsp,
1238 			    int lvl, int start, int end)
1239 {
1240 	struct rvu_hwinfo *hw = rvu->hw;
1241 	u16 pcifunc = rsp->hdr.pcifunc;
1242 	int idx, schq;
1243 
1244 	/* For traffic aggregating levels, queue alloc is based
1245 	 * on the transmit link to which the PF_FUNC is mapped.
1246 	 */
1247 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1248 		/* A single TL queue is allocated */
1249 		if (rsp->schq_contig[lvl]) {
1250 			rsp->schq_contig[lvl] = 1;
1251 			rsp->schq_contig_list[lvl][0] = start;
1252 		}
1253 
1254 		/* Both contig and non-contig reqs don't make sense here */
1255 		if (rsp->schq_contig[lvl])
1256 			rsp->schq[lvl] = 0;
1257 
1258 		if (rsp->schq[lvl]) {
1259 			rsp->schq[lvl] = 1;
1260 			rsp->schq_list[lvl][0] = start;
1261 		}
1262 		return;
1263 	}
1264 
1265 	/* Adjust the queue request count if HW supports
1266 	 * only one queue per level for each PF_FUNC.
1267 	 */
1268 	if (hw->cap.nix_fixed_txschq_mapping) {
1269 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
1270 		schq = start + idx;
1271 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1272 			rsp->schq_contig[lvl] = 0;
1273 			rsp->schq[lvl] = 0;
1274 			return;
1275 		}
1276 
1277 		if (rsp->schq_contig[lvl]) {
1278 			rsp->schq_contig[lvl] = 1;
1279 			set_bit(schq, txsch->schq.bmap);
1280 			rsp->schq_contig_list[lvl][0] = schq;
1281 			rsp->schq[lvl] = 0;
1282 		} else if (rsp->schq[lvl]) {
1283 			rsp->schq[lvl] = 1;
1284 			set_bit(schq, txsch->schq.bmap);
1285 			rsp->schq_list[lvl][0] = schq;
1286 		}
1287 		return;
1288 	}
1289 
1290 	/* Allocate the requested contiguous queue indices first */
1291 	if (rsp->schq_contig[lvl]) {
1292 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1293 						  txsch->schq.max, start,
1294 						  rsp->schq_contig[lvl], 0);
1295 		if (schq >= end)
1296 			rsp->schq_contig[lvl] = 0;
1297 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1298 			set_bit(schq, txsch->schq.bmap);
1299 			rsp->schq_contig_list[lvl][idx] = schq;
1300 			schq++;
1301 		}
1302 	}
1303 
1304 	/* Allocate non-contiguous queue indices */
1305 	if (rsp->schq[lvl]) {
1306 		idx = 0;
1307 		for (schq = start; schq < end; schq++) {
1308 			if (!test_bit(schq, txsch->schq.bmap)) {
1309 				set_bit(schq, txsch->schq.bmap);
1310 				rsp->schq_list[lvl][idx++] = schq;
1311 			}
1312 			if (idx == rsp->schq[lvl])
1313 				break;
1314 		}
1315 		/* Update how many were allocated */
1316 		rsp->schq[lvl] = idx;
1317 	}
1318 }
1319 
1320 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1321 				     struct nix_txsch_alloc_req *req,
1322 				     struct nix_txsch_alloc_rsp *rsp)
1323 {
1324 	struct rvu_hwinfo *hw = rvu->hw;
1325 	u16 pcifunc = req->hdr.pcifunc;
1326 	int link, blkaddr, rc = 0;
1327 	int lvl, idx, start, end;
1328 	struct nix_txsch *txsch;
1329 	struct rvu_pfvf *pfvf;
1330 	struct nix_hw *nix_hw;
1331 	u32 *pfvf_map;
1332 	u16 schq;
1333 
1334 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1335 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1336 	if (!pfvf->nixlf || blkaddr < 0)
1337 		return NIX_AF_ERR_AF_LF_INVALID;
1338 
1339 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1340 	if (!nix_hw)
1341 		return -EINVAL;
1342 
1343 	mutex_lock(&rvu->rsrc_lock);
1344 
1345 	/* Check if the request is valid as per HW capabilities
1346 	 * and can be accommodated.
1347 	 */
1348 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1349 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1350 		if (rc)
1351 			goto err;
1352 	}
1353 
1354 	/* Allocate requested Tx scheduler queues */
1355 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1356 		txsch = &nix_hw->txsch[lvl];
1357 		pfvf_map = txsch->pfvf_map;
1358 
1359 		if (!req->schq[lvl] && !req->schq_contig[lvl])
1360 			continue;
1361 
1362 		rsp->schq[lvl] = req->schq[lvl];
1363 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
1364 
1365 		link = nix_get_tx_link(rvu, pcifunc);
1366 
1367 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1368 			start = link;
1369 			end = link;
1370 		} else if (hw->cap.nix_fixed_txschq_mapping) {
1371 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1372 		} else {
1373 			start = 0;
1374 			end = txsch->schq.max;
1375 		}
1376 
1377 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1378 
1379 		/* Reset queue config */
1380 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1381 			schq = rsp->schq_contig_list[lvl][idx];
1382 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1383 			    NIX_TXSCHQ_CFG_DONE))
1384 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1385 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1386 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1387 		}
1388 
1389 		for (idx = 0; idx < req->schq[lvl]; idx++) {
1390 			schq = rsp->schq_list[lvl][idx];
1391 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1392 			    NIX_TXSCHQ_CFG_DONE))
1393 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1394 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1395 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1396 		}
1397 	}
1398 
1399 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1400 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1401 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1402 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1403 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1404 	goto exit;
1405 err:
1406 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1407 exit:
1408 	mutex_unlock(&rvu->rsrc_lock);
1409 	return rc;
1410 }
1411 
1412 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1413 			  int smq, u16 pcifunc, int nixlf)
1414 {
1415 	int pf = rvu_get_pf(pcifunc);
1416 	u8 cgx_id = 0, lmac_id = 0;
1417 	int err, restore_tx_en = 0;
1418 	u64 cfg;
1419 
1420 	/* enable cgx tx if disabled */
1421 	if (is_pf_cgxmapped(rvu, pf)) {
1422 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1423 		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1424 						    lmac_id, true);
1425 	}
1426 
1427 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1428 	/* Do SMQ flush and set enqueue xoff */
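	/* BIT(49) triggers the flush and is polled for completion below;
	 * BIT(50) presumably sets enqueue xoff so nothing new lands in the
	 * SMQ while it drains.
	 */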
1429 	cfg |= BIT_ULL(50) | BIT_ULL(49);
1430 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1431 
1432 	/* Disable backpressure from physical link,
1433 	 * otherwise SMQ flush may stall.
1434 	 */
1435 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
1436 
1437 	/* Wait for flush to complete */
1438 	err = rvu_poll_reg(rvu, blkaddr,
1439 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1440 	if (err)
1441 		dev_err(rvu->dev,
1442 			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1443 
1444 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
1445 	/* restore cgx tx state */
1446 	if (restore_tx_en)
1447 		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1448 }
1449 
1450 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1451 {
1452 	int blkaddr, nixlf, lvl, schq, err;
1453 	struct rvu_hwinfo *hw = rvu->hw;
1454 	struct nix_txsch *txsch;
1455 	struct nix_hw *nix_hw;
1456 
1457 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1458 	if (blkaddr < 0)
1459 		return NIX_AF_ERR_AF_LF_INVALID;
1460 
1461 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1462 	if (!nix_hw)
1463 		return -EINVAL;
1464 
1465 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1466 	if (nixlf < 0)
1467 		return NIX_AF_ERR_AF_LF_INVALID;
1468 
1469 	/* Disable TL2/3 queue links before SMQ flush */
1470 	mutex_lock(&rvu->rsrc_lock);
1471 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1472 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1473 			continue;
1474 
1475 		txsch = &nix_hw->txsch[lvl];
1476 		for (schq = 0; schq < txsch->schq.max; schq++) {
1477 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1478 				continue;
1479 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1480 		}
1481 	}
1482 
1483 	/* Flush SMQs */
1484 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1485 	for (schq = 0; schq < txsch->schq.max; schq++) {
1486 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1487 			continue;
1488 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1489 	}
1490 
1491 	/* Now free scheduler queues to free pool */
1492 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1493 		 /* TLs above the aggregation level are shared across a PF
1494 		  * and its VFs, hence skip freeing them.
1495 		  */
1496 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
1497 			continue;
1498 
1499 		txsch = &nix_hw->txsch[lvl];
1500 		for (schq = 0; schq < txsch->schq.max; schq++) {
1501 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1502 				continue;
1503 			rvu_free_rsrc(&txsch->schq, schq);
1504 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1505 		}
1506 	}
1507 	mutex_unlock(&rvu->rsrc_lock);
1508 
1509 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1510 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1511 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1512 	if (err)
1513 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1514 
1515 	return 0;
1516 }
1517 
1518 static int nix_txschq_free_one(struct rvu *rvu,
1519 			       struct nix_txsch_free_req *req)
1520 {
1521 	struct rvu_hwinfo *hw = rvu->hw;
1522 	u16 pcifunc = req->hdr.pcifunc;
1523 	int lvl, schq, nixlf, blkaddr;
1524 	struct nix_txsch *txsch;
1525 	struct nix_hw *nix_hw;
1526 	u32 *pfvf_map;
1527 
1528 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1529 	if (blkaddr < 0)
1530 		return NIX_AF_ERR_AF_LF_INVALID;
1531 
1532 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1533 	if (!nix_hw)
1534 		return -EINVAL;
1535 
1536 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1537 	if (nixlf < 0)
1538 		return NIX_AF_ERR_AF_LF_INVALID;
1539 
1540 	lvl = req->schq_lvl;
1541 	schq = req->schq;
1542 	txsch = &nix_hw->txsch[lvl];
1543 
1544 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1545 		return 0;
1546 
1547 	pfvf_map = txsch->pfvf_map;
1548 	mutex_lock(&rvu->rsrc_lock);
1549 
1550 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1551 		mutex_unlock(&rvu->rsrc_lock);
1552 		goto err;
1553 	}
1554 
1555 	/* Flush if it is an SMQ. The onus of disabling
1556 	 * TL2/3 queue links before the SMQ flush is on the user.
1557 	 */
1558 	if (lvl == NIX_TXSCH_LVL_SMQ)
1559 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1560 
1561 	/* Free the resource */
1562 	rvu_free_rsrc(&txsch->schq, schq);
1563 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1564 	mutex_unlock(&rvu->rsrc_lock);
1565 	return 0;
1566 err:
1567 	return NIX_AF_ERR_TLX_INVALID;
1568 }
1569 
1570 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1571 				    struct nix_txsch_free_req *req,
1572 				    struct msg_rsp *rsp)
1573 {
1574 	if (req->flags & TXSCHQ_FREE_ALL)
1575 		return nix_txschq_free(rvu, req->hdr.pcifunc);
1576 	else
1577 		return nix_txschq_free_one(rvu, req);
1578 }
1579 
1580 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1581 				      int lvl, u64 reg, u64 regval)
1582 {
1583 	u64 regbase = reg & 0xFFFF;
1584 	u16 schq, parent;
1585 
1586 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1587 		return false;
1588 
1589 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1590 	/* Check if this schq belongs to this PF/VF or not */
1591 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1592 		return false;
1593 
1594 	parent = (regval >> 16) & 0x1FF;
1595 	/* Validate MDQ's TL4 parent */
1596 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
1597 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1598 		return false;
1599 
1600 	/* Validate TL4's TL3 parent */
1601 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
1602 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1603 		return false;
1604 
1605 	/* Validate TL3's TL2 parent */
1606 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
1607 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1608 		return false;
1609 
1610 	/* Validate TL2's TL1 parent */
1611 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
1612 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1613 		return false;
1614 
1615 	return true;
1616 }
1617 
1618 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1619 {
1620 	u64 regbase;
1621 
1622 	if (hw->cap.nix_shaping)
1623 		return true;
1624 
1625 	/* If shaping and coloring is not supported, then
1626 	 * *_CIR and *_PIR registers should not be configured.
1627 	 */
1628 	regbase = reg & 0xFFFF;
1629 
1630 	switch (lvl) {
1631 	case NIX_TXSCH_LVL_TL1:
1632 		if (regbase == NIX_AF_TL1X_CIR(0))
1633 			return false;
1634 		break;
1635 	case NIX_TXSCH_LVL_TL2:
1636 		if (regbase == NIX_AF_TL2X_CIR(0) ||
1637 		    regbase == NIX_AF_TL2X_PIR(0))
1638 			return false;
1639 		break;
1640 	case NIX_TXSCH_LVL_TL3:
1641 		if (regbase == NIX_AF_TL3X_CIR(0) ||
1642 		    regbase == NIX_AF_TL3X_PIR(0))
1643 			return false;
1644 		break;
1645 	case NIX_TXSCH_LVL_TL4:
1646 		if (regbase == NIX_AF_TL4X_CIR(0) ||
1647 		    regbase == NIX_AF_TL4X_PIR(0))
1648 			return false;
1649 		break;
1650 	}
1651 	return true;
1652 }
1653 
1654 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1655 				u16 pcifunc, int blkaddr)
1656 {
1657 	u32 *pfvf_map;
1658 	int schq;
1659 
1660 	schq = nix_get_tx_link(rvu, pcifunc);
1661 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1662 	/* Skip if PF has already done the config */
1663 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1664 		return;
1665 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1666 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
1667 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1668 		    TXSCH_TL1_DFLT_RR_QTM);
1669 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1670 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1671 }
1672 
1673 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1674 				    struct nix_txschq_config *req,
1675 				    struct msg_rsp *rsp)
1676 {
1677 	struct rvu_hwinfo *hw = rvu->hw;
1678 	u16 pcifunc = req->hdr.pcifunc;
1679 	u64 reg, regval, schq_regbase;
1680 	struct nix_txsch *txsch;
1681 	struct nix_hw *nix_hw;
1682 	int blkaddr, idx, err;
1683 	int nixlf, schq;
1684 	u32 *pfvf_map;
1685 
1686 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1687 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
1688 		return NIX_AF_INVAL_TXSCHQ_CFG;
1689 
1690 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1691 	if (err)
1692 		return err;
1693 
1694 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1695 	if (!nix_hw)
1696 		return -EINVAL;
1697 
1698 	txsch = &nix_hw->txsch[req->lvl];
1699 	pfvf_map = txsch->pfvf_map;
1700 
1701 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
1702 	    pcifunc & RVU_PFVF_FUNC_MASK) {
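		/* VFs can't program shared aggregation-level queues directly;
		 * program safe defaults on their behalf (TL1 only for now) and
		 * report success.
		 */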
1703 		mutex_lock(&rvu->rsrc_lock);
1704 		if (req->lvl == NIX_TXSCH_LVL_TL1)
1705 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
1706 		mutex_unlock(&rvu->rsrc_lock);
1707 		return 0;
1708 	}
1709 
1710 	for (idx = 0; idx < req->num_regs; idx++) {
1711 		reg = req->reg[idx];
1712 		regval = req->regval[idx];
1713 		schq_regbase = reg & 0xFFFF;
1714 
1715 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
1716 					       txsch->lvl, reg, regval))
1717 			return NIX_AF_INVAL_TXSCHQ_CFG;
1718 
1719 		/* Check if shaping and coloring is supported */
1720 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
1721 			continue;
1722 
1723 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1724 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1725 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1726 					   pcifunc, 0);
1727 			regval &= ~(0x7FULL << 24);
1728 			regval |= ((u64)nixlf << 24);
1729 		}
1730 
1731 		/* Clear 'BP_ENA' config, if it's not allowed */
1732 		if (!hw->cap.nix_tx_link_bp) {
1733 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
1734 			    (schq_regbase & 0xFF00) ==
1735 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
1736 				regval &= ~BIT_ULL(13);
1737 		}
1738 
1739 		/* Mark config as done for TL1 by PF */
1740 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
1741 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
1742 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1743 			mutex_lock(&rvu->rsrc_lock);
1744 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
1745 							NIX_TXSCHQ_CFG_DONE);
1746 			mutex_unlock(&rvu->rsrc_lock);
1747 		}
1748 
1749 		/* SMQ flush is special, hence split the register write:
1750 		 * do the flush first and write the rest of the bits later.
1751 		 */
1752 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1753 		    (regval & BIT_ULL(49))) {
1754 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1755 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1756 			regval &= ~BIT_ULL(49);
1757 		}
1758 		rvu_write64(rvu, blkaddr, reg, regval);
1759 	}
1760 
1761 	return 0;
1762 }
1763 
1764 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
1765 			   struct nix_vtag_config *req)
1766 {
1767 	u64 regval = req->vtag_size;
1768 
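	/* NIX_AF_LFX_RX_VTAG_TYPEX layout as used here: vtag size in the low
	 * bits, BIT(4) = strip, BIT(5) = capture.
	 */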
1769 	if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
1770 		return -EINVAL;
1771 
1772 	if (req->rx.capture_vtag)
1773 		regval |= BIT_ULL(5);
1774 	if (req->rx.strip_vtag)
1775 		regval |= BIT_ULL(4);
1776 
1777 	rvu_write64(rvu, blkaddr,
1778 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
1779 	return 0;
1780 }
1781 
1782 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
1783 				  struct nix_vtag_config *req,
1784 				  struct msg_rsp *rsp)
1785 {
1786 	u16 pcifunc = req->hdr.pcifunc;
1787 	int blkaddr, nixlf, err;
1788 
1789 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1790 	if (err)
1791 		return err;
1792 
1793 	if (req->cfg_type) {
1794 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
1795 		if (err)
1796 			return NIX_AF_ERR_PARAM;
1797 	} else {
1798 		/* TODO: handle tx vtag configuration */
1799 		return 0;
1800 	}
1801 
1802 	return 0;
1803 }
1804 
1805 static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1806 			 u16 pcifunc, int next, bool eol)
1807 {
1808 	struct nix_aq_enq_req aq_req;
1809 	int err;
1810 
1811 	aq_req.hdr.pcifunc = 0;
1812 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
1813 	aq_req.op = op;
1814 	aq_req.qidx = mce;
1815 
1816 	/* Forward bcast pkts to RQ0, RSS not needed */
1817 	aq_req.mce.op = 0;
1818 	aq_req.mce.index = 0;
1819 	aq_req.mce.eol = eol;
1820 	aq_req.mce.pf_func = pcifunc;
1821 	aq_req.mce.next = next;
1822 
1823 	/* All fields valid */
1824 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
1825 
1826 	err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1827 	if (err) {
1828 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1829 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1830 		return err;
1831 	}
1832 	return 0;
1833 }
1834 
1835 static int nix_update_mce_list(struct nix_mce_list *mce_list,
1836 			       u16 pcifunc, bool add)
1837 {
1838 	struct mce *mce, *tail = NULL;
1839 	bool delete = false;
1840 
1841 	/* Scan through the current list */
1842 	hlist_for_each_entry(mce, &mce_list->head, node) {
1843 		/* If already exists, then delete */
1844 		if (mce->pcifunc == pcifunc && !add) {
1845 			delete = true;
1846 			break;
1847 		}
1848 		tail = mce;
1849 	}
1850 
1851 	if (delete) {
1852 		hlist_del(&mce->node);
1853 		kfree(mce);
1854 		mce_list->count--;
1855 		return 0;
1856 	}
1857 
1858 	if (!add)
1859 		return 0;
1860 
1861 	/* Add a new one to the list, at the tail */
1862 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
1863 	if (!mce)
1864 		return -ENOMEM;
1865 	mce->pcifunc = pcifunc;
1866 	if (!tail)
1867 		hlist_add_head(&mce->node, &mce_list->head);
1868 	else
1869 		hlist_add_behind(&mce->node, &tail->node);
1870 	mce_list->count++;
1871 	return 0;
1872 }
1873 
1874 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
1875 {
1876 	int err = 0, idx, next_idx, last_idx;
1877 	struct nix_mce_list *mce_list;
1878 	struct nix_mcast *mcast;
1879 	struct nix_hw *nix_hw;
1880 	struct rvu_pfvf *pfvf;
1881 	struct mce *mce;
1882 	int blkaddr;
1883 
1884 	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
1885 	if (is_afvf(pcifunc))
1886 		return 0;
1887 
1888 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1889 	if (blkaddr < 0)
1890 		return 0;
1891 
1892 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1893 	if (!nix_hw)
1894 		return 0;
1895 
1896 	mcast = &nix_hw->mcast;
1897 
1898 	/* Get this PF/VF func's MCE index */
1899 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1900 	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
1901 
1902 	mce_list = &pfvf->bcast_mce_list;
1903 	if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
1904 		dev_err(rvu->dev,
1905 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
1906 			__func__, idx, mce_list->max,
1907 			pcifunc >> RVU_PFVF_PF_SHIFT);
1908 		return -EINVAL;
1909 	}
1910 
1911 	mutex_lock(&mcast->mce_lock);
1912 
1913 	err = nix_update_mce_list(mce_list, pcifunc, add);
1914 	if (err)
1915 		goto end;
1916 
1917 	/* Disable MCAM entry in NPC */
1918 	if (!mce_list->count) {
1919 		rvu_npc_disable_bcast_entry(rvu, pcifunc);
1920 		goto end;
1921 	}
1922 
1923 	/* Dump the updated list to HW */
1924 	idx = pfvf->bcast_mce_idx;
1925 	last_idx = idx + mce_list->count - 1;
1926 	hlist_for_each_entry(mce, &mce_list->head, node) {
1927 		if (idx > last_idx)
1928 			break;
1929 
1930 		next_idx = idx + 1;
1931 		/* EOL should be set in last MCE */
1932 		err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
1933 				    mce->pcifunc, next_idx,
1934 				    next_idx > last_idx);
1935 		if (err)
1936 			goto end;
1937 		idx++;
1938 	}
1939 
1940 end:
1941 	mutex_unlock(&mcast->mce_lock);
1942 	return err;
1943 }
1944 
1945 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
1946 {
1947 	struct nix_mcast *mcast = &nix_hw->mcast;
1948 	int err, pf, numvfs, idx;
1949 	struct rvu_pfvf *pfvf;
1950 	u16 pcifunc;
1951 	u64 cfg;
1952 
1953 	/* Skip PF0 (i.e. AF) */
1954 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
1955 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1956 		/* If PF is not enabled, nothing to do */
1957 		if (!((cfg >> 20) & 0x01))
1958 			continue;
1959 		/* Get numVFs attached to this PF */
1960 		numvfs = (cfg >> 12) & 0xFF;
1961 
1962 		pfvf = &rvu->pf[pf];
1963 		/* Save the start MCE */
1964 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
1965 
1966 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
1967 
1968 		for (idx = 0; idx < (numvfs + 1); idx++) {
1969 			/* idx-0 is for PF, followed by VFs */
1970 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
1971 			pcifunc |= idx;
1972 			/* Add dummy entries now, so that we don't have to check
1973 			 * whether AQ_OP should be INIT or WRITE later on.
1974 			 * These will be updated when a NIXLF is attached to or
1975 			 * detached from these PF/VFs.
1976 			 */
1977 			err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
1978 					    NIX_AQ_INSTOP_INIT,
1979 					    pcifunc, 0, true);
1980 			if (err)
1981 				return err;
1982 		}
1983 	}
1984 	return 0;
1985 }
1986 
1987 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1988 {
1989 	struct nix_mcast *mcast = &nix_hw->mcast;
1990 	struct rvu_hwinfo *hw = rvu->hw;
1991 	int err, size;
1992 
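	/* Bits <19:16> of NIX_AF_CONST3 encode log2 of the MCE context
	 * size, per the shift and mask used below.
	 */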
1993 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
1994 	size = (1ULL << size);
1995 
1996 	/* Alloc memory for multicast/mirror replication entries */
1997 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
1998 			 (256UL << MC_TBL_SIZE), size);
1999 	if (err)
2000 		return -ENOMEM;
2001 
2002 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2003 		    (u64)mcast->mce_ctx->iova);
2004 
2005 	/* Set max list length equal to max no. of VFs per PF + the PF itself */
2006 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2007 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2008 
2009 	/* Alloc memory for multicast replication buffers */
2010 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2011 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2012 			 (8UL << MC_BUF_CNT), size);
2013 	if (err)
2014 		return -ENOMEM;
2015 
2016 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2017 		    (u64)mcast->mcast_buf->iova);
2018 
2019 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
2020 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2021 
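	/* Value composed below: an enable bit (63), the replay pkind at
	 * bit offset 24, a flag at bit 20 and the buffer count selector
	 * (MC_BUF_CNT) in the low bits.
	 */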
2022 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2023 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
2024 		    BIT_ULL(20) | MC_BUF_CNT);
2025 
2026 	mutex_init(&mcast->mce_lock);
2027 
2028 	return nix_setup_bcast_tables(rvu, nix_hw);
2029 }
2030 
2031 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2032 {
2033 	struct nix_txsch *txsch;
2034 	int err, lvl, schq;
2035 	u64 cfg, reg;
2036 
2037 	/* Get the scheduler queue count of each type and allocate
2038 	 * a bitmap for each, for alloc/free/attach operations.
2039 	 */
2040 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2041 		txsch = &nix_hw->txsch[lvl];
2042 		txsch->lvl = lvl;
2043 		switch (lvl) {
2044 		case NIX_TXSCH_LVL_SMQ:
2045 			reg = NIX_AF_MDQ_CONST;
2046 			break;
2047 		case NIX_TXSCH_LVL_TL4:
2048 			reg = NIX_AF_TL4_CONST;
2049 			break;
2050 		case NIX_TXSCH_LVL_TL3:
2051 			reg = NIX_AF_TL3_CONST;
2052 			break;
2053 		case NIX_TXSCH_LVL_TL2:
2054 			reg = NIX_AF_TL2_CONST;
2055 			break;
2056 		case NIX_TXSCH_LVL_TL1:
2057 			reg = NIX_AF_TL1_CONST;
2058 			break;
2059 		}
2060 		cfg = rvu_read64(rvu, blkaddr, reg);
2061 		txsch->schq.max = cfg & 0xFFFF;
2062 		err = rvu_alloc_bitmap(&txsch->schq);
2063 		if (err)
2064 			return err;
2065 
2066 		/* Allocate memory for scheduler queues to
2067 		 * PF/VF pcifunc mapping info.
2068 		 */
2069 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2070 					       sizeof(u32), GFP_KERNEL);
2071 		if (!txsch->pfvf_map)
2072 			return -ENOMEM;
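		/* Each pfvf_map entry packs the owning PF_FUNC and state
		 * flags (see TXSCH_MAP/TXSCH_SET_FLAG); start with all
		 * queues marked free.
		 */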
2073 		for (schq = 0; schq < txsch->schq.max; schq++)
2074 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2075 	}
2076 	return 0;
2077 }
2078 
2079 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2080 				int blkaddr, u32 cfg)
2081 {
2082 	int fmt_idx;
2083 
2084 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2085 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2086 			return fmt_idx;
2087 	}
2088 	if (fmt_idx >= nix_hw->mark_format.total)
2089 		return -ERANGE;
2090 
2091 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2092 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
2093 	nix_hw->mark_format.in_use++;
2094 	return fmt_idx;
2095 }
2096 
2097 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2098 				    int blkaddr)
2099 {
2100 	u64 cfgs[] = {
2101 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
2102 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
2103 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
2104 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
2105 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
2106 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
2107 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
2108 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
2109 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2110 	};
2111 	int i, rc;
2112 	u64 total;
2113 
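	/* Bits <15:8> of NIX_AF_PSE_CONST give the number of mark formats
	 * supported by HW, per the mask used below.
	 */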
2114 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2115 	nix_hw->mark_format.total = (u8)total;
2116 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2117 					       GFP_KERNEL);
2118 	if (!nix_hw->mark_format.cfg)
2119 		return -ENOMEM;
2120 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2121 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2122 		if (rc < 0)
2123 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2124 				i, rc);
2125 	}
2126 
2127 	return 0;
2128 }
2129 
2130 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2131 				   struct msg_rsp *rsp)
2132 {
2133 	u16 pcifunc = req->hdr.pcifunc;
2134 	int i, nixlf, blkaddr, err;
2135 	u64 stats;
2136 
2137 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2138 	if (err)
2139 		return err;
2140 
2141 	/* Get stats count supported by HW */
2142 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2143 
2144 	/* Reset tx stats */
2145 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2146 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2147 
2148 	/* Reset rx stats */
2149 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2150 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2151 
2152 	return 0;
2153 }
2154 
2155 /* Returns the ALG index to be set into NPC_RX_ACTION */
2156 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2157 {
2158 	int i;
2159 
2160 	/* Scan over existing algo entries to find a match */
2161 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
2162 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2163 			return i;
2164 
2165 	return -ERANGE;
2166 }
2167 
2168 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2169 {
2170 	int idx, nr_field, key_off, field_marker, keyoff_marker;
2171 	int max_key_off, max_bit_pos, group_member;
2172 	struct nix_rx_flowkey_alg *field;
2173 	struct nix_rx_flowkey_alg tmp;
2174 	u32 key_type, valid_key;
2175 
2176 	if (!alg)
2177 		return -EINVAL;
2178 
2179 #define FIELDS_PER_ALG  5
2180 #define MAX_KEY_OFF	40
2181 	/* Clear all fields */
2182 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2183 
2184 	/* Each of the 32 possible flow key algorithm definitions should
2185 	 * fall into above incremental config (except ALG0). Otherwise a
2186 	 * single NPC MCAM entry is not sufficient for supporting RSS.
2187 	 *
2188 	 * If a different definition or combination needed then NPC MCAM
2189 	 * has to be programmed to filter such pkts and its action should
2190 	 * point to this definition to calculate flowtag or hash.
2191 	 *
2192 	 * The `for loop` goes over _all_ protocol fields and the following
2193 	 * variables depict the state machine's forward progress logic.
2194 	 *
2195 	 * keyoff_marker - Enabled when hash byte length needs to be accounted
2196 	 * in field->key_offset update.
2197 	 * field_marker - Enabled when a new field needs to be selected.
2198 	 * group_member - Enabled when protocol is part of a group.
2199 	 */
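	/* A worked example of the accumulation below (not an exhaustive
	 * spec): with flow_cfg = IPV4 | TCP, field 0 covers SIP + DIP
	 * (bytesm1 = 7, i.e. 8 bytes) at key_offset 0 and field 1 covers
	 * sport + dport (bytesm1 = 3, i.e. 4 bytes) at key_offset 8.
	 * IPV4 and IPV6, being mutually exclusive on the wire, share a
	 * key offset; that is what keyoff_marker = false in the IPV4
	 * case achieves.
	 */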
2200 
2201 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
2202 	nr_field = 0; key_off = 0; field_marker = 1;
2203 	field = &tmp; max_bit_pos = fls(flow_cfg);
2204 	for (idx = 0;
2205 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2206 	     key_off < MAX_KEY_OFF; idx++) {
2207 		key_type = BIT(idx);
2208 		valid_key = flow_cfg & key_type;
2209 		/* Found a field marker, reset the field values */
2210 		if (field_marker)
2211 			memset(&tmp, 0, sizeof(tmp));
2212 
2213 		field_marker = true;
2214 		keyoff_marker = true;
2215 		switch (key_type) {
2216 		case NIX_FLOW_KEY_TYPE_PORT:
2217 			field->sel_chan = true;
2218 			/* This should be set to 1, when SEL_CHAN is set */
2219 			field->bytesm1 = 1;
2220 			break;
2221 		case NIX_FLOW_KEY_TYPE_IPV4:
2222 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2223 			field->lid = NPC_LID_LC;
2224 			field->ltype_match = NPC_LT_LC_IP;
2225 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2226 				field->lid = NPC_LID_LG;
2227 				field->ltype_match = NPC_LT_LG_TU_IP;
2228 			}
2229 			field->hdr_offset = 12; /* SIP offset */
2230 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2231 			field->ltype_mask = 0xF; /* Match only IPv4 */
2232 			keyoff_marker = false;
2233 			break;
2234 		case NIX_FLOW_KEY_TYPE_IPV6:
2235 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2236 			field->lid = NPC_LID_LC;
2237 			field->ltype_match = NPC_LT_LC_IP6;
2238 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2239 				field->lid = NPC_LID_LG;
2240 				field->ltype_match = NPC_LT_LG_TU_IP6;
2241 			}
2242 			field->hdr_offset = 8; /* SIP offset */
2243 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2244 			field->ltype_mask = 0xF; /* Match only IPv6 */
2245 			break;
2246 		case NIX_FLOW_KEY_TYPE_TCP:
2247 		case NIX_FLOW_KEY_TYPE_UDP:
2248 		case NIX_FLOW_KEY_TYPE_SCTP:
2249 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
2250 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
2251 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2252 			field->lid = NPC_LID_LD;
2253 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2254 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2255 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2256 				field->lid = NPC_LID_LH;
2257 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2258 
2259 			/* Enum values of outer NPC_LT_LD_* and inner NPC_LT_LH_TU_*
2260 			 * ltypes are the same (checked below), so no need to change
2261 			 * ltype_match, just change the lid for inner protocols.
2262 			 */
2263 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2264 				     (int)NPC_LT_LH_TU_TCP);
2265 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2266 				     (int)NPC_LT_LH_TU_UDP);
2267 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2268 				     (int)NPC_LT_LH_TU_SCTP);
2269 
2270 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2271 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2272 			    valid_key) {
2273 				field->ltype_match |= NPC_LT_LD_TCP;
2274 				group_member = true;
2275 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2276 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2277 				   valid_key) {
2278 				field->ltype_match |= NPC_LT_LD_UDP;
2279 				group_member = true;
2280 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2281 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2282 				   valid_key) {
2283 				field->ltype_match |= NPC_LT_LD_SCTP;
2284 				group_member = true;
2285 			}
2286 			field->ltype_mask = ~field->ltype_match;
2287 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2288 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2289 				/* If any group member is enabled but not the final
2290 				 * one (SCTP), the field must still be committed here
2291 				 */
2292 				if (group_member) {
2293 					valid_key = true;
2294 					group_member = false;
2295 				}
2296 			} else {
2297 				field_marker = false;
2298 				keyoff_marker = false;
2299 			}
2300 			break;
2301 		case NIX_FLOW_KEY_TYPE_NVGRE:
2302 			field->lid = NPC_LID_LD;
2303 			field->hdr_offset = 4; /* VSID offset */
2304 			field->bytesm1 = 2;
2305 			field->ltype_match = NPC_LT_LD_NVGRE;
2306 			field->ltype_mask = 0xF;
2307 			break;
2308 		case NIX_FLOW_KEY_TYPE_VXLAN:
2309 		case NIX_FLOW_KEY_TYPE_GENEVE:
2310 			field->lid = NPC_LID_LE;
2311 			field->bytesm1 = 2;
2312 			field->hdr_offset = 4;
2313 			field->ltype_mask = 0xF;
2314 			field_marker = false;
2315 			keyoff_marker = false;
2316 
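			/* VXLAN and GENEVE share one key field; the field is
			 * finalized (mask set, markers re-enabled) only when
			 * the last group member, GENEVE, is processed below.
			 */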
2317 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2318 				field->ltype_match |= NPC_LT_LE_VXLAN;
2319 				group_member = true;
2320 			}
2321 
2322 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2323 				field->ltype_match |= NPC_LT_LE_GENEVE;
2324 				group_member = true;
2325 			}
2326 
2327 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2328 				if (group_member) {
2329 					field->ltype_mask = ~field->ltype_match;
2330 					field_marker = true;
2331 					keyoff_marker = true;
2332 					valid_key = true;
2333 					group_member = false;
2334 				}
2335 			}
2336 			break;
2337 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2338 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2339 			field->lid = NPC_LID_LA;
2340 			field->ltype_match = NPC_LT_LA_ETHER;
2341 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2342 				field->lid = NPC_LID_LF;
2343 				field->ltype_match = NPC_LT_LF_TU_ETHER;
2344 			}
2345 			field->hdr_offset = 0;
2346 			field->bytesm1 = 5; /* DMAC 6 Byte */
2347 			field->ltype_mask = 0xF;
2348 			break;
2349 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2350 			field->lid = NPC_LID_LC;
2351 			field->hdr_offset = 40; /* IPV6 hdr */
2352 			field->bytesm1 = 0; /* 1 byte ext hdr */
2353 			field->ltype_match = NPC_LT_LC_IP6_EXT;
2354 			field->ltype_mask = 0xF;
2355 			break;
2356 		case NIX_FLOW_KEY_TYPE_GTPU:
2357 			field->lid = NPC_LID_LE;
2358 			field->hdr_offset = 4;
2359 			field->bytesm1 = 3; /* 4 bytes TID */
2360 			field->ltype_match = NPC_LT_LE_GTPU;
2361 			field->ltype_mask = 0xF;
2362 			break;
2363 		}
2364 		field->ena = 1;
2365 
2366 		/* Found a valid flow key type */
2367 		if (valid_key) {
2368 			field->key_offset = key_off;
2369 			memcpy(&alg[nr_field], field, sizeof(*field));
2370 			max_key_off = max(max_key_off, field->bytesm1 + 1);
2371 
2372 			/* Found a field marker, get the next field */
2373 			if (field_marker)
2374 				nr_field++;
2375 		}
2376 
2377 		/* Found a keyoff marker, update the new key_off */
2378 		if (keyoff_marker) {
2379 			key_off += max_key_off;
2380 			max_key_off = 0;
2381 		}
2382 	}
2383 	/* Processed all the flow key types */
2384 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
2385 		return 0;
2386 	else
2387 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
2388 }
2389 
2390 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
2391 {
2392 	u64 field[FIELDS_PER_ALG];
2393 	struct nix_hw *hw;
2394 	int fid, rc;
2395 
2396 	hw = get_nix_hw(rvu->hw, blkaddr);
2397 	if (!hw)
2398 		return -EINVAL;
2399 
2400 	/* No room to add a new flow hash algorithm */
2401 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
2402 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
2403 
2404 	/* Generate algo fields for the given flow_cfg */
2405 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
2406 	if (rc)
2407 		return rc;
2408 
2409 	/* Update ALGX_FIELDX register with generated fields */
2410 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2411 		rvu_write64(rvu, blkaddr,
2412 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
2413 							   fid), field[fid]);
2414 
2415 	/* Store the flow_cfg for further lookup */
2416 	rc = hw->flowkey.in_use;
2417 	hw->flowkey.flowkey[rc] = flow_cfg;
2418 	hw->flowkey.in_use++;
2419 
2420 	return rc;
2421 }
2422 
2423 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
2424 					 struct nix_rss_flowkey_cfg *req,
2425 					 struct nix_rss_flowkey_cfg_rsp *rsp)
2426 {
2427 	u16 pcifunc = req->hdr.pcifunc;
2428 	int alg_idx, nixlf, blkaddr;
2429 	struct nix_hw *nix_hw;
2430 	int err;
2431 
2432 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2433 	if (err)
2434 		return err;
2435 
2436 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2437 	if (!nix_hw)
2438 		return -EINVAL;
2439 
2440 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
2441 	/* Failed to get algo index from the existing list, reserve a new one */
2442 	if (alg_idx < 0) {
2443 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
2444 						  req->flowkey_cfg);
2445 		if (alg_idx < 0)
2446 			return alg_idx;
2447 	}
2448 	rsp->alg_idx = alg_idx;
2449 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
2450 				       alg_idx, req->mcam_index);
2451 	return 0;
2452 }
2453 
2454 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
2455 {
2456 	u32 flowkey_cfg, minkey_cfg;
2457 	int alg, fid, rc;
2458 
2459 	/* Disable all flow key algx fieldx */
2460 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
2461 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2462 			rvu_write64(rvu, blkaddr,
2463 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
2464 				    0);
2465 	}
2466 
2467 	/* IPv4/IPv6 SIP/DIPs */
2468 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
2469 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2470 	if (rc < 0)
2471 		return rc;
2472 
2473 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2474 	minkey_cfg = flowkey_cfg;
2475 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
2476 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2477 	if (rc < 0)
2478 		return rc;
2479 
2480 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2481 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
2482 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2483 	if (rc < 0)
2484 		return rc;
2485 
2486 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2487 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
2488 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2489 	if (rc < 0)
2490 		return rc;
2491 
2492 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
2493 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2494 			NIX_FLOW_KEY_TYPE_UDP;
2495 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2496 	if (rc < 0)
2497 		return rc;
2498 
2499 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2500 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2501 			NIX_FLOW_KEY_TYPE_SCTP;
2502 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2503 	if (rc < 0)
2504 		return rc;
2505 
2506 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2507 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
2508 			NIX_FLOW_KEY_TYPE_SCTP;
2509 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2510 	if (rc < 0)
2511 		return rc;
2512 
2513 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2514 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2515 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
2516 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2517 	if (rc < 0)
2518 		return rc;
2519 
2520 	return 0;
2521 }
2522 
2523 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
2524 				      struct nix_set_mac_addr *req,
2525 				      struct msg_rsp *rsp)
2526 {
2527 	u16 pcifunc = req->hdr.pcifunc;
2528 	int blkaddr, nixlf, err;
2529 	struct rvu_pfvf *pfvf;
2530 
2531 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2532 	if (err)
2533 		return err;
2534 
2535 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2536 
2537 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
2538 
2539 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
2540 				    pfvf->rx_chan_base, req->mac_addr);
2541 
2542 	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2543 
2544 	return 0;
2545 }
2546 
2547 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
2548 				      struct msg_req *req,
2549 				      struct nix_get_mac_addr_rsp *rsp)
2550 {
2551 	u16 pcifunc = req->hdr.pcifunc;
2552 	struct rvu_pfvf *pfvf;
2553 
2554 	if (!is_nixlf_attached(rvu, pcifunc))
2555 		return NIX_AF_ERR_AF_LF_INVALID;
2556 
2557 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2558 
2559 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
2560 
2561 	return 0;
2562 }
2563 
2564 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
2565 				     struct msg_rsp *rsp)
2566 {
2567 	bool allmulti = false, disable_promisc = false;
2568 	u16 pcifunc = req->hdr.pcifunc;
2569 	int blkaddr, nixlf, err;
2570 	struct rvu_pfvf *pfvf;
2571 
2572 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2573 	if (err)
2574 		return err;
2575 
2576 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2577 
2578 	if (req->mode & NIX_RX_MODE_PROMISC)
2579 		allmulti = false;
2580 	else if (req->mode & NIX_RX_MODE_ALLMULTI)
2581 		allmulti = true;
2582 	else
2583 		disable_promisc = true;
2584 
2585 	if (disable_promisc)
2586 		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
2587 	else
2588 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
2589 					      pfvf->rx_chan_base, allmulti);
2590 
2591 	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2592 
2593 	return 0;
2594 }
2595 
2596 static void nix_find_link_frs(struct rvu *rvu,
2597 			      struct nix_frs_cfg *req, u16 pcifunc)
2598 {
2599 	int pf = rvu_get_pf(pcifunc);
2600 	struct rvu_pfvf *pfvf;
2601 	int maxlen, minlen;
2602 	int numvfs, hwvf;
2603 	int vf;
2604 
2605 	/* Update with requester's min/max lengths */
2606 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2607 	pfvf->maxlen = req->maxlen;
2608 	if (req->update_minlen)
2609 		pfvf->minlen = req->minlen;
2610 
2611 	maxlen = req->maxlen;
2612 	minlen = req->update_minlen ? req->minlen : 0;
2613 
2614 	/* Get this PF's numVFs and starting hwvf */
2615 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
2616 
2617 	/* For each VF, compare requested max/minlen */
2618 	for (vf = 0; vf < numvfs; vf++) {
2619 		pfvf =  &rvu->hwvf[hwvf + vf];
2620 		if (pfvf->maxlen > maxlen)
2621 			maxlen = pfvf->maxlen;
2622 		if (req->update_minlen &&
2623 		    pfvf->minlen && pfvf->minlen < minlen)
2624 			minlen = pfvf->minlen;
2625 	}
2626 
2627 	/* Compare requested max/minlen with PF's max/minlen */
2628 	pfvf = &rvu->pf[pf];
2629 	if (pfvf->maxlen > maxlen)
2630 		maxlen = pfvf->maxlen;
2631 	if (req->update_minlen &&
2632 	    pfvf->minlen && pfvf->minlen < minlen)
2633 		minlen = pfvf->minlen;
2634 
2635 	/* Update the request with the max/min across the PF and its VFs */
2636 	req->maxlen = maxlen;
2637 	if (req->update_minlen)
2638 		req->minlen = minlen;
2639 }
2640 
2641 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
2642 				    struct msg_rsp *rsp)
2643 {
2644 	struct rvu_hwinfo *hw = rvu->hw;
2645 	u16 pcifunc = req->hdr.pcifunc;
2646 	int pf = rvu_get_pf(pcifunc);
2647 	int blkaddr, schq, link = -1;
2648 	struct nix_txsch *txsch;
2649 	u64 cfg, lmac_fifo_len;
2650 	struct nix_hw *nix_hw;
2651 	u8 cgx = 0, lmac = 0;
2652 
2653 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2654 	if (blkaddr < 0)
2655 		return NIX_AF_ERR_AF_LF_INVALID;
2656 
2657 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2658 	if (!nix_hw)
2659 		return -EINVAL;
2660 
2661 	if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
2662 		return NIX_AF_ERR_FRS_INVALID;
2663 
2664 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
2665 		return NIX_AF_ERR_FRS_INVALID;
2666 
2667 	/* Check if the requester wants to update SMQs */
2668 	if (!req->update_smq)
2669 		goto rx_frscfg;
2670 
2671 	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
2672 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2673 	mutex_lock(&rvu->rsrc_lock);
2674 	for (schq = 0; schq < txsch->schq.max; schq++) {
2675 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2676 			continue;
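		/* Per the masks used below, max pkt length sits in bits
		 * <23:8> and min length in bits <6:0> of SMQX_CFG.
		 */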
2677 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
2678 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
2679 		if (req->update_minlen)
2680 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
2681 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
2682 	}
2683 	mutex_unlock(&rvu->rsrc_lock);
2684 
2685 rx_frscfg:
2686 	/* Check if config is for SDP link */
2687 	if (req->sdp_link) {
2688 		if (!hw->sdp_links)
2689 			return NIX_AF_ERR_RX_LINK_INVALID;
2690 		link = hw->cgx_links + hw->lbk_links;
2691 		goto linkcfg;
2692 	}
2693 
2694 	/* Check if the request is from CGX mapped RVU PF */
2695 	if (is_pf_cgxmapped(rvu, pf)) {
2696 		/* Get CGX and LMAC to which this PF is mapped and find link */
2697 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
2698 		link = (cgx * hw->lmac_per_cgx) + lmac;
2699 	} else if (pf == 0) {
2700 		/* For VFs of PF0 ingress is LBK port, so config LBK link */
2701 		link = hw->cgx_links;
2702 	}
2703 
2704 	if (link < 0)
2705 		return NIX_AF_ERR_RX_LINK_INVALID;
2706 
2707 	nix_find_link_frs(rvu, req, pcifunc);
2708 
2709 linkcfg:
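	/* Per the masks used below, NIX_AF_RX_LINKX_CFG carries maxlen
	 * in bits <31:16> and minlen in bits <15:0>.
	 */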
2710 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
2711 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
2712 	if (req->update_minlen)
2713 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
2714 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
2715 
2716 	if (req->sdp_link || pf == 0)
2717 		return 0;
2718 
2719 	/* Update transmit credits for CGX links */
2720 	lmac_fifo_len =
2721 		CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
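	/* Credits occupy bits <31:12> and are expressed in 16-byte units,
	 * per the divide and shift below.
	 */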
2722 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
2723 	cfg &= ~(0xFFFFFULL << 12);
2724 	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
2725 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
2726 	return 0;
2727 }
2728 
2729 int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
2730 				      struct msg_rsp *rsp)
2731 {
2732 	struct npc_mcam_alloc_entry_req alloc_req = { };
2733 	struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
2734 	struct npc_mcam_free_entry_req free_req = { };
2735 	u16 pcifunc = req->hdr.pcifunc;
2736 	int blkaddr, nixlf, err;
2737 	struct rvu_pfvf *pfvf;
2738 
2739 	/* LBK VFs do not have a separate MCAM UCAST entry, hence
2740 	 * skip allocating rxvlan for them
2741 	 */
2742 	if (is_afvf(pcifunc))
2743 		return 0;
2744 
2745 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2746 	if (pfvf->rxvlan)
2747 		return 0;
2748 
2749 	/* alloc new mcam entry */
2750 	alloc_req.hdr.pcifunc = pcifunc;
2751 	alloc_req.count = 1;
2752 
2753 	err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
2754 						    &alloc_rsp);
2755 	if (err)
2756 		return err;
2757 
2758 	/* update entry to enable rxvlan offload */
2759 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2760 	if (blkaddr < 0) {
2761 		err = NIX_AF_ERR_AF_LF_INVALID;
2762 		goto free_entry;
2763 	}
2764 
2765 	nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
2766 	if (nixlf < 0) {
2767 		err = NIX_AF_ERR_AF_LF_INVALID;
2768 		goto free_entry;
2769 	}
2770 
2771 	pfvf->rxvlan_index = alloc_rsp.entry_list[0];
2772 	/* all it means is that rxvlan_index is valid */
2773 	pfvf->rxvlan = true;
2774 
2775 	err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2776 	if (err)
2777 		goto free_entry;
2778 
2779 	return 0;
2780 free_entry:
2781 	free_req.hdr.pcifunc = pcifunc;
2782 	free_req.entry = alloc_rsp.entry_list[0];
2783 	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
2784 	pfvf->rxvlan = false;
2785 	return err;
2786 }
2787 
2788 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
2789 				    struct msg_rsp *rsp)
2790 {
2791 	int nixlf, blkaddr, err;
2792 	u64 cfg;
2793 
2794 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
2795 	if (err)
2796 		return err;
2797 
2798 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
2799 	/* Set the interface configuration */
2800 	if (req->len_verify & BIT(0))
2801 		cfg |= BIT_ULL(41);
2802 	else
2803 		cfg &= ~BIT_ULL(41);
2804 
2805 	if (req->len_verify & BIT(1))
2806 		cfg |= BIT_ULL(40);
2807 	else
2808 		cfg &= ~BIT_ULL(40);
2809 
2810 	if (req->csum_verify & BIT(0))
2811 		cfg |= BIT_ULL(37);
2812 	else
2813 		cfg &= ~BIT_ULL(37);
2814 
2815 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
2816 
2817 	return 0;
2818 }
2819 
2820 static void nix_link_config(struct rvu *rvu, int blkaddr)
2821 {
2822 	struct rvu_hwinfo *hw = rvu->hw;
2823 	int cgx, lmac_cnt, slink, link;
2824 	u64 tx_credits;
2825 
2826 	/* Set default min/max packet lengths allowed on NIX Rx links.
2827 	 *
2828 	 * With the HW reset minlen value of 60 bytes, HW would treat ARP
2829 	 * pkts as undersize and report them to SW as error pkts; hence
2830 	 * set it to 40 bytes.
2831 	 */
2832 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
2833 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2834 			    NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2835 	}
2836 
2837 	if (hw->sdp_links) {
2838 		link = hw->cgx_links + hw->lbk_links;
2839 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2840 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2841 	}
2842 
2843 	/* Set credits for Tx links assuming max packet length allowed.
2844 	 * This will be reconfigured based on MTU set for PF/VF.
2845 	 */
2846 	for (cgx = 0; cgx < hw->cgx; cgx++) {
2847 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
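		/* The CGX FIFO is split across the CGX's LMACs; credits
		 * are expressed in 16-byte units, per the divide below.
		 */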
2848 		tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
2849 		/* Enable credits and set credit pkt count to max allowed */
2850 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
2851 		slink = cgx * hw->lmac_per_cgx;
2852 		for (link = slink; link < (slink + lmac_cnt); link++) {
2853 			rvu_write64(rvu, blkaddr,
2854 				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
2855 				    tx_credits);
2856 		}
2857 	}
2858 
2859 	/* Set Tx credits for LBK link */
2860 	slink = hw->cgx_links;
2861 	for (link = slink; link < (slink + hw->lbk_links); link++) {
2862 		tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
2863 		/* Enable credits and set credit pkt count to max allowed */
2864 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
2865 		rvu_write64(rvu, blkaddr,
2866 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
2867 	}
2868 }
2869 
2870 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
2871 {
2872 	int idx, err;
2873 	u64 status;
2874 
2875 	/* Start X2P bus calibration */
2876 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2877 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
2878 	/* Wait for calibration to complete */
2879 	err = rvu_poll_reg(rvu, blkaddr,
2880 			   NIX_AF_STATUS, BIT_ULL(10), false);
2881 	if (err) {
2882 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
2883 		return err;
2884 	}
2885 
2886 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
2887 	/* Check if CGX devices are ready */
2888 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
2889 		/* Skip when cgx port is not available */
2890 		if (!rvu_cgx_pdata(idx, rvu) ||
2891 		    (status & (BIT_ULL(16 + idx))))
2892 			continue;
2893 		dev_err(rvu->dev,
2894 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
2895 		err = -EBUSY;
2896 	}
2897 
2898 	/* Check if LBK is ready */
2899 	if (!(status & BIT_ULL(19))) {
2900 		dev_err(rvu->dev,
2901 			"LBK didn't respond to NIX X2P calibration\n");
2902 		err = -EBUSY;
2903 	}
2904 
2905 	/* Clear 'calibrate_x2p' bit */
2906 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2907 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
2908 	if (err || (status & 0x3FFULL))
2909 		dev_err(rvu->dev,
2910 			"NIX X2P calibration failed, status 0x%llx\n", status);
2911 	if (err)
2912 		return err;
2913 	return 0;
2914 }
2915 
2916 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
2917 {
2918 	u64 cfg;
2919 	int err;
2920 
2921 	/* Set admin queue endianness */
2922 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
2923 #ifdef __BIG_ENDIAN
2924 	cfg |= BIT_ULL(8);
2925 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2926 #else
2927 	cfg &= ~BIT_ULL(8);
2928 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2929 #endif
2930 
2931 	/* Do not bypass NDC cache */
2932 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
2933 	cfg &= ~0x3FFEULL;
2934 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
2935 	/* Disable caching of SQB aka SQEs */
2936 	cfg |= 0x04ULL;
2937 #endif
2938 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
2939 
2940 	/* Result structure can be followed by RQ/SQ/CQ context at
2941 	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
2942 	 * operation type. Alloc sufficient result memory for all operations.
2943 	 */
2944 	err = rvu_aq_alloc(rvu, &block->aq,
2945 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
2946 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
2947 	if (err)
2948 		return err;
2949 
2950 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
2951 	rvu_write64(rvu, block->addr,
2952 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
2953 	return 0;
2954 }
2955 
2956 int rvu_nix_init(struct rvu *rvu)
2957 {
2958 	struct rvu_hwinfo *hw = rvu->hw;
2959 	struct rvu_block *block;
2960 	int blkaddr, err;
2961 	u64 cfg;
2962 
2963 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
2964 	if (blkaddr < 0)
2965 		return 0;
2966 	block = &hw->block[blkaddr];
2967 
2968 	if (is_rvu_96xx_B0(rvu)) {
2969 		/* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
2970 		 * internal state when conditional clocks are turned off.
2971 		 * Hence enable them.
2972 		 */
2973 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2974 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
2975 
2976 		/* Set chan/link to backpressure TL3 instead of TL2 */
2977 		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
2978 
2979 		/* Disable SQ manager's sticky mode operation (set TM6 = 0).
2980 		 * This sticky mode is known to cause SQ stalls when multiple
2981 		 * SQs are mapped to the same SMQ and transmit at the same time.
2982 		 */
2983 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
2984 		cfg &= ~BIT_ULL(15);
2985 		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
2986 	}
2987 
2988 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
2989 	err = nix_calibrate_x2p(rvu, blkaddr);
2990 	if (err)
2991 		return err;
2992 
2993 	/* Set num of links of each type */
2994 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
2995 	hw->cgx = (cfg >> 12) & 0xF;
2996 	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
2997 	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
2998 	hw->lbk_links = 1;
2999 	hw->sdp_links = 1;
3000 
3001 	/* Initialize admin queue */
3002 	err = nix_aq_init(rvu, block);
3003 	if (err)
3004 		return err;
3005 
3006 	/* Restore CINT timer delay to HW reset values */
3007 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3008 
3009 	if (blkaddr == BLKADDR_NIX0) {
3010 		hw->nix0 = devm_kzalloc(rvu->dev,
3011 					sizeof(struct nix_hw), GFP_KERNEL);
3012 		if (!hw->nix0)
3013 			return -ENOMEM;
3014 
3015 		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
3016 		if (err)
3017 			return err;
3018 
3019 		err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
3020 		if (err)
3021 			return err;
3022 
3023 		err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
3024 		if (err)
3025 			return err;
3026 
3027 		/* Configure segmentation offload formats */
3028 		nix_setup_lso(rvu, hw->nix0, blkaddr);
3029 
3030 		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3031 		 * This helps HW protocol checker to identify headers
3032 		 * and validate length and checksums.
3033 		 */
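		/* Each value written below packs (lid << 8) | (ltype << 4)
		 * with a 0x0F ltype mask in the low nibble.
		 */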
3034 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3035 			    (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
3036 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3037 			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
3038 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3039 			    (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
3040 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3041 			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
3042 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3043 			    (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F);
3044 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3045 			    (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
3046 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3047 			    (NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F);
3048 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3049 			    (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
3050 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3051 			    (NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F);
3052 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3053 			    (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
3054 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3055 			    (NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) |
3056 			    0x0F);
3057 
3058 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3059 		if (err)
3060 			return err;
3061 
3062 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3063 		nix_link_config(rvu, blkaddr);
3064 	}
3065 	return 0;
3066 }
3067 
3068 void rvu_nix_freemem(struct rvu *rvu)
3069 {
3070 	struct rvu_hwinfo *hw = rvu->hw;
3071 	struct rvu_block *block;
3072 	struct nix_txsch *txsch;
3073 	struct nix_mcast *mcast;
3074 	struct nix_hw *nix_hw;
3075 	int blkaddr, lvl;
3076 
3077 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
3078 	if (blkaddr < 0)
3079 		return;
3080 
3081 	block = &hw->block[blkaddr];
3082 	rvu_aq_free(rvu, block->aq);
3083 
3084 	if (blkaddr == BLKADDR_NIX0) {
3085 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
3086 		if (!nix_hw)
3087 			return;
3088 
3089 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3090 			txsch = &nix_hw->txsch[lvl];
3091 			kfree(txsch->schq.bmap);
3092 		}
3093 
3094 		mcast = &nix_hw->mcast;
3095 		qmem_free(rvu->dev, mcast->mce_ctx);
3096 		qmem_free(rvu->dev, mcast->mcast_buf);
3097 		mutex_destroy(&mcast->mce_lock);
3098 	}
3099 }
3100 
3101 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3102 				     struct msg_rsp *rsp)
3103 {
3104 	u16 pcifunc = req->hdr.pcifunc;
3105 	int nixlf, err;
3106 
3107 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3108 	if (err)
3109 		return err;
3110 
3111 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3112 
3113 	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3114 }
3115 
3116 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3117 				    struct msg_rsp *rsp)
3118 {
3119 	u16 pcifunc = req->hdr.pcifunc;
3120 	int nixlf, err;
3121 
3122 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3123 	if (err)
3124 		return err;
3125 
3126 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
3127 
3128 	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3129 }
3130 
3131 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3132 {
3133 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3134 	struct hwctx_disable_req ctx_req;
3135 	int err;
3136 
3137 	ctx_req.hdr.pcifunc = pcifunc;
3138 
3139 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3140 	nix_interface_deinit(rvu, pcifunc, nixlf);
3141 	nix_rx_sync(rvu, blkaddr);
3142 	nix_txschq_free(rvu, pcifunc);
3143 
3144 	rvu_cgx_start_stop_io(rvu, pcifunc, false);
3145 
3146 	if (pfvf->sq_ctx) {
3147 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3148 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3149 		if (err)
3150 			dev_err(rvu->dev, "SQ ctx disable failed\n");
3151 	}
3152 
3153 	if (pfvf->rq_ctx) {
3154 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3155 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3156 		if (err)
3157 			dev_err(rvu->dev, "RQ ctx disable failed\n");
3158 	}
3159 
3160 	if (pfvf->cq_ctx) {
3161 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3162 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3163 		if (err)
3164 			dev_err(rvu->dev, "CQ ctx disable failed\n");
3165 	}
3166 
3167 	nix_ctx_free(rvu, pfvf);
3168 }
3169 
3170 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3171 					struct nix_lso_format_cfg *req,
3172 					struct nix_lso_format_cfg_rsp *rsp)
3173 {
3174 	u16 pcifunc = req->hdr.pcifunc;
3175 	struct nix_hw *nix_hw;
3176 	struct rvu_pfvf *pfvf;
3177 	int blkaddr, idx, f;
3178 	u64 reg;
3179 
3180 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3181 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3182 	if (!pfvf->nixlf || blkaddr < 0)
3183 		return NIX_AF_ERR_AF_LF_INVALID;
3184 
3185 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3186 	if (!nix_hw)
3187 		return -EINVAL;
3188 
3189 	/* Find existing matching LSO format, if any */
3190 	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3191 		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3192 			reg = rvu_read64(rvu, blkaddr,
3193 					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3194 			if (req->fields[f] != (reg & req->field_mask))
3195 				break;
3196 		}
3197 
3198 		if (f == NIX_LSO_FIELD_MAX)
3199 			break;
3200 	}
3201 
3202 	if (idx < nix_hw->lso.in_use) {
3203 		/* Match found */
3204 		rsp->lso_format_idx = idx;
3205 		return 0;
3206 	}
3207 
3208 	if (nix_hw->lso.in_use == nix_hw->lso.total)
3209 		return NIX_AF_ERR_LSO_CFG_FAIL;
3210 
3211 	rsp->lso_format_idx = nix_hw->lso.in_use++;
3212 
3213 	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
3214 		rvu_write64(rvu, blkaddr,
3215 			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
3216 			    req->fields[f]);
3217 
3218 	return 0;
3219 }
3220