xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c (revision 442bc81bd344dc52c37d8f80b854cc6da062b2d0)
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "mcs.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_npc_hash.h"

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add);
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr);
static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc);
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof);
static const char *nix_get_ctx_name(int ctype);
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc);

enum mc_tbl_sz {
	MC_TBL_SZ_256,
	MC_TBL_SZ_512,
	MC_TBL_SZ_1K,
	MC_TBL_SZ_2K,
	MC_TBL_SZ_4K,
	MC_TBL_SZ_8K,
	MC_TBL_SZ_16K,
	MC_TBL_SZ_32K,
	MC_TBL_SZ_64K,
};

enum mc_buf_cnt {
	MC_BUF_CNT_8,
	MC_BUF_CNT_16,
	MC_BUF_CNT_32,
	MC_BUF_CNT_64,
	MC_BUF_CNT_128,
	MC_BUF_CNT_256,
	MC_BUF_CNT_512,
	MC_BUF_CNT_1024,
	MC_BUF_CNT_2048,
};

enum nix_makr_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};

/* For now considering MC resources needed for broadcast
 * pkt replication only. i.e 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_2K
#define MC_BUF_CNT	MC_BUF_CNT_1024

#define MC_TX_MAX	2048

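/* Node in a multicast/mirror entry (MCE) list. Each entry identifies a
 * destination PF/VF (pcifunc) along with the RQ/RSS index, channel and
 * destination type used when replicating a packet to it.
 */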
struct mce {
	struct hlist_node	node;
	u32			rq_rss_index;
	u16			pcifunc;
	u16			channel;
	u8			dest_type;
	u8			is_active;
	u8			reserved[2];
};

int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	int i = 0;

	/* If blkaddr is 0, return the first NIX block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
		i++;
	}

	return 0;
}

bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}

int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}

int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}

int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
			struct nix_hw **nix_hw, int *blkaddr)
{
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || *blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
	if (!*nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;
	return 0;
}

static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir)
{
	struct rsrc_bmap *mce_counter;
	int idx;

	if (!mcast)
		return -EINVAL;

	mce_counter = &mcast->mce_counter[dir];
	if (!rvu_rsrc_check_contig(mce_counter, count))
		return -ENOSPC;

	idx = rvu_alloc_rsrc_contig(mce_counter, count);
	return idx;
}

static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir)
{
	struct rsrc_bmap *mce_counter;

	if (!mcast)
		return;

	mce_counter = &mcast->mce_counter[dir];
	rvu_free_rsrc_contig(mce_counter, count, start);
}

struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}
	return NULL;
}

int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
{
	if (hw->cap.nix_multiple_dwrr_mtu)
		return NIX_AF_DWRR_MTUX(smq_link_type);

	if (smq_link_type == SMQ_LINK_TYPE_SDP)
		return NIX_AF_DWRR_SDP_MTU;

	/* Here it's the same reg for RPM and LBK */
	return NIX_AF_DWRR_RPM_MTU;
}

u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
	dwrr_mtu &= 0x1FULL;

	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	switch (dwrr_mtu) {
	case 4:
		return 9728;
	case 5:
		return 10240;
	default:
		return BIT_ULL(dwrr_mtu);
	}
}

u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	if (bytes > BIT_ULL(16))
		return 0;

	switch (bytes) {
	case 9728:
		return 4;
	case 10240:
		return 5;
	default:
		return ilog2(bytes);
	}
}

static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");

	/* SW_SYNC ensures all existing transactions are finished and pkts
	 * are written to LLC/DRAM, queues should be torn down after a
	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
	 * an existing transaction might end after the SW_SYNC operation. To
	 * ensure the operation is fully done, do the SW_SYNC twice.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}

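/* Check that a transmit scheduler queue 'schq' at level 'lvl' is within
 * bounds and is owned by (or, for traffic-aggregating levels, shareable
 * with) the requesting PF/VF.
 */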
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if ((nix_get_tx_link(rvu, map_func) !=
		     nix_get_tx_link(rvu, pcifunc)) &&
		     (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}

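/* Set up RX/TX channels for a NIX LF based on its interface type
 * (CGX, LBK or SDP), install the default unicast/broadcast MCAM entries
 * and add the PF_FUNC to the broadcast replication list.
 */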
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
			      struct nix_lf_alloc_rsp *rsp, bool loop)
{
	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u16 req_chan_base, req_chan_end, req_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct sdp_node_info *sdp_info;
	int pkind, pf, vf, lbkid, vfid;
	u8 cgx_id, lmac_id;
	bool from_vf;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;

		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);
		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If the NIX1 block is present on the silicon then NIXes are
		 * assigned alternately to lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* By default NIX0 is configured to send packets on lbk link 1
		 * (which corresponds to LBK1); the same packet is received on
		 * NIX1 over lbk link 0. If NIX1 sends a packet on lbk link 0
		 * (which corresponds to LBK2) the packet is received on NIX0
		 * lbk link 1.
		 * But if the lbk links for NIX0 and NIX1 are negated, i.e NIX0
		 * transmits and receives on lbk link 0, which corresponds
		 * to the LBK1 block, back to back connectivity between NIX and
		 * LBK can be achieved (which is similar to 96xx)
		 *
		 *			RX		TX
		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
		 */
		if (loop)
			lbkid = !lbkid;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if an odd number of AF VFs are
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = hw->cgx_links + lbkid;
		pfvf->lbkid = lbkid;
		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);

		break;
	case NIX_INTF_TYPE_SDP:
		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
		sdp_info = parent_pf->sdp_info;
		if (!sdp_info) {
			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
			return -EINVAL;
		}
		if (from_vf) {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
				sdp_info->num_pf_rings;
			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
			for (vfid = 0; vfid < vf; vfid++)
				req_chan_base += sdp_info->vf_rings[vfid];
			req_chan_cnt = sdp_info->vf_rings[vf];
			req_chan_end = req_chan_base + req_chan_cnt - 1;
			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
				dev_err(rvu->dev,
					"PF_Func 0x%x: Invalid channel base and count\n",
					pcifunc);
				return -EINVAL;
			}
		} else {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
			req_chan_cnt = sdp_info->num_pf_rings;
		}

		pfvf->rx_chan_base = req_chan_base;
		pfvf->rx_chan_cnt = req_chan_cnt;
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;

		rsp->tx_link = hw->cgx_links + hw->lbk_links;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}
	/* Install MCAM rule matching Ethernet broadcast mac address */
	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);

	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}

static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	/* Disable DMAC filters used */
	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}

#define NIX_BPIDS_PER_LMAC	8
#define NIX_BPIDS_PER_CPT	1
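/* Carve up the backpressure ID space: BPIDs for CGX LMACs, SDP and CPT are
 * statically reserved at the bottom; the remainder forms a free pool
 * (tracked via bp->bpids) that is dynamically allocated, e.g. for LBK
 * channels.
 */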
static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
{
	struct nix_bp *bp = &hw->bp;
	int err, max_bpids;
	u64 cfg;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg);

	/* Reserve the BPIDs for CGX and SDP */
	bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
	bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg);
	bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
			     NIX_BPIDS_PER_CPT;
	bp->bpids.max = max_bpids - bp->free_pool_base;

	err = rvu_alloc_bitmap(&bp->bpids);
	if (err)
		return err;

	bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
				  sizeof(u16), GFP_KERNEL);
	if (!bp->fn_map)
		return -ENOMEM;

	bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
				    sizeof(u8), GFP_KERNEL);
	if (!bp->intf_map)
		return -ENOMEM;

	bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
				   sizeof(u8), GFP_KERNEL);
	if (!bp->ref_cnt)
		return -ENOMEM;

	return 0;
}

void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, bpid, err;
	struct nix_hw *nix_hw;
	struct nix_bp *bp;

	if (!is_lbk_vf(rvu, pcifunc))
		return;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return;

	bp = &nix_hw->bp;

	mutex_lock(&rvu->rsrc_lock);
	for (bpid = 0; bpid < bp->bpids.max; bpid++) {
		if (bp->fn_map[bpid] == pcifunc) {
			bp->ref_cnt[bpid]--;
			if (bp->ref_cnt[bpid])
				continue;
			rvu_free_rsrc(&bp->bpids, bpid);
			bp->fn_map[bpid] = 0;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
}

static u16 nix_get_channel(u16 chan, bool cpt_link)
{
	/* CPT channel for a given link channel is always
	 * assumed to be BIT(11) set in link channel.
	 */
	return cpt_link ? chan | BIT(11) : chan;
}

static int nix_bp_disable(struct rvu *rvu,
			  struct nix_bp_cfg_req *req,
			  struct msg_rsp *rsp, bool cpt_link)
{
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, pf, type, err;
	u16 chan_base, chan, bpid;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	struct nix_bp *bp;
	u16 chan_v;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	if (is_sdp_pfvf(pcifunc))
		type = NIX_INTF_TYPE_SDP;

	if (cpt_link && !rvu->hw->cpt_links)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	bp = &nix_hw->bp;
	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		chan_v = nix_get_channel(chan, cpt_link);
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
			    cfg & ~BIT_ULL(16));

		if (type == NIX_INTF_TYPE_LBK) {
			bpid = cfg & GENMASK(8, 0);
			mutex_lock(&rvu->rsrc_lock);
			rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
			for (bpid = 0; bpid < bp->bpids.max; bpid++) {
				if (bp->fn_map[bpid] == pcifunc) {
					bp->fn_map[bpid] = 0;
					bp->ref_cnt[bpid] = 0;
				}
			}
			mutex_unlock(&rvu->rsrc_lock);
		}
	}
	return 0;
}

int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	return nix_bp_disable(rvu, req, rsp, false);
}

int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
					struct nix_bp_cfg_req *req,
					struct msg_rsp *rsp)
{
	return nix_bp_disable(rvu, req, rsp, true);
}

static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, sdp_chan_base, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	u8 cgx_id, lmac_id;
	struct nix_bp *bp;

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	bp = &nix_hw->bp;

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * LMAC channels and bpids are mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
			return NIX_AF_ERR_INVALID_BPID_REQ;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
			(lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > bp->cgx_bpid_cnt)
			return NIX_AF_ERR_INVALID_BPID;
		break;

	case NIX_INTF_TYPE_LBK:
		/* Alloc bpid from the free pool */
		mutex_lock(&rvu->rsrc_lock);
		bpid = rvu_alloc_rsrc(&bp->bpids);
		if (bpid < 0) {
			mutex_unlock(&rvu->rsrc_lock);
			return NIX_AF_ERR_INVALID_BPID;
		}
		bp->fn_map[bpid] = req->hdr.pcifunc;
		bp->ref_cnt[bpid]++;
		bpid += bp->free_pool_base;
		mutex_unlock(&rvu->rsrc_lock);
		break;
	case NIX_INTF_TYPE_SDP:
		if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
			return NIX_AF_ERR_INVALID_BPID_REQ;

		/* Handle usecase of 2 SDP blocks */
		if (!hw->cap.programmable_chans)
			sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
		else
			sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;

		bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;

		if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
			return NIX_AF_ERR_INVALID_BPID;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}

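/* Enable backpressure on the requested RX channels: program the BPID into
 * NIX_AF_RX_CHANX_CFG[8:0], set the backpressure enable bit (16) and
 * return the channel to BPID mapping in the response.
 */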
static int nix_bp_enable(struct rvu *rvu,
			 struct nix_bp_cfg_req *req,
			 struct nix_bp_cfg_rsp *rsp,
			 bool cpt_link)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u16 chan_v;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		type = NIX_INTF_TYPE_SDP;

	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	if (cpt_link && !rvu->hw->cpt_links)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Failed to enable backpressure\n");
			return -EINVAL;
		}

		chan_v = nix_get_channel(chan, cpt_link);

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
		cfg &= ~GENMASK_ULL(8, 0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel and the bpid assigned to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
					(bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}

int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	return nix_bp_enable(rvu, req, rsp, false);
}

int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
				       struct nix_bp_cfg_req *req,
				       struct nix_bp_cfg_rsp *rsp)
{
	return nix_bp_enable(rvu, req, rsp, true);
}

static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;
}

static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}

static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask, bool tag_lsb_as_adder)
{
	int err, grp, num_indices;
	u64 val;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
			ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);

	if (tag_lsb_as_adder)
		val |= BIT_ULL(5);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}

static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;
	int ret;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
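	/* Busy-wait for completion; udelay(1) * 1000 iterations bounds
	 * the wait to roughly 1ms.
	 */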
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD) {
		/* TODO: Replace this with some error code */
		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
		    result->compcode == NIX_AQ_COMP_LOCKERR ||
		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
			if (ret)
				dev_err(rvu->dev,
					"%s: Not able to unlock cachelines\n", __func__);
		}

		return -EBUSY;
	}

	return 0;
}

static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
			       u16 *smq, u16 *smq_mask)
{
	struct nix_cn10k_aq_enq_req *aq_req;

	if (!is_rvu_otx2(rvu)) {
		aq_req = (struct nix_cn10k_aq_enq_req *)req;
		*smq = aq_req->sq.smq;
		*smq_mask = aq_req->sq_mask.smq;
	} else {
		*smq = req->sq.smq;
		*smq_mask = req->sq_mask.smq;
	}
}

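/* Validate an AQ enqueue request against the PF/VF's allocated queues,
 * build a NIX_AQ_INST_S, submit it under aq->lock and, for READ ops,
 * copy the returned context into the mailbox response.
 */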
static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
				   struct nix_aq_enq_req *req,
				   struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	u16 smq, smq_mask;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
	 * operations done by AF itself.
	 */
	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);

		/* Check if index exceeds MCE list length */
		if (!nix_hw->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_BANDPROF:
		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
					nix_hw, pcifunc))
			rc = NIX_AF_ERR_INVALID_BANDPROF;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq.ena && smq_mask))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(mask, &req->prof_mask,
			       sizeof(struct nix_bandprof_s));
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
				(test_bit(req->qidx, pfvf->rq_bmap) &
				~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
				(test_bit(req->qidx, pfvf->sq_bmap) &
				~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
				(test_bit(req->qidx, pfvf->cq_bmap) &
				~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       sizeof(struct nix_rx_mce_s));
			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
				memcpy(&rsp->prof, ctx,
				       sizeof(struct nix_bandprof_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}

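/* Re-read the CQ context just written via the AQ and compare the fields
 * covered by the write mask (minus those HW itself updates) against the
 * requested values; a mismatch asks the caller to retry the write.
 */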
static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
				 struct nix_aq_enq_req *req, u8 ctype)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	int rc, word;

	if (req->ctype != NIX_AQ_CTYPE_CQ)
		return 0;

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
				 req->hdr.pcifunc, ctype, req->qidx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
			__func__, nix_get_ctx_name(ctype), req->qidx,
			req->hdr.pcifunc);
		return rc;
	}

	/* Make a copy of the original context & mask which are required
	 * for resubmission
	 */
	memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
	memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));

	/* exclude fields which HW can update */
	aq_req.cq_mask.cq_err       = 0;
	aq_req.cq_mask.wrptr        = 0;
	aq_req.cq_mask.tail         = 0;
	aq_req.cq_mask.head	    = 0;
	aq_req.cq_mask.avg_level    = 0;
	aq_req.cq_mask.update_time  = 0;
	aq_req.cq_mask.substream    = 0;

	/* Context mask (cq_mask) holds the mask value of fields which
	 * are changed in an AQ WRITE operation.
	 * for example cq.drop = 0xa;
	 *	       cq_mask.drop = 0xff;
	 * The logic below performs '&' between cq and cq_mask so that
	 * non-updated fields are masked out for the request and response
	 * comparison.
	 */
	for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
	     word++) {
		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
	}

	if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;

	return 0;
}

static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct nix_hw *nix_hw;
	int err, retries = 5;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

retry:
	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);

	/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
	 * As a workaround, perform a CQ context read after each AQ write. If
	 * the read shows the AQ write is not updated, perform the AQ write
	 * again.
	 */
	if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
		err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
		if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
			if (retries--)
				goto retry;
			else
				return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
		}
	}

	return err;
}

static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}

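/* Walk the LF's RQ/SQ/CQ enable bitmaps and issue an AQ WRITE clearing
 * the 'ena' bit (and, for CQs, backpressure) of every enabled context.
 */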
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
		}
	}

	return err;
}

#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	struct nix_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NIX_AQ_INSTOP_INIT)
		return 0;

	if (req->ctype == NIX_AQ_CTYPE_MCE ||
	    req->ctype == NIX_AQ_CTYPE_DYNO)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
	lock_ctx_req.qidx = req->qidx;
	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
			req->hdr.pcifunc,
			nix_get_ctx_name(req->ctype), req->qidx);
	return err;
}

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif

/* CN10K mbox handler */
int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
				      struct nix_cn10k_aq_enq_req *req,
				      struct nix_cn10k_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
				  (struct nix_aq_enq_rsp *)rsp);
}

int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}

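/* Allocate HW context memory for the LF's RQs/SQs/CQs, RSS tables and
 * Q/CQ interrupts, configure the interface channels and return the
 * hardware limits and defaults in the mailbox response.
 */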
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, intf, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
	if (req->npa_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
			req->npa_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
			return NIX_AF_INVAL_NPA_PF_FUNC;
	}

	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
	if (req->sso_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
			req->sso_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
			return NIX_AF_INVAL_SSO_PF_FUNC;
	}

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask,
				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Setup VLANX TPIDs.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	if (req->npa_func)
		cfg = req->npa_func;
	if (req->sso_func)
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable/disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	/* Configure pkind for TX parse config */
	cfg = NPC_TX_DEF_PKIND;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);

	if (is_rep_dev(rvu, pcifunc)) {
		pfvf->tx_chan_base = RVU_SWITCH_LBK_CHAN;
		pfvf->tx_chan_cnt = 1;
		goto exit;
	}

	intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		intf = NIX_INTF_TYPE_SDP;

	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	/* Configure RX VTAG Type 7 (strip) for vf vlan */
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
		    VTAGSIZE_T4 | VTAG_STRIP);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);
	rsp->cgx_links = hw->cgx_links;
	rsp->lbk_links = hw->lbk_links;
	rsp->sdp_links = hw->sdp_links;

	return rc;
}

int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (is_rep_dev(rvu, pcifunc))
		goto free_lf;

	if (req->flags & NIX_LF_DISABLE_FLOWS)
		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	else
		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);

	/* Free any tx vtag def entries used by this NIX LF */
	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
		nix_free_tx_vtag_entries(rvu, pcifunc);

	nix_interface_deinit(rvu, pcifunc, nixlf);

free_lf:
	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}

int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, rc;
	u32 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
1785 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1786 	if (!pfvf->nixlf || blkaddr < 0)
1787 		return NIX_AF_ERR_AF_LF_INVALID;
1788 
1789 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1790 	if (!nix_hw)
1791 		return NIX_AF_ERR_INVALID_NIXBLK;
1792 
1793 	cfg = (((u32)req->offset & 0x7) << 16) |
1794 	      (((u32)req->y_mask & 0xF) << 12) |
1795 	      (((u32)req->y_val & 0xF) << 8) |
1796 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1797 
1798 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1799 	if (rc < 0) {
1800 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1801 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1802 		return NIX_AF_ERR_MARK_CFG_FAIL;
1803 	}
1804 
1805 	rsp->mark_format_idx = rc;
1806 	return 0;
1807 }
1808 
1809 /* Handle shaper update specially for a few silicon revisions */
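/* In brief (a summary of the code below, not of errata documentation):
 * if the enable bit is not toggling, the rate register is written
 * directly. On disable, SW_XOFF is asserted around clearing the rate
 * register. On enable, SW_XOFF is asserted and MD_DEBUG0 is polled
 * until in-flight meta-descriptors drain before the new rate is
 * committed.
 */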
1810 static bool
handle_txschq_shaper_update(struct rvu * rvu,int blkaddr,int nixlf,int lvl,u64 reg,u64 regval)1811 handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
1812 			    int lvl, u64 reg, u64 regval)
1813 {
1814 	u64 regbase, oldval, sw_xoff = 0;
1815 	u64 dbgval, md_debug0 = 0;
1816 	unsigned long poll_tmo;
1817 	bool rate_reg = false;
1818 	u32 schq;
1819 
1820 	regbase = reg & 0xFFFF;
1821 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1822 
1823 	/* Check for rate register */
1824 	switch (lvl) {
1825 	case NIX_TXSCH_LVL_TL1:
1826 		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
1827 		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
1828 
1829 		rate_reg = (regbase == NIX_AF_TL1X_CIR(0));
1830 		break;
1831 	case NIX_TXSCH_LVL_TL2:
1832 		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
1833 		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
1834 
1835 		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
1836 			    regbase == NIX_AF_TL2X_PIR(0));
1837 		break;
1838 	case NIX_TXSCH_LVL_TL3:
1839 		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
1840 		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
1841 
1842 		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
1843 			    regbase == NIX_AF_TL3X_PIR(0));
1844 		break;
1845 	case NIX_TXSCH_LVL_TL4:
1846 		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
1847 		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
1848 
1849 		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
1850 			    regbase == NIX_AF_TL4X_PIR(0));
1851 		break;
1852 	case NIX_TXSCH_LVL_MDQ:
1853 		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
1854 		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
1855 			    regbase == NIX_AF_MDQX_PIR(0));
1856 		break;
1857 	}
1858 
1859 	if (!rate_reg)
1860 		return false;
1861 
1862 	/* Nothing special to do when state is not toggled */
1863 	oldval = rvu_read64(rvu, blkaddr, reg);
1864 	if ((oldval & 0x1) == (regval & 0x1)) {
1865 		rvu_write64(rvu, blkaddr, reg, regval);
1866 		return true;
1867 	}
1868 
1869 	/* PIR/CIR disable */
1870 	if (!(regval & 0x1)) {
1871 		rvu_write64(rvu, blkaddr, sw_xoff, 1);
1872 		rvu_write64(rvu, blkaddr, reg, 0);
1873 		udelay(4);
1874 		rvu_write64(rvu, blkaddr, sw_xoff, 0);
1875 		return true;
1876 	}
1877 
1878 	/* PIR/CIR enable */
1879 	rvu_write64(rvu, blkaddr, sw_xoff, 1);
1880 	if (md_debug0) {
1881 		poll_tmo = jiffies + usecs_to_jiffies(10000);
1882 		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
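		/* VLD and C_CON are MD_DEBUG0 fields tracking an
		 * in-flight meta-descriptor; bit positions per the
		 * comment above, exact semantics assumed.
		 */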
1883 		do {
1884 			if (time_after(jiffies, poll_tmo)) {
1885 				dev_err(rvu->dev,
1886 					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
1887 					nixlf, schq, lvl);
1888 				goto exit;
1889 			}
1890 			usleep_range(1, 5);
1891 			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
1892 		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
1893 	}
1894 	rvu_write64(rvu, blkaddr, reg, regval);
1895 exit:
1896 	rvu_write64(rvu, blkaddr, sw_xoff, 0);
1897 	return true;
1898 }
1899 
nix_reset_tx_schedule(struct rvu * rvu,int blkaddr,int lvl,int schq)1900 static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
1901 				  int lvl, int schq)
1902 {
1903 	u64 tlx_parent = 0, tlx_schedule = 0;
1904 
1905 	switch (lvl) {
1906 	case NIX_TXSCH_LVL_TL2:
1907 		tlx_parent   = NIX_AF_TL2X_PARENT(schq);
1908 		tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
1909 		break;
1910 	case NIX_TXSCH_LVL_TL3:
1911 		tlx_parent   = NIX_AF_TL3X_PARENT(schq);
1912 		tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
1913 		break;
1914 	case NIX_TXSCH_LVL_TL4:
1915 		tlx_parent   = NIX_AF_TL4X_PARENT(schq);
1916 		tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
1917 		break;
1918 	case NIX_TXSCH_LVL_MDQ:
1919 		/* no need to reset SMQ_CFG as HW clears this CSR
1920 		 * on SMQ flush
1921 		 */
1922 		tlx_parent   = NIX_AF_MDQX_PARENT(schq);
1923 		tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
1924 		break;
1925 	default:
1926 		return;
1927 	}
1928 
1929 	if (tlx_parent)
1930 		rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
1931 
1932 	if (tlx_schedule)
1933 		rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
1934 }
1935 
1936 /* Disable shaping of pkts by a scheduler queue
1937  * at a given scheduler level.
1938  */
nix_reset_tx_shaping(struct rvu * rvu,int blkaddr,int nixlf,int lvl,int schq)1939 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1940 				 int nixlf, int lvl, int schq)
1941 {
1942 	struct rvu_hwinfo *hw = rvu->hw;
1943 	u64  cir_reg = 0, pir_reg = 0;
1944 	u64  cfg;
1945 
1946 	switch (lvl) {
1947 	case NIX_TXSCH_LVL_TL1:
1948 		cir_reg = NIX_AF_TL1X_CIR(schq);
1949 		pir_reg = 0; /* PIR not available at TL1 */
1950 		break;
1951 	case NIX_TXSCH_LVL_TL2:
1952 		cir_reg = NIX_AF_TL2X_CIR(schq);
1953 		pir_reg = NIX_AF_TL2X_PIR(schq);
1954 		break;
1955 	case NIX_TXSCH_LVL_TL3:
1956 		cir_reg = NIX_AF_TL3X_CIR(schq);
1957 		pir_reg = NIX_AF_TL3X_PIR(schq);
1958 		break;
1959 	case NIX_TXSCH_LVL_TL4:
1960 		cir_reg = NIX_AF_TL4X_CIR(schq);
1961 		pir_reg = NIX_AF_TL4X_PIR(schq);
1962 		break;
1963 	case NIX_TXSCH_LVL_MDQ:
1964 		cir_reg = NIX_AF_MDQX_CIR(schq);
1965 		pir_reg = NIX_AF_MDQX_PIR(schq);
1966 		break;
1967 	}
1968 
1969 	/* Shaper state toggle needs wait/poll */
1970 	if (hw->cap.nix_shaper_toggle_wait) {
1971 		if (cir_reg)
1972 			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1973 						    lvl, cir_reg, 0);
1974 		if (pir_reg)
1975 			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1976 						    lvl, pir_reg, 0);
1977 		return;
1978 	}
1979 
1980 	if (!cir_reg)
1981 		return;
1982 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1983 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1984 
1985 	if (!pir_reg)
1986 		return;
1987 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1988 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1989 }
1990 
nix_reset_tx_linkcfg(struct rvu * rvu,int blkaddr,int lvl,int schq)1991 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1992 				 int lvl, int schq)
1993 {
1994 	struct rvu_hwinfo *hw = rvu->hw;
1995 	int link_level;
1996 	int link;
1997 
1998 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
1999 		return;
2000 
2001 	/* Reset TL4's SDP link config */
2002 	if (lvl == NIX_TXSCH_LVL_TL4)
2003 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
2004 
2005 	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2006 			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2007 	if (lvl != link_level)
2008 		return;
2009 
2010 	/* Reset TL2's CGX or LBK link config */
2011 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
2012 		rvu_write64(rvu, blkaddr,
2013 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
2014 }
2015 
nix_clear_tx_xoff(struct rvu * rvu,int blkaddr,int lvl,int schq)2016 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
2017 			      int lvl, int schq)
2018 {
2019 	struct rvu_hwinfo *hw = rvu->hw;
2020 	u64 reg;
2021 
2022 	/* Skip this if shaping is not supported */
2023 	if (!hw->cap.nix_shaping)
2024 		return;
2025 
2026 	/* Clear level specific SW_XOFF */
2027 	switch (lvl) {
2028 	case NIX_TXSCH_LVL_TL1:
2029 		reg = NIX_AF_TL1X_SW_XOFF(schq);
2030 		break;
2031 	case NIX_TXSCH_LVL_TL2:
2032 		reg = NIX_AF_TL2X_SW_XOFF(schq);
2033 		break;
2034 	case NIX_TXSCH_LVL_TL3:
2035 		reg = NIX_AF_TL3X_SW_XOFF(schq);
2036 		break;
2037 	case NIX_TXSCH_LVL_TL4:
2038 		reg = NIX_AF_TL4X_SW_XOFF(schq);
2039 		break;
2040 	case NIX_TXSCH_LVL_MDQ:
2041 		reg = NIX_AF_MDQX_SW_XOFF(schq);
2042 		break;
2043 	default:
2044 		return;
2045 	}
2046 
2047 	rvu_write64(rvu, blkaddr, reg, 0x0);
2048 }
2049 
nix_get_tx_link(struct rvu * rvu,u16 pcifunc)2050 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
2051 {
2052 	struct rvu_hwinfo *hw = rvu->hw;
2053 	int pf = rvu_get_pf(pcifunc);
2054 	u8 cgx_id = 0, lmac_id = 0;
2055 
2056 	if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */
2057 		return hw->cgx_links;
2058 	} else if (is_pf_cgxmapped(rvu, pf)) {
2059 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2060 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
2061 	}
2062 
2063 	/* SDP link */
2064 	return hw->cgx_links + hw->lbk_links;
2065 }
2066 
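/* Illustration with hypothetical capability values (say
 * nix_txsch_per_cgx_lmac = 16, nix_txsch_per_lbk_lmac = 8): link 2
 * would get *start = 32; a CGX link then owns [32, 48) while an LBK
 * link owns [32, 40); the SDP window starts after all CGX and LBK
 * windows.
 */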
nix_get_txschq_range(struct rvu * rvu,u16 pcifunc,int link,int * start,int * end)2067 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
2068 				 int link, int *start, int *end)
2069 {
2070 	struct rvu_hwinfo *hw = rvu->hw;
2071 	int pf = rvu_get_pf(pcifunc);
2072 
2073 	/* LBK links */
2074 	if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) {
2075 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
2076 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
2077 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
2078 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
2079 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
2080 	} else { /* SDP link */
2081 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
2082 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
2083 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
2084 	}
2085 }
2086 
nix_check_txschq_alloc_req(struct rvu * rvu,int lvl,u16 pcifunc,struct nix_hw * nix_hw,struct nix_txsch_alloc_req * req)2087 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
2088 				      struct nix_hw *nix_hw,
2089 				      struct nix_txsch_alloc_req *req)
2090 {
2091 	struct rvu_hwinfo *hw = rvu->hw;
2092 	int schq, req_schq, free_cnt;
2093 	struct nix_txsch *txsch;
2094 	int link, start, end;
2095 
2096 	txsch = &nix_hw->txsch[lvl];
2097 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
2098 
2099 	if (!req_schq)
2100 		return 0;
2101 
2102 	link = nix_get_tx_link(rvu, pcifunc);
2103 
2104 	/* For traffic aggregating scheduler level, one queue is enough */
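	/* Queues at and above the aggregation level map 1:1 to transmit
	 * links (see nix_get_tx_link()), hence exactly one per request.
	 */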
2105 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2106 		if (req_schq != 1)
2107 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
2108 		return 0;
2109 	}
2110 
2111 	/* Get free SCHQ count and check if request can be accommodated */
2112 	if (hw->cap.nix_fixed_txschq_mapping) {
2113 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
2114 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
2115 		if (end <= txsch->schq.max && schq < end &&
2116 		    !test_bit(schq, txsch->schq.bmap))
2117 			free_cnt = 1;
2118 		else
2119 			free_cnt = 0;
2120 	} else {
2121 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
2122 	}
2123 
2124 	if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
2125 	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
2126 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
2127 
2128 	/* If contiguous queues are needed, check for availability */
2129 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
2130 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
2131 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
2132 
2133 	return 0;
2134 }
2135 
nix_txsch_alloc(struct rvu * rvu,struct nix_txsch * txsch,struct nix_txsch_alloc_rsp * rsp,int lvl,int start,int end)2136 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
2137 			    struct nix_txsch_alloc_rsp *rsp,
2138 			    int lvl, int start, int end)
2139 {
2140 	struct rvu_hwinfo *hw = rvu->hw;
2141 	u16 pcifunc = rsp->hdr.pcifunc;
2142 	int idx, schq;
2143 
2144 	/* For traffic aggregating levels, queue allocation is based
2145 	 * on the transmit link to which the PF_FUNC is mapped.
2146 	 */
2147 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2148 		/* A single TL queue is allocated */
2149 		if (rsp->schq_contig[lvl]) {
2150 			rsp->schq_contig[lvl] = 1;
2151 			rsp->schq_contig_list[lvl][0] = start;
2152 		}
2153 
2154 		/* Requesting both contig and non-contig queues makes no sense here */
2155 		if (rsp->schq_contig[lvl])
2156 			rsp->schq[lvl] = 0;
2157 
2158 		if (rsp->schq[lvl]) {
2159 			rsp->schq[lvl] = 1;
2160 			rsp->schq_list[lvl][0] = start;
2161 		}
2162 		return;
2163 	}
2164 
2165 	/* Adjust the queue request count if HW supports
2166 	 * only one queue per level configuration.
2167 	 */
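	/* With fixed mapping each function owns at most the one queue at
	 * (start + FUNC), e.g. VF3 can only be given start + 3.
	 */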
2168 	if (hw->cap.nix_fixed_txschq_mapping) {
2169 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
2170 		schq = start + idx;
2171 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
2172 			rsp->schq_contig[lvl] = 0;
2173 			rsp->schq[lvl] = 0;
2174 			return;
2175 		}
2176 
2177 		if (rsp->schq_contig[lvl]) {
2178 			rsp->schq_contig[lvl] = 1;
2179 			set_bit(schq, txsch->schq.bmap);
2180 			rsp->schq_contig_list[lvl][0] = schq;
2181 			rsp->schq[lvl] = 0;
2182 		} else if (rsp->schq[lvl]) {
2183 			rsp->schq[lvl] = 1;
2184 			set_bit(schq, txsch->schq.bmap);
2185 			rsp->schq_list[lvl][0] = schq;
2186 		}
2187 		return;
2188 	}
2189 
2190 	/* Allocate the requested contiguous queue indices first */
2191 	if (rsp->schq_contig[lvl]) {
2192 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
2193 						  txsch->schq.max, start,
2194 						  rsp->schq_contig[lvl], 0);
2195 		if (schq >= end)
2196 			rsp->schq_contig[lvl] = 0;
2197 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
2198 			set_bit(schq, txsch->schq.bmap);
2199 			rsp->schq_contig_list[lvl][idx] = schq;
2200 			schq++;
2201 		}
2202 	}
2203 
2204 	/* Allocate non-contiguous queue indices */
2205 	if (rsp->schq[lvl]) {
2206 		idx = 0;
2207 		for (schq = start; schq < end; schq++) {
2208 			if (!test_bit(schq, txsch->schq.bmap)) {
2209 				set_bit(schq, txsch->schq.bmap);
2210 				rsp->schq_list[lvl][idx++] = schq;
2211 			}
2212 			if (idx == rsp->schq[lvl])
2213 				break;
2214 		}
2215 		/* Update how many were allocated */
2216 		rsp->schq[lvl] = idx;
2217 	}
2218 }
2219 
rvu_mbox_handler_nix_txsch_alloc(struct rvu * rvu,struct nix_txsch_alloc_req * req,struct nix_txsch_alloc_rsp * rsp)2220 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
2221 				     struct nix_txsch_alloc_req *req,
2222 				     struct nix_txsch_alloc_rsp *rsp)
2223 {
2224 	struct rvu_hwinfo *hw = rvu->hw;
2225 	u16 pcifunc = req->hdr.pcifunc;
2226 	int link, blkaddr, rc = 0;
2227 	int lvl, idx, start, end;
2228 	struct nix_txsch *txsch;
2229 	struct nix_hw *nix_hw;
2230 	u32 *pfvf_map;
2231 	int nixlf;
2232 	u16 schq;
2233 
2234 	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2235 	if (rc)
2236 		return rc;
2237 
2238 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2239 	if (!nix_hw)
2240 		return NIX_AF_ERR_INVALID_NIXBLK;
2241 
2242 	mutex_lock(&rvu->rsrc_lock);
2243 
2244 	/* Check if request is valid as per HW capabilities
2245 	 * and can be accommodated.
2246 	 */
2247 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2248 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
2249 		if (rc)
2250 			goto err;
2251 	}
2252 
2253 	/* Allocate requested Tx scheduler queues */
2254 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2255 		txsch = &nix_hw->txsch[lvl];
2256 		pfvf_map = txsch->pfvf_map;
2257 
2258 		if (!req->schq[lvl] && !req->schq_contig[lvl])
2259 			continue;
2260 
2261 		rsp->schq[lvl] = req->schq[lvl];
2262 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
2263 
2264 		link = nix_get_tx_link(rvu, pcifunc);
2265 
2266 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2267 			start = link;
2268 			end = link;
2269 		} else if (hw->cap.nix_fixed_txschq_mapping) {
2270 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
2271 		} else {
2272 			start = 0;
2273 			end = txsch->schq.max;
2274 		}
2275 
2276 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
2277 
2278 		/* Reset queue config */
2279 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
2280 			schq = rsp->schq_contig_list[lvl][idx];
2281 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2282 			    NIX_TXSCHQ_CFG_DONE))
2283 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2284 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2285 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2286 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2287 		}
2288 
2289 		for (idx = 0; idx < req->schq[lvl]; idx++) {
2290 			schq = rsp->schq_list[lvl][idx];
2291 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2292 			    NIX_TXSCHQ_CFG_DONE))
2293 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2294 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2295 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2296 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2297 		}
2298 	}
2299 
2300 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
2301 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
2302 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
2303 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2304 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2305 	goto exit;
2306 err:
2307 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
2308 exit:
2309 	mutex_unlock(&rvu->rsrc_lock);
2310 	return rc;
2311 }
2312 
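/* Walk the tree from the SMQ up to TL1 by following each level's
 * PARENT register, recording the schq and the CIR/PIR values at every
 * level so the rates can be restored once the flush completes.
 */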
nix_smq_flush_fill_ctx(struct rvu * rvu,int blkaddr,int smq,struct nix_smq_flush_ctx * smq_flush_ctx)2313 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
2314 				   struct nix_smq_flush_ctx *smq_flush_ctx)
2315 {
2316 	struct nix_smq_tree_ctx *smq_tree_ctx;
2317 	u64 parent_off, regval;
2318 	u16 schq;
2319 	int lvl;
2320 
2321 	smq_flush_ctx->smq = smq;
2322 
2323 	schq = smq;
2324 	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2325 		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2326 		smq_tree_ctx->schq = schq;
2327 		if (lvl == NIX_TXSCH_LVL_TL1) {
2328 			smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
2329 			smq_tree_ctx->pir_off = 0;
2330 			smq_tree_ctx->pir_val = 0;
2331 			parent_off = 0;
2332 		} else if (lvl == NIX_TXSCH_LVL_TL2) {
2333 			smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
2334 			smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
2335 			parent_off = NIX_AF_TL2X_PARENT(schq);
2336 		} else if (lvl == NIX_TXSCH_LVL_TL3) {
2337 			smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
2338 			smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
2339 			parent_off = NIX_AF_TL3X_PARENT(schq);
2340 		} else if (lvl == NIX_TXSCH_LVL_TL4) {
2341 			smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
2342 			smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
2343 			parent_off = NIX_AF_TL4X_PARENT(schq);
2344 		} else if (lvl == NIX_TXSCH_LVL_MDQ) {
2345 			smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
2346 			smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
2347 			parent_off = NIX_AF_MDQX_PARENT(schq);
2348 		}
2349 		/* save cir/pir register values */
2350 		smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
2351 		if (smq_tree_ctx->pir_off)
2352 			smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
2353 
2354 		/* get parent txsch node */
2355 		if (parent_off) {
2356 			regval = rvu_read64(rvu, blkaddr, parent_off);
2357 			schq = (regval >> 16) & 0x1FF;
2358 		}
2359 	}
2360 }
2361 
nix_smq_flush_enadis_xoff(struct rvu * rvu,int blkaddr,struct nix_smq_flush_ctx * smq_flush_ctx,bool enable)2362 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
2363 				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2364 {
2365 	struct nix_txsch *txsch;
2366 	struct nix_hw *nix_hw;
2367 	int tl2, tl2_schq;
2368 	u64 regoff;
2369 
2370 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2371 	if (!nix_hw)
2372 		return;
2373 
2374 	/* loop through all TL2s with matching PF_FUNC */
2375 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
2376 	tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
2377 	for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
2378 		/* skip the TL2 of the SMQ being flushed */
2379 		if (tl2 == tl2_schq)
2380 			continue;
2381 		/* skip unused TL2s */
2382 		if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
2383 			continue;
2384 		/* skip if PF_FUNC doesn't match */
2385 		if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
2386 		    (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq]) &
2387 		     ~RVU_PFVF_FUNC_MASK))
2388 			continue;
2389 		/* enable/disable XOFF */
2390 		regoff = NIX_AF_TL2X_SW_XOFF(tl2);
2391 		if (enable)
2392 			rvu_write64(rvu, blkaddr, regoff, 0x1);
2393 		else
2394 			rvu_write64(rvu, blkaddr, regoff, 0x0);
2395 	}
2396 }
2397 
nix_smq_flush_enadis_rate(struct rvu * rvu,int blkaddr,struct nix_smq_flush_ctx * smq_flush_ctx,bool enable)2398 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
2399 				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2400 {
2401 	u64 cir_off, pir_off, cir_val, pir_val;
2402 	struct nix_smq_tree_ctx *smq_tree_ctx;
2403 	int lvl;
2404 
2405 	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2406 		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2407 		cir_off = smq_tree_ctx->cir_off;
2408 		cir_val = smq_tree_ctx->cir_val;
2409 		pir_off = smq_tree_ctx->pir_off;
2410 		pir_val = smq_tree_ctx->pir_val;
2411 
2412 		if (enable) {
2413 			rvu_write64(rvu, blkaddr, cir_off, cir_val);
2414 			if (lvl != NIX_TXSCH_LVL_TL1)
2415 				rvu_write64(rvu, blkaddr, pir_off, pir_val);
2416 		} else {
2417 			rvu_write64(rvu, blkaddr, cir_off, 0x0);
2418 			if (lvl != NIX_TXSCH_LVL_TL1)
2419 				rvu_write64(rvu, blkaddr, pir_off, 0x0);
2420 		}
2421 	}
2422 }
2423 
nix_smq_flush(struct rvu * rvu,int blkaddr,int smq,u16 pcifunc,int nixlf)2424 static int nix_smq_flush(struct rvu *rvu, int blkaddr,
2425 			 int smq, u16 pcifunc, int nixlf)
2426 {
2427 	struct nix_smq_flush_ctx *smq_flush_ctx;
2428 	int err, restore_tx_en = 0, i;
2429 	int pf = rvu_get_pf(pcifunc);
2430 	u8 cgx_id = 0, lmac_id = 0;
2431 	u16 tl2_tl3_link_schq;
2432 	u8 link, link_level;
2433 	u64 cfg, bmap = 0;
2434 
2435 	if (!is_rvu_otx2(rvu)) {
2436 		/* Skip SMQ flush if pkt count is zero */
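		/* NIX_AF_MDQX_IN_MD_COUNT is read as the number of
		 * in-flight meta-descriptors on this queue (naming per
		 * the register macro; field layout assumed).
		 */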
2437 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
2438 		if (!cfg)
2439 			return 0;
2440 	}
2441 
2442 	/* Allocate the flush context first: an allocation failure must
2443 	 * not leave CGX TX force-enabled below.
2444 	 */
2445 	smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
2446 	if (!smq_flush_ctx)
2447 		return -ENOMEM;
2448 
2449 	/* enable cgx tx if disabled */
2450 	if (is_pf_cgxmapped(rvu, pf)) {
2451 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2452 		restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
2453 						   lmac_id, true);
2454 	}
2455 
2456 	/* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
2457 	nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
2458 	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
2459 	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
2456 
2457 	/* Disable backpressure from physical link,
2458 	 * otherwise SMQ flush may stall.
2459 	 */
2460 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
2461 
2462 	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2463 			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2464 	tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
2465 	link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
2466 
2467 	/* SMQ set enqueue xoff */
2468 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2469 	cfg |= BIT_ULL(50);
2470 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2471 
2472 	/* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
2473 	for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
2474 		cfg = rvu_read64(rvu, blkaddr,
2475 				 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
2476 		if (!(cfg & BIT_ULL(12)))
2477 			continue;
2478 		bmap |= BIT_ULL(i);
2479 		cfg &= ~BIT_ULL(12);
2480 		rvu_write64(rvu, blkaddr,
2481 			    NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
2482 	}
2483 
2484 	/* Do SMQ flush and set enqueue xoff */
2485 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2486 	cfg |= BIT_ULL(50) | BIT_ULL(49);
2487 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2488 
2489 	/* Wait for flush to complete */
2490 	err = rvu_poll_reg(rvu, blkaddr,
2491 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
2492 	if (err)
2493 		dev_info(rvu->dev,
2494 			 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
2495 			 nixlf, smq);
2496 
2497 	/* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
2498 	for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
2499 		if (!(bmap & BIT_ULL(i)))
2500 			continue;
2501 		cfg = rvu_read64(rvu, blkaddr,
2502 				 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
2503 		cfg |= BIT_ULL(12);
2504 		rvu_write64(rvu, blkaddr,
2505 			    NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
2506 	}
2507 
2508 	/* restore rate limits and clear XOFF on TL2s */
2509 	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
2510 	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
2511 	kfree(smq_flush_ctx);
2512 
2513 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
2514 	/* restore cgx tx state */
2515 	if (restore_tx_en)
2516 		rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
2517 	return err;
2518 }
2519 
nix_txschq_free(struct rvu * rvu,u16 pcifunc)2520 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
2521 {
2522 	int blkaddr, nixlf, lvl, schq, err;
2523 	struct rvu_hwinfo *hw = rvu->hw;
2524 	struct nix_txsch *txsch;
2525 	struct nix_hw *nix_hw;
2526 	u16 map_func;
2527 
2528 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2529 	if (blkaddr < 0)
2530 		return NIX_AF_ERR_AF_LF_INVALID;
2531 
2532 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2533 	if (!nix_hw)
2534 		return NIX_AF_ERR_INVALID_NIXBLK;
2535 
2536 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2537 	if (nixlf < 0)
2538 		return NIX_AF_ERR_AF_LF_INVALID;
2539 
2540 	/* Disable TL2/3 queue links and all XOFFs before SMQ flush */
2541 	mutex_lock(&rvu->rsrc_lock);
2542 	for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2543 		txsch = &nix_hw->txsch[lvl];
2544 
2545 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
2546 			continue;
2547 
2548 		for (schq = 0; schq < txsch->schq.max; schq++) {
2549 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2550 				continue;
2551 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2552 			nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2553 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2554 		}
2555 	}
2556 	nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
2557 			  nix_get_tx_link(rvu, pcifunc));
2558 
2559 	/* On PF cleanup, clear cfg done flag as
2560 	 * PF would have changed default config.
2561 	 */
2562 	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
2563 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
2564 		schq = nix_get_tx_link(rvu, pcifunc);
2565 		/* Do not clear pcifunc in txsch->pfvf_map[schq] because
2566 		 * VF might be using this TL1 queue
2567 		 */
2568 		map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
2569 		txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
2570 	}
2571 
2572 	/* Flush SMQs */
2573 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2574 	for (schq = 0; schq < txsch->schq.max; schq++) {
2575 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2576 			continue;
2577 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2578 	}
2579 
2580 	/* Now free scheduler queues to free pool */
2581 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2582 		 /* TLs above the aggregation level are shared across a PF
2583 		  * and its VFs, hence skip freeing them.
2584 		  */
2585 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
2586 			continue;
2587 
2588 		txsch = &nix_hw->txsch[lvl];
2589 		for (schq = 0; schq < txsch->schq.max; schq++) {
2590 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2591 				continue;
2592 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2593 			rvu_free_rsrc(&txsch->schq, schq);
2594 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2595 		}
2596 	}
2597 	mutex_unlock(&rvu->rsrc_lock);
2598 
2599 	err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC);
2600 	if (err)
2601 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
2602 
2603 	return 0;
2604 }
2605 
nix_txschq_free_one(struct rvu * rvu,struct nix_txsch_free_req * req)2606 static int nix_txschq_free_one(struct rvu *rvu,
2607 			       struct nix_txsch_free_req *req)
2608 {
2609 	struct rvu_hwinfo *hw = rvu->hw;
2610 	u16 pcifunc = req->hdr.pcifunc;
2611 	int lvl, schq, nixlf, blkaddr;
2612 	struct nix_txsch *txsch;
2613 	struct nix_hw *nix_hw;
2614 	u32 *pfvf_map;
2615 	int rc;
2616 
2617 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2618 	if (blkaddr < 0)
2619 		return NIX_AF_ERR_AF_LF_INVALID;
2620 
2621 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2622 	if (!nix_hw)
2623 		return NIX_AF_ERR_INVALID_NIXBLK;
2624 
2625 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2626 	if (nixlf < 0)
2627 		return NIX_AF_ERR_AF_LF_INVALID;
2628 
2629 	lvl = req->schq_lvl;
2630 	schq = req->schq;
2631 	txsch = &nix_hw->txsch[lvl];
2632 
2633 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
2634 		return 0;
2635 
2636 	pfvf_map = txsch->pfvf_map;
2637 	mutex_lock(&rvu->rsrc_lock);
2638 
2639 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
2640 		rc = NIX_AF_ERR_TLX_INVALID;
2641 		goto err;
2642 	}
2643 
2644 	/* Clear SW_XOFF of this resource only.
2645 	 * For the SMQ level, the user must clear
2646 	 * all XOFFs along the path.
2647 	 */
2648 	nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2649 
2650 	nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2651 	nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2652 
2653 	/* Flush if it is an SMQ. The onus of disabling
2654 	 * TL2/3 queue links before the SMQ flush is on the user.
2655 	 */
2656 	if (lvl == NIX_TXSCH_LVL_SMQ &&
2657 	    nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
2658 		rc = NIX_AF_SMQ_FLUSH_FAILED;
2659 		goto err;
2660 	}
2661 
2662 	nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2663 
2664 	/* Free the resource */
2665 	rvu_free_rsrc(&txsch->schq, schq);
2666 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2667 	mutex_unlock(&rvu->rsrc_lock);
2668 	return 0;
2669 err:
2670 	mutex_unlock(&rvu->rsrc_lock);
2671 	return rc;
2672 }
2673 
rvu_mbox_handler_nix_txsch_free(struct rvu * rvu,struct nix_txsch_free_req * req,struct msg_rsp * rsp)2674 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
2675 				    struct nix_txsch_free_req *req,
2676 				    struct msg_rsp *rsp)
2677 {
2678 	if (req->flags & TXSCHQ_FREE_ALL)
2679 		return nix_txschq_free(rvu, req->hdr.pcifunc);
2680 	else
2681 		return nix_txschq_free_one(rvu, req);
2682 }
2683 
is_txschq_hierarchy_valid(struct rvu * rvu,u16 pcifunc,int blkaddr,int lvl,u64 reg,u64 regval)2684 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
2685 				      int lvl, u64 reg, u64 regval)
2686 {
2687 	u64 regbase = reg & 0xFFFF;
2688 	u16 schq, parent;
2689 
2690 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
2691 		return false;
2692 
2693 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2694 	/* Check if this schq belongs to this PF/VF or not */
2695 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
2696 		return false;
2697 
2698 	parent = (regval >> 16) & 0x1FF;
2699 	/* Validate MDQ's TL4 parent */
2700 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
2701 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
2702 		return false;
2703 
2704 	/* Validate TL4's TL3 parent */
2705 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
2706 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
2707 		return false;
2708 
2709 	/* Validate TL3's TL2 parent */
2710 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
2711 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
2712 		return false;
2713 
2714 	/* Validate TL2's TL1 parent */
2715 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
2716 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
2717 		return false;
2718 
2719 	return true;
2720 }
2721 
is_txschq_shaping_valid(struct rvu_hwinfo * hw,int lvl,u64 reg)2722 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
2723 {
2724 	u64 regbase;
2725 
2726 	if (hw->cap.nix_shaping)
2727 		return true;
2728 
2729 	/* If shaping and coloring are not supported, then
2730 	 * *_CIR and *_PIR registers should not be configured.
2731 	 */
2732 	regbase = reg & 0xFFFF;
2733 
2734 	switch (lvl) {
2735 	case NIX_TXSCH_LVL_TL1:
2736 		if (regbase == NIX_AF_TL1X_CIR(0))
2737 			return false;
2738 		break;
2739 	case NIX_TXSCH_LVL_TL2:
2740 		if (regbase == NIX_AF_TL2X_CIR(0) ||
2741 		    regbase == NIX_AF_TL2X_PIR(0))
2742 			return false;
2743 		break;
2744 	case NIX_TXSCH_LVL_TL3:
2745 		if (regbase == NIX_AF_TL3X_CIR(0) ||
2746 		    regbase == NIX_AF_TL3X_PIR(0))
2747 			return false;
2748 		break;
2749 	case NIX_TXSCH_LVL_TL4:
2750 		if (regbase == NIX_AF_TL4X_CIR(0) ||
2751 		    regbase == NIX_AF_TL4X_PIR(0))
2752 			return false;
2753 		break;
2754 	case NIX_TXSCH_LVL_MDQ:
2755 		if (regbase == NIX_AF_MDQX_CIR(0) ||
2756 		    regbase == NIX_AF_MDQX_PIR(0))
2757 			return false;
2758 		break;
2759 	}
2760 	return true;
2761 }
2762 
nix_tl1_default_cfg(struct rvu * rvu,struct nix_hw * nix_hw,u16 pcifunc,int blkaddr)2763 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
2764 				u16 pcifunc, int blkaddr)
2765 {
2766 	u32 *pfvf_map;
2767 	int schq;
2768 
2769 	schq = nix_get_tx_link(rvu, pcifunc);
2770 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
2771 	/* Skip if PF has already done the config */
2772 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
2773 		return;
2774 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
2775 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
2776 
2777 	/* On OcteonTx2 the config was in bytes; on newer silicons
2778 	 * it is expressed as a DWRR weight.
2779 	 */
2780 	if (!rvu->hw->cap.nix_common_dwrr_mtu)
2781 		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2782 			    TXSCH_TL1_DFLT_RR_QTM);
2783 	else
2784 		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2785 			    CN10K_MAX_DWRR_WEIGHT);
2786 
2787 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
2788 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
2789 }
2790 
2791 /* Register offset - [15:0]
2792  * Scheduler Queue number - [25:16]
2793  */
2794 #define NIX_TX_SCHQ_MASK	GENMASK_ULL(25, 0)
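/* Example decode (illustrative value): reg = (0x12 << 16) | 0x0C00
 * addresses scheduler queue 0x12 at register offset 0x0C00.
 */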
2795 
nix_txschq_cfg_read(struct rvu * rvu,struct nix_hw * nix_hw,int blkaddr,struct nix_txschq_config * req,struct nix_txschq_config * rsp)2796 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
2797 			       int blkaddr, struct nix_txschq_config *req,
2798 			       struct nix_txschq_config *rsp)
2799 {
2800 	u16 pcifunc = req->hdr.pcifunc;
2801 	int idx, schq;
2802 	u64 reg;
2803 
2804 	for (idx = 0; idx < req->num_regs; idx++) {
2805 		reg = req->reg[idx];
2806 		reg &= NIX_TX_SCHQ_MASK;
2807 		schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2808 		if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
2809 		    !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
2810 			return NIX_AF_INVAL_TXSCHQ_CFG;
2811 		rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
2812 	}
2813 	rsp->lvl = req->lvl;
2814 	rsp->num_regs = req->num_regs;
2815 	return 0;
2816 }
2817 
rvu_nix_tx_tl2_cfg(struct rvu * rvu,int blkaddr,u16 pcifunc,struct nix_txsch * txsch,bool enable)2818 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
2819 			struct nix_txsch *txsch, bool enable)
2820 {
2821 	struct rvu_hwinfo *hw = rvu->hw;
2822 	int lbk_link_start, lbk_links;
2823 	u8 pf = rvu_get_pf(pcifunc);
2824 	int schq;
2825 	u64 cfg;
2826 
2827 	if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc))
2828 		return;
2829 
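	/* BIT(12) is the link ENA bit of TL3_TL2X_LINKX_CFG (matching
	 * its use in nix_smq_flush()); the low bits carry the relative
	 * channel, here RVU_SWITCH_LBK_CHAN.
	 */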
2830 	cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
2831 	lbk_link_start = hw->cgx_links;
2832 
2833 	for (schq = 0; schq < txsch->schq.max; schq++) {
2834 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2835 			continue;
2836 		/* Enable all LBK links with channel 63 by default so that
2837 		 * packets can be sent to LBK with an NPC TX MCAM rule
2838 		 */
2839 		lbk_links = hw->lbk_links;
2840 		while (lbk_links--)
2841 			rvu_write64(rvu, blkaddr,
2842 				    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
2843 							      lbk_link_start +
2844 							      lbk_links), cfg);
2845 	}
2846 }
2847 
rvu_mbox_handler_nix_txschq_cfg(struct rvu * rvu,struct nix_txschq_config * req,struct nix_txschq_config * rsp)2848 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
2849 				    struct nix_txschq_config *req,
2850 				    struct nix_txschq_config *rsp)
2851 {
2852 	u64 reg, val, regval, schq_regbase, val_mask;
2853 	struct rvu_hwinfo *hw = rvu->hw;
2854 	u16 pcifunc = req->hdr.pcifunc;
2855 	struct nix_txsch *txsch;
2856 	struct nix_hw *nix_hw;
2857 	int blkaddr, idx, err;
2858 	int nixlf, schq;
2859 	u32 *pfvf_map;
2860 
2861 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
2862 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
2863 		return NIX_AF_INVAL_TXSCHQ_CFG;
2864 
2865 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2866 	if (err)
2867 		return err;
2868 
2869 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2870 	if (!nix_hw)
2871 		return NIX_AF_ERR_INVALID_NIXBLK;
2872 
2873 	if (req->read)
2874 		return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
2875 
2876 	txsch = &nix_hw->txsch[req->lvl];
2877 	pfvf_map = txsch->pfvf_map;
2878 
2879 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2880 	    pcifunc & RVU_PFVF_FUNC_MASK) {
2881 		mutex_lock(&rvu->rsrc_lock);
2882 		if (req->lvl == NIX_TXSCH_LVL_TL1)
2883 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2884 		mutex_unlock(&rvu->rsrc_lock);
2885 		return 0;
2886 	}
2887 
2888 	for (idx = 0; idx < req->num_regs; idx++) {
2889 		reg = req->reg[idx];
2890 		reg &= NIX_TX_SCHQ_MASK;
2891 		regval = req->regval[idx];
2892 		schq_regbase = reg & 0xFFFF;
2893 		val_mask = req->regval_mask[idx];
2894 
2895 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2896 					       txsch->lvl, reg, regval))
2897 			return NIX_AF_INVAL_TXSCHQ_CFG;
2898 
2899 		/* Check if shaping and coloring is supported */
2900 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2901 			continue;
2902 
2903 		val = rvu_read64(rvu, blkaddr, reg);
2904 		regval = (val & val_mask) | (regval & ~val_mask);
2905 
2906 		/* Handle shaping state toggle specially */
2907 		if (hw->cap.nix_shaper_toggle_wait &&
2908 		    handle_txschq_shaper_update(rvu, blkaddr, nixlf,
2909 						req->lvl, reg, regval))
2910 			continue;
2911 
2912 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2913 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2914 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2915 					   pcifunc, 0);
2916 			regval &= ~(0x7FULL << 24);
2917 			regval |= ((u64)nixlf << 24);
2918 		}
2919 
2920 		/* Clear 'BP_ENA' config, if it's not allowed */
2921 		if (!hw->cap.nix_tx_link_bp) {
2922 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2923 			    (schq_regbase & 0xFF00) ==
2924 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2925 				regval &= ~BIT_ULL(13);
2926 		}
2927 
2928 		/* Mark config as done for TL1 by PF */
2929 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2930 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2931 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2932 			mutex_lock(&rvu->rsrc_lock);
2933 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2934 							NIX_TXSCHQ_CFG_DONE);
2935 			mutex_unlock(&rvu->rsrc_lock);
2936 		}
2937 
2938 		/* SMQ flush is special, hence split the register write:
2939 		 * trigger the flush first, then write the rest of the bits.
2940 		 */
2941 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2942 		    (regval & BIT_ULL(49))) {
2943 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2944 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2945 			regval &= ~BIT_ULL(49);
2946 		}
2947 		rvu_write64(rvu, blkaddr, reg, regval);
2948 	}
2949 
2950 	return 0;
2951 }
2952 
nix_rx_vtag_cfg(struct rvu * rvu,int nixlf,int blkaddr,struct nix_vtag_config * req)2953 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2954 			   struct nix_vtag_config *req)
2955 {
2956 	u64 regval = req->vtag_size;
2957 
2958 	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2959 	    req->vtag_size > VTAGSIZE_T8)
2960 		return -EINVAL;
2961 
2962 	/* RX VTAG Type 7 reserved for vf vlan */
2963 	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2964 		return NIX_AF_ERR_RX_VTAG_INUSE;
2965 
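	/* Assumed VTAG_TYPE encoding, mirrored from the flags below:
	 * low bits = tag size, BIT(4) = strip, BIT(5) = capture.
	 */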
2966 	if (req->rx.capture_vtag)
2967 		regval |= BIT_ULL(5);
2968 	if (req->rx.strip_vtag)
2969 		regval |= BIT_ULL(4);
2970 
2971 	rvu_write64(rvu, blkaddr,
2972 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2973 	return 0;
2974 }
2975 
nix_tx_vtag_free(struct rvu * rvu,int blkaddr,u16 pcifunc,int index)2976 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2977 			    u16 pcifunc, int index)
2978 {
2979 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2980 	struct nix_txvlan *vlan;
2981 
2982 	if (!nix_hw)
2983 		return NIX_AF_ERR_INVALID_NIXBLK;
2984 
2985 	vlan = &nix_hw->txvlan;
2986 	if (vlan->entry2pfvf_map[index] != pcifunc)
2987 		return NIX_AF_ERR_PARAM;
2988 
2989 	rvu_write64(rvu, blkaddr,
2990 		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2991 	rvu_write64(rvu, blkaddr,
2992 		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2993 
2994 	vlan->entry2pfvf_map[index] = 0;
2995 	rvu_free_rsrc(&vlan->rsrc, index);
2996 
2997 	return 0;
2998 }
2999 
nix_free_tx_vtag_entries(struct rvu * rvu,u16 pcifunc)3000 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
3001 {
3002 	struct nix_txvlan *vlan;
3003 	struct nix_hw *nix_hw;
3004 	int index, blkaddr;
3005 
3006 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3007 	if (blkaddr < 0)
3008 		return;
3009 
3010 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3011 	if (!nix_hw)
3012 		return;
3013 
3014 	vlan = &nix_hw->txvlan;
3015 
3016 	mutex_lock(&vlan->rsrc_lock);
3017 	/* Scan all the entries and free the ones mapped to 'pcifunc' */
3018 	for (index = 0; index < vlan->rsrc.max; index++) {
3019 		if (vlan->entry2pfvf_map[index] == pcifunc)
3020 			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
3021 	}
3022 	mutex_unlock(&vlan->rsrc_lock);
3023 }
3024 
nix_tx_vtag_alloc(struct rvu * rvu,int blkaddr,u64 vtag,u8 size)3025 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
3026 			     u64 vtag, u8 size)
3027 {
3028 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
3029 	struct nix_txvlan *vlan;
3030 	u64 regval;
3031 	int index;
3032 
3033 	if (!nix_hw)
3034 		return NIX_AF_ERR_INVALID_NIXBLK;
3035 
3036 	vlan = &nix_hw->txvlan;
3037 
3038 	mutex_lock(&vlan->rsrc_lock);
3039 
3040 	index = rvu_alloc_rsrc(&vlan->rsrc);
3041 	if (index < 0) {
3042 		mutex_unlock(&vlan->rsrc_lock);
3043 		return index;
3044 	}
3045 
3046 	mutex_unlock(&vlan->rsrc_lock);
3047 
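	/* An 8-byte tag (size != 0) occupies the full DATA register;
	 * a 4-byte tag is assumed to sit in the upper 32 bits, hence
	 * the shift when size is zero.
	 */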
3048 	regval = size ? vtag : vtag << 32;
3049 
3050 	rvu_write64(rvu, blkaddr,
3051 		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
3052 	rvu_write64(rvu, blkaddr,
3053 		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);
3054 
3055 	return index;
3056 }
3057 
nix_tx_vtag_decfg(struct rvu * rvu,int blkaddr,struct nix_vtag_config * req)3058 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
3059 			     struct nix_vtag_config *req)
3060 {
3061 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
3062 	u16 pcifunc = req->hdr.pcifunc;
3063 	int idx0 = req->tx.vtag0_idx;
3064 	int idx1 = req->tx.vtag1_idx;
3065 	struct nix_txvlan *vlan;
3066 	int err = 0;
3067 
3068 	if (!nix_hw)
3069 		return NIX_AF_ERR_INVALID_NIXBLK;
3070 
3071 	vlan = &nix_hw->txvlan;
3072 	if (req->tx.free_vtag0 && req->tx.free_vtag1)
3073 		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
3074 		    vlan->entry2pfvf_map[idx1] != pcifunc)
3075 			return NIX_AF_ERR_PARAM;
3076 
3077 	mutex_lock(&vlan->rsrc_lock);
3078 
3079 	if (req->tx.free_vtag0) {
3080 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
3081 		if (err)
3082 			goto exit;
3083 	}
3084 
3085 	if (req->tx.free_vtag1)
3086 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
3087 
3088 exit:
3089 	mutex_unlock(&vlan->rsrc_lock);
3090 	return err;
3091 }
3092 
nix_tx_vtag_cfg(struct rvu * rvu,int blkaddr,struct nix_vtag_config * req,struct nix_vtag_config_rsp * rsp)3093 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
3094 			   struct nix_vtag_config *req,
3095 			   struct nix_vtag_config_rsp *rsp)
3096 {
3097 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
3098 	struct nix_txvlan *vlan;
3099 	u16 pcifunc = req->hdr.pcifunc;
3100 
3101 	if (!nix_hw)
3102 		return NIX_AF_ERR_INVALID_NIXBLK;
3103 
3104 	vlan = &nix_hw->txvlan;
3105 	if (req->tx.cfg_vtag0) {
3106 		rsp->vtag0_idx =
3107 			nix_tx_vtag_alloc(rvu, blkaddr,
3108 					  req->tx.vtag0, req->vtag_size);
3109 
3110 		if (rsp->vtag0_idx < 0)
3111 			return NIX_AF_ERR_TX_VTAG_NOSPC;
3112 
3113 		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
3114 	}
3115 
3116 	if (req->tx.cfg_vtag1) {
3117 		rsp->vtag1_idx =
3118 			nix_tx_vtag_alloc(rvu, blkaddr,
3119 					  req->tx.vtag1, req->vtag_size);
3120 
3121 		if (rsp->vtag1_idx < 0)
3122 			goto err_free;
3123 
3124 		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
3125 	}
3126 
3127 	return 0;
3128 
3129 err_free:
3130 	if (req->tx.cfg_vtag0)
3131 		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
3132 
3133 	return NIX_AF_ERR_TX_VTAG_NOSPC;
3134 }
3135 
rvu_mbox_handler_nix_vtag_cfg(struct rvu * rvu,struct nix_vtag_config * req,struct nix_vtag_config_rsp * rsp)3136 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
3137 				  struct nix_vtag_config *req,
3138 				  struct nix_vtag_config_rsp *rsp)
3139 {
3140 	u16 pcifunc = req->hdr.pcifunc;
3141 	int blkaddr, nixlf, err;
3142 
3143 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3144 	if (err)
3145 		return err;
3146 
3147 	if (req->cfg_type) {
3148 		/* rx vtag configuration */
3149 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
3150 		if (err)
3151 			return NIX_AF_ERR_PARAM;
3152 	} else {
3153 		/* tx vtag configuration */
3154 		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
3155 		    (req->tx.free_vtag0 || req->tx.free_vtag1))
3156 			return NIX_AF_ERR_PARAM;
3157 
3158 		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
3159 			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
3160 
3161 		if (req->tx.free_vtag0 || req->tx.free_vtag1)
3162 			return nix_tx_vtag_decfg(rvu, blkaddr, req);
3163 	}
3164 
3165 	return 0;
3166 }
3167 
nix_blk_setup_mce(struct rvu * rvu,struct nix_hw * nix_hw,int mce,u8 op,u16 pcifunc,int next,int index,u8 mce_op,bool eol)3168 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
3169 			     int mce, u8 op, u16 pcifunc, int next,
3170 			     int index, u8 mce_op, bool eol)
3171 {
3172 	struct nix_aq_enq_req aq_req;
3173 	int err;
3174 
3175 	aq_req.hdr.pcifunc = 0;
3176 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
3177 	aq_req.op = op;
3178 	aq_req.qidx = mce;
3179 
3180 	/* MCE op and RQ/RSS index are supplied by the caller */
3181 	aq_req.mce.op = mce_op;
3182 	aq_req.mce.index = index;
3183 	aq_req.mce.eol = eol;
3184 	aq_req.mce.pf_func = pcifunc;
3185 	aq_req.mce.next = next;
3186 
3187 	/* All fields valid */
3188 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
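	/* Setting every mask bit makes the admin queue write the whole
	 * MCE entry instead of selected fields (mask semantics as used
	 * throughout this driver).
	 */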
3189 
3190 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
3191 	if (err) {
3192 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
3193 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
3194 		return err;
3195 	}
3196 	return 0;
3197 }
3198 
nix_delete_mcast_mce_list(struct nix_mce_list * mce_list)3199 static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list)
3200 {
3201 	struct hlist_node *tmp;
3202 	struct mce *mce;
3203 
3204 	/* Scan through the current list */
3205 	hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
3206 		hlist_del(&mce->node);
3207 		kfree(mce);
3208 	}
3209 
3210 	mce_list->count = 0;
3211 	mce_list->max = 0;
3212 }
3213 
nix_get_last_mce_list_index(struct nix_mcast_grp_elem * elem)3214 static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem)
3215 {
3216 	return elem->mce_start_index + elem->mcast_mce_list.count - 1;
3217 }
3218 
nix_update_ingress_mce_list_hw(struct rvu * rvu,struct nix_hw * nix_hw,struct nix_mcast_grp_elem * elem)3219 static int nix_update_ingress_mce_list_hw(struct rvu *rvu,
3220 					  struct nix_hw *nix_hw,
3221 					  struct nix_mcast_grp_elem *elem)
3222 {
3223 	int idx, last_idx, next_idx, err;
3224 	struct nix_mce_list *mce_list;
3225 	struct mce *mce, *prev_mce;
3226 
3227 	mce_list = &elem->mcast_mce_list;
3228 	idx = elem->mce_start_index;
3229 	last_idx = nix_get_last_mce_list_index(elem);
3230 	hlist_for_each_entry(mce, &mce_list->head, node) {
3231 		if (idx > last_idx)
3232 			break;
3233 
3234 		if (!mce->is_active) {
3235 			if (idx == elem->mce_start_index) {
3236 				idx++;
3237 				prev_mce = mce;
3238 				elem->mce_start_index = idx;
3239 				continue;
3240 			} else if (idx == last_idx) {
3241 				err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE,
3242 							prev_mce->pcifunc, next_idx,
3243 							prev_mce->rq_rss_index,
3244 							prev_mce->dest_type,
3245 							false);
3246 				if (err)
3247 					return err;
3248 
3249 				break;
3250 			}
3251 		}
3252 
3253 		next_idx = idx + 1;
3254 		/* EOL should be set in last MCE */
3255 		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
3256 					mce->pcifunc, next_idx,
3257 					mce->rq_rss_index, mce->dest_type,
3258 					next_idx > last_idx);
3259 		if (err)
3260 			return err;
3261 
3262 		idx++;
3263 		prev_mce = mce;
3264 	}
3265 
3266 	return 0;
3267 }
3268 
nix_update_egress_mce_list_hw(struct rvu * rvu,struct nix_hw * nix_hw,struct nix_mcast_grp_elem * elem)3269 static void nix_update_egress_mce_list_hw(struct rvu *rvu,
3270 					  struct nix_hw *nix_hw,
3271 					  struct nix_mcast_grp_elem *elem)
3272 {
3273 	struct nix_mce_list *mce_list;
3274 	int idx, last_idx, next_idx;
3275 	struct mce *mce, *prev_mce;
3276 	u64 regval;
3277 	u8 eol;
3278 
3279 	mce_list = &elem->mcast_mce_list;
3280 	idx = elem->mce_start_index;
3281 	last_idx = nix_get_last_mce_list_index(elem);
3282 	hlist_for_each_entry(mce, &mce_list->head, node) {
3283 		if (idx > last_idx)
3284 			break;
3285 
3286 		if (!mce->is_active) {
3287 			if (idx == elem->mce_start_index) {
3288 				idx++;
3289 				prev_mce = mce;
3290 				elem->mce_start_index = idx;
3291 				continue;
3292 			} else if (idx == last_idx) {
3293 				regval = (next_idx << 16) | (1 << 12) | prev_mce->channel;
3294 				rvu_write64(rvu, nix_hw->blkaddr,
3295 					    NIX_AF_TX_MCASTX(idx - 1),
3296 					    regval);
3297 				break;
3298 			}
3299 		}
3300 
3301 		eol = 0;
3302 		next_idx = idx + 1;
3303 		/* EOL should be set in last MCE */
3304 		if (next_idx > last_idx)
3305 			eol = 1;
3306 
3307 		regval = (next_idx << 16) | (eol << 12) | mce->channel;
3308 		rvu_write64(rvu, nix_hw->blkaddr,
3309 			    NIX_AF_TX_MCASTX(idx),
3310 			    regval);
3311 		idx++;
3312 		prev_mce = mce;
3313 	}
3314 }
3315 
nix_del_mce_list_entry(struct rvu * rvu,struct nix_hw * nix_hw,struct nix_mcast_grp_elem * elem,struct nix_mcast_grp_update_req * req)3316 static int nix_del_mce_list_entry(struct rvu *rvu,
3317 				  struct nix_hw *nix_hw,
3318 				  struct nix_mcast_grp_elem *elem,
3319 				  struct nix_mcast_grp_update_req *req)
3320 {
3321 	u32 num_entry = req->num_mce_entry;
3322 	struct nix_mce_list *mce_list;
3323 	struct mce *mce;
3324 	bool is_found;
3325 	int i;
3326 
3327 	mce_list = &elem->mcast_mce_list;
3328 	for (i = 0; i < num_entry; i++) {
3329 		is_found = false;
3330 		hlist_for_each_entry(mce, &mce_list->head, node) {
3331 			/* If already exists, then delete */
3332 			if (mce->pcifunc == req->pcifunc[i]) {
3333 				hlist_del(&mce->node);
3334 				kfree(mce);
3335 				mce_list->count--;
3336 				is_found = true;
3337 				break;
3338 			}
3339 		}
3340 
3341 		if (!is_found)
3342 			return NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
3343 	}
3344 
3345 	mce_list->max = mce_list->count;
3346 	/* Dump the updated list to HW */
3347 	if (elem->dir == NIX_MCAST_INGRESS)
3348 		return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
3349 
3350 	nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
3351 	return 0;
3352 }
3353 
nix_add_mce_list_entry(struct rvu * rvu,struct nix_hw * nix_hw,struct nix_mcast_grp_elem * elem,struct nix_mcast_grp_update_req * req)3354 static int nix_add_mce_list_entry(struct rvu *rvu,
3355 				  struct nix_hw *nix_hw,
3356 				  struct nix_mcast_grp_elem *elem,
3357 				  struct nix_mcast_grp_update_req *req)
3358 {
3359 	u32 num_entry = req->num_mce_entry;
3360 	struct nix_mce_list *mce_list;
3361 	struct hlist_node *tmp;
3362 	struct mce *mce;
3363 	int i;
3364 
3365 	mce_list = &elem->mcast_mce_list;
3366 	for (i = 0; i < num_entry; i++) {
3367 		mce = kzalloc(sizeof(*mce), GFP_KERNEL);
3368 		if (!mce)
3369 			goto free_mce;
3370 
3371 		mce->pcifunc = req->pcifunc[i];
3372 		mce->channel = req->channel[i];
3373 		mce->rq_rss_index = req->rq_rss_index[i];
3374 		mce->dest_type = req->dest_type[i];
3375 		mce->is_active = 1;
3376 		hlist_add_head(&mce->node, &mce_list->head);
3377 		mce_list->count++;
3378 	}
3379 
3380 	mce_list->max += num_entry;
3381 
3382 	/* Dump the updated list to HW */
3383 	if (elem->dir == NIX_MCAST_INGRESS)
3384 		return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
3385 
3386 	nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
3387 	return 0;
3388 
3389 free_mce:
3390 	hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
3391 		hlist_del(&mce->node);
3392 		kfree(mce);
3393 		mce_list->count--;
3394 	}
3395 
3396 	return -ENOMEM;
3397 }
3398 
3399 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
3400 				     u16 pcifunc, bool add)
3401 {
3402 	struct mce *mce, *tail = NULL;
3403 	bool delete = false;
3404 
3405 	/* Scan through the current list */
3406 	hlist_for_each_entry(mce, &mce_list->head, node) {
3407 		/* If already exists, then delete */
3408 		if (mce->pcifunc == pcifunc && !add) {
3409 			delete = true;
3410 			break;
3411 		} else if (mce->pcifunc == pcifunc && add) {
3412 			/* entry already exists */
3413 			return 0;
3414 		}
3415 		tail = mce;
3416 	}
3417 
3418 	if (delete) {
3419 		hlist_del(&mce->node);
3420 		kfree(mce);
3421 		mce_list->count--;
3422 		return 0;
3423 	}
3424 
3425 	if (!add)
3426 		return 0;
3427 
3428 	/* Add a new one to the list, at the tail */
3429 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
3430 	if (!mce)
3431 		return -ENOMEM;
3432 	mce->pcifunc = pcifunc;
3433 	if (!tail)
3434 		hlist_add_head(&mce->node, &mce_list->head);
3435 	else
3436 		hlist_add_behind(&mce->node, &tail->node);
3437 	mce_list->count++;
3438 	return 0;
3439 }
3440 
3441 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
3442 			struct nix_mce_list *mce_list,
3443 			int mce_idx, int mcam_index, bool add)
3444 {
3445 	int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
3446 	struct npc_mcam *mcam = &rvu->hw->mcam;
3447 	struct nix_mcast *mcast;
3448 	struct nix_hw *nix_hw;
3449 	struct mce *mce;
3450 
3451 	if (!mce_list)
3452 		return -EINVAL;
3453 
3454 	/* Get this PF/VF func's MCE index */
3455 	idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
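	/* Indexing sketch (RVU func numbering assumed from the dummy lists
	 * built in nix_setup_mce_tables()): the PF has func 0 and owns
	 * entry mce_idx, while VF n (func n + 1) owns entry mce_idx + n + 1.
	 */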
3456 
3457 	if (idx > (mce_idx + mce_list->max)) {
3458 		dev_err(rvu->dev,
3459 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
3460 			__func__, idx, mce_list->max,
3461 			pcifunc >> RVU_PFVF_PF_SHIFT);
3462 		return -EINVAL;
3463 	}
3464 
3465 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
3466 	if (err)
3467 		return err;
3468 
3469 	mcast = &nix_hw->mcast;
3470 	mutex_lock(&mcast->mce_lock);
3471 
3472 	err = nix_update_mce_list_entry(mce_list, pcifunc, add);
3473 	if (err)
3474 		goto end;
3475 
3476 	/* Disable MCAM entry in NPC */
3477 	if (!mce_list->count) {
3478 		npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3479 		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
3480 		goto end;
3481 	}
3482 
3483 	/* Dump the updated list to HW */
3484 	idx = mce_idx;
3485 	last_idx = idx + mce_list->count - 1;
3486 	hlist_for_each_entry(mce, &mce_list->head, node) {
3487 		if (idx > last_idx)
3488 			break;
3489 
3490 		next_idx = idx + 1;
3491 		/* EOL should be set in last MCE */
3492 		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
3493 					mce->pcifunc, next_idx,
3494 					0, 1,
3495 					(next_idx > last_idx) ? true : false);
3496 		if (err)
3497 			goto end;
3498 		idx++;
3499 	}
3500 
3501 end:
3502 	mutex_unlock(&mcast->mce_lock);
3503 	return err;
3504 }
3505 
3506 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
3507 		      struct nix_mce_list **mce_list, int *mce_idx)
3508 {
3509 	struct rvu_hwinfo *hw = rvu->hw;
3510 	struct rvu_pfvf *pfvf;
3511 
3512 	if (!hw->cap.nix_rx_multicast ||
3513 	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
3514 		*mce_list = NULL;
3515 		*mce_idx = 0;
3516 		return;
3517 	}
3518 
3519 	/* Get this PF/VF func's MCE index */
3520 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
3521 
3522 	if (type == NIXLF_BCAST_ENTRY) {
3523 		*mce_list = &pfvf->bcast_mce_list;
3524 		*mce_idx = pfvf->bcast_mce_idx;
3525 	} else if (type == NIXLF_ALLMULTI_ENTRY) {
3526 		*mce_list = &pfvf->mcast_mce_list;
3527 		*mce_idx = pfvf->mcast_mce_idx;
3528 	} else if (type == NIXLF_PROMISC_ENTRY) {
3529 		*mce_list = &pfvf->promisc_mce_list;
3530 		*mce_idx = pfvf->promisc_mce_idx;
3531 	} else {
3532 		*mce_list = NULL;
3533 		*mce_idx = 0;
3534 	}
3535 }
3536 
3537 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
3538 			       int type, bool add)
3539 {
3540 	int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
3541 	struct npc_mcam *mcam = &rvu->hw->mcam;
3542 	struct rvu_hwinfo *hw = rvu->hw;
3543 	struct nix_mce_list *mce_list;
3544 	int pf;
3545 
3546 	/* skip multicast pkt replication for AF's VFs & SDP links */
3547 	if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc))
3548 		return 0;
3549 
3550 	if (!hw->cap.nix_rx_multicast)
3551 		return 0;
3552 
3553 	pf = rvu_get_pf(pcifunc);
3554 	if (!is_pf_cgxmapped(rvu, pf))
3555 		return 0;
3556 
3557 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3558 	if (blkaddr < 0)
3559 		return -EINVAL;
3560 
3561 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
3562 	if (nixlf < 0)
3563 		return -EINVAL;
3564 
3565 	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
3566 
3567 	mcam_index = npc_get_nixlf_mcam_index(mcam,
3568 					      pcifunc & ~RVU_PFVF_FUNC_MASK,
3569 					      nixlf, type);
3570 	err = nix_update_mce_list(rvu, pcifunc, mce_list,
3571 				  mce_idx, mcam_index, add);
3572 	return err;
3573 }
3574 
3575 static void nix_setup_mcast_grp(struct nix_hw *nix_hw)
3576 {
3577 	struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp;
3578 
3579 	INIT_LIST_HEAD(&mcast_grp->mcast_grp_head);
3580 	mutex_init(&mcast_grp->mcast_grp_lock);
3581 	mcast_grp->next_grp_index = 1;
3582 	mcast_grp->count = 0;
3583 }
3584 
3585 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
3586 {
3587 	struct nix_mcast *mcast = &nix_hw->mcast;
3588 	int err, pf, numvfs, idx;
3589 	struct rvu_pfvf *pfvf;
3590 	u16 pcifunc;
3591 	u64 cfg;
3592 
3593 	/* Skip PF0 (i.e. the AF) */
3594 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
3595 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3596 		/* If PF is not enabled, nothing to do */
3597 		if (!((cfg >> 20) & 0x01))
3598 			continue;
3599 		/* Get numVFs attached to this PF */
3600 		numvfs = (cfg >> 12) & 0xFF;
3601 
3602 		pfvf = &rvu->pf[pf];
3603 
3604 		/* Is this NIX0/1 block mapped to this PF? */
3605 		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
3606 			continue;
3607 
3608 		/* save start idx of broadcast mce list */
3609 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3610 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
3611 
3612 		/* save start idx of multicast mce list */
3613 		pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3614 		nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
3615 
3616 		/* save the start idx of promisc mce list */
3617 		pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3618 		nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
3619 
3620 		for (idx = 0; idx < (numvfs + 1); idx++) {
3621 			/* idx-0 is for PF, followed by VFs */
3622 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
3623 			pcifunc |= idx;
3624 			/* Add dummy entries now, so that we don't have to
3625 			 * check whether AQ_OP should be INIT/WRITE later on.
3626 			 * Will be updated when a NIXLF is attached/detached to
3627 			 * these PF/VFs.
3628 			 */
3629 			err = nix_blk_setup_mce(rvu, nix_hw,
3630 						pfvf->bcast_mce_idx + idx,
3631 						NIX_AQ_INSTOP_INIT,
3632 						pcifunc, 0, 0, 1, true);
3633 			if (err)
3634 				return err;
3635 
3636 			/* add dummy entries to multicast mce list */
3637 			err = nix_blk_setup_mce(rvu, nix_hw,
3638 						pfvf->mcast_mce_idx + idx,
3639 						NIX_AQ_INSTOP_INIT,
3640 						pcifunc, 0, 0, 1, true);
3641 			if (err)
3642 				return err;
3643 
3644 			/* add dummy entries to promisc mce list */
3645 			err = nix_blk_setup_mce(rvu, nix_hw,
3646 						pfvf->promisc_mce_idx + idx,
3647 						NIX_AQ_INSTOP_INIT,
3648 						pcifunc, 0, 0, 1, true);
3649 			if (err)
3650 				return err;
3651 		}
3652 	}
3653 	return 0;
3654 }
3655 
3656 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3657 {
3658 	struct nix_mcast *mcast = &nix_hw->mcast;
3659 	struct rvu_hwinfo *hw = rvu->hw;
3660 	int err, size;
3661 
3662 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
3663 	size = BIT_ULL(size);
3664 
3665 	/* Allocate bitmap for rx mce entries */
3666 	mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE;
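	/* Sizing sketch: with MC_TBL_SIZE left at the 2K enum (value 3 in
	 * mc_tbl_sz), this works out to 256 << 3 = 2048 ingress MCE
	 * entries; the egress side uses the fixed MC_TX_MAX budget below.
	 */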
3667 	err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3668 	if (err)
3669 		return -ENOMEM;
3670 
3671 	/* Allocate bitmap for tx mce entries */
3672 	mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX;
3673 	err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3674 	if (err) {
3675 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3676 		return -ENOMEM;
3677 	}
3678 
3679 	/* Alloc memory for multicast/mirror replication entries */
3680 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
3681 			 mcast->mce_counter[NIX_MCAST_INGRESS].max, size);
3682 	if (err) {
3683 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3684 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3685 		return -ENOMEM;
3686 	}
3687 
3688 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
3689 		    (u64)mcast->mce_ctx->iova);
3690 
3691 	/* Set max list length equal to max no of VFs per PF + the PF itself */
3692 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
3693 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
3694 
3695 	/* Alloc memory for multicast replication buffers */
3696 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
3697 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
3698 			 (8UL << MC_BUF_CNT), size);
3699 	if (err) {
3700 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3701 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3702 		return -ENOMEM;
3703 	}
3704 
3705 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
3706 		    (u64)mcast->mcast_buf->iova);
3707 
3708 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
3709 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
3710 
3711 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
3712 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
3713 		    BIT_ULL(20) | MC_BUF_CNT);
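	/* Field decode above is inferred from the expression, not the HRM:
	 * bit 63 looks like an enable, the replay pkind sits at shift 24
	 * and the low bits select the buffer count (8 << MC_BUF_CNT).
	 */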
3714 
3715 	mutex_init(&mcast->mce_lock);
3716 
3717 	nix_setup_mcast_grp(nix_hw);
3718 
3719 	return nix_setup_mce_tables(rvu, nix_hw);
3720 }
3721 
3722 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
3723 {
3724 	struct nix_txvlan *vlan = &nix_hw->txvlan;
3725 	int err;
3726 
3727 	/* Allocate resource bitmap for tx vtag def registers */
3728 	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
3729 	err = rvu_alloc_bitmap(&vlan->rsrc);
3730 	if (err)
3731 		return -ENOMEM;
3732 
3733 	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
3734 	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
3735 					    sizeof(u16), GFP_KERNEL);
3736 	if (!vlan->entry2pfvf_map)
3737 		goto free_mem;
3738 
3739 	mutex_init(&vlan->rsrc_lock);
3740 	return 0;
3741 
3742 free_mem:
3743 	kfree(vlan->rsrc.bmap);
3744 	return -ENOMEM;
3745 }
3746 
3747 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3748 {
3749 	struct nix_txsch *txsch;
3750 	int err, lvl, schq;
3751 	u64 cfg, reg;
3752 
3753 	/* Get the scheduler queue count of each type and allocate
3754 	 * a bitmap per type for alloc/free/attach operations.
3755 	 */
3756 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3757 		txsch = &nix_hw->txsch[lvl];
3758 		txsch->lvl = lvl;
3759 		switch (lvl) {
3760 		case NIX_TXSCH_LVL_SMQ:
3761 			reg = NIX_AF_MDQ_CONST;
3762 			break;
3763 		case NIX_TXSCH_LVL_TL4:
3764 			reg = NIX_AF_TL4_CONST;
3765 			break;
3766 		case NIX_TXSCH_LVL_TL3:
3767 			reg = NIX_AF_TL3_CONST;
3768 			break;
3769 		case NIX_TXSCH_LVL_TL2:
3770 			reg = NIX_AF_TL2_CONST;
3771 			break;
3772 		case NIX_TXSCH_LVL_TL1:
3773 			reg = NIX_AF_TL1_CONST;
3774 			break;
3775 		}
3776 		cfg = rvu_read64(rvu, blkaddr, reg);
3777 		txsch->schq.max = cfg & 0xFFFF;
3778 		err = rvu_alloc_bitmap(&txsch->schq);
3779 		if (err)
3780 			return err;
3781 
3782 		/* Allocate memory for scheduler queues to
3783 		 * PF/VF pcifunc mapping info.
3784 		 */
3785 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
3786 					       sizeof(u32), GFP_KERNEL);
3787 		if (!txsch->pfvf_map)
3788 			return -ENOMEM;
3789 		for (schq = 0; schq < txsch->schq.max; schq++)
3790 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
3791 	}
3792 
3793 	/* Setup a default value of 8192 as DWRR MTU */
3794 	if (rvu->hw->cap.nix_common_dwrr_mtu ||
3795 	    rvu->hw->cap.nix_multiple_dwrr_mtu) {
3796 		rvu_write64(rvu, blkaddr,
3797 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
3798 			    convert_bytes_to_dwrr_mtu(8192));
3799 		rvu_write64(rvu, blkaddr,
3800 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
3801 			    convert_bytes_to_dwrr_mtu(8192));
3802 		rvu_write64(rvu, blkaddr,
3803 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
3804 			    convert_bytes_to_dwrr_mtu(8192));
3805 	}
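	/* Per the capability notes in rvu_nix_setup_capabilities(), CN10K
	 * computes quantum as 'DWRR MTU * weight'; with the 8192-byte
	 * default above, a queue programmed with RR_WEIGHT = 2 would be
	 * serviced in roughly 2 * 8192 = 16384-byte quanta.
	 */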
3806 
3807 	return 0;
3808 }
3809 
3810 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
3811 				int blkaddr, u32 cfg)
3812 {
3813 	int fmt_idx;
3814 
3815 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
3816 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
3817 			return fmt_idx;
3818 	}
3819 	if (fmt_idx >= nix_hw->mark_format.total)
3820 		return -ERANGE;
3821 
3822 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
3823 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
3824 	nix_hw->mark_format.in_use++;
3825 	return fmt_idx;
3826 }
3827 
3828 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
3829 				    int blkaddr)
3830 {
3831 	u64 cfgs[] = {
3832 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
3833 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
3834 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
3835 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
3836 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
3837 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
3838 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
3839 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
3840 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
3841 	};
3842 	int i, rc;
3843 	u64 total;
3844 
3845 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
3846 	nix_hw->mark_format.total = (u8)total;
3847 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
3848 					       GFP_KERNEL);
3849 	if (!nix_hw->mark_format.cfg)
3850 		return -ENOMEM;
3851 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
3852 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
3853 		if (rc < 0)
3854 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
3855 				rc, i);
3856 	}
3857 
3858 	return 0;
3859 }
3860 
3861 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3862 {
3863 	/* CN10K supports LBK FIFO size 72 KB */
3864 	if (rvu->hw->lbk_bufsize == 0x12000)
3865 		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
3866 	else
3867 		*max_mtu = NIC_HW_MAX_FRS;
3868 }
3869 
3870 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3871 {
3872 	int fifo_size = rvu_cgx_get_fifolen(rvu);
3873 
3874 	/* RPM supports FIFO len 128 KB and RPM2 supports double the
3875 	 * FIFO len to accommodate 8 LMACs
3876 	 */
3877 	if (fifo_size == 0x20000 || fifo_size == 0x40000)
3878 		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
3879 	else
3880 		*max_mtu = NIC_HW_MAX_FRS;
3881 }
3882 
3883 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
3884 				     struct nix_hw_info *rsp)
3885 {
3886 	u16 pcifunc = req->hdr.pcifunc;
3887 	u64 dwrr_mtu;
3888 	int blkaddr;
3889 
3890 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3891 	if (blkaddr < 0)
3892 		return NIX_AF_ERR_AF_LF_INVALID;
3893 
3894 	if (is_lbk_vf(rvu, pcifunc))
3895 		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
3896 	else
3897 		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
3898 
3899 	rsp->min_mtu = NIC_HW_MIN_FRS;
3900 
3901 	if (!rvu->hw->cap.nix_common_dwrr_mtu &&
3902 	    !rvu->hw->cap.nix_multiple_dwrr_mtu) {
3903 		/* Return '1' on OTx2 */
3904 		rsp->rpm_dwrr_mtu = 1;
3905 		rsp->sdp_dwrr_mtu = 1;
3906 		rsp->lbk_dwrr_mtu = 1;
3907 		return 0;
3908 	}
3909 
3910 	/* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
3911 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3912 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
3913 	rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3914 
3915 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3916 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
3917 	rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3918 
3919 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3920 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
3921 	rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3922 
3923 	return 0;
3924 }
3925 
3926 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
3927 				   struct msg_rsp *rsp)
3928 {
3929 	u16 pcifunc = req->hdr.pcifunc;
3930 	int i, nixlf, blkaddr, err;
3931 	u64 stats;
3932 
3933 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3934 	if (err)
3935 		return err;
3936 
3937 	/* Get stats count supported by HW */
3938 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
3939 
3940 	/* Reset tx stats */
3941 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
3942 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
3943 
3944 	/* Reset rx stats */
3945 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
3946 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
3947 
3948 	return 0;
3949 }
3950 
3951 /* Returns the ALG index to be set into NPC_RX_ACTION */
3952 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
3953 {
3954 	int i;
3955 
3956 	/* Scan over existing algo entries to find a match */
3957 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
3958 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
3959 			return i;
3960 
3961 	return -ERANGE;
3962 }
3963 
3964 /* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */
3965 #define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf)
3966 /* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */
3967 #define NPC_LT_LC_IP_MATCH_MSK  ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf)
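/* Worked example of the masks above (ltype values illustrative only):
 * if NPC_LT_LC_IP = 0x4 and NPC_LT_LC_IP_OPT = 0x5, then
 * ~(0x4 ^ 0x5) & 0xf = 0xe, i.e. the compare ignores exactly the bit in
 * which the two ltypes differ, so a single (ltype_match, ltype_mask)
 * pair matches both the plain and the option/extension headers.
 */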
3968 
3969 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
3970 {
3971 	int idx, nr_field, key_off, field_marker, keyoff_marker;
3972 	int max_key_off, max_bit_pos, group_member;
3973 	struct nix_rx_flowkey_alg *field;
3974 	struct nix_rx_flowkey_alg tmp;
3975 	u32 key_type, valid_key;
3976 	u32 l3_l4_src_dst;
3977 	int l4_key_offset = 0;
3978 
3979 	if (!alg)
3980 		return -EINVAL;
3981 
3982 #define FIELDS_PER_ALG  5
3983 #define MAX_KEY_OFF	40
3984 	/* Clear all fields */
3985 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
3986 
3987 	/* Each of the 32 possible flow key algorithm definitions should
3988 	 * fall into the above incremental config (except ALG0). Otherwise a
3989 	 * single NPC MCAM entry is not sufficient for supporting RSS.
3990 	 *
3991 	 * If a different definition or combination is needed then the NPC
3992 	 * MCAM has to be programmed to filter such pkts and its action
3993 	 * should point to this definition to calculate the flowtag or hash.
3994 	 *
3995 	 * The `for loop` goes over _all_ protocol fields and the following
3996 	 * variables depict the state machine's forward progress logic.
3997 	 *
3998 	 * keyoff_marker - Enabled when hash byte length needs to be accounted
3999 	 * in field->key_offset update.
4000 	 * field_marker - Enabled when a new field needs to be selected.
4001 	 * group_member - Enabled when protocol is part of a group.
4002 	 */
4003 
4004 	/* Top 4 bits (31:28) are reserved to specify SRC, DST
4005 	 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST,
4006 	 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
4007 	 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
4008 	 */
4009 	l3_l4_src_dst = flow_cfg;
4010 	/* Reset these 4 bits, so that these won't be part of key */
4011 	flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
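	/* Sketch of the split (bit positions from the comment above): a
	 * request of NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_L3_SRC_ONLY
	 * keeps bit 31 in l3_l4_src_dst while the mask clears it from
	 * flow_cfg, so the loop below hashes only the 4-byte SIP.
	 */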
4012 
4013 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
4014 	nr_field = 0; key_off = 0; field_marker = 1;
4015 	field = &tmp; max_bit_pos = fls(flow_cfg);
4016 	for (idx = 0;
4017 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
4018 	     key_off < MAX_KEY_OFF; idx++) {
4019 		key_type = BIT(idx);
4020 		valid_key = flow_cfg & key_type;
4021 		/* Found a field marker, reset the field values */
4022 		if (field_marker)
4023 			memset(&tmp, 0, sizeof(tmp));
4024 
4025 		field_marker = true;
4026 		keyoff_marker = true;
4027 		switch (key_type) {
4028 		case NIX_FLOW_KEY_TYPE_PORT:
4029 			field->sel_chan = true;
4030 			/* This should be set to 1, when SEL_CHAN is set */
4031 			field->bytesm1 = 1;
4032 			break;
4033 		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
4034 			field->lid = NPC_LID_LC;
4035 			field->hdr_offset = 9; /* offset */
4036 			field->bytesm1 = 0; /* 1 byte */
4037 			field->ltype_match = NPC_LT_LC_IP;
4038 			field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
4039 			break;
4040 		case NIX_FLOW_KEY_TYPE_IPV4:
4041 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
4042 			field->lid = NPC_LID_LC;
4043 			field->ltype_match = NPC_LT_LC_IP;
4044 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
4045 				field->lid = NPC_LID_LG;
4046 				field->ltype_match = NPC_LT_LG_TU_IP;
4047 			}
4048 			field->hdr_offset = 12; /* SIP offset */
4049 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
4050 
4051 			/* Only SIP */
4052 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
4053 				field->bytesm1 = 3; /* SIP, 4 bytes */
4054 
4055 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
4056 				/* Both SIP + DIP */
4057 				if (field->bytesm1 == 3) {
4058 					field->bytesm1 = 7; /* SIP + DIP, 8B */
4059 				} else {
4060 					/* Only DIP */
4061 					field->hdr_offset = 16; /* DIP off */
4062 					field->bytesm1 = 3; /* DIP, 4 bytes */
4063 				}
4064 			}
4065 			field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
4066 			keyoff_marker = false;
4067 			break;
4068 		case NIX_FLOW_KEY_TYPE_IPV6:
4069 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
4070 			field->lid = NPC_LID_LC;
4071 			field->ltype_match = NPC_LT_LC_IP6;
4072 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
4073 				field->lid = NPC_LID_LG;
4074 				field->ltype_match = NPC_LT_LG_TU_IP6;
4075 			}
4076 			field->hdr_offset = 8; /* SIP offset */
4077 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
4078 
4079 			/* Only SIP */
4080 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
4081 				field->bytesm1 = 15; /* SIP, 16 bytes */
4082 
4083 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
4084 				/* Both SIP + DIP */
4085 				if (field->bytesm1 == 15) {
4086 					/* SIP + DIP, 32 bytes */
4087 					field->bytesm1 = 31;
4088 				} else {
4089 					/* Only DIP */
4090 					field->hdr_offset = 24; /* DIP off */
4091 					field->bytesm1 = 15; /* DIP,16 bytes */
4092 				}
4093 			}
4094 			field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK;
4095 			break;
4096 		case NIX_FLOW_KEY_TYPE_TCP:
4097 		case NIX_FLOW_KEY_TYPE_UDP:
4098 		case NIX_FLOW_KEY_TYPE_SCTP:
4099 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
4100 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
4101 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
4102 			field->lid = NPC_LID_LD;
4103 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
4104 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
4105 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
4106 				field->lid = NPC_LID_LH;
4107 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
4108 
4109 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
4110 				field->bytesm1 = 1; /* SRC, 2 bytes */
4111 
4112 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
4113 				/* Both SRC + DST */
4114 				if (field->bytesm1 == 1) {
4115 					/* SRC + DST, 4 bytes */
4116 					field->bytesm1 = 3;
4117 				} else {
4118 					/* Only DIP */
4119 					/* Only Dport */
4120 					field->bytesm1 = 1; /* DST, 2 bytes */
4121 				}
4122 			}
4123 
4124 			/* Ltype enum values under NPC_LID_LD and NPC_LID_LH are the same,
4125 			 * so no need to change the ltype_match, just change
4126 			 * the lid for inner protocols
4127 			 */
4128 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
4129 				     (int)NPC_LT_LH_TU_TCP);
4130 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
4131 				     (int)NPC_LT_LH_TU_UDP);
4132 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
4133 				     (int)NPC_LT_LH_TU_SCTP);
4134 
4135 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
4136 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
4137 			    valid_key) {
4138 				field->ltype_match |= NPC_LT_LD_TCP;
4139 				group_member = true;
4140 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
4141 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
4142 				   valid_key) {
4143 				field->ltype_match |= NPC_LT_LD_UDP;
4144 				group_member = true;
4145 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
4146 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
4147 				   valid_key) {
4148 				field->ltype_match |= NPC_LT_LD_SCTP;
4149 				group_member = true;
4150 			}
4151 			field->ltype_mask = ~field->ltype_match;
4152 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
4153 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
4154 				/* Handle the case where some item in the
4155 				 * group is enabled but not the final one
4156 				 */
4157 				if (group_member) {
4158 					valid_key = true;
4159 					group_member = false;
4160 				}
4161 			} else {
4162 				field_marker = false;
4163 				keyoff_marker = false;
4164 			}
4165 
4166 			/* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
4167 			 * remember the TCP key offset within the 40 byte hash key.
4168 			 */
4169 			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
4170 				l4_key_offset = key_off;
4171 			break;
4172 		case NIX_FLOW_KEY_TYPE_NVGRE:
4173 			field->lid = NPC_LID_LD;
4174 			field->hdr_offset = 4; /* VSID offset */
4175 			field->bytesm1 = 2;
4176 			field->ltype_match = NPC_LT_LD_NVGRE;
4177 			field->ltype_mask = 0xF;
4178 			break;
4179 		case NIX_FLOW_KEY_TYPE_VXLAN:
4180 		case NIX_FLOW_KEY_TYPE_GENEVE:
4181 			field->lid = NPC_LID_LE;
4182 			field->bytesm1 = 2;
4183 			field->hdr_offset = 4;
4184 			field->ltype_mask = 0xF;
4185 			field_marker = false;
4186 			keyoff_marker = false;
4187 
4188 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
4189 				field->ltype_match |= NPC_LT_LE_VXLAN;
4190 				group_member = true;
4191 			}
4192 
4193 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
4194 				field->ltype_match |= NPC_LT_LE_GENEVE;
4195 				group_member = true;
4196 			}
4197 
4198 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
4199 				if (group_member) {
4200 					field->ltype_mask = ~field->ltype_match;
4201 					field_marker = true;
4202 					keyoff_marker = true;
4203 					valid_key = true;
4204 					group_member = false;
4205 				}
4206 			}
4207 			break;
4208 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
4209 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
4210 			field->lid = NPC_LID_LA;
4211 			field->ltype_match = NPC_LT_LA_ETHER;
4212 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
4213 				field->lid = NPC_LID_LF;
4214 				field->ltype_match = NPC_LT_LF_TU_ETHER;
4215 			}
4216 			field->hdr_offset = 0;
4217 			field->bytesm1 = 5; /* DMAC 6 Byte */
4218 			field->ltype_mask = 0xF;
4219 			break;
4220 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
4221 			field->lid = NPC_LID_LC;
4222 			field->hdr_offset = 40; /* IPV6 hdr */
4223 			field->bytesm1 = 0; /* 1 Byte ext hdr */
4224 			field->ltype_match = NPC_LT_LC_IP6_EXT;
4225 			field->ltype_mask = 0xF;
4226 			break;
4227 		case NIX_FLOW_KEY_TYPE_GTPU:
4228 			field->lid = NPC_LID_LE;
4229 			field->hdr_offset = 4;
4230 			field->bytesm1 = 3; /* 4 bytes TID */
4231 			field->ltype_match = NPC_LT_LE_GTPU;
4232 			field->ltype_mask = 0xF;
4233 			break;
4234 		case NIX_FLOW_KEY_TYPE_CUSTOM0:
4235 			field->lid = NPC_LID_LC;
4236 			field->hdr_offset = 6;
4237 			field->bytesm1 = 1; /* 2 Bytes */
4238 			field->ltype_match = NPC_LT_LC_CUSTOM0;
4239 			field->ltype_mask = 0xF;
4240 			break;
4241 		case NIX_FLOW_KEY_TYPE_VLAN:
4242 			field->lid = NPC_LID_LB;
4243 			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
4244 			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
4245 			field->ltype_match = NPC_LT_LB_CTAG;
4246 			field->ltype_mask = 0xF;
4247 			field->fn_mask = 1; /* Mask out the first nibble */
4248 			break;
4249 		case NIX_FLOW_KEY_TYPE_AH:
4250 		case NIX_FLOW_KEY_TYPE_ESP:
4251 			field->hdr_offset = 0;
4252 			field->bytesm1 = 7; /* SPI + sequence number */
4253 			field->ltype_mask = 0xF;
4254 			field->lid = NPC_LID_LE;
4255 			field->ltype_match = NPC_LT_LE_ESP;
4256 			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
4257 				field->lid = NPC_LID_LD;
4258 				field->ltype_match = NPC_LT_LD_AH;
4259 				field->hdr_offset = 4;
4260 				keyoff_marker = false;
4261 			}
4262 			break;
4263 		}
4264 		field->ena = 1;
4265 
4266 		/* Found a valid flow key type */
4267 		if (valid_key) {
4268 			/* Use the key offset of TCP/UDP/SCTP fields
4269 			 * for ESP/AH fields.
4270 			 */
4271 			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
4272 			    key_type == NIX_FLOW_KEY_TYPE_AH)
4273 				key_off = l4_key_offset;
4274 			field->key_offset = key_off;
4275 			memcpy(&alg[nr_field], field, sizeof(*field));
4276 			max_key_off = max(max_key_off, field->bytesm1 + 1);
4277 
4278 			/* Found a field marker, get the next field */
4279 			if (field_marker)
4280 				nr_field++;
4281 		}
4282 
4283 		/* Found a keyoff marker, update the new key_off */
4284 		if (keyoff_marker) {
4285 			key_off += max_key_off;
4286 			max_key_off = 0;
4287 		}
4288 	}
4289 	/* Processed all the flow key types */
4290 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
4291 		return 0;
4292 	else
4293 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
4294 }
4295 
4296 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
4297 {
4298 	u64 field[FIELDS_PER_ALG];
4299 	struct nix_hw *hw;
4300 	int fid, rc;
4301 
4302 	hw = get_nix_hw(rvu->hw, blkaddr);
4303 	if (!hw)
4304 		return NIX_AF_ERR_INVALID_NIXBLK;
4305 
4306 	/* No room to add a new flow hash algorithm */
4307 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
4308 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
4309 
4310 	/* Generate algo fields for the given flow_cfg */
4311 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
4312 	if (rc)
4313 		return rc;
4314 
4315 	/* Update ALGX_FIELDX register with generated fields */
4316 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
4317 		rvu_write64(rvu, blkaddr,
4318 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
4319 							   fid), field[fid]);
4320 
4321 	/* Store the flow_cfg for further lookup */
4322 	rc = hw->flowkey.in_use;
4323 	hw->flowkey.flowkey[rc] = flow_cfg;
4324 	hw->flowkey.in_use++;
4325 
4326 	return rc;
4327 }
4328 
4329 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
4330 					 struct nix_rss_flowkey_cfg *req,
4331 					 struct nix_rss_flowkey_cfg_rsp *rsp)
4332 {
4333 	u16 pcifunc = req->hdr.pcifunc;
4334 	int alg_idx, nixlf, blkaddr;
4335 	struct nix_hw *nix_hw;
4336 	int err;
4337 
4338 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
4339 	if (err)
4340 		return err;
4341 
4342 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4343 	if (!nix_hw)
4344 		return NIX_AF_ERR_INVALID_NIXBLK;
4345 
4346 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
4347 	/* Failed to get algo index from the existing list, reserve a new one */
4348 	if (alg_idx < 0) {
4349 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
4350 						  req->flowkey_cfg);
4351 		if (alg_idx < 0)
4352 			return alg_idx;
4353 	}
4354 	rsp->alg_idx = alg_idx;
4355 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
4356 				       alg_idx, req->mcam_index);
4357 	return 0;
4358 }
4359 
4360 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
4361 {
4362 	u32 flowkey_cfg, minkey_cfg;
4363 	int alg, fid, rc;
4364 
4365 	/* Disable all flow key algx fieldx */
4366 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
4367 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
4368 			rvu_write64(rvu, blkaddr,
4369 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
4370 				    0);
4371 	}
4372 
4373 	/* IPv4/IPv6 SIP/DIPs */
4374 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
4375 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4376 	if (rc < 0)
4377 		return rc;
4378 
4379 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4380 	minkey_cfg = flowkey_cfg;
4381 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
4382 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4383 	if (rc < 0)
4384 		return rc;
4385 
4386 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4387 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
4388 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4389 	if (rc < 0)
4390 		return rc;
4391 
4392 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4393 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
4394 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4395 	if (rc < 0)
4396 		return rc;
4397 
4398 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
4399 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4400 			NIX_FLOW_KEY_TYPE_UDP;
4401 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4402 	if (rc < 0)
4403 		return rc;
4404 
4405 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4406 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4407 			NIX_FLOW_KEY_TYPE_SCTP;
4408 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4409 	if (rc < 0)
4410 		return rc;
4411 
4412 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4413 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
4414 			NIX_FLOW_KEY_TYPE_SCTP;
4415 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4416 	if (rc < 0)
4417 		return rc;
4418 
4419 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4420 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4421 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
4422 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4423 	if (rc < 0)
4424 		return rc;
4425 
4426 	return 0;
4427 }
4428 
4429 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
4430 				      struct nix_set_mac_addr *req,
4431 				      struct msg_rsp *rsp)
4432 {
4433 	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
4434 	u16 pcifunc = req->hdr.pcifunc;
4435 	int blkaddr, nixlf, err;
4436 	struct rvu_pfvf *pfvf;
4437 
4438 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
4439 	if (err)
4440 		return err;
4441 
4442 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4443 
4444 	/* untrusted VF can't overwrite admin(PF) changes */
4445 	if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4446 	    (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
4447 		dev_warn(rvu->dev,
4448 			 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
4449 		return -EPERM;
4450 	}
4451 
4452 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
4453 
4454 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
4455 				    pfvf->rx_chan_base, req->mac_addr);
4456 
4457 	if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
4458 		ether_addr_copy(pfvf->default_mac, req->mac_addr);
4459 
4460 	return 0;
4461 }
4462 
4463 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
4464 				      struct msg_req *req,
4465 				      struct nix_get_mac_addr_rsp *rsp)
4466 {
4467 	u16 pcifunc = req->hdr.pcifunc;
4468 	struct rvu_pfvf *pfvf;
4469 
4470 	if (!is_nixlf_attached(rvu, pcifunc))
4471 		return NIX_AF_ERR_AF_LF_INVALID;
4472 
4473 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4474 
4475 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
4476 
4477 	return 0;
4478 }
4479 
4480 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
4481 				     struct msg_rsp *rsp)
4482 {
4483 	bool allmulti, promisc, nix_rx_multicast;
4484 	u16 pcifunc = req->hdr.pcifunc;
4485 	struct rvu_pfvf *pfvf;
4486 	int nixlf, err;
4487 
4488 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4489 	promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
4490 	allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
4491 	pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
4492 
4493 	nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
4494 
4495 	if (is_vf(pcifunc) && !nix_rx_multicast &&
4496 	    (promisc || allmulti)) {
4497 		dev_warn_ratelimited(rvu->dev,
4498 				     "VF promisc/multicast not supported\n");
4499 		return 0;
4500 	}
4501 
4502 	/* untrusted VF can't configure promisc/allmulti */
4503 	if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4504 	    (promisc || allmulti))
4505 		return 0;
4506 
4507 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4508 	if (err)
4509 		return err;
4510 
4511 	if (nix_rx_multicast) {
4512 		/* add/del this PF_FUNC to/from mcast pkt replication list */
4513 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
4514 					  allmulti);
4515 		if (err) {
4516 			dev_err(rvu->dev,
4517 				"Failed to update pcifunc 0x%x to multicast list\n",
4518 				pcifunc);
4519 			return err;
4520 		}
4521 
4522 		/* add/del this PF_FUNC to/from promisc pkt replication list */
4523 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
4524 					  promisc);
4525 		if (err) {
4526 			dev_err(rvu->dev,
4527 				"Failed to update pcifunc 0x%x to promisc list\n",
4528 				pcifunc);
4529 			return err;
4530 		}
4531 	}
4532 
4533 	/* install/uninstall allmulti entry */
4534 	if (allmulti) {
4535 		rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
4536 					       pfvf->rx_chan_base);
4537 	} else {
4538 		if (!nix_rx_multicast)
4539 			rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
4540 	}
4541 
4542 	/* install/uninstall promisc entry */
4543 	if (promisc)
4544 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
4545 					      pfvf->rx_chan_base,
4546 					      pfvf->rx_chan_cnt);
4547 	else
4548 		if (!nix_rx_multicast)
4549 			rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
4550 
4551 	return 0;
4552 }
4553 
4554 static void nix_find_link_frs(struct rvu *rvu,
4555 			      struct nix_frs_cfg *req, u16 pcifunc)
4556 {
4557 	int pf = rvu_get_pf(pcifunc);
4558 	struct rvu_pfvf *pfvf;
4559 	int maxlen, minlen;
4560 	int numvfs, hwvf;
4561 	int vf;
4562 
4563 	/* Update with requester's min/max lengths */
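	/* The link setting must satisfy every function sharing the link, so
	 * this takes the max of all maxlens and the min of all nonzero
	 * minlens; e.g. if the PF requests maxlen 1500 but one of its VFs
	 * earlier set 9000, the link stays at 9000.
	 */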
4564 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4565 	pfvf->maxlen = req->maxlen;
4566 	if (req->update_minlen)
4567 		pfvf->minlen = req->minlen;
4568 
4569 	maxlen = req->maxlen;
4570 	minlen = req->update_minlen ? req->minlen : 0;
4571 
4572 	/* Get this PF's numVFs and starting hwvf */
4573 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
4574 
4575 	/* For each VF, compare requested max/minlen */
4576 	for (vf = 0; vf < numvfs; vf++) {
4577 		pfvf =  &rvu->hwvf[hwvf + vf];
4578 		if (pfvf->maxlen > maxlen)
4579 			maxlen = pfvf->maxlen;
4580 		if (req->update_minlen &&
4581 		    pfvf->minlen && pfvf->minlen < minlen)
4582 			minlen = pfvf->minlen;
4583 	}
4584 
4585 	/* Compare requested max/minlen with PF's max/minlen */
4586 	pfvf = &rvu->pf[pf];
4587 	if (pfvf->maxlen > maxlen)
4588 		maxlen = pfvf->maxlen;
4589 	if (req->update_minlen &&
4590 	    pfvf->minlen && pfvf->minlen < minlen)
4591 		minlen = pfvf->minlen;
4592 
4593 	/* Update the request with the max/min across the PF and its VFs */
4594 	req->maxlen = maxlen;
4595 	if (req->update_minlen)
4596 		req->minlen = minlen;
4597 }
4598 
4599 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
4600 				    struct msg_rsp *rsp)
4601 {
4602 	struct rvu_hwinfo *hw = rvu->hw;
4603 	u16 pcifunc = req->hdr.pcifunc;
4604 	int pf = rvu_get_pf(pcifunc);
4605 	int blkaddr, link = -1;
4606 	struct nix_hw *nix_hw;
4607 	struct rvu_pfvf *pfvf;
4608 	u8 cgx = 0, lmac = 0;
4609 	u16 max_mtu;
4610 	u64 cfg;
4611 
4612 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4613 	if (blkaddr < 0)
4614 		return NIX_AF_ERR_AF_LF_INVALID;
4615 
4616 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4617 	if (!nix_hw)
4618 		return NIX_AF_ERR_INVALID_NIXBLK;
4619 
4620 	if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc))
4621 		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
4622 	else
4623 		rvu_get_lmac_link_max_frs(rvu, &max_mtu);
4624 
4625 	if (!req->sdp_link && req->maxlen > max_mtu)
4626 		return NIX_AF_ERR_FRS_INVALID;
4627 
4628 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
4629 		return NIX_AF_ERR_FRS_INVALID;
4630 
4631 	/* Check if config is for SDP link */
4632 	if (req->sdp_link) {
4633 		if (!hw->sdp_links)
4634 			return NIX_AF_ERR_RX_LINK_INVALID;
4635 		link = hw->cgx_links + hw->lbk_links;
4636 		goto linkcfg;
4637 	}
4638 
4639 	/* Check if the request is from CGX mapped RVU PF */
4640 	if (is_pf_cgxmapped(rvu, pf)) {
4641 		/* Get CGX and LMAC to which this PF is mapped and find link */
4642 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
4643 		link = (cgx * hw->lmac_per_cgx) + lmac;
4644 	} else if (pf == 0) {
4645 		/* For VFs of PF0 ingress is LBK port, so config LBK link */
4646 		pfvf = rvu_get_pfvf(rvu, pcifunc);
4647 		link = hw->cgx_links + pfvf->lbkid;
4648 	} else if (is_rep_dev(rvu, pcifunc)) {
4649 		link = hw->cgx_links + 0;
4650 	}
4651 
4652 	if (link < 0)
4653 		return NIX_AF_ERR_RX_LINK_INVALID;
4654 
4655 linkcfg:
4656 	nix_find_link_frs(rvu, req, pcifunc);
4657 
4658 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
4659 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
4660 	if (req->update_minlen)
4661 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
4662 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
4663 
4664 	return 0;
4665 }
4666 
4667 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
4668 				    struct msg_rsp *rsp)
4669 {
4670 	int nixlf, blkaddr, err;
4671 	u64 cfg;
4672 
4673 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
4674 	if (err)
4675 		return err;
4676 
4677 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
4678 	/* Set the interface configuration */
4679 	if (req->len_verify & BIT(0))
4680 		cfg |= BIT_ULL(41);
4681 	else
4682 		cfg &= ~BIT_ULL(41);
4683 
4684 	if (req->len_verify & BIT(1))
4685 		cfg |= BIT_ULL(40);
4686 	else
4687 		cfg &= ~BIT_ULL(40);
4688 
4689 	if (req->len_verify & NIX_RX_DROP_RE)
4690 		cfg |= BIT_ULL(32);
4691 	else
4692 		cfg &= ~BIT_ULL(32);
4693 
4694 	if (req->csum_verify & BIT(0))
4695 		cfg |= BIT_ULL(37);
4696 	else
4697 		cfg &= ~BIT_ULL(37);
4698 
4699 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
4700 
4701 	return 0;
4702 }
4703 
4704 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
4705 {
4706 	return 1600; /* 16 credits per Gbps of the max 100 Gbps LBK datarate */
4707 }
4708 
4709 static void nix_link_config(struct rvu *rvu, int blkaddr,
4710 			    struct nix_hw *nix_hw)
4711 {
4712 	struct rvu_hwinfo *hw = rvu->hw;
4713 	int cgx, lmac_cnt, slink, link;
4714 	u16 lbk_max_frs, lmac_max_frs;
4715 	unsigned long lmac_bmap;
4716 	u64 tx_credits, cfg;
4717 	u64 lmac_fifo_len;
4718 	int iter;
4719 
4720 	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
4721 	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
4722 
4723 	/* Set SDP link credit */
4724 	rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT);
4725 
4726 	/* Set default min/max packet lengths allowed on NIX Rx links.
4727 	 *
4728 	 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
4729 	 * as undersize and report them to SW as error pkts, hence
4730 	 * minlen is set to 40 bytes.
4731 	 */
4732 	for (link = 0; link < hw->cgx_links; link++) {
4733 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4734 				((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
4735 	}
4736 
4737 	for (link = hw->cgx_links; link < (hw->cgx_links + hw->lbk_links); link++) {
4738 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4739 			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
4740 	}
4741 	if (hw->sdp_links) {
4742 		link = hw->cgx_links + hw->lbk_links;
4743 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4744 			    SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS);
4745 	}
4746 
4747 	/* Get MCS external bypass status for CN10K-B */
4748 	if (mcs_get_blkcnt() == 1) {
4749 		/* Adjust for 2 credits when external bypass is disabled */
4750 		nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
4751 	}
4752 
4753 	/* Set credits for Tx links assuming max packet length allowed.
4754 	 * This will be reconfigured based on MTU set for PF/VF.
4755 	 */
4756 	for (cgx = 0; cgx < hw->cgx; cgx++) {
4757 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
4758 		/* Skip when cgx is not available or lmac cnt is zero */
4759 		if (lmac_cnt <= 0)
4760 			continue;
4761 		slink = cgx * hw->lmac_per_cgx;
4762 
4763 		/* Get LMAC id's from bitmap */
4764 		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
4765 		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
4766 			lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
4767 			if (!lmac_fifo_len) {
4768 				dev_err(rvu->dev,
4769 					"%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4770 					__func__, cgx, iter);
4771 				continue;
4772 			}
4773 			tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
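			/* Worked example (FIFO split and max FRS assumed):
			 * a 48 KiB LMAC FIFO with a 9212-byte max frame
			 * leaves (49152 - 9212) / 16 = 2496 credits (integer
			 * division) of 16 bytes each for in-flight TX data.
			 */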
4774 			/* Enable credits and set credit pkt count to max allowed */
4775 			cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4776 			cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
4777 
4778 			link = iter + slink;
4779 			nix_hw->tx_credits[link] = tx_credits;
4780 			rvu_write64(rvu, blkaddr,
4781 				    NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4782 		}
4783 	}
4784 
4785 	/* Set Tx credits for LBK link */
4786 	slink = hw->cgx_links;
4787 	for (link = slink; link < (slink + hw->lbk_links); link++) {
4788 		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
4789 		nix_hw->tx_credits[link] = tx_credits;
4790 		/* Enable credits and set credit pkt count to max allowed */
4791 		tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4792 		rvu_write64(rvu, blkaddr,
4793 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
4794 	}
4795 }
4796 
4797 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
4798 {
4799 	int idx, err;
4800 	u64 status;
4801 
4802 	/* Start X2P bus calibration */
4803 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4804 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
4805 	/* Wait for calibration to complete */
4806 	err = rvu_poll_reg(rvu, blkaddr,
4807 			   NIX_AF_STATUS, BIT_ULL(10), false);
4808 	if (err) {
4809 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
4810 		return err;
4811 	}
4812 
4813 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
4814 	/* Check if CGX devices are ready */
4815 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
4816 		/* Skip when cgx port is not available */
4817 		if (!rvu_cgx_pdata(idx, rvu) ||
4818 		    (status & (BIT_ULL(16 + idx))))
4819 			continue;
4820 		dev_err(rvu->dev,
4821 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
4822 		err = -EBUSY;
4823 	}
4824 
4825 	/* Check if LBK is ready */
4826 	if (!(status & BIT_ULL(19))) {
4827 		dev_err(rvu->dev,
4828 			"LBK didn't respond to NIX X2P calibration\n");
4829 		err = -EBUSY;
4830 	}
4831 
4832 	/* Clear 'calibrate_x2p' bit */
4833 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4834 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
4835 	if (err || (status & 0x3FFULL))
4836 		dev_err(rvu->dev,
4837 			"NIX X2P calibration failed, status 0x%llx\n", status);
4838 	if (err)
4839 		return err;
4840 	return 0;
4841 }
4842 
4843 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
4844 {
4845 	u64 cfg;
4846 	int err;
4847 
4848 	/* Set admin queue endianness */
4849 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
4850 #ifdef __BIG_ENDIAN
4851 	cfg |= BIT_ULL(8);
4852 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4853 #else
4854 	cfg &= ~BIT_ULL(8);
4855 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4856 #endif
4857 
4858 	/* Do not bypass NDC cache */
4859 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
4860 	cfg &= ~0x3FFEULL;
4861 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
4862 	/* Disable caching of SQB aka SQEs */
4863 	cfg |= 0x04ULL;
4864 #endif
4865 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
4866 
4867 	/* Result structure can be followed by RQ/SQ/CQ context at
4868 	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
4869 	 * operation type. Alloc sufficient result memory for all operations.
4870 	 */
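	/* Per-entry result size sketch: ALIGN(res, 128) places the result
	 * itself on a 128-byte boundary, and the extra 256 bytes cover the
	 * optional context copy at RES + 128 and write mask at RES + 256.
	 */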
4871 	err = rvu_aq_alloc(rvu, &block->aq,
4872 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
4873 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
4874 	if (err)
4875 		return err;
4876 
4877 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
4878 	rvu_write64(rvu, block->addr,
4879 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
4880 	return 0;
4881 }
4882 
4883 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
4884 {
4885 	struct rvu_hwinfo *hw = rvu->hw;
4886 	u64 hw_const;
4887 
4888 	hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
4889 
4890 	/* On OcteonTx2 DWRR quantum is directly configured into each of
4891 	 * the transmit scheduler queues. And PF/VF drivers were free to
4892 	 * config any value up to 2^24.
4893 	 * On CN10K the HW is modified: the quantum configuration at scheduler
4894 	 * queues is in terms of weight. And SW needs to setup a base DWRR MTU
4895 	 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
4896 	 * 'DWRR MTU * weight' to get the quantum.
4897 	 *
4898 	 * Check if HW uses a common MTU for all DWRR quantum configs.
4899 	 * On OcteonTx2 this register field is '0'.
4900 	 */
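	/* Mask decode for the check below (inferred from the constants):
	 * bit 60 of NIX_AF_CONST1 (0x10 at shift 56) advertises the common
	 * DWRR MTU register, while bit 61 advertises per-link-type DWRR
	 * MTU registers and takes precedence.
	 */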
4901 	if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
4902 		hw->cap.nix_common_dwrr_mtu = true;
4903 
4904 	if (hw_const & BIT_ULL(61))
4905 		hw->cap.nix_multiple_dwrr_mtu = true;
4906 }
4907 
4908 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
4909 {
4910 	const struct npc_lt_def_cfg *ltdefs;
4911 	struct rvu_hwinfo *hw = rvu->hw;
4912 	int blkaddr = nix_hw->blkaddr;
4913 	struct rvu_block *block;
4914 	int err;
4915 	u64 cfg;
4916 
4917 	block = &hw->block[blkaddr];
4918 
4919 	if (is_rvu_96xx_B0(rvu)) {
4920 		/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
4921 		 * internal state when conditional clocks are turned off.
4922 		 * Hence enable them.
4923 		 */
4924 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4925 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
4926 	}
4927 
4928 	/* Set chan/link to backpressure TL3 instead of TL2 */
4929 	rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
4930 
4931 	/* Disable SQ manager's sticky mode operation (set TM6 = 0)
4932 	 * This sticky mode is known to cause SQ stalls when multiple
4933 	 * SQs are mapped to same SMQ and transmitting pkts at a time.
4934 	 */
4935 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
4936 	cfg &= ~BIT_ULL(15);
4937 	rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
4938 
4939 	ltdefs = rvu->kpu.lt_def;
4940 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
4941 	err = nix_calibrate_x2p(rvu, blkaddr);
4942 	if (err)
4943 		return err;
4944 
4945 	/* Setup capabilities of the NIX block */
4946 	rvu_nix_setup_capabilities(rvu, blkaddr);
4947 
4948 	/* Initialize admin queue */
4949 	err = nix_aq_init(rvu, block);
4950 	if (err)
4951 		return err;
4952 
4953 	/* Restore CINT timer delay to HW reset values */
4954 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
4955 
4956 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
4957 
4958 	/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
4959 	cfg |= 1ULL;
4960 	if (!is_rvu_otx2(rvu))
4961 		cfg |= NIX_PTP_1STEP_EN;
4962 
4963 	rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
4964 
4965 	if (!is_rvu_otx2(rvu))
4966 		rvu_nix_block_cn10k_init(rvu, nix_hw);
4967 
4968 	if (is_block_implemented(hw, blkaddr)) {
4969 		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
4970 		if (err)
4971 			return err;
4972 
4973 		err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
4974 		if (err)
4975 			return err;
4976 
4977 		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
4978 		if (err)
4979 			return err;
4980 
4981 		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
4982 		if (err)
4983 			return err;
4984 
4985 		err = nix_setup_txvlan(rvu, nix_hw);
4986 		if (err)
4987 			return err;
4988 
4989 		err = nix_setup_bpids(rvu, nix_hw, blkaddr);
4990 		if (err)
4991 			return err;
4992 
4993 		/* Configure segmentation offload formats */
4994 		nix_setup_lso(rvu, nix_hw, blkaddr);
4995 
4996 		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
4997 		 * This helps HW protocol checker to identify headers
4998 		 * and validate length and checksums.
4999 		 */
5000 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
5001 			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
5002 			    ltdefs->rx_ol2.ltype_mask);
5003 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
5004 			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
5005 			    ltdefs->rx_oip4.ltype_mask);
5006 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
5007 			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
5008 			    ltdefs->rx_iip4.ltype_mask);
5009 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
5010 			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
5011 			    ltdefs->rx_oip6.ltype_mask);
5012 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
5013 			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
5014 			    ltdefs->rx_iip6.ltype_mask);
5015 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
5016 			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
5017 			    ltdefs->rx_otcp.ltype_mask);
5018 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
5019 			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
5020 			    ltdefs->rx_itcp.ltype_mask);
5021 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
5022 			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
5023 			    ltdefs->rx_oudp.ltype_mask);
5024 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
5025 			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
5026 			    ltdefs->rx_iudp.ltype_mask);
5027 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
5028 			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
5029 			    ltdefs->rx_osctp.ltype_mask);
5030 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
5031 			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
5032 			    ltdefs->rx_isctp.ltype_mask);
5033 
5034 		if (!is_rvu_otx2(rvu)) {
5035 			/* Enable APAD calculation for other protocols
5036 			 * matching APAD0 and APAD1 lt def registers.
5037 			 */
5038 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
5039 				    (ltdefs->rx_apad0.valid << 11) |
5040 				    (ltdefs->rx_apad0.lid << 8) |
5041 				    (ltdefs->rx_apad0.ltype_match << 4) |
5042 				    ltdefs->rx_apad0.ltype_mask);
5043 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
5044 				    (ltdefs->rx_apad1.valid << 11) |
5045 				    (ltdefs->rx_apad1.lid << 8) |
5046 				    (ltdefs->rx_apad1.ltype_match << 4) |
5047 				    ltdefs->rx_apad1.ltype_mask);
5048 
5049 		/* Receive ethertype definition register defines layer
5050 			 * information in NPC_RESULT_S to identify the Ethertype
5051 			 * location in L2 header. Used for Ethertype overwriting
5052 			 * in inline IPsec flow.
5053 			 */
5054 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
5055 				    (ltdefs->rx_et[0].offset << 12) |
5056 				    (ltdefs->rx_et[0].valid << 11) |
5057 				    (ltdefs->rx_et[0].lid << 8) |
5058 				    (ltdefs->rx_et[0].ltype_match << 4) |
5059 				    ltdefs->rx_et[0].ltype_mask);
5060 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
5061 				    (ltdefs->rx_et[1].offset << 12) |
5062 				    (ltdefs->rx_et[1].valid << 11) |
5063 				    (ltdefs->rx_et[1].lid << 8) |
5064 				    (ltdefs->rx_et[1].ltype_match << 4) |
5065 				    ltdefs->rx_et[1].ltype_mask);
5066 		}
5067 
5068 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
5069 		if (err)
5070 			return err;
5071 
5072 		nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
5073 					     sizeof(u64), GFP_KERNEL);
5074 		if (!nix_hw->tx_credits)
5075 			return -ENOMEM;
5076 
5077 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
5078 		nix_link_config(rvu, blkaddr, nix_hw);
5079 
5080 		/* Enable Channel backpressure */
5081 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
5082 	}
5083 	return 0;
5084 }
5085 
5086 int rvu_nix_init(struct rvu *rvu)
5087 {
5088 	struct rvu_hwinfo *hw = rvu->hw;
5089 	struct nix_hw *nix_hw;
5090 	int blkaddr = 0, err;
5091 	int i = 0;
5092 
5093 	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
5094 			       GFP_KERNEL);
5095 	if (!hw->nix)
5096 		return -ENOMEM;
5097 
5098 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5099 	while (blkaddr) {
5100 		nix_hw = &hw->nix[i];
5101 		nix_hw->rvu = rvu;
5102 		nix_hw->blkaddr = blkaddr;
5103 		err = rvu_nix_block_init(rvu, nix_hw);
5104 		if (err)
5105 			return err;
5106 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5107 		i++;
5108 	}
5109 
5110 	return 0;
5111 }
5112 
5113 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
5114 				  struct rvu_block *block)
5115 {
5116 	struct nix_txsch *txsch;
5117 	struct nix_mcast *mcast;
5118 	struct nix_txvlan *vlan;
5119 	struct nix_hw *nix_hw;
5120 	int lvl;
5121 
5122 	rvu_aq_free(rvu, block->aq);
5123 
5124 	if (is_block_implemented(rvu->hw, blkaddr)) {
5125 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
5126 		if (!nix_hw)
5127 			return;
5128 
5129 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
5130 			txsch = &nix_hw->txsch[lvl];
5131 			kfree(txsch->schq.bmap);
5132 		}
5133 
5134 		kfree(nix_hw->tx_credits);
5135 
5136 		nix_ipolicer_freemem(rvu, nix_hw);
5137 
5138 		vlan = &nix_hw->txvlan;
5139 		kfree(vlan->rsrc.bmap);
5140 		mutex_destroy(&vlan->rsrc_lock);
5141 
5142 		mcast = &nix_hw->mcast;
5143 		qmem_free(rvu->dev, mcast->mce_ctx);
5144 		qmem_free(rvu->dev, mcast->mcast_buf);
5145 		mutex_destroy(&mcast->mce_lock);
5146 	}
5147 }
5148 
5149 void rvu_nix_freemem(struct rvu *rvu)
5150 {
5151 	struct rvu_hwinfo *hw = rvu->hw;
5152 	struct rvu_block *block;
5153 	int blkaddr = 0;
5154 
5155 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5156 	while (blkaddr) {
5157 		block = &hw->block[blkaddr];
5158 		rvu_nix_block_freemem(rvu, blkaddr, block);
5159 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5160 	}
5161 }
5162 
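/* Rewrite the NPC MCAM rule action of a multicast/mirror group so that
 * it points at the group's current MCE list start index. Ingress groups
 * patch the RX action, egress groups the TX action.
 */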
5163 static void nix_mcast_update_action(struct rvu *rvu,
5164 				    struct nix_mcast_grp_elem *elem)
5165 {
5166 	struct npc_mcam *mcam = &rvu->hw->mcam;
5167 	struct nix_rx_action rx_action = { 0 };
5168 	struct nix_tx_action tx_action = { 0 };
5169 	int npc_blkaddr;
5170 
5171 	npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
5172 	if (elem->dir == NIX_MCAST_INGRESS) {
5173 		*(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam,
5174 							 npc_blkaddr,
5175 							 elem->mcam_index);
5176 		rx_action.index = elem->mce_start_index;
5177 		npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
5178 				    *(u64 *)&rx_action);
5179 	} else {
5180 		*(u64 *)&tx_action = npc_get_mcam_action(rvu, mcam,
5181 							 npc_blkaddr,
5182 							 elem->mcam_index);
5183 		tx_action.index = elem->mce_start_index;
5184 		npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
5185 				    *(u64 *)&tx_action);
5186 	}
5187 }
5188 
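/* Mark the given pcifunc active/inactive in every multicast/mirror
 * group it is a member of, re-sync each group's MCE list to HW and
 * refresh the MCE index used by the group's NPC rule.
 */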
5189 static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active)
5190 {
5191 	struct nix_mcast_grp_elem *elem;
5192 	struct nix_mcast_grp *mcast_grp;
5193 	struct nix_hw *nix_hw;
5194 	int blkaddr;
5195 
5196 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5197 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
5198 	if (!nix_hw)
5199 		return;
5200 
5201 	mcast_grp = &nix_hw->mcast_grp;
5202 
5203 	mutex_lock(&mcast_grp->mcast_grp_lock);
5204 	list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) {
5205 		struct nix_mce_list *mce_list;
5206 		struct mce *mce;
5207 
5208 		/* Iterate the group elements and update the state of the
5209 		 * element which received the enable/disable request.
5210 		 */
5211 		mce_list = &elem->mcast_mce_list;
5212 		hlist_for_each_entry(mce, &mce_list->head, node) {
5213 			if (mce->pcifunc == pcifunc) {
5214 				mce->is_active = is_active;
5215 				break;
5216 			}
5217 		}
5218 
5219 		/* Dump the updated list to HW */
5220 		if (elem->dir == NIX_MCAST_INGRESS)
5221 			nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
5222 		else
5223 			nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
5224 
5225 		/* Update the multicast index in NPC rule */
5226 		nix_mcast_update_action(rvu, elem);
5227 	}
5228 	mutex_unlock(&mcast_grp->mcast_grp_lock);
5229 }
5230 
5231 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
5232 				     struct msg_rsp *rsp)
5233 {
5234 	u16 pcifunc = req->hdr.pcifunc;
5235 	struct rvu_pfvf *pfvf;
5236 	int nixlf, err, pf;
5237 
5238 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
5239 	if (err)
5240 		return err;
5241 
5242 	/* Enable the interface if it is in any multicast list */
5243 	nix_mcast_update_mce_entry(rvu, pcifunc, 1);
5244 
5245 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
5246 
5247 	npc_mcam_enable_flows(rvu, pcifunc);
5248 
5249 	pfvf = rvu_get_pfvf(rvu, pcifunc);
5250 	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
5251 
5252 	rvu_switch_update_rules(rvu, pcifunc, true);
5253 
5254 	pf = rvu_get_pf(pcifunc);
5255 	if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
5256 		rvu_rep_notify_pfvf_state(rvu, pcifunc, true);
5257 
5258 	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
5259 }
5260 
5261 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
5262 				    struct msg_rsp *rsp)
5263 {
5264 	u16 pcifunc = req->hdr.pcifunc;
5265 	struct rvu_pfvf *pfvf;
5266 	int nixlf, err, pf;
5267 
5268 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
5269 	if (err)
5270 		return err;
5271 
5272 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
5273 	/* Disable the interface if it is in any multicast list */
5274 	nix_mcast_update_mce_entry(rvu, pcifunc, 0);
5275 
5277 	pfvf = rvu_get_pfvf(rvu, pcifunc);
5278 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
5279 
5280 	err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
5281 	if (err)
5282 		return err;
5283 
5284 	rvu_switch_update_rules(rvu, pcifunc, false);
5285 	rvu_cgx_tx_enable(rvu, pcifunc, true);
5286 
5287 	pf = rvu_get_pf(pcifunc);
5288 	if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
5289 		rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
5290 	return 0;
5291 }
5292 
5293 #define RX_SA_BASE  GENMASK_ULL(52, 7)
5294 
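/* Tear down a NIX LF completely: NPC MCAM entries, TX schedulers,
 * SQ/RQ/CQ HW contexts, PTP and flow control configuration, queue
 * context memory and bandwidth profiles are all released. If an inline
 * IPsec SA base was programmed, pending CPT contexts are flushed too.
 */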
5295 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
5296 {
5297 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
5298 	struct hwctx_disable_req ctx_req;
5299 	int pf = rvu_get_pf(pcifunc);
5300 	struct mac_ops *mac_ops;
5301 	u8 cgx_id, lmac_id;
5302 	u64 sa_base;
5303 	void *cgxd;
5304 	int err;
5305 
5306 	ctx_req.hdr.pcifunc = pcifunc;
5307 
5308 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
5309 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
5310 	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
5311 	nix_interface_deinit(rvu, pcifunc, nixlf);
5312 	nix_rx_sync(rvu, blkaddr);
5313 	nix_txschq_free(rvu, pcifunc);
5314 
5315 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
5316 
5317 	if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
5318 		rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
5319 
5320 	rvu_cgx_start_stop_io(rvu, pcifunc, false);
5321 
5322 	if (pfvf->sq_ctx) {
5323 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
5324 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
5325 		if (err)
5326 			dev_err(rvu->dev, "SQ ctx disable failed\n");
5327 	}
5328 
5329 	if (pfvf->rq_ctx) {
5330 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
5331 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
5332 		if (err)
5333 			dev_err(rvu->dev, "RQ ctx disable failed\n");
5334 	}
5335 
5336 	if (pfvf->cq_ctx) {
5337 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
5338 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
5339 		if (err)
5340 			dev_err(rvu->dev, "CQ ctx disable failed\n");
5341 	}
5342 
5343 	/* reset HW config done for Switch headers */
5344 	rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
5345 			       (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
5346 
5347 	/* Disable CGX and NPC config done for PTP */
5348 	if (pfvf->hw_rx_tstamp_en) {
5349 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
5350 		cgxd = rvu_cgx_pdata(cgx_id, rvu);
5351 		mac_ops = get_mac_ops(cgxd);
5352 		mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
5353 		/* Undo NPC config done for PTP */
5354 		if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
5355 			dev_err(rvu->dev, "NPC config for PTP failed\n");
5356 		pfvf->hw_rx_tstamp_en = false;
5357 	}
5358 
5359 	/* reset priority flow control config */
5360 	rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
5361 
5362 	/* reset 802.3x flow control config */
5363 	rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
5364 
5365 	nix_ctx_free(rvu, pfvf);
5366 
5367 	nix_free_all_bandprof(rvu, pcifunc);
5368 
5369 	sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
5370 	if (FIELD_GET(RX_SA_BASE, sa_base)) {
5371 		err = rvu_cpt_ctx_flush(rvu, pcifunc);
5372 		if (err)
5373 			dev_err(rvu->dev,
5374 				"CPT ctx flush failed with error: %d\n", err);
5375 	}
5376 }
5377 
5378 #define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)
5379 
5380 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
5381 {
5382 	struct rvu_hwinfo *hw = rvu->hw;
5383 	struct rvu_block *block;
5384 	int blkaddr, pf;
5385 	int nixlf;
5386 	u64 cfg;
5387 
5388 	pf = rvu_get_pf(pcifunc);
5389 	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
5390 		return 0;
5391 
5392 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5393 	if (blkaddr < 0)
5394 		return NIX_AF_ERR_AF_LF_INVALID;
5395 
5396 	block = &hw->block[blkaddr];
5397 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
5398 	if (nixlf < 0)
5399 		return NIX_AF_ERR_AF_LF_INVALID;
5400 
5401 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
5402 
5403 	if (enable)
5404 		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
5405 	else
5406 		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
5407 
5408 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
5409 
5410 	return 0;
5411 }
5412 
5413 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
5414 					  struct msg_rsp *rsp)
5415 {
5416 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
5417 }
5418 
5419 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
5420 					   struct msg_rsp *rsp)
5421 {
5422 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
5423 }
5424 
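/* Allocate an LSO format for a NIX LF. If an already programmed format
 * matches all requested fields it is reused, otherwise the next free
 * format index is programmed with the requested fields.
 */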
5425 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
5426 					struct nix_lso_format_cfg *req,
5427 					struct nix_lso_format_cfg_rsp *rsp)
5428 {
5429 	u16 pcifunc = req->hdr.pcifunc;
5430 	struct nix_hw *nix_hw;
5431 	struct rvu_pfvf *pfvf;
5432 	int blkaddr, idx, f;
5433 	u64 reg;
5434 
5435 	pfvf = rvu_get_pfvf(rvu, pcifunc);
5436 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5437 	if (!pfvf->nixlf || blkaddr < 0)
5438 		return NIX_AF_ERR_AF_LF_INVALID;
5439 
5440 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
5441 	if (!nix_hw)
5442 		return NIX_AF_ERR_INVALID_NIXBLK;
5443 
5444 	/* Find existing matching LSO format, if any */
5445 	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
5446 		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
5447 			reg = rvu_read64(rvu, blkaddr,
5448 					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
5449 			if (req->fields[f] != (reg & req->field_mask))
5450 				break;
5451 		}
5452 
5453 		if (f == NIX_LSO_FIELD_MAX)
5454 			break;
5455 	}
5456 
5457 	if (idx < nix_hw->lso.in_use) {
5458 		/* Match found */
5459 		rsp->lso_format_idx = idx;
5460 		return 0;
5461 	}
5462 
5463 	if (nix_hw->lso.in_use == nix_hw->lso.total)
5464 		return NIX_AF_ERR_LSO_CFG_FAIL;
5465 
5466 	rsp->lso_format_idx = nix_hw->lso.in_use++;
5467 
5468 	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
5469 		rvu_write64(rvu, blkaddr,
5470 			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
5471 			    req->fields[f]);
5472 
5473 	return 0;
5474 }
5475 
5476 #define IPSEC_GEN_CFG_EGRP    GENMASK_ULL(50, 48)
5477 #define IPSEC_GEN_CFG_OPCODE  GENMASK_ULL(47, 32)
5478 #define IPSEC_GEN_CFG_PARAM1  GENMASK_ULL(31, 16)
5479 #define IPSEC_GEN_CFG_PARAM2  GENMASK_ULL(15, 0)
5480 
5481 #define CPT_INST_QSEL_BLOCK   GENMASK_ULL(28, 24)
5482 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
5483 #define CPT_INST_QSEL_SLOT    GENMASK_ULL(7, 0)
5484 
5485 #define CPT_INST_CREDIT_TH    GENMASK_ULL(53, 32)
5486 #define CPT_INST_CREDIT_BPID  GENMASK_ULL(30, 22)
5487 #define CPT_INST_CREDIT_CNT   GENMASK_ULL(21, 0)
5488 
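/* Program the NIX AF side of inline IPsec RX: the common engine
 * group/opcode config, the CPT queue that receives inline
 * instructions, and the instruction credit count with its
 * backpressure ID and threshold. Disabling clears the first two
 * and restores the credit count.
 */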
5489 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
5490 				 int blkaddr)
5491 {
5492 	u8 cpt_idx, cpt_blkaddr;
5493 	u64 val;
5494 
5495 	cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
5496 	if (req->enable) {
5497 		val = 0;
5498 		/* Enable context prefetching */
5499 		if (!is_rvu_otx2(rvu))
5500 			val |= BIT_ULL(51);
5501 
5502 		/* Set OPCODE and EGRP */
5503 		val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
5504 		val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
5505 		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
5506 		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
5507 
5508 		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
5509 
5510 		/* Set CPT queue for inline IPSec */
5511 		val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
5512 		val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
5513 				  req->inst_qsel.cpt_pf_func);
5514 
5515 		if (!is_rvu_otx2(rvu)) {
5516 			cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
5517 						       BLKADDR_CPT1;
5518 			val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
5519 		}
5520 
5521 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5522 			    val);
5523 
5524 		/* Set CPT credit */
5525 		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5526 		if ((val & 0x3FFFFF) != 0x3FFFFF)
5527 			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5528 				    0x3FFFFF - val);
5529 
5530 		val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
5531 		val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
5532 		val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
5533 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
5534 	} else {
5535 		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
5536 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5537 			    0x0);
5538 		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5539 		if ((val & 0x3FFFFF) != 0x3FFFFF)
5540 			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5541 				    0x3FFFFF - val);
5542 	}
5543 }
5544 
5545 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
5546 					  struct nix_inline_ipsec_cfg *req,
5547 					  struct msg_rsp *rsp)
5548 {
5549 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5550 		return 0;
5551 
5552 	nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
5553 	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
5554 		nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
5555 
5556 	return 0;
5557 }
5558 
5559 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
5560 					       struct msg_req *req,
5561 					       struct nix_inline_ipsec_cfg *rsp)
5563 {
5564 	u64 val;
5565 
5566 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5567 		return 0;
5568 
5569 	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
5570 	rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
5571 	rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
5572 	rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
5573 	rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
5574 
5575 	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
5576 	rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
5577 	rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
5578 	rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
5579 
5580 	return 0;
5581 }
5582 
5583 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
5584 					     struct nix_inline_ipsec_lf_cfg *req,
5585 					     struct msg_rsp *rsp)
5586 {
5587 	int lf, blkaddr, err;
5588 	u64 val;
5589 
5590 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5591 		return 0;
5592 
5593 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
5594 	if (err)
5595 		return err;
5596 
5597 	if (req->enable) {
5598 		/* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
5599 		val = (u64)req->ipsec_cfg0.tt << 44 |
5600 		      (u64)req->ipsec_cfg0.tag_const << 20 |
5601 		      (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
5602 		      req->ipsec_cfg0.lenm1_max;
5603 
5604 		if (blkaddr == BLKADDR_NIX1)
5605 			val |= BIT_ULL(46);
5606 
5607 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
5608 
5609 		/* Set SA_IDX_W and SA_IDX_MAX */
5610 		val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
5611 		      req->ipsec_cfg1.sa_idx_max;
5612 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
5613 
5614 		/* Set SA base address */
5615 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5616 			    req->sa_base_addr);
5617 	} else {
5618 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
5619 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
5620 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5621 			    0x0);
5622 	}
5623 
5624 	return 0;
5625 }
5626 
5627 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
5628 {
5629 	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
5630 
5631 	/* overwrite vf mac address with default_mac */
5632 	if (from_vf)
5633 		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
5634 }
5635 
5636 /* NIX ingress policers or bandwidth profiles APIs */
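/* Tell the policer where to pick pre-color fields from: PCP/DEI in
 * outer and inner VLAN tags, DSCP in outer and tunneled IPv4/IPv6
 * headers.
 */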
5637 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
5638 {
5639 	struct npc_lt_def_cfg defs, *ltdefs;
5640 
5641 	ltdefs = &defs;
5642 	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
5643 
5644 	/* Extract PCP and DEI fields from the outer VLAN at byte offset
5645 	 * 2 from the start of LB_PTR (i.e. TAG).
5646 	 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
5647 	 * fields are considered when 'Tunnel enable' is set in profile.
5648 	 */
5649 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
5650 		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
5651 		    (ltdefs->ovlan.ltype_match << 4) |
5652 		    ltdefs->ovlan.ltype_mask);
5653 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
5654 		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
5655 		    (ltdefs->ivlan.ltype_match << 4) |
5656 		    ltdefs->ivlan.ltype_mask);
5657 
5658 	/* DSCP field in outer and tunneled IPv4 packets */
5659 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
5660 		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
5661 		    (ltdefs->rx_oip4.ltype_match << 4) |
5662 		    ltdefs->rx_oip4.ltype_mask);
5663 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
5664 		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
5665 		    (ltdefs->rx_iip4.ltype_match << 4) |
5666 		    ltdefs->rx_iip4.ltype_mask);
5667 
5668 	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
5669 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
5670 		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
5671 		    (ltdefs->rx_oip6.ltype_match << 4) |
5672 		    ltdefs->rx_oip6.ltype_mask);
5673 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
5674 		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
5675 		    (ltdefs->rx_iip6.ltype_match << 4) |
5676 		    ltdefs->rx_iip6.ltype_mask);
5677 }
5678 
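/* Submit an INIT op for one bandwidth profile context. The AQ qidx
 * encodes the target profile: bits [13:0] carry the profile index,
 * bits [15:14] the layer (leaf/mid/top).
 */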
5679 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
5680 				    int layer, int prof_idx)
5681 {
5682 	struct nix_cn10k_aq_enq_req aq_req;
5683 	int rc;
5684 
5685 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5686 
5687 	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
5688 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5689 	aq_req.op = NIX_AQ_INSTOP_INIT;
5690 
5691 	/* Context is all zeros, submit to AQ */
5692 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5693 				     (struct nix_aq_enq_req *)&aq_req, NULL);
5694 	if (rc)
5695 		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
5696 			layer, prof_idx);
5697 	return rc;
5698 }
5699 
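/* Probe and set up ingress policing. NIX_AF_CONST bit 61 advertises
 * bandwidth profile support; NIX_AF_PL_CONST provides the per-layer
 * profile counts used to size the allocator bitmaps, ownership maps
 * and mid layer reference counts.
 */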
5700 static int nix_setup_ipolicers(struct rvu *rvu,
5701 			       struct nix_hw *nix_hw, int blkaddr)
5702 {
5703 	struct rvu_hwinfo *hw = rvu->hw;
5704 	struct nix_ipolicer *ipolicer;
5705 	int err, layer, prof_idx;
5706 	u64 cfg;
5707 
5708 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
5709 	if (!(cfg & BIT_ULL(61))) {
5710 		hw->cap.ipolicer = false;
5711 		return 0;
5712 	}
5713 
5714 	hw->cap.ipolicer = true;
5715 	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
5716 					sizeof(*ipolicer), GFP_KERNEL);
5717 	if (!nix_hw->ipolicer)
5718 		return -ENOMEM;
5719 
5720 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
5721 
5722 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5723 		ipolicer = &nix_hw->ipolicer[layer];
5724 		switch (layer) {
5725 		case BAND_PROF_LEAF_LAYER:
5726 			ipolicer->band_prof.max = cfg & 0XFFFF;
5727 			break;
5728 		case BAND_PROF_MID_LAYER:
5729 			ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
5730 			break;
5731 		case BAND_PROF_TOP_LAYER:
5732 			ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
5733 			break;
5734 		}
5735 
5736 		if (!ipolicer->band_prof.max)
5737 			continue;
5738 
5739 		err = rvu_alloc_bitmap(&ipolicer->band_prof);
5740 		if (err)
5741 			return err;
5742 
5743 		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
5744 						  ipolicer->band_prof.max,
5745 						  sizeof(u16), GFP_KERNEL);
5746 		if (!ipolicer->pfvf_map)
5747 			return -ENOMEM;
5748 
5749 		ipolicer->match_id = devm_kcalloc(rvu->dev,
5750 						  ipolicer->band_prof.max,
5751 						  sizeof(u16), GFP_KERNEL);
5752 		if (!ipolicer->match_id)
5753 			return -ENOMEM;
5754 
5755 		for (prof_idx = 0;
5756 		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
5757 			/* Set AF as current owner for INIT ops to succeed */
5758 			ipolicer->pfvf_map[prof_idx] = 0x00;
5759 
5760 			/* The profile context has no enable bit, hence
5761 			 * there is no context disable either. INIT the
5762 			 * contexts here so that PF/VF later only need a
5763 			 * WRITE to set up policer rates and config.
5764 			 */
5765 			err = nix_init_policer_context(rvu, nix_hw,
5766 						       layer, prof_idx);
5767 			if (err)
5768 				return err;
5769 		}
5770 
5771 		/* Allocate memory for maintaining ref_counts for MID level
5772 		 * profiles; this is needed for aggregating leaf layer
5773 		 * profiles.
5774 		 */
5775 		if (layer != BAND_PROF_MID_LAYER)
5776 			continue;
5777 
5778 		ipolicer->ref_count = devm_kcalloc(rvu->dev,
5779 						   ipolicer->band_prof.max,
5780 						   sizeof(u16), GFP_KERNEL);
5781 		if (!ipolicer->ref_count)
5782 			return -ENOMEM;
5783 	}
5784 
5785 	/* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
5786 	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
5787 
5788 	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
5789 
5790 	return 0;
5791 }
5792 
5793 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
5794 {
5795 	struct nix_ipolicer *ipolicer;
5796 	int layer;
5797 
5798 	if (!rvu->hw->cap.ipolicer)
5799 		return;
5800 
5801 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5802 		ipolicer = &nix_hw->ipolicer[layer];
5803 
5804 		if (!ipolicer->band_prof.max)
5805 			continue;
5806 
5807 		kfree(ipolicer->band_prof.bmap);
5808 	}
5809 }
5810 
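/* Validate a bandwidth profile AQ request: the profile index must be
 * in range and owned by the requesting PCIFUNC (AF may access any
 * profile), and any linked higher layer profile must be owned by the
 * same PCIFUNC as well.
 */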
5811 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
5812 			       struct nix_hw *nix_hw, u16 pcifunc)
5813 {
5814 	struct nix_ipolicer *ipolicer;
5815 	int layer, hi_layer, prof_idx;
5816 
5817 	/* Bits [15:14] in profile index represent layer */
5818 	layer = (req->qidx >> 14) & 0x03;
5819 	prof_idx = req->qidx & 0x3FFF;
5820 
5821 	ipolicer = &nix_hw->ipolicer[layer];
5822 	if (prof_idx >= ipolicer->band_prof.max)
5823 		return -EINVAL;
5824 
5825 	/* Check if the profile is allocated to the requesting PCIFUNC or not
5826 	 * with the exception of AF. AF is allowed to read and update contexts.
5827 	 */
5828 	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
5829 		return -EINVAL;
5830 
5831 	/* If this profile is linked to higher layer profile then check
5832 	 * if that profile is also allocated to the requesting PCIFUNC
5833 	 * or not.
5834 	 */
5835 	if (!req->prof.hl_en)
5836 		return 0;
5837 
5838 	/* Leaf layer profile can link only to mid layer and
5839 	 * mid layer to top layer.
5840 	 */
5841 	if (layer == BAND_PROF_LEAF_LAYER)
5842 		hi_layer = BAND_PROF_MID_LAYER;
5843 	else if (layer == BAND_PROF_MID_LAYER)
5844 		hi_layer = BAND_PROF_TOP_LAYER;
5845 	else
5846 		return -EINVAL;
5847 
5848 	ipolicer = &nix_hw->ipolicer[hi_layer];
5849 	prof_idx = req->prof.band_prof_id;
5850 	if (prof_idx >= ipolicer->band_prof.max ||
5851 	    ipolicer->pfvf_map[prof_idx] != pcifunc)
5852 		return -EINVAL;
5853 
5854 	return 0;
5855 }
5856 
5857 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
5858 					struct nix_bandprof_alloc_req *req,
5859 					struct nix_bandprof_alloc_rsp *rsp)
5860 {
5861 	int blkaddr, layer, prof, idx, err;
5862 	u16 pcifunc = req->hdr.pcifunc;
5863 	struct nix_ipolicer *ipolicer;
5864 	struct nix_hw *nix_hw;
5865 
5866 	if (!rvu->hw->cap.ipolicer)
5867 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5868 
5869 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5870 	if (err)
5871 		return err;
5872 
5873 	mutex_lock(&rvu->rsrc_lock);
5874 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5875 		if (layer == BAND_PROF_INVAL_LAYER)
5876 			continue;
5877 		if (!req->prof_count[layer])
5878 			continue;
5879 
5880 		ipolicer = &nix_hw->ipolicer[layer];
5881 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
5882 			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
5883 			if (idx == MAX_BANDPROF_PER_PFFUNC)
5884 				break;
5885 
5886 			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5887 			if (prof < 0)
5888 				break;
5889 			rsp->prof_count[layer]++;
5890 			rsp->prof_idx[layer][idx] = prof;
5891 			ipolicer->pfvf_map[prof] = pcifunc;
5892 		}
5893 	}
5894 	mutex_unlock(&rvu->rsrc_lock);
5895 	return 0;
5896 }
5897 
5898 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
5899 {
5900 	int blkaddr, layer, prof_idx, err;
5901 	struct nix_ipolicer *ipolicer;
5902 	struct nix_hw *nix_hw;
5903 
5904 	if (!rvu->hw->cap.ipolicer)
5905 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5906 
5907 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5908 	if (err)
5909 		return err;
5910 
5911 	mutex_lock(&rvu->rsrc_lock);
5912 	/* Free all the profiles allocated to the PCIFUNC */
5913 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5914 		if (layer == BAND_PROF_INVAL_LAYER)
5915 			continue;
5916 		ipolicer = &nix_hw->ipolicer[layer];
5917 
5918 		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
5919 			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
5920 				continue;
5921 
5922 			/* Clear ratelimit aggregation, if any */
5923 			if (layer == BAND_PROF_LEAF_LAYER &&
5924 			    ipolicer->match_id[prof_idx])
5925 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5926 
5927 			ipolicer->pfvf_map[prof_idx] = 0x00;
5928 			ipolicer->match_id[prof_idx] = 0;
5929 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5930 		}
5931 	}
5932 	mutex_unlock(&rvu->rsrc_lock);
5933 	return 0;
5934 }
5935 
5936 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
5937 				       struct nix_bandprof_free_req *req,
5938 				       struct msg_rsp *rsp)
5939 {
5940 	int blkaddr, layer, prof_idx, idx, err;
5941 	u16 pcifunc = req->hdr.pcifunc;
5942 	struct nix_ipolicer *ipolicer;
5943 	struct nix_hw *nix_hw;
5944 
5945 	if (req->free_all)
5946 		return nix_free_all_bandprof(rvu, pcifunc);
5947 
5948 	if (!rvu->hw->cap.ipolicer)
5949 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5950 
5951 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5952 	if (err)
5953 		return err;
5954 
5955 	mutex_lock(&rvu->rsrc_lock);
5956 	/* Free the requested profile indices */
5957 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5958 		if (layer == BAND_PROF_INVAL_LAYER)
5959 			continue;
5960 		if (!req->prof_count[layer])
5961 			continue;
5962 
5963 		ipolicer = &nix_hw->ipolicer[layer];
5964 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
5965 			if (idx == MAX_BANDPROF_PER_PFFUNC)
5966 				break;
5967 			prof_idx = req->prof_idx[layer][idx];
5968 			if (prof_idx >= ipolicer->band_prof.max ||
5969 			    ipolicer->pfvf_map[prof_idx] != pcifunc)
5970 				continue;
5971 
5972 			/* Clear ratelimit aggregation, if any */
5973 			if (layer == BAND_PROF_LEAF_LAYER &&
5974 			    ipolicer->match_id[prof_idx])
5975 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5976 
5977 			ipolicer->pfvf_map[prof_idx] = 0x00;
5978 			ipolicer->match_id[prof_idx] = 0;
5979 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5980 		}
5981 	}
5982 	mutex_unlock(&rvu->rsrc_lock);
5983 	return 0;
5984 }
5985 
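/* Read one queue or bandwidth profile context via the NIX admin
 * queue on behalf of the given PCIFUNC.
 */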
5986 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
5987 			struct nix_cn10k_aq_enq_req *aq_req,
5988 			struct nix_cn10k_aq_enq_rsp *aq_rsp,
5989 			u16 pcifunc, u8 ctype, u32 qidx)
5990 {
5991 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5992 	aq_req->hdr.pcifunc = pcifunc;
5993 	aq_req->ctype = ctype;
5994 	aq_req->op = NIX_AQ_INSTOP_READ;
5995 	aq_req->qidx = qidx;
5996 
5997 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5998 				       (struct nix_aq_enq_req *)aq_req,
5999 				       (struct nix_aq_enq_rsp *)aq_rsp);
6000 }
6001 
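/* Link a leaf profile to a mid layer profile by writing band_prof_id
 * and the higher layer enable bit into the leaf's context.
 */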
6002 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
6003 					  struct nix_hw *nix_hw,
6004 					  struct nix_cn10k_aq_enq_req *aq_req,
6005 					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
6006 					  u32 leaf_prof, u16 mid_prof)
6007 {
6008 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
6009 	aq_req->hdr.pcifunc = 0x00;
6010 	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
6011 	aq_req->op = NIX_AQ_INSTOP_WRITE;
6012 	aq_req->qidx = leaf_prof;
6013 
6014 	aq_req->prof.band_prof_id = mid_prof;
6015 	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
6016 	aq_req->prof.hl_en = 1;
6017 	aq_req->prof_mask.hl_en = 1;
6018 
6019 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
6020 				       (struct nix_aq_enq_req *)aq_req,
6021 				       (struct nix_aq_enq_rsp *)aq_rsp);
6022 }
6023 
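/* Rate limit flows steered to different RQs as one aggregate: when a
 * second leaf profile turns up with the same match_id, link both
 * leaves to a common mid layer profile. The mid profile is cloned
 * from the first leaf on demand and reference counted per linked leaf.
 */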
6024 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
6025 				 u16 rq_idx, u16 match_id)
6026 {
6027 	int leaf_prof, mid_prof, leaf_match;
6028 	struct nix_cn10k_aq_enq_req aq_req;
6029 	struct nix_cn10k_aq_enq_rsp aq_rsp;
6030 	struct nix_ipolicer *ipolicer;
6031 	struct nix_hw *nix_hw;
6032 	int blkaddr, idx, rc;
6033 
6034 	if (!rvu->hw->cap.ipolicer)
6035 		return 0;
6036 
6037 	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
6038 	if (rc)
6039 		return rc;
6040 
6041 	/* Fetch the RQ's context to see if policing is enabled */
6042 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
6043 				 NIX_AQ_CTYPE_RQ, rq_idx);
6044 	if (rc) {
6045 		dev_err(rvu->dev,
6046 			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
6047 			__func__, rq_idx, pcifunc);
6048 		return rc;
6049 	}
6050 
6051 	if (!aq_rsp.rq.policer_ena)
6052 		return 0;
6053 
6054 	/* Get the bandwidth profile ID mapped to this RQ */
6055 	leaf_prof = aq_rsp.rq.band_prof_id;
6056 
6057 	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
6058 	ipolicer->match_id[leaf_prof] = match_id;
6059 
6060 	/* Check if any other leaf profile is marked with same match_id */
6061 	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
6062 		if (idx == leaf_prof)
6063 			continue;
6064 		if (ipolicer->match_id[idx] != match_id)
6065 			continue;
6066 
6067 		leaf_match = idx;
6068 		break;
6069 	}
6070 
6071 	if (idx == ipolicer->band_prof.max)
6072 		return 0;
6073 
6074 	/* Fetch the matching profile's context to check if it's already
6075 	 * mapped to a mid level profile.
6076 	 */
6077 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
6078 				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
6079 	if (rc) {
6080 		dev_err(rvu->dev,
6081 			"%s: Failed to fetch context of leaf profile %d\n",
6082 			__func__, leaf_match);
6083 		return rc;
6084 	}
6085 
6086 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
6087 	if (aq_rsp.prof.hl_en) {
6088 		/* Get the mid layer profile index and also map the
6089 		 * leaf_prof index to it, so that flows steered to
6090 		 * different RQs but marked with the same match_id
6091 		 * are rate limited in an aggregate fashion.
6092 		 */
6093 		mid_prof = aq_rsp.prof.band_prof_id;
6094 		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
6095 						    &aq_req, &aq_rsp,
6096 						    leaf_prof, mid_prof);
6097 		if (rc) {
6098 			dev_err(rvu->dev,
6099 				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
6100 				__func__, leaf_prof, mid_prof);
6101 			goto exit;
6102 		}
6103 
6104 		mutex_lock(&rvu->rsrc_lock);
6105 		ipolicer->ref_count[mid_prof]++;
6106 		mutex_unlock(&rvu->rsrc_lock);
6107 		goto exit;
6108 	}
6109 
6110 	/* Allocate a mid layer profile and
6111 	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
6112 	 */
6113 	mutex_lock(&rvu->rsrc_lock);
6114 	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
6115 	if (mid_prof < 0) {
6116 		dev_err(rvu->dev,
6117 			"%s: Unable to allocate mid layer profile\n", __func__);
6118 		mutex_unlock(&rvu->rsrc_lock);
6119 		goto exit;
6120 	}
6121 	mutex_unlock(&rvu->rsrc_lock);
6122 	ipolicer->pfvf_map[mid_prof] = 0x00;
6123 	ipolicer->ref_count[mid_prof] = 0;
6124 
6125 	/* Initialize mid layer profile same as 'leaf_prof' */
6126 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
6127 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
6128 	if (rc) {
6129 		dev_err(rvu->dev,
6130 			"%s: Failed to fetch context of leaf profile %d\n",
6131 			__func__, leaf_prof);
6132 		goto exit;
6133 	}
6134 
6135 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
6136 	aq_req.hdr.pcifunc = 0x00;
6137 	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
6138 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
6139 	aq_req.op = NIX_AQ_INSTOP_WRITE;
6140 	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
6141 	memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
6142 	/* Clear higher layer enable bit in the mid profile, just in case */
6143 	aq_req.prof.hl_en = 0;
6144 	aq_req.prof_mask.hl_en = 1;
6145 
6146 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
6147 				     (struct nix_aq_enq_req *)&aq_req, NULL);
6148 	if (rc) {
6149 		dev_err(rvu->dev,
6150 			"%s: Failed to INIT context of mid layer profile %d\n",
6151 			__func__, mid_prof);
6152 		goto exit;
6153 	}
6154 
6155 	/* Map both leaf profiles to this mid layer profile */
6156 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
6157 					    &aq_req, &aq_rsp,
6158 					    leaf_prof, mid_prof);
6159 	if (rc) {
6160 		dev_err(rvu->dev,
6161 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
6162 			__func__, leaf_prof, mid_prof);
6163 		goto exit;
6164 	}
6165 
6166 	mutex_lock(&rvu->rsrc_lock);
6167 	ipolicer->ref_count[mid_prof]++;
6168 	mutex_unlock(&rvu->rsrc_lock);
6169 
6170 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
6171 					    &aq_req, &aq_rsp,
6172 					    leaf_match, mid_prof);
6173 	if (rc) {
6174 		dev_err(rvu->dev,
6175 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
6176 			__func__, leaf_match, mid_prof);
6177 		ipolicer->ref_count[mid_prof]--;
6178 		goto exit;
6179 	}
6180 
6181 	mutex_lock(&rvu->rsrc_lock);
6182 	ipolicer->ref_count[mid_prof]++;
6183 	mutex_unlock(&rvu->rsrc_lock);
6184 
6185 exit:
6186 	return rc;
6187 }
6188 
6189 /* Called with mutex rsrc_lock */
6190 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
6191 				     u32 leaf_prof)
6192 {
6193 	struct nix_cn10k_aq_enq_req aq_req;
6194 	struct nix_cn10k_aq_enq_rsp aq_rsp;
6195 	struct nix_ipolicer *ipolicer;
6196 	u16 mid_prof;
6197 	int rc;
6198 
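	/* Drop rsrc_lock across the AQ context read, presumably to avoid
	 * holding it over a polled AQ operation; it is re-acquired below.
	 */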
6199 	mutex_unlock(&rvu->rsrc_lock);
6200 
6201 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
6202 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
6203 
6204 	mutex_lock(&rvu->rsrc_lock);
6205 	if (rc) {
6206 		dev_err(rvu->dev,
6207 			"%s: Failed to fetch context of leaf profile %d\n",
6208 			__func__, leaf_prof);
6209 		return;
6210 	}
6211 
6212 	if (!aq_rsp.prof.hl_en)
6213 		return;
6214 
6215 	mid_prof = aq_rsp.prof.band_prof_id;
6216 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
6217 	ipolicer->ref_count[mid_prof]--;
6218 	/* If ref_count is zero, free mid layer profile */
6219 	if (!ipolicer->ref_count[mid_prof]) {
6220 		ipolicer->pfvf_map[mid_prof] = 0x00;
6221 		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
6222 	}
6223 }
6224 
6225 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
6226 					     struct nix_bandprof_get_hwinfo_rsp *rsp)
6227 {
6228 	struct nix_ipolicer *ipolicer;
6229 	int blkaddr, layer, err;
6230 	struct nix_hw *nix_hw;
6231 	u64 tu;
6232 
6233 	if (!rvu->hw->cap.ipolicer)
6234 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
6235 
6236 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6237 	if (err)
6238 		return err;
6239 
6240 	/* Return number of bandwidth profiles free at each layer */
6241 	mutex_lock(&rvu->rsrc_lock);
6242 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
6243 		if (layer == BAND_PROF_INVAL_LAYER)
6244 			continue;
6245 
6246 		ipolicer = &nix_hw->ipolicer[layer];
6247 		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
6248 	}
6249 	mutex_unlock(&rvu->rsrc_lock);
6250 
6251 	/* Set the policer timeunit in nanosec */
6252 	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
6253 	rsp->policer_timeunit = (tu + 1) * 100;
6254 
6255 	return 0;
6256 }
6257 
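/* Look up a multicast/mirror group element by group index. Caller is
 * expected to hold mcast_grp_lock.
 */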
6258 static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp,
6259 							      u32 mcast_grp_idx)
6260 {
6261 	struct nix_mcast_grp_elem *iter;
6262 	bool is_found = false;
6263 
6264 	list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
6265 		if (iter->mcast_grp_idx == mcast_grp_idx) {
6266 			is_found = true;
6267 			break;
6268 		}
6269 	}
6270 
6271 	if (is_found)
6272 		return iter;
6273 
6274 	return NULL;
6275 }
6276 
6277 int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
6278 {
6279 	struct nix_mcast_grp_elem *elem;
6280 	struct nix_mcast_grp *mcast_grp;
6281 	struct nix_hw *nix_hw;
6282 	int blkaddr, ret;
6283 
6284 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6285 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
6286 	if (!nix_hw)
6287 		return NIX_AF_ERR_INVALID_NIXBLK;
6288 
6289 	mcast_grp = &nix_hw->mcast_grp;
6290 	mutex_lock(&mcast_grp->mcast_grp_lock);
6291 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
6292 	if (!elem)
6293 		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6294 	else
6295 		ret = elem->mce_start_index;
6296 
6297 	mutex_unlock(&mcast_grp->mcast_grp_lock);
6298 	return ret;
6299 }
6300 
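/* FLR cleanup for multicast/mirror state: destroy every group created
 * by the failing pcifunc and delete its individual entries from groups
 * owned by others.
 */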
6301 void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
6302 {
6303 	struct nix_mcast_grp_destroy_req dreq = { 0 };
6304 	struct nix_mcast_grp_update_req ureq = { 0 };
6305 	struct nix_mcast_grp_update_rsp ursp = { 0 };
6306 	struct nix_mcast_grp_elem *elem, *tmp;
6307 	struct nix_mcast_grp *mcast_grp;
6308 	struct nix_hw *nix_hw;
6309 	int blkaddr;
6310 
6311 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6312 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
6313 	if (!nix_hw)
6314 		return;
6315 
6316 	mcast_grp = &nix_hw->mcast_grp;
6317 
6318 	mutex_lock(&mcast_grp->mcast_grp_lock);
6319 	list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) {
6320 		struct nix_mce_list *mce_list;
6321 		struct hlist_node *tmp;
6322 		struct mce *mce;
6323 
6324 		/* If the pcifunc which created the multicast/mirror
6325 		 * group received an FLR, then delete the entire group.
6326 		 */
6327 		if (elem->pcifunc == pcifunc) {
6328 			/* Delete group */
6329 			dreq.hdr.pcifunc = elem->pcifunc;
6330 			dreq.mcast_grp_idx = elem->mcast_grp_idx;
6331 			dreq.is_af = 1;
6332 			rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
6333 			continue;
6334 		}
6335 
6336 		/* Iterate the group elements and delete the element which
6337 		 * received the FLR.
6338 		 */
6339 		mce_list = &elem->mcast_mce_list;
6340 		hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
6341 			if (mce->pcifunc == pcifunc) {
6342 				ureq.hdr.pcifunc = pcifunc;
6343 				ureq.num_mce_entry = 1;
6344 				ureq.mcast_grp_idx = elem->mcast_grp_idx;
6345 				ureq.op = NIX_MCAST_OP_DEL_ENTRY;
6346 				ureq.pcifunc[0] = pcifunc;
6347 				ureq.is_af = 1;
6348 				rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp);
6349 				break;
6350 			}
6351 		}
6352 	}
6353 	mutex_unlock(&mcast_grp->mcast_grp_lock);
6354 }
6355 
6356 int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
6357 				    u32 mcast_grp_idx, u16 mcam_index)
6358 {
6359 	struct nix_mcast_grp_elem *elem;
6360 	struct nix_mcast_grp *mcast_grp;
6361 	struct nix_hw *nix_hw;
6362 	int blkaddr, ret = 0;
6363 
6364 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6365 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
6366 	if (!nix_hw)
6367 		return NIX_AF_ERR_INVALID_NIXBLK;
6368 
6369 	mcast_grp = &nix_hw->mcast_grp;
6370 	mutex_lock(&mcast_grp->mcast_grp_lock);
6371 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
6372 	if (!elem)
6373 		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6374 	else
6375 		elem->mcam_index = mcam_index;
6376 
6377 	mutex_unlock(&mcast_grp->mcast_grp_lock);
6378 	return ret;
6379 }
6380 
6381 int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
6382 					  struct nix_mcast_grp_create_req *req,
6383 					  struct nix_mcast_grp_create_rsp *rsp)
6384 {
6385 	struct nix_mcast_grp_elem *elem;
6386 	struct nix_mcast_grp *mcast_grp;
6387 	struct nix_hw *nix_hw;
6388 	int blkaddr, err;
6389 
6390 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6391 	if (err)
6392 		return err;
6393 
6394 	mcast_grp = &nix_hw->mcast_grp;
6395 	elem = kzalloc(sizeof(*elem), GFP_KERNEL);
6396 	if (!elem)
6397 		return -ENOMEM;
6398 
6399 	INIT_HLIST_HEAD(&elem->mcast_mce_list.head);
6400 	elem->mcam_index = -1;
6401 	elem->mce_start_index = -1;
6402 	elem->pcifunc = req->hdr.pcifunc;
6403 	elem->dir = req->dir;
6404 	elem->mcast_grp_idx = mcast_grp->next_grp_index++;
6405 
6406 	mutex_lock(&mcast_grp->mcast_grp_lock);
6407 	list_add_tail(&elem->list, &mcast_grp->mcast_grp_head);
6408 	mcast_grp->count++;
6409 	mutex_unlock(&mcast_grp->mcast_grp_lock);
6410 
6411 	rsp->mcast_grp_idx = elem->mcast_grp_idx;
6412 	return 0;
6413 }
6414 
6415 int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
6416 					   struct nix_mcast_grp_destroy_req *req,
6417 					   struct msg_rsp *rsp)
6418 {
6419 	struct npc_delete_flow_req uninstall_req = { 0 };
6420 	struct npc_delete_flow_rsp uninstall_rsp = { 0 };
6421 	struct nix_mcast_grp_elem *elem;
6422 	struct nix_mcast_grp *mcast_grp;
6423 	int blkaddr, err, ret = 0;
6424 	struct nix_mcast *mcast;
6425 	struct nix_hw *nix_hw;
6426 
6427 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6428 	if (err)
6429 		return err;
6430 
6431 	mcast_grp = &nix_hw->mcast_grp;
6432 
6433 	/* If AF is requesting the deletion,
6434 	 * then AF already holds the lock.
6435 	 */
6436 	if (!req->is_af)
6437 		mutex_lock(&mcast_grp->mcast_grp_lock);
6438 
6439 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
6440 	if (!elem) {
6441 		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6442 		goto unlock_grp;
6443 	}
6444 
6445 	/* If no mce entries are associated with the group
6446 	 * then just remove it from the global list.
6447 	 */
6448 	if (!elem->mcast_mce_list.count)
6449 		goto delete_grp;
6450 
6451 	/* Delete the associated mcam entry and
6452 	 * remove all mce entries from the group
6453 	 */
6454 	mcast = &nix_hw->mcast;
6455 	mutex_lock(&mcast->mce_lock);
6456 	if (elem->mcam_index != -1) {
6457 		uninstall_req.hdr.pcifunc = req->hdr.pcifunc;
6458 		uninstall_req.entry = elem->mcam_index;
6459 		rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
6460 	}
6461 
6462 	nix_free_mce_list(mcast, elem->mcast_mce_list.count,
6463 			  elem->mce_start_index, elem->dir);
6464 	nix_delete_mcast_mce_list(&elem->mcast_mce_list);
6465 	mutex_unlock(&mcast->mce_lock);
6466 
6467 delete_grp:
6468 	list_del(&elem->list);
6469 	kfree(elem);
6470 	mcast_grp->count--;
6471 
6472 unlock_grp:
6473 	if (!req->is_af)
6474 		mutex_unlock(&mcast_grp->mcast_grp_lock);
6475 
6476 	return ret;
6477 }
6478 
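/* Add entries to or delete entries from a multicast/mirror group. The
 * MCE list is reallocated at the new size and rewritten; on failure
 * the previous list is restored. The group's MCAM entry stays disabled
 * while the list is being rewritten.
 */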
6479 int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
6480 					  struct nix_mcast_grp_update_req *req,
6481 					  struct nix_mcast_grp_update_rsp *rsp)
6482 {
6483 	struct nix_mcast_grp_destroy_req dreq = { 0 };
6484 	struct npc_mcam *mcam = &rvu->hw->mcam;
6485 	struct nix_mcast_grp_elem *elem;
6486 	struct nix_mcast_grp *mcast_grp;
6487 	int blkaddr, err, npc_blkaddr;
6488 	u16 prev_count, new_count;
6489 	struct nix_mcast *mcast;
6490 	struct nix_hw *nix_hw;
6491 	int i, ret;
6492 
6493 	if (!req->num_mce_entry)
6494 		return 0;
6495 
6496 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6497 	if (err)
6498 		return err;
6499 
6500 	mcast_grp = &nix_hw->mcast_grp;
6501 
6502 	/* If AF is requesting the update,
6503 	 * then AF already holds the lock.
6504 	 */
6505 	if (!req->is_af)
6506 		mutex_lock(&mcast_grp->mcast_grp_lock);
6507 
6508 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
6509 	if (!elem) {
6510 		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6511 		goto unlock_grp;
6512 	}
6513 
6514 	/* If any pcifunc matches the group's pcifunc, then we can
6515 	 * delete the entire group.
6516 	 */
6517 	if (req->op == NIX_MCAST_OP_DEL_ENTRY) {
6518 		for (i = 0; i < req->num_mce_entry; i++) {
6519 			if (elem->pcifunc == req->pcifunc[i]) {
6520 				/* Delete group */
6521 				dreq.hdr.pcifunc = elem->pcifunc;
6522 				dreq.mcast_grp_idx = elem->mcast_grp_idx;
6523 				dreq.is_af = 1;
6524 				rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
6525 				ret = 0;
6526 				goto unlock_grp;
6527 			}
6528 		}
6529 	}
6530 
6531 	mcast = &nix_hw->mcast;
6532 	mutex_lock(&mcast->mce_lock);
6533 	npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
6534 	if (elem->mcam_index != -1)
6535 		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);
6536 
6537 	prev_count = elem->mcast_mce_list.count;
6538 	if (req->op == NIX_MCAST_OP_ADD_ENTRY) {
6539 		new_count = prev_count + req->num_mce_entry;
6540 		if (prev_count)
6541 			nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
6542 
6543 		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
6544 
6545 		/* It is possible not to get contiguous memory */
6546 		if (elem->mce_start_index < 0) {
6547 			if (elem->mcam_index != -1) {
6548 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6549 						      elem->mcam_index, true);
6550 				ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
6551 				goto unlock_mce;
6552 			}
6553 		}
6554 
6555 		ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req);
6556 		if (ret) {
6557 			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
6558 			if (prev_count)
6559 				elem->mce_start_index = nix_alloc_mce_list(mcast,
6560 									   prev_count,
6561 									   elem->dir);
6562 
6563 			if (elem->mcam_index != -1)
6564 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6565 						      elem->mcam_index, true);
6566 
6567 			goto unlock_mce;
6568 		}
6569 	} else {
6570 		if (!prev_count || prev_count < req->num_mce_entry) {
6571 			if (elem->mcam_index != -1)
6572 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6573 						      elem->mcam_index, true);
6574 			ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
6575 			goto unlock_mce;
6576 		}
6577 
6578 		nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
6579 		new_count = prev_count - req->num_mce_entry;
6580 		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
6581 		ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
6582 		if (ret) {
6583 			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
6584 			elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
6585 			if (elem->mcam_index != -1)
6586 				npc_enable_mcam_entry(rvu, mcam,
6587 						      npc_blkaddr,
6588 						      elem->mcam_index,
6589 						      true);
6590 
6591 			goto unlock_mce;
6592 		}
6593 	}
6594 
6595 	if (elem->mcam_index == -1) {
6596 		rsp->mce_start_index = elem->mce_start_index;
6597 		ret = 0;
6598 		goto unlock_mce;
6599 	}
6600 
6601 	nix_mcast_update_action(rvu, elem);
6602 	npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
6603 	rsp->mce_start_index = elem->mce_start_index;
6604 	ret = 0;
6605 
6606 unlock_mce:
6607 	mutex_unlock(&mcast->mce_lock);
6608 
6609 unlock_grp:
6610 	if (!req->is_af)
6611 		mutex_unlock(&mcast_grp->mcast_grp_lock);
6612 
6613 	return ret;
6614 }
6615