xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c (revision fcee7d82f27d6a8b1ddc5bbefda59b4e441e9bc0)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell.
5  *
6  */
7 
8 #include <linux/module.h>
9 #include <linux/pci.h>
10 
11 #include "rvu_struct.h"
12 #include "rvu_reg.h"
13 #include "rvu.h"
14 #include "npc.h"
15 #include "mcs.h"
16 #include "cgx.h"
17 #include "lmac_common.h"
18 #include "rvu_npc_hash.h"
19 
20 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
21 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
22 			    int type, int chan_id);
23 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
24 			       int type, bool add);
25 static int nix_setup_ipolicers(struct rvu *rvu,
26 			       struct nix_hw *nix_hw, int blkaddr);
27 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
28 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
29 			       struct nix_hw *nix_hw, u16 pcifunc);
30 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
31 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
32 				     u32 leaf_prof);
33 static const char *nix_get_ctx_name(int ctype);
34 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc);
35 
36 enum mc_tbl_sz {
37 	MC_TBL_SZ_256,
38 	MC_TBL_SZ_512,
39 	MC_TBL_SZ_1K,
40 	MC_TBL_SZ_2K,
41 	MC_TBL_SZ_4K,
42 	MC_TBL_SZ_8K,
43 	MC_TBL_SZ_16K,
44 	MC_TBL_SZ_32K,
45 	MC_TBL_SZ_64K,
46 };
47 
48 enum mc_buf_cnt {
49 	MC_BUF_CNT_8,
50 	MC_BUF_CNT_16,
51 	MC_BUF_CNT_32,
52 	MC_BUF_CNT_64,
53 	MC_BUF_CNT_128,
54 	MC_BUF_CNT_256,
55 	MC_BUF_CNT_512,
56 	MC_BUF_CNT_1024,
57 	MC_BUF_CNT_2048,
58 };
59 
60 enum nix_mark_fmt_indexes {
61 	NIX_MARK_CFG_IP_DSCP_RED,
62 	NIX_MARK_CFG_IP_DSCP_YELLOW,
63 	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
64 	NIX_MARK_CFG_IP_ECN_RED,
65 	NIX_MARK_CFG_IP_ECN_YELLOW,
66 	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
67 	NIX_MARK_CFG_VLAN_DEI_RED,
68 	NIX_MARK_CFG_VLAN_DEI_YELLOW,
69 	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
70 	NIX_MARK_CFG_MAX,
71 };
72 
73 /* For now, considering MC resources needed for broadcast
74  * pkt replication only, i.e. 256 HWVFs + 12 PFs.
75  */
76 #define MC_TBL_SIZE	MC_TBL_SZ_2K
77 #define MC_BUF_CNT	MC_BUF_CNT_1024
78 
79 #define MC_TX_MAX	2048
80 
81 struct mce {
82 	struct hlist_node	node;
83 	u32			rq_rss_index;
84 	u16			pcifunc;
85 	u16			channel;
86 	u8			dest_type;
87 	u8			is_active;
88 	u8			reserved[2];
89 };
90 
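/* Iterate over rvu->nix_blkaddr[]: returns the NIX block address that
 * follows @blkaddr, or the first one when @blkaddr is 0. Returns 0 once
 * the list is exhausted, so callers can use this as a simple iterator.
 */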
91 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
92 {
93 	int i = 0;
94 
95 	/* If blkaddr is 0, return the first NIX block address */
96 	if (blkaddr == 0)
97 		return rvu->nix_blkaddr[blkaddr];
98 
99 	while (i + 1 < MAX_NIX_BLKS) {
100 		if (rvu->nix_blkaddr[i] == blkaddr)
101 			return rvu->nix_blkaddr[i + 1];
102 		i++;
103 	}
104 
105 	return 0;
106 }
107 
108 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
109 {
110 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
111 	int blkaddr;
112 
113 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
114 	if (!pfvf->nixlf || blkaddr < 0)
115 		return false;
116 	return true;
117 }
118 
119 int rvu_get_nixlf_count(struct rvu *rvu)
120 {
121 	int blkaddr = 0, max = 0;
122 	struct rvu_block *block;
123 
124 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
125 	while (blkaddr) {
126 		block = &rvu->hw->block[blkaddr];
127 		max += block->lf.max;
128 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
129 	}
130 	return max;
131 }
132 
133 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
134 {
135 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
136 	struct rvu_hwinfo *hw = rvu->hw;
137 	int blkaddr;
138 
139 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
140 	if (!pfvf->nixlf || blkaddr < 0)
141 		return NIX_AF_ERR_AF_LF_INVALID;
142 
143 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
144 	if (*nixlf < 0)
145 		return NIX_AF_ERR_AF_LF_INVALID;
146 
147 	if (nix_blkaddr)
148 		*nix_blkaddr = blkaddr;
149 
150 	return 0;
151 }
152 
153 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
154 			struct nix_hw **nix_hw, int *blkaddr)
155 {
156 	struct rvu_pfvf *pfvf;
157 
158 	pfvf = rvu_get_pfvf(rvu, pcifunc);
159 	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
160 	if (!pfvf->nixlf || *blkaddr < 0)
161 		return NIX_AF_ERR_AF_LF_INVALID;
162 
163 	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
164 	if (!*nix_hw)
165 		return NIX_AF_ERR_INVALID_NIXBLK;
166 	return 0;
167 }
168 
169 static void nix_mce_list_init(struct nix_mce_list *list, int max)
170 {
171 	INIT_HLIST_HEAD(&list->head);
172 	list->count = 0;
173 	list->max = max;
174 }
175 
176 static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir)
177 {
178 	struct rsrc_bmap *mce_counter;
179 	int idx;
180 
181 	if (!mcast)
182 		return -EINVAL;
183 
184 	mce_counter = &mcast->mce_counter[dir];
185 	if (!rvu_rsrc_check_contig(mce_counter, count))
186 		return -ENOSPC;
187 
188 	idx = rvu_alloc_rsrc_contig(mce_counter, count);
189 	return idx;
190 }
191 
192 static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir)
193 {
194 	struct rsrc_bmap *mce_counter;
195 
196 	if (!mcast)
197 		return;
198 
199 	mce_counter = &mcast->mce_counter[dir];
200 	rvu_free_rsrc_contig(mce_counter, count, start);
201 }
202 
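/* Map a NIX block address to its nix_hw context. The index into hw->nix[]
 * is the position of @blkaddr in the NIX block iteration order, hence the
 * walk via rvu_get_next_nix_blkaddr(). Returns NULL for an unknown blkaddr.
 */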
203 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
204 {
205 	int nix_blkaddr = 0, i = 0;
206 	struct rvu *rvu = hw->rvu;
207 
208 	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
209 	while (nix_blkaddr) {
210 		if (blkaddr == nix_blkaddr && hw->nix)
211 			return &hw->nix[i];
212 		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
213 		i++;
214 	}
215 	return NULL;
216 }
217 
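/* Select the DWRR MTU register for a link type: silicon with per-link-type
 * DWRR MTUs (nix_multiple_dwrr_mtu) uses NIX_AF_DWRR_MTUX(type); otherwise
 * only SDP is distinguished from RPM/LBK.
 */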
218 int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
219 {
220 	if (hw->cap.nix_multiple_dwrr_mtu)
221 		return NIX_AF_DWRR_MTUX(smq_link_type);
222 
223 	if (smq_link_type == SMQ_LINK_TYPE_SDP)
224 		return NIX_AF_DWRR_SDP_MTU;
225 
226 	/* Here it's the same reg for RPM and LBK */
227 	return NIX_AF_DWRR_RPM_MTU;
228 }
229 
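/* Decode the 5-bit DWRR MTU register encoding into bytes. The encoding is
 * a power-of-2 exponent except for two reserved jumbo values, e.g.
 * 10 -> 1024 bytes, 4 -> 9728 bytes, 5 -> 10240 bytes.
 */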
230 u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
231 {
232 	dwrr_mtu &= 0x1FULL;
233 
234 	/* The MTU used for DWRR calculation is a power of 2, up to 64KB.
235 	 * Value of 4 is reserved for MTU value of 9728 bytes.
236 	 * Value of 5 is reserved for MTU value of 10240 bytes.
237 	 */
238 	switch (dwrr_mtu) {
239 	case 4:
240 		return 9728;
241 	case 5:
242 		return 10240;
243 	default:
244 		return BIT_ULL(dwrr_mtu);
245 	}
248 }
249 
250 u32 convert_bytes_to_dwrr_mtu(u32 bytes)
251 {
252 	/* The MTU used for DWRR calculation is a power of 2, up to 64KB.
253 	 * Value of 4 is reserved for MTU value of 9728 bytes.
254 	 * Value of 5 is reserved for MTU value of 10240 bytes.
255 	 */
256 	if (bytes > BIT_ULL(16))
257 		return 0;
258 
259 	switch (bytes) {
260 	case 9728:
261 		return 4;
262 	case 10240:
263 		return 5;
264 	default:
265 		return ilog2(bytes);
266 	}
269 }
270 
271 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
272 {
273 	int err;
274 
275 	/* Sync all in flight RX packets to LLC/DRAM */
276 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
277 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
278 	if (err)
279 		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
280 
281 	/* SW_SYNC ensures all existing transactions are finished and pkts
282 	 * are written to LLC/DRAM; queues should be torn down only after
283 	 * a successful SW_SYNC. Due to a HW erratum, in some rare scenarios
284 	 * an existing transaction might complete after the SW_SYNC operation.
285 	 * To ensure the operation is fully done, do the SW_SYNC twice.
286 	 */
287 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
288 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
289 	if (err)
290 		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
291 }
292 
293 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
294 			    int lvl, u16 pcifunc, u16 schq)
295 {
296 	struct rvu_hwinfo *hw = rvu->hw;
297 	struct nix_txsch *txsch;
298 	struct nix_hw *nix_hw;
299 	u16 map_func;
300 
301 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
302 	if (!nix_hw)
303 		return false;
304 
305 	txsch = &nix_hw->txsch[lvl];
306 	/* Check out of bounds */
307 	if (schq >= txsch->schq.max)
308 		return false;
309 
310 	mutex_lock(&rvu->rsrc_lock);
311 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
312 	mutex_unlock(&rvu->rsrc_lock);
313 
314 	/* TLs aggregating traffic are shared across PF and VFs */
315 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
316 		if ((nix_get_tx_link(rvu, map_func) !=
317 		     nix_get_tx_link(rvu, pcifunc)) &&
318 		     (rvu_get_pf(rvu->pdev, map_func) !=
319 				rvu_get_pf(rvu->pdev, pcifunc)))
320 			return false;
321 		else
322 			return true;
323 	}
324 
325 	if (map_func != pcifunc)
326 		return false;
327 
328 	return true;
329 }
330 
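/* Per interface type (CGX/LBK/SDP) NIXLF init: derive the RX/TX channel
 * base and count, report the TX link in @rsp, and install the unicast,
 * broadcast and (for LBK/SDP) promisc MCAM entries for this PF_FUNC.
 */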
331 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
332 			      struct nix_lf_alloc_rsp *rsp, bool loop)
333 {
334 	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
335 	u16 req_chan_base, req_chan_end, req_chan_cnt;
336 	struct rvu_hwinfo *hw = rvu->hw;
337 	struct sdp_node_info *sdp_info;
338 	int pkind, pf, vf, lbkid, vfid;
339 	u8 cgx_id, lmac_id;
340 	bool from_vf;
341 	int err;
342 
343 	pf = rvu_get_pf(rvu->pdev, pcifunc);
344 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
345 	    type != NIX_INTF_TYPE_SDP)
346 		return 0;
347 
348 	switch (type) {
349 	case NIX_INTF_TYPE_CGX:
350 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
351 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
352 
353 		pkind = rvu_npc_get_pkind(rvu, pf);
354 		if (pkind < 0) {
355 			dev_err(rvu->dev,
356 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
357 			return -EINVAL;
358 		}
359 		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
360 		pfvf->tx_chan_base = pfvf->rx_chan_base;
361 		pfvf->rx_chan_cnt = 1;
362 		pfvf->tx_chan_cnt = 1;
363 		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
364 
365 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
366 		rvu_npc_set_pkind(rvu, pkind, pfvf);
367 		break;
368 	case NIX_INTF_TYPE_LBK:
369 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
370 
371 		/* If the NIX1 block is present on the silicon then NIXes are
372 		 * assigned alternately to lbk interfaces. NIX0 should
373 		 * send packets on lbk link 1 channels and NIX1 should send
374 		 * on lbk link 0 channels for the communication between
375 		 * NIX0 and NIX1.
376 		 */
377 		lbkid = 0;
378 		if (rvu->hw->lbk_links > 1)
379 			lbkid = vf & 0x1 ? 0 : 1;
380 
381 		/* By default NIX0 is configured to send packet on lbk link 1
382 		 * (which corresponds to LBK1), same packet will receive on
383 		 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
384 		 * (which corresponds to LBK2) packet will receive on NIX0 lbk
385 		 * link 1.
386 		 * But if lbk links for NIX0 and NIX1 are negated, i.e. NIX0
387 		 * transmits and receives on lbk link 0, which corresponds
388 		 * to the LBK1 block, back-to-back connectivity between NIX and
389 		 * LBK can be achieved (which is similar to 96xx)
390 		 *
391 		 *			RX		TX
392 		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
393 		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
394 		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
395 		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
396 		 */
397 		if (loop)
398 			lbkid = !lbkid;
399 
400 		/* Note that AF's VFs work in pairs and talk over consecutive
401 		 * loopback channels. Therefore, if an odd number of AF VFs is
402 		 * enabled, the last VF remains unpaired.
403 		 */
404 		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
405 		pfvf->tx_chan_base = vf & 0x1 ?
406 					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
407 					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
408 		pfvf->rx_chan_cnt = 1;
409 		pfvf->tx_chan_cnt = 1;
410 		rsp->tx_link = hw->cgx_links + lbkid;
411 		pfvf->lbkid = lbkid;
412 		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
413 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
414 					      pfvf->rx_chan_base,
415 					      pfvf->rx_chan_cnt);
416 
417 		break;
418 	case NIX_INTF_TYPE_SDP:
419 		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
420 		parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
421 		sdp_info = parent_pf->sdp_info;
422 		if (!sdp_info) {
423 			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
424 			return -EINVAL;
425 		}
426 		if (from_vf) {
427 			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
428 				sdp_info->num_pf_rings;
429 			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
430 			for (vfid = 0; vfid < vf; vfid++)
431 				req_chan_base += sdp_info->vf_rings[vfid];
432 			req_chan_cnt = sdp_info->vf_rings[vf];
433 			req_chan_end = req_chan_base + req_chan_cnt - 1;
434 			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
435 			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
436 				dev_err(rvu->dev,
437 					"PF_Func 0x%x: Invalid channel base and count\n",
438 					pcifunc);
439 				return -EINVAL;
440 			}
441 		} else {
442 			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
443 			req_chan_cnt = sdp_info->num_pf_rings;
444 		}
445 
446 		pfvf->rx_chan_base = req_chan_base;
447 		pfvf->rx_chan_cnt = req_chan_cnt;
448 		pfvf->tx_chan_base = pfvf->rx_chan_base;
449 		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
450 
451 		rsp->tx_link = hw->cgx_links + hw->lbk_links;
452 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
453 					      pfvf->rx_chan_base,
454 					      pfvf->rx_chan_cnt);
455 		break;
456 	}
457 
458 	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
459 	 * RVU PF/VF's MAC address.
460 	 */
461 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
462 				    pfvf->rx_chan_base, pfvf->mac_addr);
463 
464 	/* Add this PF_FUNC to bcast pkt replication list */
465 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
466 	if (err) {
467 		dev_err(rvu->dev,
468 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
469 			pcifunc);
470 		return err;
471 	}
472 	/* Install MCAM rule matching Ethernet broadcast mac address */
473 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
474 					  nixlf, pfvf->rx_chan_base);
475 
476 	pfvf->maxlen = NIC_HW_MIN_FRS;
477 	pfvf->minlen = NIC_HW_MIN_FRS;
478 
479 	return 0;
480 }
481 
482 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
483 {
484 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
485 	int err;
486 
487 	pfvf->maxlen = 0;
488 	pfvf->minlen = 0;
489 
490 	/* Remove this PF_FUNC from bcast pkt replication list */
491 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
492 	if (err) {
493 		dev_err(rvu->dev,
494 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
495 			pcifunc);
496 	}
497 
498 	/* Free and disable any MCAM entries used by this NIX LF */
499 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
500 
501 	/* Disable DMAC filters used */
502 	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
503 }
504 
505 #define NIX_BPIDS_PER_LMAC	8
506 #define NIX_BPIDS_PER_CPT	1
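/* Carve the HW BPID space into static CGX and SDP ranges plus one CPT BPID;
 * everything above free_pool_base is managed as a dynamically allocated
 * pool (used for LBK) via the bp->bpids bitmap.
 */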
507 static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
508 {
509 	struct nix_bp *bp = &hw->bp;
510 	int err, max_bpids;
511 	u64 cfg;
512 
513 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
514 	max_bpids =  FIELD_GET(NIX_CONST_MAX_BPIDS, cfg);
515 
516 	/* Reserve the BPIds for CGX and SDP */
517 	bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
518 	bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg);
519 	bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
520 			     NIX_BPIDS_PER_CPT;
521 	bp->bpids.max = max_bpids - bp->free_pool_base;
522 
523 	err = rvu_alloc_bitmap(&bp->bpids);
524 	if (err)
525 		return err;
526 
527 	bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
528 				  sizeof(u16), GFP_KERNEL);
529 	if (!bp->fn_map)
530 		return -ENOMEM;
531 
532 	bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
533 				    sizeof(u8), GFP_KERNEL);
534 	if (!bp->intf_map)
535 		return -ENOMEM;
536 
537 	bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
538 				   sizeof(u8), GFP_KERNEL);
539 	if (!bp->ref_cnt)
540 		return -ENOMEM;
541 
542 	return 0;
543 }
544 
545 void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
546 {
547 	int blkaddr, bpid, err;
548 	struct nix_hw *nix_hw;
549 	struct nix_bp *bp;
550 
551 	if (!is_lbk_vf(rvu, pcifunc))
552 		return;
553 
554 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
555 	if (err)
556 		return;
557 
558 	bp = &nix_hw->bp;
559 
560 	mutex_lock(&rvu->rsrc_lock);
561 	for (bpid = 0; bpid < bp->bpids.max; bpid++) {
562 		if (bp->fn_map[bpid] == pcifunc) {
563 			bp->ref_cnt[bpid]--;
564 			if (bp->ref_cnt[bpid])
565 				continue;
566 			rvu_free_rsrc(&bp->bpids, bpid);
567 			bp->fn_map[bpid] = 0;
568 		}
569 	}
570 	mutex_unlock(&rvu->rsrc_lock);
571 }
572 
573 static u16 nix_get_channel(u16 chan, bool cpt_link)
574 {
575 	/* The CPT channel for a given link channel is always assumed to be
576 	 * the link channel with BIT(11) set, e.g. chan 0x100 -> 0x900.
577 	 */
578 	return cpt_link ? chan | BIT(11) : chan;
579 }
580 
581 static int nix_bp_disable(struct rvu *rvu,
582 			  struct nix_bp_cfg_req *req,
583 			  struct msg_rsp *rsp, bool cpt_link)
584 {
585 	u16 pcifunc = req->hdr.pcifunc;
586 	int blkaddr, pf, type, err;
587 	u16 chan_base, chan, bpid;
588 	struct rvu_pfvf *pfvf;
589 	struct nix_hw *nix_hw;
590 	struct nix_bp *bp;
591 	u16 chan_v;
592 	u64 cfg;
593 
594 	pf = rvu_get_pf(rvu->pdev, pcifunc);
595 	type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
596 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
597 		return 0;
598 
599 	if (is_sdp_pfvf(rvu, pcifunc))
600 		type = NIX_INTF_TYPE_SDP;
601 
602 	if (cpt_link && !rvu->hw->cpt_links)
603 		return 0;
604 
605 	pfvf = rvu_get_pfvf(rvu, pcifunc);
606 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
607 	if (err)
608 		return err;
609 
610 	bp = &nix_hw->bp;
611 	chan_base = pfvf->rx_chan_base + req->chan_base;
612 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
613 		chan_v = nix_get_channel(chan, cpt_link);
614 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
615 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
616 			    cfg & ~BIT_ULL(16));
617 
618 		if (type == NIX_INTF_TYPE_LBK) {
619 			bpid = cfg & GENMASK(8, 0);
620 			mutex_lock(&rvu->rsrc_lock);
621 			rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
622 			for (bpid = 0; bpid < bp->bpids.max; bpid++) {
623 				if (bp->fn_map[bpid] == pcifunc) {
624 					bp->fn_map[bpid] = 0;
625 					bp->ref_cnt[bpid] = 0;
626 				}
627 			}
628 			mutex_unlock(&rvu->rsrc_lock);
629 		}
630 	}
631 	return 0;
632 }
633 
634 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
635 				    struct nix_bp_cfg_req *req,
636 				    struct msg_rsp *rsp)
637 {
638 	return nix_bp_disable(rvu, req, rsp, false);
639 }
640 
641 int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
642 					struct nix_bp_cfg_req *req,
643 					struct msg_rsp *rsp)
644 {
645 	return nix_bp_disable(rvu, req, rsp, true);
646 }
647 
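/* Resolve the backpressure ID for a channel: CGX and SDP BPIDs are computed
 * from the static layout described in the comment below, while LBK BPIDs
 * are allocated from the free pool and tracked in fn_map/ref_cnt so they
 * can be released on FLR.
 */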
648 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
649 			    int type, int chan_id)
650 {
651 	int bpid, blkaddr, sdp_chan_base, err;
652 	struct rvu_hwinfo *hw = rvu->hw;
653 	struct rvu_pfvf *pfvf;
654 	struct nix_hw *nix_hw;
655 	u8 cgx_id, lmac_id;
656 	struct nix_bp *bp;
657 
658 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
659 
660 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
661 	if (err)
662 		return err;
663 
664 	bp = &nix_hw->bp;
665 
666 	/* Backpressure IDs range division
667 	 * CGX channels are mapped to (0 - 191) BPIDs
668 	 * LBK channels are mapped to (192 - 255) BPIDs
669 	 * SDP channels are mapped to (256 - 511) BPIDs
670 	 *
671 	 * LMAC channels and BPIDs are mapped as follows:
672 	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
673 	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
674 	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
675 	 */
676 	switch (type) {
677 	case NIX_INTF_TYPE_CGX:
678 		if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
679 			return NIX_AF_ERR_INVALID_BPID_REQ;
680 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
681 		/* Assign bpid based on cgx, lmac and chan id */
682 		bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
683 			(lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;
684 
685 		if (req->bpid_per_chan)
686 			bpid += chan_id;
687 		if (bpid > bp->cgx_bpid_cnt)
688 			return NIX_AF_ERR_INVALID_BPID;
689 		break;
690 
691 	case NIX_INTF_TYPE_LBK:
692 		/* Alloc bpid from the free pool */
693 		mutex_lock(&rvu->rsrc_lock);
694 		bpid = rvu_alloc_rsrc(&bp->bpids);
695 		if (bpid < 0) {
696 			mutex_unlock(&rvu->rsrc_lock);
697 			return NIX_AF_ERR_INVALID_BPID;
698 		}
699 		bp->fn_map[bpid] = req->hdr.pcifunc;
700 		bp->ref_cnt[bpid]++;
701 		bpid += bp->free_pool_base;
702 		mutex_unlock(&rvu->rsrc_lock);
703 		break;
704 	case NIX_INTF_TYPE_SDP:
705 		if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
706 			return NIX_AF_ERR_INVALID_BPID_REQ;
707 
708 		/* Handle the use case of 2 SDP blocks */
709 		if (!hw->cap.programmable_chans)
710 			sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
711 		else
712 			sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;
713 
714 		bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
715 		if (req->bpid_per_chan)
716 			bpid += chan_id;
717 
718 		if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
719 			return NIX_AF_ERR_INVALID_BPID;
720 		break;
721 	default:
722 		return -EINVAL;
723 	}
724 	return bpid;
725 }
726 
727 static int nix_bp_enable(struct rvu *rvu,
728 			 struct nix_bp_cfg_req *req,
729 			 struct nix_bp_cfg_rsp *rsp,
730 			 bool cpt_link)
731 {
732 	int blkaddr, pf, type, chan_id = 0;
733 	u16 pcifunc = req->hdr.pcifunc;
734 	struct rvu_pfvf *pfvf;
735 	u16 chan_base, chan;
736 	s16 bpid, bpid_base;
737 	u16 chan_v;
738 	u64 cfg;
739 
740 	pf = rvu_get_pf(rvu->pdev, pcifunc);
741 	type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
742 	if (is_sdp_pfvf(rvu, pcifunc))
743 		type = NIX_INTF_TYPE_SDP;
744 
745 	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interfaces */
746 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
747 	    type != NIX_INTF_TYPE_SDP)
748 		return 0;
749 
750 	if (cpt_link && !rvu->hw->cpt_links)
751 		return 0;
752 
753 	pfvf = rvu_get_pfvf(rvu, pcifunc);
754 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
755 
756 	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
757 	chan_base = pfvf->rx_chan_base + req->chan_base;
758 	bpid = bpid_base;
759 
760 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
761 		if (bpid < 0) {
762 			dev_warn(rvu->dev, "Failed to enable backpressure\n");
763 			return -EINVAL;
764 		}
765 
766 		chan_v = nix_get_channel(chan, cpt_link);
767 
768 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
769 		cfg &= ~GENMASK_ULL(8, 0);
770 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
771 			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
772 		chan_id++;
773 		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
774 	}
775 
776 	for (chan = 0; chan < req->chan_cnt; chan++) {
777 		/* Map channel and bpid assign to it */
778 		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
779 					(bpid_base & 0x3FF);
780 		if (req->bpid_per_chan)
781 			bpid_base++;
782 	}
783 	rsp->chan_cnt = req->chan_cnt;
784 
785 	return 0;
786 }
787 
788 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
789 				   struct nix_bp_cfg_req *req,
790 				   struct nix_bp_cfg_rsp *rsp)
791 {
792 	return nix_bp_enable(rvu, req, rsp, false);
793 }
794 
795 int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
796 				       struct nix_bp_cfg_req *req,
797 				       struct nix_bp_cfg_rsp *rsp)
798 {
799 	return nix_bp_enable(rvu, req, rsp, true);
800 }
801 
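/* Program the L3 fields of an LSO format: the IP length field is patched
 * per segment (ADD_PAYLEN) and, for IPv4 only, the IP ID field is
 * incremented per segment (ADD_SEGNUM).
 */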
802 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
803 				 u64 format, bool v4, u64 *fidx)
804 {
805 	struct nix_lso_format field = {0};
806 
807 	/* IP's Length field */
808 	field.layer = NIX_TXLAYER_OL3;
809 	/* In IPv4 the length field is at byte offset 2; for IPv6 it's 4 */
810 	field.offset = v4 ? 2 : 4;
811 	field.sizem1 = 1; /* i.e. 2 bytes */
812 	field.alg = NIX_LSOALG_ADD_PAYLEN;
813 	rvu_write64(rvu, blkaddr,
814 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
815 		    *(u64 *)&field);
816 
817 	/* No ID field in IPv6 header */
818 	if (!v4)
819 		return;
820 
821 	/* IP's ID field */
822 	field.layer = NIX_TXLAYER_OL3;
823 	field.offset = 4;
824 	field.sizem1 = 1; /* i.e. 2 bytes */
825 	field.alg = NIX_LSOALG_ADD_SEGNUM;
826 	rvu_write64(rvu, blkaddr,
827 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
828 		    *(u64 *)&field);
829 }
830 
831 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
832 				 u64 format, u64 *fidx)
833 {
834 	struct nix_lso_format field = {0};
835 
836 	/* TCP's sequence number field */
837 	field.layer = NIX_TXLAYER_OL4;
838 	field.offset = 4;
839 	field.sizem1 = 3; /* i.e. 4 bytes */
840 	field.alg = NIX_LSOALG_ADD_OFFSET;
841 	rvu_write64(rvu, blkaddr,
842 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
843 		    *(u64 *)&field);
844 
845 	/* TCP's flags field */
846 	field.layer = NIX_TXLAYER_OL4;
847 	field.offset = 12;
848 	field.sizem1 = 1; /* 2 bytes */
849 	field.alg = NIX_LSOALG_TCP_FLAGS;
850 	rvu_write64(rvu, blkaddr,
851 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
852 		    *(u64 *)&field);
853 }
854 
855 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
856 {
857 	u64 cfg, idx, fidx = 0;
858 
859 	/* Get max HW supported format indices */
860 	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
861 	nix_hw->lso.total = cfg;
862 
863 	/* Enable LSO */
864 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
865 	/* For TSO, set first and middle segment flags to
866 	 * mask out PSH, RST & FIN flags in TCP packet
867 	 */
868 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
869 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
870 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
871 
872 	/* Setup default static LSO formats
873 	 *
874 	 * Configure format fields for TCPv4 segmentation offload
875 	 */
876 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
877 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
878 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
879 
880 	/* Set rest of the fields to NOP */
881 	for (; fidx < 8; fidx++) {
882 		rvu_write64(rvu, blkaddr,
883 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
884 	}
885 	nix_hw->lso.in_use++;
886 
887 	/* Configure format fields for TCPv6 segmentation offload */
888 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
889 	fidx = 0;
890 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
891 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
892 
893 	/* Set rest of the fields to NOP */
894 	for (; fidx < 8; fidx++) {
895 		rvu_write64(rvu, blkaddr,
896 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
897 	}
898 	nix_hw->lso.in_use++;
899 }
900 
901 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
902 {
903 	kfree(pfvf->rq_bmap);
904 	kfree(pfvf->sq_bmap);
905 	kfree(pfvf->cq_bmap);
906 	if (pfvf->rq_ctx)
907 		qmem_free(rvu->dev, pfvf->rq_ctx);
908 	if (pfvf->sq_ctx)
909 		qmem_free(rvu->dev, pfvf->sq_ctx);
910 	if (pfvf->cq_ctx)
911 		qmem_free(rvu->dev, pfvf->cq_ctx);
912 	if (pfvf->rss_ctx)
913 		qmem_free(rvu->dev, pfvf->rss_ctx);
914 	if (pfvf->nix_qints_ctx)
915 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
916 	if (pfvf->cq_ints_ctx)
917 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
918 
919 	pfvf->rq_bmap = NULL;
920 	pfvf->cq_bmap = NULL;
921 	pfvf->sq_bmap = NULL;
922 	pfvf->rq_ctx = NULL;
923 	pfvf->sq_ctx = NULL;
924 	pfvf->cq_ctx = NULL;
925 	pfvf->rss_ctx = NULL;
926 	pfvf->nix_qints_ctx = NULL;
927 	pfvf->cq_ints_ctx = NULL;
928 }
929 
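/* Allocate and configure the RSS indirection table for a NIXLF. The table
 * holds rss_sz * rss_grps entries, e.g. rss_sz = 256 with rss_grps = 8
 * gives 2048 indices, and group 'grp' starts at offset rss_sz * grp.
 */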
930 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
931 			      struct rvu_pfvf *pfvf, int nixlf,
932 			      int rss_sz, int rss_grps, int hwctx_size,
933 			      u64 way_mask, bool tag_lsb_as_adder)
934 {
935 	int err, grp, num_indices;
936 	u64 val;
937 
938 	/* RSS is not requested for this NIXLF */
939 	if (!rss_sz)
940 		return 0;
941 	num_indices = rss_sz * rss_grps;
942 
943 	/* Alloc NIX RSS HW context memory and config the base */
944 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
945 	if (err)
946 		return err;
947 
948 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
949 		    (u64)pfvf->rss_ctx->iova);
950 
951 	/* Config full RSS table size, enable RSS and caching */
952 	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
953 			ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
954 
955 	if (tag_lsb_as_adder)
956 		val |= BIT_ULL(5);
957 
958 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
959 	/* Config RSS group offset and sizes */
960 	for (grp = 0; grp < rss_grps; grp++)
961 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
962 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
963 	return 0;
964 }
965 
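/* Append one instruction at the AQ head, ring the doorbell and busy-poll
 * the result for up to ~1ms. On CTX_FAULT/LOCKERR/CTX_POISON completions,
 * also try to unlock NDC cachelines that may be stuck on a poisoned line.
 */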
966 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
967 			       struct nix_aq_inst_s *inst)
968 {
969 	struct admin_queue *aq = block->aq;
970 	struct nix_aq_res_s *result;
971 	int timeout = 1000;
972 	u64 reg, head;
973 	int ret;
974 
975 	result = (struct nix_aq_res_s *)aq->res->base;
976 
977 	/* Get current head pointer where to append this instruction */
978 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
979 	head = (reg >> 4) & AQ_PTR_MASK;
980 
981 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
982 	       (void *)inst, aq->inst->entry_sz);
983 	memset(result, 0, sizeof(*result));
984 	/* sync into memory */
985 	wmb();
986 
987 	/* Ring the doorbell and wait for result */
988 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
989 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
990 		cpu_relax();
991 		udelay(1);
992 		timeout--;
993 		if (!timeout)
994 			return -EBUSY;
995 	}
996 
997 	if (result->compcode != NIX_AQ_COMP_GOOD) {
998 		/* TODO: Replace this with some error code */
999 		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
1000 		    result->compcode == NIX_AQ_COMP_LOCKERR ||
1001 		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
1002 			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
1003 			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
1004 			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
1005 			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
1006 			if (ret)
1007 				dev_err(rvu->dev,
1008 					"%s: Not able to unlock cachelines\n", __func__);
1009 		}
1010 
1011 		return -EBUSY;
1012 	}
1013 
1014 	return 0;
1015 }
1016 
1017 static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
1018 			       u16 *smq, u16 *smq_mask)
1019 {
1020 	struct nix_cn10k_aq_enq_req *aq_req;
1021 
1022 	if (is_cn20k(rvu->pdev)) {
1023 		*smq = ((struct nix_cn20k_aq_enq_req *)req)->sq.smq;
1024 		*smq_mask = ((struct nix_cn20k_aq_enq_req *)req)->sq_mask.smq;
1025 		return;
1026 	}
1027 
1028 	if (!is_rvu_otx2(rvu)) {
1029 		aq_req = (struct nix_cn10k_aq_enq_req *)req;
1030 		*smq = aq_req->sq.smq;
1031 		*smq_mask = aq_req->sq_mask.smq;
1032 	} else {
1033 		*smq = req->sq.smq;
1034 		*smq_mask = req->sq_mask.smq;
1035 	}
1036 }
1037 
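/* Core AQ enqueue path: validate the requester's NIXLF and queue index for
 * the given context type, copy the context (RES_ADDR + 128) and mask
 * (RES_ADDR + 256) into the shared result buffer under aq->lock, submit
 * the instruction, then mirror the queue enable state into the pfvf
 * bitmaps and, for READ ops, copy the context back into the mbox response.
 */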
1038 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
1039 				   struct nix_aq_enq_req *req,
1040 				   struct nix_aq_enq_rsp *rsp)
1041 {
1042 	struct rvu_hwinfo *hw = rvu->hw;
1043 	u16 pcifunc = req->hdr.pcifunc;
1044 	int nixlf, blkaddr, rc = 0;
1045 	struct nix_aq_inst_s inst;
1046 	struct rvu_block *block;
1047 	struct admin_queue *aq;
1048 	struct rvu_pfvf *pfvf;
1049 	u16 smq, smq_mask;
1050 	void *ctx, *mask;
1051 	bool ena;
1052 	u64 cfg;
1053 
1054 	blkaddr = nix_hw->blkaddr;
1055 	block = &hw->block[blkaddr];
1056 	aq = block->aq;
1057 	if (!aq) {
1058 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
1059 		return NIX_AF_ERR_AQ_ENQUEUE;
1060 	}
1061 
1062 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1063 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1064 
1065 	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
1066 	 * operations done by AF itself.
1067 	 */
1068 	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
1069 	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
1070 		if (!pfvf->nixlf || nixlf < 0)
1071 			return NIX_AF_ERR_AF_LF_INVALID;
1072 	}
1073 
1074 	switch (req->ctype) {
1075 	case NIX_AQ_CTYPE_RQ:
1076 		/* Check if index exceeds max no of queues */
1077 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
1078 			rc = NIX_AF_ERR_AQ_ENQUEUE;
1079 		break;
1080 	case NIX_AQ_CTYPE_SQ:
1081 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
1082 			rc = NIX_AF_ERR_AQ_ENQUEUE;
1083 		break;
1084 	case NIX_AQ_CTYPE_CQ:
1085 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
1086 			rc = NIX_AF_ERR_AQ_ENQUEUE;
1087 		break;
1088 	case NIX_AQ_CTYPE_RSS:
1089 		/* Check if RSS is enabled and qidx is within range */
1090 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
1091 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
1092 		    (req->qidx >= (256UL << (cfg & 0xF))))
1093 			rc = NIX_AF_ERR_AQ_ENQUEUE;
1094 		break;
1095 	case NIX_AQ_CTYPE_MCE:
1096 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
1097 
1098 		/* Check if index exceeds MCE list length */
1099 		if (!nix_hw->mcast.mce_ctx ||
1100 		    (req->qidx >= (256UL << (cfg & 0xF))))
1101 			rc = NIX_AF_ERR_AQ_ENQUEUE;
1102 
1103 		/* Adding multicast lists for requests from PF/VFs is not
1104 		 * yet supported, so ignore this.
1105 		 */
1106 		if (rsp)
1107 			rc = NIX_AF_ERR_AQ_ENQUEUE;
1108 		break;
1109 	case NIX_AQ_CTYPE_BANDPROF:
1110 		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
1111 					nix_hw, pcifunc))
1112 			rc = NIX_AF_ERR_INVALID_BANDPROF;
1113 		break;
1114 	default:
1115 		rc = NIX_AF_ERR_AQ_ENQUEUE;
1116 	}
1117 
1118 	if (rc)
1119 		return rc;
1120 
1121 	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
1122 	/* Check whether the SMQ pointed to by the SQ belongs to this PF/VF */
1123 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
1124 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
1125 	     (req->op == NIX_AQ_INSTOP_WRITE &&
1126 	      req->sq_mask.ena && req->sq.ena && smq_mask))) {
1127 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
1128 				     pcifunc, smq))
1129 			return NIX_AF_ERR_AQ_ENQUEUE;
1130 	}
1131 
1132 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
1133 	inst.lf = nixlf;
1134 	inst.cindex = req->qidx;
1135 	inst.ctype = req->ctype;
1136 	inst.op = req->op;
1137 	/* Currently we are not supporting enqueuing multiple instructions,
1138 	 * so always choose first entry in result memory.
1139 	 */
1140 	inst.res_addr = (u64)aq->res->iova;
1141 
1142 	/* Hardware uses the same aq->res->base for updating the result of
1143 	 * the previous instruction, hence wait here till it is done.
1144 	 */
1145 	spin_lock(&aq->lock);
1146 
1147 	/* Clean result + context memory */
1148 	memset(aq->res->base, 0, aq->res->entry_sz);
1149 	/* Context needs to be written at RES_ADDR + 128 */
1150 	ctx = aq->res->base + 128;
1151 	/* Mask needs to be written at RES_ADDR + 256 */
1152 	mask = aq->res->base + 256;
1153 
1154 	switch (req->op) {
1155 	case NIX_AQ_INSTOP_WRITE:
1156 		if (req->ctype == NIX_AQ_CTYPE_RQ)
1157 			memcpy(mask, &req->rq_mask,
1158 			       NIX_MAX_CTX_SIZE);
1159 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
1160 			memcpy(mask, &req->sq_mask,
1161 			       NIX_MAX_CTX_SIZE);
1162 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
1163 			memcpy(mask, &req->cq_mask,
1164 			       NIX_MAX_CTX_SIZE);
1165 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
1166 			memcpy(mask, &req->rss_mask,
1167 			       NIX_MAX_CTX_SIZE);
1168 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
1169 			memcpy(mask, &req->mce_mask,
1170 			       NIX_MAX_CTX_SIZE);
1171 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1172 			memcpy(mask, &req->prof_mask,
1173 			       NIX_MAX_CTX_SIZE);
1174 		fallthrough;
1175 	case NIX_AQ_INSTOP_INIT:
1176 		if (req->ctype == NIX_AQ_CTYPE_RQ)
1177 			memcpy(ctx, &req->rq, NIX_MAX_CTX_SIZE);
1178 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
1179 			memcpy(ctx, &req->sq, NIX_MAX_CTX_SIZE);
1180 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
1181 			memcpy(ctx, &req->cq, NIX_MAX_CTX_SIZE);
1182 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
1183 			memcpy(ctx, &req->rss, NIX_MAX_CTX_SIZE);
1184 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
1185 			memcpy(ctx, &req->mce, NIX_MAX_CTX_SIZE);
1186 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1187 			memcpy(ctx, &req->prof, NIX_MAX_CTX_SIZE);
1188 		break;
1189 	case NIX_AQ_INSTOP_NOP:
1190 	case NIX_AQ_INSTOP_READ:
1191 	case NIX_AQ_INSTOP_LOCK:
1192 	case NIX_AQ_INSTOP_UNLOCK:
1193 		break;
1194 	default:
1195 		rc = NIX_AF_ERR_AQ_ENQUEUE;
1196 		spin_unlock(&aq->lock);
1197 		return rc;
1198 	}
1199 
1200 	/* Submit the instruction to AQ */
1201 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
1202 	if (rc) {
1203 		spin_unlock(&aq->lock);
1204 		return rc;
1205 	}
1206 
1207 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
1208 	if (req->op == NIX_AQ_INSTOP_INIT) {
1209 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
1210 			__set_bit(req->qidx, pfvf->rq_bmap);
1211 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
1212 			__set_bit(req->qidx, pfvf->sq_bmap);
1213 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
1214 			__set_bit(req->qidx, pfvf->cq_bmap);
1215 	}
1216 
1217 	if (req->op == NIX_AQ_INSTOP_WRITE) {
1218 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
1219 			ena = (req->rq.ena & req->rq_mask.ena) |
1220 				(test_bit(req->qidx, pfvf->rq_bmap) &
1221 				~req->rq_mask.ena);
1222 			if (ena)
1223 				__set_bit(req->qidx, pfvf->rq_bmap);
1224 			else
1225 				__clear_bit(req->qidx, pfvf->rq_bmap);
1226 		}
1227 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
1228 			ena = (req->rq.ena & req->sq_mask.ena) |
1229 				(test_bit(req->qidx, pfvf->sq_bmap) &
1230 				~req->sq_mask.ena);
1231 			if (ena)
1232 				__set_bit(req->qidx, pfvf->sq_bmap);
1233 			else
1234 				__clear_bit(req->qidx, pfvf->sq_bmap);
1235 		}
1236 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
1237 			ena = (req->rq.ena & req->cq_mask.ena) |
1238 				(test_bit(req->qidx, pfvf->cq_bmap) &
1239 				~req->cq_mask.ena);
1240 			if (ena)
1241 				__set_bit(req->qidx, pfvf->cq_bmap);
1242 			else
1243 				__clear_bit(req->qidx, pfvf->cq_bmap);
1244 		}
1245 	}
1246 
1247 	if (rsp) {
1248 		/* Copy read context into mailbox */
1249 		if (req->op == NIX_AQ_INSTOP_READ) {
1250 			if (req->ctype == NIX_AQ_CTYPE_RQ)
1251 				memcpy(&rsp->rq, ctx,
1252 				       NIX_MAX_CTX_SIZE);
1253 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
1254 				memcpy(&rsp->sq, ctx,
1255 				       NIX_MAX_CTX_SIZE);
1256 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
1257 				memcpy(&rsp->cq, ctx,
1258 				       NIX_MAX_CTX_SIZE);
1259 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
1260 				memcpy(&rsp->rss, ctx,
1261 				       NIX_MAX_CTX_SIZE);
1262 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
1263 				memcpy(&rsp->mce, ctx,
1264 				       NIX_MAX_CTX_SIZE);
1265 			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1266 				memcpy(&rsp->prof, ctx,
1267 				       NIX_MAX_CTX_SIZE);
1268 		}
1269 	}
1270 
1271 	spin_unlock(&aq->lock);
1272 	return 0;
1273 }
1274 
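/* Read back a CQ context after an AQ WRITE and compare it, under the write
 * mask (minus fields HW can update), against what was requested. Returns
 * NIX_AF_ERR_AQ_CTX_RETRY_WRITE if the write did not take effect.
 */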
1275 static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
1276 				 struct nix_aq_enq_req *req, u8 ctype)
1277 {
1278 	struct nix_cn10k_aq_enq_req aq_req;
1279 	struct nix_cn10k_aq_enq_rsp aq_rsp;
1280 	int rc, word;
1281 
1282 	if (req->ctype != NIX_AQ_CTYPE_CQ)
1283 		return 0;
1284 
1285 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
1286 				 req->hdr.pcifunc, ctype, req->qidx);
1287 	if (rc) {
1288 		dev_err(rvu->dev,
1289 			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
1290 			__func__, nix_get_ctx_name(ctype), req->qidx,
1291 			req->hdr.pcifunc);
1292 		return rc;
1293 	}
1294 
1295 	/* Make a copy of the original context & mask, which are required
1296 	 * for resubmission
1297 	 */
1298 	memcpy(&aq_req.cq_mask, &req->cq_mask, NIX_MAX_CTX_SIZE);
1299 	memcpy(&aq_req.cq, &req->cq, NIX_MAX_CTX_SIZE);
1300 
1301 	/* exclude fields which HW can update */
1302 	aq_req.cq_mask.cq_err       = 0;
1303 	aq_req.cq_mask.wrptr        = 0;
1304 	aq_req.cq_mask.tail         = 0;
1305 	aq_req.cq_mask.head	    = 0;
1306 	aq_req.cq_mask.avg_level    = 0;
1307 	aq_req.cq_mask.update_time  = 0;
1308 	aq_req.cq_mask.substream    = 0;
1309 
1310 	/* Context mask (cq_mask) holds mask value of fields which
1311 	 * are changed in AQ WRITE operation.
1312 	 * for example cq.drop = 0xa;
1313 	 *	       cq_mask.drop = 0xff;
1314 	 * Below logic performs '&' between cq and cq_mask so that non
1315 	 * updated fields are masked out for request and response
1316 	 * comparison
1317 	 */
1318 	for (word = 0; word < NIX_MAX_CTX_SIZE / sizeof(u64);
1319 	     word++) {
1320 		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
1321 			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1322 		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
1323 			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1324 	}
1325 
1326 	if (memcmp(&aq_req.cq, &aq_rsp.cq, NIX_MAX_CTX_SIZE))
1327 		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
1328 
1329 	return 0;
1330 }
1331 
1332 int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
1333 			struct nix_aq_enq_rsp *rsp)
1334 {
1335 	struct nix_hw *nix_hw;
1336 	int err, retries = 5;
1337 	int blkaddr;
1338 
1339 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
1340 	if (blkaddr < 0)
1341 		return NIX_AF_ERR_AF_LF_INVALID;
1342 
1343 	nix_hw =  get_nix_hw(rvu->hw, blkaddr);
1344 	if (!nix_hw)
1345 		return NIX_AF_ERR_INVALID_NIXBLK;
1346 
1347 retry:
1348 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
1349 
1350 	/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
1351 	 * As a workaround, perform a CQ context read after each AQ write. If the
1352 	 * read shows the AQ write is not reflected, perform the AQ write again.
1353 	 */
1354 	if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
1355 		err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
1356 		if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
1357 			if (retries--)
1358 				goto retry;
1359 			else
1360 				return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
1361 		}
1362 	}
1363 
1364 	return err;
1365 }
1366 
1367 static const char *nix_get_ctx_name(int ctype)
1368 {
1369 	switch (ctype) {
1370 	case NIX_AQ_CTYPE_CQ:
1371 		return "CQ";
1372 	case NIX_AQ_CTYPE_SQ:
1373 		return "SQ";
1374 	case NIX_AQ_CTYPE_RQ:
1375 		return "RQ";
1376 	case NIX_AQ_CTYPE_RSS:
1377 		return "RSS";
1378 	}
1379 	return "";
1380 }
1381 
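/* Disable (ena = 0) every enabled RQ/SQ/CQ context of a NIXLF by walking
 * the corresponding queue bitmap and issuing masked AQ WRITEs; the last
 * failure is recorded but the walk continues.
 */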
1382 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
1383 {
1384 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1385 	struct nix_aq_enq_req aq_req;
1386 	unsigned long *bmap;
1387 	int qidx, q_cnt = 0;
1388 	int err = 0, rc;
1389 
1390 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
1391 		return NIX_AF_ERR_AQ_ENQUEUE;
1392 
1393 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1394 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
1395 
1396 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
1397 		aq_req.cq.ena = 0;
1398 		aq_req.cq_mask.ena = 1;
1399 		aq_req.cq.bp_ena = 0;
1400 		aq_req.cq_mask.bp_ena = 1;
1401 		q_cnt = pfvf->cq_ctx->qsize;
1402 		bmap = pfvf->cq_bmap;
1403 	}
1404 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
1405 		aq_req.sq.ena = 0;
1406 		aq_req.sq_mask.ena = 1;
1407 		q_cnt = pfvf->sq_ctx->qsize;
1408 		bmap = pfvf->sq_bmap;
1409 	}
1410 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
1411 		aq_req.rq.ena = 0;
1412 		aq_req.rq_mask.ena = 1;
1413 		q_cnt = pfvf->rq_ctx->qsize;
1414 		bmap = pfvf->rq_bmap;
1415 	}
1416 
1417 	aq_req.ctype = req->ctype;
1418 	aq_req.op = NIX_AQ_INSTOP_WRITE;
1419 
1420 	for (qidx = 0; qidx < q_cnt; qidx++) {
1421 		if (!test_bit(qidx, bmap))
1422 			continue;
1423 		aq_req.qidx = qidx;
1424 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1425 		if (rc) {
1426 			err = rc;
1427 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1428 				nix_get_ctx_name(req->ctype), qidx);
1429 		}
1430 	}
1431 
1432 	return err;
1433 }
1434 
1435 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1436 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1437 {
1438 	struct nix_aq_enq_req lock_ctx_req;
1439 	int err;
1440 
1441 	if (req->op != NIX_AQ_INSTOP_INIT)
1442 		return 0;
1443 
1444 	if (req->ctype == NIX_AQ_CTYPE_MCE ||
1445 	    req->ctype == NIX_AQ_CTYPE_DYNO)
1446 		return 0;
1447 
1448 	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1449 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1450 	lock_ctx_req.ctype = req->ctype;
1451 	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1452 	lock_ctx_req.qidx = req->qidx;
1453 	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1454 	if (err)
1455 		dev_err(rvu->dev,
1456 			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1457 			req->hdr.pcifunc,
1458 			nix_get_ctx_name(req->ctype), req->qidx);
1459 	return err;
1460 }
1461 
1462 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1463 				struct nix_aq_enq_req *req,
1464 				struct nix_aq_enq_rsp *rsp)
1465 {
1466 	int err;
1467 
1468 	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1469 	if (!err)
1470 		err = nix_lf_hwctx_lockdown(rvu, req);
1471 	return err;
1472 }
1473 #else
1474 
1475 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1476 				struct nix_aq_enq_req *req,
1477 				struct nix_aq_enq_rsp *rsp)
1478 {
1479 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
1480 }
1481 #endif
1482 /* CN10K mbox handler */
1483 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1484 				      struct nix_cn10k_aq_enq_req *req,
1485 				      struct nix_cn10k_aq_enq_rsp *rsp)
1486 {
1487 	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1488 				  (struct nix_aq_enq_rsp *)rsp);
1489 }
1490 
1491 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1492 				       struct hwctx_disable_req *req,
1493 				       struct msg_rsp *rsp)
1494 {
1495 	return nix_lf_hwctx_disable(rvu, req);
1496 }
1497 
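/* NIXLF allocation mbox handler: validates the NPA/SSO mappings and RSS
 * geometry, resets the LF, allocates RQ/SQ/CQ/RSS and interrupt context
 * memory, programs the per-LF CSRs and finally runs the interface-type
 * specific init. Any allocation failure frees everything via free_mem.
 */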
1498 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1499 				  struct nix_lf_alloc_req *req,
1500 				  struct nix_lf_alloc_rsp *rsp)
1501 {
1502 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
1503 	struct rvu_hwinfo *hw = rvu->hw;
1504 	u16 pcifunc = req->hdr.pcifunc;
1505 	struct rvu_block *block;
1506 	struct rvu_pfvf *pfvf;
1507 	u64 cfg, ctx_cfg;
1508 	int blkaddr;
1509 
1510 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1511 		return NIX_AF_ERR_PARAM;
1512 
1513 	if (req->way_mask)
1514 		req->way_mask &= 0xFFFF;
1515 
1516 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1517 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1518 	if (!pfvf->nixlf || blkaddr < 0)
1519 		return NIX_AF_ERR_AF_LF_INVALID;
1520 
1521 	block = &hw->block[blkaddr];
1522 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1523 	if (nixlf < 0)
1524 		return NIX_AF_ERR_AF_LF_INVALID;
1525 
1526 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1527 	if (req->npa_func) {
1528 		/* If default, use 'this' NIXLF's PFFUNC */
1529 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1530 			req->npa_func = pcifunc;
1531 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1532 			return NIX_AF_INVAL_NPA_PF_FUNC;
1533 	}
1534 
1535 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1536 	if (req->sso_func) {
1537 		/* If default, use 'this' NIXLF's PFFUNC */
1538 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1539 			req->sso_func = pcifunc;
1540 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1541 			return NIX_AF_INVAL_SSO_PF_FUNC;
1542 	}
1543 
1544 	/* If RSS is being enabled, check if requested config is valid.
1545 	 * The RSS table size should be a power of two; otherwise
1546 	 * RSS_GRP::OFFSET + adder might go beyond that group or
1547 	 * the entire table couldn't be used.
1548 	 */
1549 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1550 			    !is_power_of_2(req->rss_sz)))
1551 		return NIX_AF_ERR_RSS_SIZE_INVALID;
1552 
1553 	if (req->rss_sz &&
1554 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1555 		return NIX_AF_ERR_RSS_GRPS_INVALID;
1556 
1557 	/* Reset this NIX LF */
1558 	err = rvu_lf_reset(rvu, block, nixlf);
1559 	if (err) {
1560 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1561 			block->addr - BLKADDR_NIX0, nixlf);
1562 		return NIX_AF_ERR_LF_RESET;
1563 	}
1564 
1565 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1566 
1567 	/* Alloc NIX RQ HW context memory and config the base */
1568 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1569 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1570 	if (err)
1571 		goto free_mem;
1572 
1573 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1574 	if (!pfvf->rq_bmap)
1575 		goto free_mem;
1576 
1577 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1578 		    (u64)pfvf->rq_ctx->iova);
1579 
1580 	/* Set caching and queue count in HW */
1581 	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1582 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1583 
1584 	/* Alloc NIX SQ HW context memory and config the base */
1585 	hwctx_size = 1UL << (ctx_cfg & 0xF);
1586 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1587 	if (err)
1588 		goto free_mem;
1589 
1590 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1591 	if (!pfvf->sq_bmap)
1592 		goto free_mem;
1593 
1594 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1595 		    (u64)pfvf->sq_ctx->iova);
1596 
1597 	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1598 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1599 
1600 	/* Alloc NIX CQ HW context memory and config the base */
1601 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1602 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1603 	if (err)
1604 		goto free_mem;
1605 
1606 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1607 	if (!pfvf->cq_bmap)
1608 		goto free_mem;
1609 
1610 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1611 		    (u64)pfvf->cq_ctx->iova);
1612 
1613 	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1614 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1615 
1616 	/* Initialize receive side scaling (RSS) */
1617 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1618 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1619 				 req->rss_grps, hwctx_size, req->way_mask,
1620 				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
1621 	if (err)
1622 		goto free_mem;
1623 
1624 	/* Alloc memory for CQINT's HW contexts */
1625 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1626 	qints = (cfg >> 24) & 0xFFF;
1627 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1628 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1629 	if (err)
1630 		goto free_mem;
1631 
1632 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1633 		    (u64)pfvf->cq_ints_ctx->iova);
1634 
1635 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1636 		    BIT_ULL(36) | req->way_mask << 20);
1637 
1638 	/* Alloc memory for QINT's HW contexts */
1639 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1640 	qints = (cfg >> 12) & 0xFFF;
1641 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1642 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1643 	if (err)
1644 		goto free_mem;
1645 
1646 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1647 		    (u64)pfvf->nix_qints_ctx->iova);
1648 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1649 		    BIT_ULL(36) | req->way_mask << 20);
1650 
1651 	/* Set up VLANX TPIDs.
1652 	 * Use VLAN1 for 802.1Q
1653 	 * and VLAN0 for 802.1AD.
1654 	 */
1655 	cfg = (0x8100ULL << 16) | 0x88A8ULL;
1656 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1657 
1658 	/* Enable LMTST for this NIX LF */
1659 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1660 
1661 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1662 	if (req->npa_func)
1663 		cfg = req->npa_func;
1664 	if (req->sso_func)
1665 		cfg |= (u64)req->sso_func << 16;
1666 
1667 	cfg |= (u64)req->xqe_sz << 33;
1668 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1669 
1670 	/* Config Rx pkt length, csum checks and apad enable/disable */
1671 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1672 
1673 	/* Configure pkind for TX parse config */
1674 	cfg = NPC_TX_DEF_PKIND;
1675 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1676 
1677 	if (is_rep_dev(rvu, pcifunc)) {
1678 		pfvf->tx_chan_base = RVU_SWITCH_LBK_CHAN;
1679 		pfvf->tx_chan_cnt = 1;
1680 		goto exit;
1681 	}
1682 
1683 	intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1684 	if (is_sdp_pfvf(rvu, pcifunc))
1685 		intf = NIX_INTF_TYPE_SDP;
1686 
1687 	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
1688 				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
1689 	if (err)
1690 		goto free_mem;
1691 
1692 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
1693 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1694 
1695 	/* Configure RX VTAG Type 7 (strip) for vf vlan */
1696 	rvu_write64(rvu, blkaddr,
1697 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1698 		    VTAGSIZE_T4 | VTAG_STRIP);
1699 
1700 	goto exit;
1701 
1702 free_mem:
1703 	nix_ctx_free(rvu, pfvf);
1704 	rc = -ENOMEM;
1705 
1706 exit:
1707 	/* Set macaddr of this PF/VF */
1708 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1709 
1710 	/* set SQB size info */
1711 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1712 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1713 	rsp->rx_chan_base = pfvf->rx_chan_base;
1714 	rsp->tx_chan_base = pfvf->tx_chan_base;
1715 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1716 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1717 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1718 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1719 	/* Get HW supported stat count */
1720 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1721 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1722 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1723 	/* Get count of CQ IRQs and error IRQs supported per LF */
1724 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1725 	rsp->qints = ((cfg >> 12) & 0xFFF);
1726 	rsp->cints = ((cfg >> 24) & 0xFFF);
1727 	rsp->cgx_links = hw->cgx_links;
1728 	rsp->lbk_links = hw->lbk_links;
1729 	rsp->sdp_links = hw->sdp_links;
1730 
1731 	return rc;
1732 }
1733 
rvu_mbox_handler_nix_lf_free(struct rvu * rvu,struct nix_lf_free_req * req,struct msg_rsp * rsp)1734 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1735 				 struct msg_rsp *rsp)
1736 {
1737 	struct rvu_hwinfo *hw = rvu->hw;
1738 	u16 pcifunc = req->hdr.pcifunc;
1739 	struct rvu_block *block;
1740 	int blkaddr, nixlf, err;
1741 	struct rvu_pfvf *pfvf;
1742 
1743 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1744 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1745 	if (!pfvf->nixlf || blkaddr < 0)
1746 		return NIX_AF_ERR_AF_LF_INVALID;
1747 
1748 	block = &hw->block[blkaddr];
1749 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1750 	if (nixlf < 0)
1751 		return NIX_AF_ERR_AF_LF_INVALID;
1752 
1753 	if (is_rep_dev(rvu, pcifunc))
1754 		goto free_lf;
1755 
1756 	if (req->flags & NIX_LF_DISABLE_FLOWS)
1757 		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1758 	else
1759 		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1760 
1761 	/* Free any tx vtag def entries used by this NIX LF */
1762 	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1763 		nix_free_tx_vtag_entries(rvu, pcifunc);
1764 
1765 	nix_interface_deinit(rvu, pcifunc, nixlf);
1766 
1767 free_lf:
1768 	/* Reset this NIX LF */
1769 	err = rvu_lf_reset(rvu, block, nixlf);
1770 	if (err) {
1771 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1772 			block->addr - BLKADDR_NIX0, nixlf);
1773 		return NIX_AF_ERR_LF_RESET;
1774 	}
1775 
1776 	nix_ctx_free(rvu, pfvf);
1777 
1778 	return 0;
1779 }
1780 
rvu_mbox_handler_nix_mark_format_cfg(struct rvu * rvu,struct nix_mark_format_cfg * req,struct nix_mark_format_cfg_rsp * rsp)1781 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1782 					 struct nix_mark_format_cfg  *req,
1783 					 struct nix_mark_format_cfg_rsp *rsp)
1784 {
1785 	u16 pcifunc = req->hdr.pcifunc;
1786 	struct nix_hw *nix_hw;
1787 	struct rvu_pfvf *pfvf;
1788 	int blkaddr, rc;
1789 	u32 cfg;
1790 
1791 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1792 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1793 	if (!pfvf->nixlf || blkaddr < 0)
1794 		return NIX_AF_ERR_AF_LF_INVALID;
1795 
1796 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1797 	if (!nix_hw)
1798 		return NIX_AF_ERR_INVALID_NIXBLK;
1799 
1800 	cfg = (((u32)req->offset & 0x7) << 16) |
1801 	      (((u32)req->y_mask & 0xF) << 12) |
1802 	      (((u32)req->y_val & 0xF) << 8) |
1803 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1804 
1805 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1806 	if (rc < 0) {
1807 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1808 			rvu_get_pf(rvu->pdev,  pcifunc),
1809 				   pcifunc & RVU_PFVF_FUNC_MASK);
1810 		return NIX_AF_ERR_MARK_CFG_FAIL;
1811 	}
1812 
1813 	rsp->mark_format_idx = rc;
1814 	return 0;
1815 }
1816 
1817 /* Handle shaper update specially for a few revisions */
1818 static bool
handle_txschq_shaper_update(struct rvu * rvu,int blkaddr,int nixlf,int lvl,u64 reg,u64 regval)1819 handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
1820 			    int lvl, u64 reg, u64 regval)
1821 {
1822 	u64 regbase, oldval, sw_xoff = 0;
1823 	u64 dbgval, md_debug0 = 0;
1824 	unsigned long poll_tmo;
1825 	bool rate_reg = 0;
1826 	bool rate_reg = false;
1827 
1828 	regbase = reg & 0xFFFF;
1829 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1830 
1831 	/* Check for rate register */
1832 	switch (lvl) {
1833 	case NIX_TXSCH_LVL_TL1:
1834 		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
1835 		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
1836 
1837 		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
1838 		break;
1839 	case NIX_TXSCH_LVL_TL2:
1840 		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
1841 		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
1842 
1843 		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
1844 			    regbase == NIX_AF_TL2X_PIR(0));
1845 		break;
1846 	case NIX_TXSCH_LVL_TL3:
1847 		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
1848 		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
1849 
1850 		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
1851 			    regbase == NIX_AF_TL3X_PIR(0));
1852 		break;
1853 	case NIX_TXSCH_LVL_TL4:
1854 		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
1855 		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
1856 
1857 		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
1858 			    regbase == NIX_AF_TL4X_PIR(0));
1859 		break;
1860 	case NIX_TXSCH_LVL_MDQ:
1861 		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
1862 		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
1863 			    regbase == NIX_AF_MDQX_PIR(0));
1864 		break;
1865 	}
1866 
1867 	if (!rate_reg)
1868 		return false;
1869 
1870 	/* Nothing special to do when state is not toggled */
1871 	oldval = rvu_read64(rvu, blkaddr, reg);
1872 	if ((oldval & 0x1) == (regval & 0x1)) {
1873 		rvu_write64(rvu, blkaddr, reg, regval);
1874 		return true;
1875 	}
1876 
1877 	/* PIR/CIR disable */
1878 	if (!(regval & 0x1)) {
1879 		rvu_write64(rvu, blkaddr, sw_xoff, 1);
1880 		rvu_write64(rvu, blkaddr, reg, 0);
1881 		udelay(4);
1882 		rvu_write64(rvu, blkaddr, sw_xoff, 0);
1883 		return true;
1884 	}
1885 
1886 	/* PIR/CIR enable */
1887 	rvu_write64(rvu, blkaddr, sw_xoff, 1);
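	/* With SW_XOFF asserted, wait for the queue to go idle before
	 * turning the shaper back on
	 */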
1888 	if (md_debug0) {
1889 		poll_tmo = jiffies + usecs_to_jiffies(10000);
1890 		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
1891 		do {
1892 			if (time_after(jiffies, poll_tmo)) {
1893 				dev_err(rvu->dev,
1894 					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
1895 					nixlf, schq, lvl);
1896 				goto exit;
1897 			}
1898 			usleep_range(1, 5);
1899 			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
1900 		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
1901 	}
1902 	rvu_write64(rvu, blkaddr, reg, regval);
1903 exit:
1904 	rvu_write64(rvu, blkaddr, sw_xoff, 0);
1905 	return true;
1906 }
1907 
nix_reset_tx_schedule(struct rvu * rvu,int blkaddr,int lvl,int schq)1908 static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
1909 				  int lvl, int schq)
1910 {
1911 	u64 tlx_parent = 0, tlx_schedule = 0;
1912 
1913 	switch (lvl) {
1914 	case NIX_TXSCH_LVL_TL2:
1915 		tlx_parent   = NIX_AF_TL2X_PARENT(schq);
1916 		tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
1917 		break;
1918 	case NIX_TXSCH_LVL_TL3:
1919 		tlx_parent   = NIX_AF_TL3X_PARENT(schq);
1920 		tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
1921 		break;
1922 	case NIX_TXSCH_LVL_TL4:
1923 		tlx_parent   = NIX_AF_TL4X_PARENT(schq);
1924 		tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
1925 		break;
1926 	case NIX_TXSCH_LVL_MDQ:
1927 		/* no need to reset SMQ_CFG as HW clears this CSR
1928 		 * on SMQ flush
1929 		 */
1930 		tlx_parent   = NIX_AF_MDQX_PARENT(schq);
1931 		tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
1932 		break;
1933 	default:
1934 		return;
1935 	}
1936 
1937 	if (tlx_parent)
1938 		rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
1939 
1940 	if (tlx_schedule)
1941 		rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
1942 }
1943 
1944 /* Disable shaping of pkts by a scheduler queue
1945  * at a given scheduler level.
1946  */
nix_reset_tx_shaping(struct rvu * rvu,int blkaddr,int nixlf,int lvl,int schq)1947 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1948 				 int nixlf, int lvl, int schq)
1949 {
1950 	struct rvu_hwinfo *hw = rvu->hw;
1951 	u64  cir_reg = 0, pir_reg = 0;
1952 	u64  cfg;
1953 
1954 	switch (lvl) {
1955 	case NIX_TXSCH_LVL_TL1:
1956 		cir_reg = NIX_AF_TL1X_CIR(schq);
1957 		pir_reg = 0; /* PIR not available at TL1 */
1958 		break;
1959 	case NIX_TXSCH_LVL_TL2:
1960 		cir_reg = NIX_AF_TL2X_CIR(schq);
1961 		pir_reg = NIX_AF_TL2X_PIR(schq);
1962 		break;
1963 	case NIX_TXSCH_LVL_TL3:
1964 		cir_reg = NIX_AF_TL3X_CIR(schq);
1965 		pir_reg = NIX_AF_TL3X_PIR(schq);
1966 		break;
1967 	case NIX_TXSCH_LVL_TL4:
1968 		cir_reg = NIX_AF_TL4X_CIR(schq);
1969 		pir_reg = NIX_AF_TL4X_PIR(schq);
1970 		break;
1971 	case NIX_TXSCH_LVL_MDQ:
1972 		cir_reg = NIX_AF_MDQX_CIR(schq);
1973 		pir_reg = NIX_AF_MDQX_PIR(schq);
1974 		break;
1975 	}
1976 
1977 	/* Shaper state toggle needs wait/poll */
1978 	if (hw->cap.nix_shaper_toggle_wait) {
1979 		if (cir_reg)
1980 			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1981 						    lvl, cir_reg, 0);
1982 		if (pir_reg)
1983 			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1984 						    lvl, pir_reg, 0);
1985 		return;
1986 	}
1987 
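	/* No toggle-wait quirk: simply clear the enable bit (bit 0) of CIR,
	 * then PIR
	 */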
1988 	if (!cir_reg)
1989 		return;
1990 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1991 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1992 
1993 	if (!pir_reg)
1994 		return;
1995 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1996 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1997 }
1998 
nix_reset_tx_linkcfg(struct rvu * rvu,int blkaddr,int lvl,int schq)1999 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
2000 				 int lvl, int schq)
2001 {
2002 	struct rvu_hwinfo *hw = rvu->hw;
2003 	int link_level;
2004 	int link;
2005 
2006 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
2007 		return;
2008 
2009 	/* Reset TL4's SDP link config */
2010 	if (lvl == NIX_TXSCH_LVL_TL4)
2011 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
2012 
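	/* NIX_AF_PSE_CHANNEL_LEVEL[0] selects whether TL3 or TL2 is the
	 * level mapped to transmit links
	 */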
2013 	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2014 			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2015 	if (lvl != link_level)
2016 		return;
2017 
2018 	/* Reset TL2's CGX or LBK link config */
2019 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
2020 		rvu_write64(rvu, blkaddr,
2021 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
2022 }
2023 
nix_clear_tx_xoff(struct rvu * rvu,int blkaddr,int lvl,int schq)2024 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
2025 			      int lvl, int schq)
2026 {
2027 	struct rvu_hwinfo *hw = rvu->hw;
2028 	u64 reg;
2029 
2030 	/* Skip this if shaping is not supported */
2031 	if (!hw->cap.nix_shaping)
2032 		return;
2033 
2034 	/* Clear level specific SW_XOFF */
2035 	switch (lvl) {
2036 	case NIX_TXSCH_LVL_TL1:
2037 		reg = NIX_AF_TL1X_SW_XOFF(schq);
2038 		break;
2039 	case NIX_TXSCH_LVL_TL2:
2040 		reg = NIX_AF_TL2X_SW_XOFF(schq);
2041 		break;
2042 	case NIX_TXSCH_LVL_TL3:
2043 		reg = NIX_AF_TL3X_SW_XOFF(schq);
2044 		break;
2045 	case NIX_TXSCH_LVL_TL4:
2046 		reg = NIX_AF_TL4X_SW_XOFF(schq);
2047 		break;
2048 	case NIX_TXSCH_LVL_MDQ:
2049 		reg = NIX_AF_MDQX_SW_XOFF(schq);
2050 		break;
2051 	default:
2052 		return;
2053 	}
2054 
2055 	rvu_write64(rvu, blkaddr, reg, 0x0);
2056 }
2057 
nix_get_tx_link(struct rvu * rvu,u16 pcifunc)2058 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
2059 {
2060 	struct rvu_hwinfo *hw = rvu->hw;
2061 	int pf = rvu_get_pf(rvu->pdev, pcifunc);
2062 	u8 cgx_id = 0, lmac_id = 0;
2063 
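	/* TX link numbering: CGX LMAC links first, then LBK links, then
	 * the SDP link
	 */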
2064 	if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */
2065 		return hw->cgx_links;
2066 	} else if (is_pf_cgxmapped(rvu, pf)) {
2067 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2068 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
2069 	}
2070 
2071 	/* SDP link */
2072 	return hw->cgx_links + hw->lbk_links;
2073 }
2074 
nix_get_txschq_range(struct rvu * rvu,u16 pcifunc,int link,int * start,int * end)2075 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
2076 				 int link, int *start, int *end)
2077 {
2078 	struct rvu_hwinfo *hw = rvu->hw;
2079 	int pf = rvu_get_pf(rvu->pdev, pcifunc);
2080 
2081 	/* LBK links */
2082 	if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) {
2083 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
2084 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
2085 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
2086 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
2087 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
2088 	} else { /* SDP link */
2089 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
2090 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
2091 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
2092 	}
2093 }
2094 
nix_check_txschq_alloc_req(struct rvu * rvu,int lvl,u16 pcifunc,struct nix_hw * nix_hw,struct nix_txsch_alloc_req * req)2095 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
2096 				      struct nix_hw *nix_hw,
2097 				      struct nix_txsch_alloc_req *req)
2098 {
2099 	struct rvu_hwinfo *hw = rvu->hw;
2100 	int schq, req_schq, free_cnt;
2101 	struct nix_txsch *txsch;
2102 	int link, start, end;
2103 
2104 	txsch = &nix_hw->txsch[lvl];
2105 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
2106 
2107 	if (!req_schq)
2108 		return 0;
2109 
2110 	link = nix_get_tx_link(rvu, pcifunc);
2111 
2112 	/* For traffic aggregating scheduler level, one queue is enough */
2113 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2114 		if (req_schq != 1)
2115 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
2116 		return 0;
2117 	}
2118 
2119 	/* Get free SCHQ count and check if request can be accommodated */
2120 	if (hw->cap.nix_fixed_txschq_mapping) {
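		/* Fixed mapping: a PF_FUNC can only own the single queue at
		 * 'start + func number' within its link's range
		 */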
2121 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
2122 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
2123 		if (end <= txsch->schq.max && schq < end &&
2124 		    !test_bit(schq, txsch->schq.bmap))
2125 			free_cnt = 1;
2126 		else
2127 			free_cnt = 0;
2128 	} else {
2129 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
2130 	}
2131 
2132 	if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
2133 	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
2134 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
2135 
2136 	/* If contiguous queues are needed, check for availability */
2137 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
2138 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
2139 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
2140 
2141 	return 0;
2142 }
2143 
nix_txsch_alloc(struct rvu * rvu,struct nix_txsch * txsch,struct nix_txsch_alloc_rsp * rsp,int lvl,int start,int end)2144 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
2145 			    struct nix_txsch_alloc_rsp *rsp,
2146 			    int lvl, int start, int end)
2147 {
2148 	struct rvu_hwinfo *hw = rvu->hw;
2149 	u16 pcifunc = rsp->hdr.pcifunc;
2150 	int idx, schq;
2151 
2152 	/* For traffic aggregating levels, queue alloc is based
2153 	 * on the transmit link to which the PF_FUNC is mapped.
2154 	 */
2155 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2156 		/* A single TL queue is allocated */
2157 		if (rsp->schq_contig[lvl]) {
2158 			rsp->schq_contig[lvl] = 1;
2159 			rsp->schq_contig_list[lvl][0] = start;
2160 		}
2161 
2162 		/* Both contig and non-contig reqs don't make sense here */
2163 		if (rsp->schq_contig[lvl])
2164 			rsp->schq[lvl] = 0;
2165 
2166 		if (rsp->schq[lvl]) {
2167 			rsp->schq[lvl] = 1;
2168 			rsp->schq_list[lvl][0] = start;
2169 		}
2170 		return;
2171 	}
2172 
2173 	/* Adjust the queue request count if HW supports
2174 	 * only one queue per level (fixed txschq mapping).
2175 	 */
2176 	if (hw->cap.nix_fixed_txschq_mapping) {
2177 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
2178 		schq = start + idx;
2179 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
2180 			rsp->schq_contig[lvl] = 0;
2181 			rsp->schq[lvl] = 0;
2182 			return;
2183 		}
2184 
2185 		if (rsp->schq_contig[lvl]) {
2186 			rsp->schq_contig[lvl] = 1;
2187 			set_bit(schq, txsch->schq.bmap);
2188 			rsp->schq_contig_list[lvl][0] = schq;
2189 			rsp->schq[lvl] = 0;
2190 		} else if (rsp->schq[lvl]) {
2191 			rsp->schq[lvl] = 1;
2192 			set_bit(schq, txsch->schq.bmap);
2193 			rsp->schq_list[lvl][0] = schq;
2194 		}
2195 		return;
2196 	}
2197 
2198 	/* Allocate the requested contiguous queue indices first */
2199 	if (rsp->schq_contig[lvl]) {
2200 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
2201 						  txsch->schq.max, start,
2202 						  rsp->schq_contig[lvl], 0);
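		/* bitmap_find_next_zero_area() returns an index at or past
		 * the end when no fit exists; the check below then drops the
		 * contig request
		 */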
2203 		if (schq >= end)
2204 			rsp->schq_contig[lvl] = 0;
2205 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
2206 			set_bit(schq, txsch->schq.bmap);
2207 			rsp->schq_contig_list[lvl][idx] = schq;
2208 			schq++;
2209 		}
2210 	}
2211 
2212 	/* Allocate non-contiguous queue indices */
2213 	if (rsp->schq[lvl]) {
2214 		idx = 0;
2215 		for (schq = start; schq < end; schq++) {
2216 			if (!test_bit(schq, txsch->schq.bmap)) {
2217 				set_bit(schq, txsch->schq.bmap);
2218 				rsp->schq_list[lvl][idx++] = schq;
2219 			}
2220 			if (idx == rsp->schq[lvl])
2221 				break;
2222 		}
2223 		/* Update how many were allocated */
2224 		rsp->schq[lvl] = idx;
2225 	}
2226 }
2227 
rvu_mbox_handler_nix_txsch_alloc(struct rvu * rvu,struct nix_txsch_alloc_req * req,struct nix_txsch_alloc_rsp * rsp)2228 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
2229 				     struct nix_txsch_alloc_req *req,
2230 				     struct nix_txsch_alloc_rsp *rsp)
2231 {
2232 	struct rvu_hwinfo *hw = rvu->hw;
2233 	u16 pcifunc = req->hdr.pcifunc;
2234 	int link, blkaddr, rc = 0;
2235 	int lvl, idx, start, end;
2236 	struct nix_txsch *txsch;
2237 	struct nix_hw *nix_hw;
2238 	u32 *pfvf_map;
2239 	int nixlf;
2240 	u16 schq;
2241 
2242 	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2243 	if (rc)
2244 		return rc;
2245 
2246 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2247 	if (!nix_hw)
2248 		return NIX_AF_ERR_INVALID_NIXBLK;
2249 
2250 	mutex_lock(&rvu->rsrc_lock);
2251 
2252 	/* Check if request is valid as per HW capabilities
2253 	 * and can be accommodated.
2254 	 */
2255 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2256 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
2257 		if (rc)
2258 			goto err;
2259 	}
2260 
2261 	/* Allocate requested Tx scheduler queues */
2262 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2263 		txsch = &nix_hw->txsch[lvl];
2264 		pfvf_map = txsch->pfvf_map;
2265 
2266 		if (!req->schq[lvl] && !req->schq_contig[lvl])
2267 			continue;
2268 
2269 		rsp->schq[lvl] = req->schq[lvl];
2270 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
2271 
2272 		link = nix_get_tx_link(rvu, pcifunc);
2273 
2274 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2275 			start = link;
2276 			end = link;
2277 		} else if (hw->cap.nix_fixed_txschq_mapping) {
2278 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
2279 		} else {
2280 			start = 0;
2281 			end = txsch->schq.max;
2282 		}
2283 
2284 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
2285 
2286 		/* Reset queue config */
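		/* Only (re)set the owner mapping if default config hasn't
		 * already been marked done on this queue
		 */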
2287 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
2288 			schq = rsp->schq_contig_list[lvl][idx];
2289 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2290 			    NIX_TXSCHQ_CFG_DONE))
2291 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2292 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2293 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2294 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2295 		}
2296 
2297 		for (idx = 0; idx < req->schq[lvl]; idx++) {
2298 			schq = rsp->schq_list[lvl][idx];
2299 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2300 			    NIX_TXSCHQ_CFG_DONE))
2301 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2302 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2303 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2304 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2305 		}
2306 	}
2307 
2308 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
2309 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
2310 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
2311 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2312 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2313 	goto exit;
2314 err:
2315 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
2316 exit:
2317 	mutex_unlock(&rvu->rsrc_lock);
2318 	return rc;
2319 }
2320 
nix_smq_flush_fill_ctx(struct rvu * rvu,int blkaddr,int smq,struct nix_smq_flush_ctx * smq_flush_ctx)2321 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
2322 				   struct nix_smq_flush_ctx *smq_flush_ctx)
2323 {
2324 	struct nix_smq_tree_ctx *smq_tree_ctx;
2325 	u64 parent_off, regval;
2326 	u16 schq;
2327 	int lvl;
2328 
2329 	smq_flush_ctx->smq = smq;
2330 
2331 	schq = smq;
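	/* Walk up from the SMQ to TL1 via the PARENT registers, saving each
	 * level's schq and CIR/PIR so they can be restored after the flush
	 */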
2332 	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2333 		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2334 		smq_tree_ctx->schq = schq;
2335 		if (lvl == NIX_TXSCH_LVL_TL1) {
2336 			smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
2337 			smq_tree_ctx->pir_off = 0;
2338 			smq_tree_ctx->pir_val = 0;
2339 			parent_off = 0;
2340 		} else if (lvl == NIX_TXSCH_LVL_TL2) {
2341 			smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
2342 			smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
2343 			parent_off = NIX_AF_TL2X_PARENT(schq);
2344 		} else if (lvl == NIX_TXSCH_LVL_TL3) {
2345 			smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
2346 			smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
2347 			parent_off = NIX_AF_TL3X_PARENT(schq);
2348 		} else if (lvl == NIX_TXSCH_LVL_TL4) {
2349 			smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
2350 			smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
2351 			parent_off = NIX_AF_TL4X_PARENT(schq);
2352 		} else if (lvl == NIX_TXSCH_LVL_MDQ) {
2353 			smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
2354 			smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
2355 			parent_off = NIX_AF_MDQX_PARENT(schq);
2356 		}
2357 		/* save cir/pir register values */
2358 		smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
2359 		if (smq_tree_ctx->pir_off)
2360 			smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
2361 
2362 		/* get parent txsch node */
2363 		if (parent_off) {
2364 			regval = rvu_read64(rvu, blkaddr, parent_off);
2365 			schq = (regval >> 16) & 0x1FF;
2366 		}
2367 	}
2368 }
2369 
nix_smq_flush_enadis_xoff(struct rvu * rvu,int blkaddr,struct nix_smq_flush_ctx * smq_flush_ctx,bool enable)2370 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
2371 				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2372 {
2373 	struct nix_txsch *txsch;
2374 	struct nix_hw *nix_hw;
2375 	int tl2, tl2_schq;
2376 	u64 regoff;
2377 
2378 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2379 	if (!nix_hw)
2380 		return;
2381 
2382 	/* loop through all TL2s with matching PF_FUNC */
2383 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
2384 	tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
2385 	for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
2386 		/* skip the smq(flush) TL2 */
2387 		if (tl2 == tl2_schq)
2388 			continue;
2389 		/* skip unused TL2s */
2390 		if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
2391 			continue;
2392 		/* skip if the PF doesn't match (VF func bits are masked off) */
2393 		if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
2394 		    (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq]) &
2395 				    ~RVU_PFVF_FUNC_MASK))
2396 			continue;
2397 		/* enable/disable XOFF */
2398 		regoff = NIX_AF_TL2X_SW_XOFF(tl2);
2399 		if (enable)
2400 			rvu_write64(rvu, blkaddr, regoff, 0x1);
2401 		else
2402 			rvu_write64(rvu, blkaddr, regoff, 0x0);
2403 	}
2404 }
2405 
nix_smq_flush_enadis_rate(struct rvu * rvu,int blkaddr,struct nix_smq_flush_ctx * smq_flush_ctx,bool enable)2406 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
2407 				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2408 {
2409 	u64 cir_off, pir_off, cir_val, pir_val;
2410 	struct nix_smq_tree_ctx *smq_tree_ctx;
2411 	int lvl;
2412 
2413 	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2414 		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2415 		cir_off = smq_tree_ctx->cir_off;
2416 		cir_val = smq_tree_ctx->cir_val;
2417 		pir_off = smq_tree_ctx->pir_off;
2418 		pir_val = smq_tree_ctx->pir_val;
2419 
2420 		if (enable) {
2421 			rvu_write64(rvu, blkaddr, cir_off, cir_val);
2422 			if (lvl != NIX_TXSCH_LVL_TL1)
2423 				rvu_write64(rvu, blkaddr, pir_off, pir_val);
2424 		} else {
2425 			rvu_write64(rvu, blkaddr, cir_off, 0x0);
2426 			if (lvl != NIX_TXSCH_LVL_TL1)
2427 				rvu_write64(rvu, blkaddr, pir_off, 0x0);
2428 		}
2429 	}
2430 }
2431 
nix_smq_flush(struct rvu * rvu,int blkaddr,int smq,u16 pcifunc,int nixlf)2432 static int nix_smq_flush(struct rvu *rvu, int blkaddr,
2433 			 int smq, u16 pcifunc, int nixlf)
2434 {
2435 	struct nix_smq_flush_ctx *smq_flush_ctx;
2436 	int err, restore_tx_en = 0, i;
2437 	int pf = rvu_get_pf(rvu->pdev, pcifunc);
2438 	u8 cgx_id = 0, lmac_id = 0;
2439 	u16 tl2_tl3_link_schq;
2440 	u8 link, link_level;
2441 	u64 cfg, bmap = 0;
2442 
2443 	if (!is_rvu_otx2(rvu)) {
2444 		/* Skip SMQ flush if pkt count is zero */
2445 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
2446 		if (!cfg)
2447 			return 0;
2448 	}
2449 
2450 	/* enable cgx tx if disabled */
2451 	if (is_pf_cgxmapped(rvu, pf)) {
2452 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2453 		restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
2454 						   lmac_id, true);
2455 	}
2456 
2457 	/* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
2458 	smq_flush_ctx = kzalloc_obj(*smq_flush_ctx);
2459 	if (!smq_flush_ctx)
2460 		return -ENOMEM;
2461 	nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
2462 	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
2463 	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
2464 
2465 	/* Disable backpressure from physical link,
2466 	 * otherwise SMQ flush may stall.
2467 	 */
2468 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
2469 
2470 	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2471 			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2472 	tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
2473 	link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
2474 
2475 	/* SMQ set enqueue xoff */
2476 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2477 	cfg |= BIT_ULL(50);
2478 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2479 
2480 	/* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
2481 	for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
2482 		cfg = rvu_read64(rvu, blkaddr,
2483 				 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
2484 		if (!(cfg & BIT_ULL(12)))
2485 			continue;
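		/* remember which links had ENA set so they can be re-enabled
		 * after the flush
		 */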
2486 		bmap |= BIT_ULL(i);
2487 		cfg &= ~BIT_ULL(12);
2488 		rvu_write64(rvu, blkaddr,
2489 			    NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
2490 	}
2491 
2492 	/* Do SMQ flush and set enqueue xoff */
2493 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2494 	cfg |= BIT_ULL(50) | BIT_ULL(49);
2495 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2496 
2497 	/* Wait for flush to complete */
2498 	err = rvu_poll_reg(rvu, blkaddr,
2499 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
2500 	if (err)
2501 		dev_info(rvu->dev,
2502 			 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
2503 			 nixlf, smq);
2504 
2505 	/* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
2506 	for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
2507 		if (!(bmap & BIT_ULL(i)))
2508 			continue;
2509 		cfg = rvu_read64(rvu, blkaddr,
2510 				 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
2511 		cfg |= BIT_ULL(12);
2512 		rvu_write64(rvu, blkaddr,
2513 			    NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
2514 	}
2515 
2516 	/* restore rate limits and clear XOFF on TL2s */
2517 	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
2518 	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
2519 	kfree(smq_flush_ctx);
2520 
2521 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
2522 	/* restore cgx tx state */
2523 	if (restore_tx_en)
2524 		rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
2525 	return err;
2526 }
2527 
nix_txschq_free(struct rvu * rvu,u16 pcifunc)2528 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
2529 {
2530 	int blkaddr, nixlf, lvl, schq, err;
2531 	struct rvu_hwinfo *hw = rvu->hw;
2532 	struct nix_txsch *txsch;
2533 	struct nix_hw *nix_hw;
2534 	u16 map_func;
2535 
2536 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2537 	if (blkaddr < 0)
2538 		return NIX_AF_ERR_AF_LF_INVALID;
2539 
2540 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2541 	if (!nix_hw)
2542 		return NIX_AF_ERR_INVALID_NIXBLK;
2543 
2544 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2545 	if (nixlf < 0)
2546 		return NIX_AF_ERR_AF_LF_INVALID;
2547 
2548 	/* Disable TL2/3 queue links and all XOFF's before SMQ flush */
2549 	mutex_lock(&rvu->rsrc_lock);
2550 	for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2551 		txsch = &nix_hw->txsch[lvl];
2552 
2553 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
2554 			continue;
2555 
2556 		for (schq = 0; schq < txsch->schq.max; schq++) {
2557 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2558 				continue;
2559 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2560 			nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2561 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2562 		}
2563 	}
2564 	nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
2565 			  nix_get_tx_link(rvu, pcifunc));
2566 
2567 	/* On PF cleanup, clear cfg done flag as
2568 	 * PF would have changed default config.
2569 	 */
2570 	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
2571 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
2572 		schq = nix_get_tx_link(rvu, pcifunc);
2573 		/* Do not clear pcifunc in txsch->pfvf_map[schq] because
2574 		 * VF might be using this TL1 queue
2575 		 */
2576 		map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
2577 		txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
2578 	}
2579 
2580 	/* Flush SMQs */
2581 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2582 	for (schq = 0; schq < txsch->schq.max; schq++) {
2583 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2584 			continue;
2585 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2586 	}
2587 
2588 	/* Now free scheduler queues to free pool */
2589 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2590 		 /* TLs above aggregation level are shared across the PF
2591 		  * and its VFs, hence skip freeing them.
2592 		  */
2593 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
2594 			continue;
2595 
2596 		txsch = &nix_hw->txsch[lvl];
2597 		for (schq = 0; schq < txsch->schq.max; schq++) {
2598 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2599 				continue;
2600 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2601 			rvu_free_rsrc(&txsch->schq, schq);
2602 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2603 		}
2604 	}
2605 	mutex_unlock(&rvu->rsrc_lock);
2606 
2607 	err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC);
2608 	if (err)
2609 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
2610 
2611 	return 0;
2612 }
2613 
nix_txschq_free_one(struct rvu * rvu,struct nix_txsch_free_req * req)2614 static int nix_txschq_free_one(struct rvu *rvu,
2615 			       struct nix_txsch_free_req *req)
2616 {
2617 	struct rvu_hwinfo *hw = rvu->hw;
2618 	u16 pcifunc = req->hdr.pcifunc;
2619 	int lvl, schq, nixlf, blkaddr;
2620 	struct nix_txsch *txsch;
2621 	struct nix_hw *nix_hw;
2622 	u32 *pfvf_map;
2623 	int rc;
2624 
2625 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2626 	if (blkaddr < 0)
2627 		return NIX_AF_ERR_AF_LF_INVALID;
2628 
2629 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2630 	if (!nix_hw)
2631 		return NIX_AF_ERR_INVALID_NIXBLK;
2632 
2633 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2634 	if (nixlf < 0)
2635 		return NIX_AF_ERR_AF_LF_INVALID;
2636 
2637 	lvl = req->schq_lvl;
2638 	schq = req->schq;
2639 	txsch = &nix_hw->txsch[lvl];
2640 
2641 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
2642 		return 0;
2643 
2644 	pfvf_map = txsch->pfvf_map;
2645 	mutex_lock(&rvu->rsrc_lock);
2646 
2647 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
2648 		rc = NIX_AF_ERR_TLX_INVALID;
2649 		goto err;
2650 	}
2651 
2652 	/* Clear SW_XOFF of this resource only.
2653 	 * For SMQ level, all path XOFF's
2654 	 * need to be cleared by the user
2655 	 */
2656 	nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2657 
2658 	nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2659 	nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2660 
2661 	/* Flush if it is an SMQ. The onus of disabling
2662 	 * TL2/3 queue links before SMQ flush is on the user
2663 	 */
2664 	if (lvl == NIX_TXSCH_LVL_SMQ &&
2665 	    nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
2666 		rc = NIX_AF_SMQ_FLUSH_FAILED;
2667 		goto err;
2668 	}
2669 
2670 	nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2671 
2672 	/* Free the resource */
2673 	rvu_free_rsrc(&txsch->schq, schq);
2674 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2675 	mutex_unlock(&rvu->rsrc_lock);
2676 	return 0;
2677 err:
2678 	mutex_unlock(&rvu->rsrc_lock);
2679 	return rc;
2680 }
2681 
rvu_mbox_handler_nix_txsch_free(struct rvu * rvu,struct nix_txsch_free_req * req,struct msg_rsp * rsp)2682 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
2683 				    struct nix_txsch_free_req *req,
2684 				    struct msg_rsp *rsp)
2685 {
2686 	if (req->flags & TXSCHQ_FREE_ALL)
2687 		return nix_txschq_free(rvu, req->hdr.pcifunc);
2688 	else
2689 		return nix_txschq_free_one(rvu, req);
2690 }
2691 
is_txschq_hierarchy_valid(struct rvu * rvu,u16 pcifunc,int blkaddr,int lvl,u64 reg,u64 regval)2692 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
2693 				      int lvl, u64 reg, u64 regval)
2694 {
2695 	u64 regbase = reg & 0xFFFF;
2696 	u16 schq, parent;
2697 
2698 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
2699 		return false;
2700 
2701 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2702 	/* Check if this schq belongs to this PF/VF or not */
2703 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
2704 		return false;
2705 
2706 	parent = (regval >> 16) & 0x1FF;
2707 	/* Validate MDQ's TL4 parent */
2708 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
2709 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
2710 		return false;
2711 
2712 	/* Validate TL4's TL3 parent */
2713 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
2714 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
2715 		return false;
2716 
2717 	/* Validate TL3's TL2 parent */
2718 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
2719 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
2720 		return false;
2721 
2722 	/* Validate TL2's TL1 parent */
2723 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
2724 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
2725 		return false;
2726 
2727 	return true;
2728 }
2729 
is_txschq_shaping_valid(struct rvu_hwinfo * hw,int lvl,u64 reg)2730 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
2731 {
2732 	u64 regbase;
2733 
2734 	if (hw->cap.nix_shaping)
2735 		return true;
2736 
2737 	/* If shaping and coloring are not supported, then
2738 	 * *_CIR and *_PIR registers should not be configured.
2739 	 */
2740 	regbase = reg & 0xFFFF;
2741 
2742 	switch (lvl) {
2743 	case NIX_TXSCH_LVL_TL1:
2744 		if (regbase == NIX_AF_TL1X_CIR(0))
2745 			return false;
2746 		break;
2747 	case NIX_TXSCH_LVL_TL2:
2748 		if (regbase == NIX_AF_TL2X_CIR(0) ||
2749 		    regbase == NIX_AF_TL2X_PIR(0))
2750 			return false;
2751 		break;
2752 	case NIX_TXSCH_LVL_TL3:
2753 		if (regbase == NIX_AF_TL3X_CIR(0) ||
2754 		    regbase == NIX_AF_TL3X_PIR(0))
2755 			return false;
2756 		break;
2757 	case NIX_TXSCH_LVL_TL4:
2758 		if (regbase == NIX_AF_TL4X_CIR(0) ||
2759 		    regbase == NIX_AF_TL4X_PIR(0))
2760 			return false;
2761 		break;
2762 	case NIX_TXSCH_LVL_MDQ:
2763 		if (regbase == NIX_AF_MDQX_CIR(0) ||
2764 		    regbase == NIX_AF_MDQX_PIR(0))
2765 			return false;
2766 		break;
2767 	}
2768 	return true;
2769 }
2770 
nix_tl1_default_cfg(struct rvu * rvu,struct nix_hw * nix_hw,u16 pcifunc,int blkaddr)2771 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
2772 				u16 pcifunc, int blkaddr)
2773 {
2774 	u32 *pfvf_map;
2775 	int schq;
2776 
2777 	schq = nix_get_tx_link(rvu, pcifunc);
2778 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
2779 	/* Skip if PF has already done the config */
2780 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
2781 		return;
2782 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
2783 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
2784 
2785 	/* On OcteonTx2 the config was in bytes; on newer silicons
2786 	 * it's changed to weight.
2787 	 */
2788 	if (!rvu->hw->cap.nix_common_dwrr_mtu)
2789 		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2790 			    TXSCH_TL1_DFLT_RR_QTM);
2791 	else
2792 		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2793 			    CN10K_MAX_DWRR_WEIGHT);
2794 
2795 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
2796 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
2797 }
2798 
2799 /* Register offset - [15:0]
2800  * Scheduler Queue number - [25:16]
2801  */
2802 #define NIX_TX_SCHQ_MASK	GENMASK_ULL(25, 0)
2803 
nix_txschq_cfg_read(struct rvu * rvu,struct nix_hw * nix_hw,int blkaddr,struct nix_txschq_config * req,struct nix_txschq_config * rsp)2804 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
2805 			       int blkaddr, struct nix_txschq_config *req,
2806 			       struct nix_txschq_config *rsp)
2807 {
2808 	u16 pcifunc = req->hdr.pcifunc;
2809 	int idx, schq;
2810 	u64 reg;
2811 
2812 	for (idx = 0; idx < req->num_regs; idx++) {
2813 		reg = req->reg[idx];
2814 		reg &= NIX_TX_SCHQ_MASK;
2815 		schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2816 		if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
2817 		    !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
2818 			return NIX_AF_INVAL_TXSCHQ_CFG;
2819 		rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
2820 	}
2821 	rsp->lvl = req->lvl;
2822 	rsp->num_regs = req->num_regs;
2823 	return 0;
2824 }
2825 
rvu_nix_tx_tl2_cfg(struct rvu * rvu,int blkaddr,u16 pcifunc,struct nix_txsch * txsch,bool enable)2826 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
2827 			struct nix_txsch *txsch, bool enable)
2828 {
2829 	struct rvu_hwinfo *hw = rvu->hw;
2830 	int lbk_link_start, lbk_links;
2831 	u8 pf = rvu_get_pf(rvu->pdev, pcifunc);
2832 	int schq;
2833 	u64 cfg;
2834 
2835 	if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc))
2836 		return;
2837 
2838 	cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
2839 	lbk_link_start = hw->cgx_links;
2840 
2841 	for (schq = 0; schq < txsch->schq.max; schq++) {
2842 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2843 			continue;
2844 		/* Enable all LBK links with channel 63 by default so that
2845 		 * packets can be sent to LBK with an NPC TX MCAM rule
2846 		 */
2847 		lbk_links = hw->lbk_links;
2848 		while (lbk_links--)
2849 			rvu_write64(rvu, blkaddr,
2850 				    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
2851 							      lbk_link_start +
2852 							      lbk_links), cfg);
2853 	}
2854 }
2855 
rvu_mbox_handler_nix_txschq_cfg(struct rvu * rvu,struct nix_txschq_config * req,struct nix_txschq_config * rsp)2856 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
2857 				    struct nix_txschq_config *req,
2858 				    struct nix_txschq_config *rsp)
2859 {
2860 	u64 reg, val, regval, schq_regbase, val_mask;
2861 	struct rvu_hwinfo *hw = rvu->hw;
2862 	u16 pcifunc = req->hdr.pcifunc;
2863 	struct nix_txsch *txsch;
2864 	struct nix_hw *nix_hw;
2865 	int blkaddr, idx, err;
2866 	int nixlf, schq;
2867 	u32 *pfvf_map;
2868 
2869 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
2870 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
2871 		return NIX_AF_INVAL_TXSCHQ_CFG;
2872 
2873 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2874 	if (err)
2875 		return err;
2876 
2877 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2878 	if (!nix_hw)
2879 		return NIX_AF_ERR_INVALID_NIXBLK;
2880 
2881 	if (req->read)
2882 		return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
2883 
2884 	txsch = &nix_hw->txsch[req->lvl];
2885 	pfvf_map = txsch->pfvf_map;
2886 
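	/* Queues at/above the aggregation level are shared by the PF and its
	 * VFs; for a VF just ensure the TL1 default config and return
	 */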
2887 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2888 	    pcifunc & RVU_PFVF_FUNC_MASK) {
2889 		mutex_lock(&rvu->rsrc_lock);
2890 		if (req->lvl == NIX_TXSCH_LVL_TL1)
2891 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2892 		mutex_unlock(&rvu->rsrc_lock);
2893 		return 0;
2894 	}
2895 
2896 	for (idx = 0; idx < req->num_regs; idx++) {
2897 		reg = req->reg[idx];
2898 		reg &= NIX_TX_SCHQ_MASK;
2899 		regval = req->regval[idx];
2900 		schq_regbase = reg & 0xFFFF;
2901 		val_mask = req->regval_mask[idx];
2902 
2903 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2904 					       txsch->lvl, reg, regval))
2905 			return NIX_AF_INVAL_TXSCHQ_CFG;
2906 
2907 		/* Check if shaping and coloring is supported */
2908 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2909 			continue;
2910 
2911 		val = rvu_read64(rvu, blkaddr, reg);
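		/* Bits set in regval_mask keep their current value; cleared
		 * bits take the new value
		 */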
2912 		regval = (val & val_mask) | (regval & ~val_mask);
2913 
2914 		/* Handle shaping state toggle specially */
2915 		if (hw->cap.nix_shaper_toggle_wait &&
2916 		    handle_txschq_shaper_update(rvu, blkaddr, nixlf,
2917 						req->lvl, reg, regval))
2918 			continue;
2919 
2920 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2921 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2922 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2923 					   pcifunc, 0);
2924 			regval &= ~(0x7FULL << 24);
2925 			regval |= ((u64)nixlf << 24);
2926 		}
2927 
2928 		/* Clear 'BP_ENA' config, if it's not allowed */
2929 		if (!hw->cap.nix_tx_link_bp) {
2930 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2931 			    (schq_regbase & 0xFF00) ==
2932 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2933 				regval &= ~BIT_ULL(13);
2934 		}
2935 
2936 		/* Mark config as done for TL1 by PF */
2937 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2938 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2939 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2940 			mutex_lock(&rvu->rsrc_lock);
2941 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2942 							NIX_TXSCHQ_CFG_DONE);
2943 			mutex_unlock(&rvu->rsrc_lock);
2944 		}
2945 
2946 		/* SMQ flush is special, hence split the register write such
2947 		 * that the flush is done first and the rest of the bits later.
2948 		 */
2949 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2950 		    (regval & BIT_ULL(49))) {
2951 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2952 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2953 			regval &= ~BIT_ULL(49);
2954 		}
2955 		rvu_write64(rvu, blkaddr, reg, regval);
2956 	}
2957 
2958 	return 0;
2959 }
2960 
nix_rx_vtag_cfg(struct rvu * rvu,int nixlf,int blkaddr,struct nix_vtag_config * req)2961 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2962 			   struct nix_vtag_config *req)
2963 {
2964 	u64 regval = req->vtag_size;
2965 
2966 	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2967 	    req->vtag_size > VTAGSIZE_T8)
2968 		return -EINVAL;
2969 
2970 	/* RX VTAG Type 7 reserved for vf vlan */
2971 	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2972 		return NIX_AF_ERR_RX_VTAG_INUSE;
2973 
2974 	if (req->rx.capture_vtag)
2975 		regval |= BIT_ULL(5);
2976 	if (req->rx.strip_vtag)
2977 		regval |= BIT_ULL(4);
2978 
2979 	rvu_write64(rvu, blkaddr,
2980 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2981 	return 0;
2982 }
2983 
nix_tx_vtag_free(struct rvu * rvu,int blkaddr,u16 pcifunc,int index)2984 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2985 			    u16 pcifunc, int index)
2986 {
2987 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2988 	struct nix_txvlan *vlan;
2989 
2990 	if (!nix_hw)
2991 		return NIX_AF_ERR_INVALID_NIXBLK;
2992 
2993 	vlan = &nix_hw->txvlan;
2994 	if (vlan->entry2pfvf_map[index] != pcifunc)
2995 		return NIX_AF_ERR_PARAM;
2996 
2997 	rvu_write64(rvu, blkaddr,
2998 		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2999 	rvu_write64(rvu, blkaddr,
3000 		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
3001 
3002 	vlan->entry2pfvf_map[index] = 0;
3003 	rvu_free_rsrc(&vlan->rsrc, index);
3004 
3005 	return 0;
3006 }
3007 
nix_free_tx_vtag_entries(struct rvu * rvu,u16 pcifunc)3008 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
3009 {
3010 	struct nix_txvlan *vlan;
3011 	struct nix_hw *nix_hw;
3012 	int index, blkaddr;
3013 
3014 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3015 	if (blkaddr < 0)
3016 		return;
3017 
3018 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3019 	if (!nix_hw)
3020 		return;
3021 
3022 	vlan = &nix_hw->txvlan;
3023 
3024 	mutex_lock(&vlan->rsrc_lock);
3025 	/* Scan all the entries and free the ones mapped to 'pcifunc' */
3026 	for (index = 0; index < vlan->rsrc.max; index++) {
3027 		if (vlan->entry2pfvf_map[index] == pcifunc)
3028 			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
3029 	}
3030 	mutex_unlock(&vlan->rsrc_lock);
3031 }
3032 
nix_tx_vtag_alloc(struct rvu * rvu,int blkaddr,u64 vtag,u8 size)3033 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
3034 			     u64 vtag, u8 size)
3035 {
3036 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
3037 	struct nix_txvlan *vlan;
3038 	u64 regval;
3039 	int index;
3040 
3041 	if (!nix_hw)
3042 		return NIX_AF_ERR_INVALID_NIXBLK;
3043 
3044 	vlan = &nix_hw->txvlan;
3045 
3046 	mutex_lock(&vlan->rsrc_lock);
3047 
3048 	index = rvu_alloc_rsrc(&vlan->rsrc);
3049 	if (index < 0) {
3050 		mutex_unlock(&vlan->rsrc_lock);
3051 		return index;
3052 	}
3053 
3054 	mutex_unlock(&vlan->rsrc_lock);
3055 
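	/* A 4-byte vtag (size == VTAGSIZE_T4) goes in the upper 32 bits of
	 * DEFX_DATA; an 8-byte vtag uses the full register
	 */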
3056 	regval = size ? vtag : vtag << 32;
3057 
3058 	rvu_write64(rvu, blkaddr,
3059 		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
3060 	rvu_write64(rvu, blkaddr,
3061 		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);
3062 
3063 	return index;
3064 }
3065 
nix_tx_vtag_decfg(struct rvu * rvu,int blkaddr,struct nix_vtag_config * req)3066 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
3067 			     struct nix_vtag_config *req)
3068 {
3069 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
3070 	u16 pcifunc = req->hdr.pcifunc;
3071 	int idx0 = req->tx.vtag0_idx;
3072 	int idx1 = req->tx.vtag1_idx;
3073 	struct nix_txvlan *vlan;
3074 	int err = 0;
3075 
3076 	if (!nix_hw)
3077 		return NIX_AF_ERR_INVALID_NIXBLK;
3078 
3079 	vlan = &nix_hw->txvlan;
3080 	if (req->tx.free_vtag0 && req->tx.free_vtag1)
3081 		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
3082 		    vlan->entry2pfvf_map[idx1] != pcifunc)
3083 			return NIX_AF_ERR_PARAM;
3084 
3085 	mutex_lock(&vlan->rsrc_lock);
3086 
3087 	if (req->tx.free_vtag0) {
3088 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
3089 		if (err)
3090 			goto exit;
3091 	}
3092 
3093 	if (req->tx.free_vtag1)
3094 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
3095 
3096 exit:
3097 	mutex_unlock(&vlan->rsrc_lock);
3098 	return err;
3099 }
3100 
nix_tx_vtag_cfg(struct rvu * rvu,int blkaddr,struct nix_vtag_config * req,struct nix_vtag_config_rsp * rsp)3101 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
3102 			   struct nix_vtag_config *req,
3103 			   struct nix_vtag_config_rsp *rsp)
3104 {
3105 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
3106 	struct nix_txvlan *vlan;
3107 	u16 pcifunc = req->hdr.pcifunc;
3108 
3109 	if (!nix_hw)
3110 		return NIX_AF_ERR_INVALID_NIXBLK;
3111 
3112 	vlan = &nix_hw->txvlan;
3113 	if (req->tx.cfg_vtag0) {
3114 		rsp->vtag0_idx =
3115 			nix_tx_vtag_alloc(rvu, blkaddr,
3116 					  req->tx.vtag0, req->vtag_size);
3117 
3118 		if (rsp->vtag0_idx < 0)
3119 			return NIX_AF_ERR_TX_VTAG_NOSPC;
3120 
3121 		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
3122 	}
3123 
3124 	if (req->tx.cfg_vtag1) {
3125 		rsp->vtag1_idx =
3126 			nix_tx_vtag_alloc(rvu, blkaddr,
3127 					  req->tx.vtag1, req->vtag_size);
3128 
3129 		if (rsp->vtag1_idx < 0)
3130 			goto err_free;
3131 
3132 		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
3133 	}
3134 
3135 	return 0;
3136 
3137 err_free:
3138 	if (req->tx.cfg_vtag0)
3139 		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
3140 
3141 	return NIX_AF_ERR_TX_VTAG_NOSPC;
3142 }
3143 
rvu_mbox_handler_nix_vtag_cfg(struct rvu * rvu,struct nix_vtag_config * req,struct nix_vtag_config_rsp * rsp)3144 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
3145 				  struct nix_vtag_config *req,
3146 				  struct nix_vtag_config_rsp *rsp)
3147 {
3148 	u16 pcifunc = req->hdr.pcifunc;
3149 	int blkaddr, nixlf, err;
3150 
3151 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3152 	if (err)
3153 		return err;
3154 
3155 	if (req->cfg_type) {
3156 		/* rx vtag configuration */
3157 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
3158 		if (err)
3159 			return NIX_AF_ERR_PARAM;
3160 	} else {
3161 		/* tx vtag configuration */
3162 		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
3163 		    (req->tx.free_vtag0 || req->tx.free_vtag1))
3164 			return NIX_AF_ERR_PARAM;
3165 
3166 		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
3167 			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
3168 
3169 		if (req->tx.free_vtag0 || req->tx.free_vtag1)
3170 			return nix_tx_vtag_decfg(rvu, blkaddr, req);
3171 	}
3172 
3173 	return 0;
3174 }
3175 
nix_blk_setup_mce(struct rvu * rvu,struct nix_hw * nix_hw,int mce,u8 op,u16 pcifunc,int next,int index,u8 mce_op,bool eol)3176 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
3177 			     int mce, u8 op, u16 pcifunc, int next,
3178 			     int index, u8 mce_op, bool eol)
3179 {
3180 	struct nix_aq_enq_req aq_req;
3181 	int err;
3182 
3183 	aq_req.hdr.pcifunc = 0;
3184 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
3185 	aq_req.op = op;
3186 	aq_req.qidx = mce;
3187 
3188 	/* Destination op and RQ/RSS index are provided by the caller */
3189 	aq_req.mce.op = mce_op;
3190 	aq_req.mce.index = index;
3191 	aq_req.mce.eol = eol;
3192 	aq_req.mce.pf_func = pcifunc;
3193 	aq_req.mce.next = next;
3194 
3195 	/* All fields valid */
3196 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
3197 
3198 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
3199 	if (err) {
3200 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
3201 			rvu_get_pf(rvu->pdev, pcifunc),
3202 				pcifunc & RVU_PFVF_FUNC_MASK);
3203 		return err;
3204 	}
3205 	return 0;
3206 }
3207 
nix_delete_mcast_mce_list(struct nix_mce_list * mce_list)3208 static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list)
3209 {
3210 	struct hlist_node *tmp;
3211 	struct mce *mce;
3212 
3213 	/* Scan through the current list */
3214 	hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
3215 		hlist_del(&mce->node);
3216 		kfree(mce);
3217 	}
3218 
3219 	mce_list->count = 0;
3220 	mce_list->max = 0;
3221 }
3222 
nix_get_last_mce_list_index(struct nix_mcast_grp_elem * elem)3223 static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem)
3224 {
3225 	return elem->mce_start_index + elem->mcast_mce_list.count - 1;
3226 }
3227 
nix_update_ingress_mce_list_hw(struct rvu * rvu,struct nix_hw * nix_hw,struct nix_mcast_grp_elem * elem)3228 static int nix_update_ingress_mce_list_hw(struct rvu *rvu,
3229 					  struct nix_hw *nix_hw,
3230 					  struct nix_mcast_grp_elem *elem)
3231 {
3232 	int idx, last_idx, next_idx, err;
3233 	struct nix_mce_list *mce_list;
3234 	struct mce *mce, *prev_mce;
3235 
3236 	mce_list = &elem->mcast_mce_list;
3237 	idx = elem->mce_start_index;
3238 	last_idx = nix_get_last_mce_list_index(elem);
3239 	hlist_for_each_entry(mce, &mce_list->head, node) {
3240 		if (idx > last_idx)
3241 			break;
3242 
3243 		if (!mce->is_active) {
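			/* Inactive entries at the head shrink the list from
			 * the front; an inactive entry at the tail makes the
			 * previous entry the new end of the list
			 */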
3244 			if (idx == elem->mce_start_index) {
3245 				idx++;
3246 				prev_mce = mce;
3247 				elem->mce_start_index = idx;
3248 				continue;
3249 			} else if (idx == last_idx) {
3250 				err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE,
3251 							prev_mce->pcifunc, next_idx,
3252 							prev_mce->rq_rss_index,
3253 							prev_mce->dest_type,
3254 							false);
3255 				if (err)
3256 					return err;
3257 
3258 				break;
3259 			}
3260 		}
3261 
3262 		next_idx = idx + 1;
3263 		/* EOL should be set in last MCE */
3264 		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
3265 					mce->pcifunc, next_idx,
3266 					mce->rq_rss_index, mce->dest_type,
3267 					next_idx > last_idx);
3268 		if (err)
3269 			return err;
3270 
3271 		idx++;
3272 		prev_mce = mce;
3273 	}
3274 
3275 	return 0;
3276 }
3277 
nix_update_egress_mce_list_hw(struct rvu * rvu,struct nix_hw * nix_hw,struct nix_mcast_grp_elem * elem)3278 static void nix_update_egress_mce_list_hw(struct rvu *rvu,
3279 					  struct nix_hw *nix_hw,
3280 					  struct nix_mcast_grp_elem *elem)
3281 {
3282 	struct nix_mce_list *mce_list;
3283 	int idx, last_idx, next_idx;
3284 	struct mce *mce, *prev_mce;
3285 	u64 regval;
3286 	u8 eol;
3287 
3288 	mce_list = &elem->mcast_mce_list;
3289 	idx = elem->mce_start_index;
3290 	last_idx = nix_get_last_mce_list_index(elem);
3291 	hlist_for_each_entry(mce, &mce_list->head, node) {
3292 		if (idx > last_idx)
3293 			break;
3294 
3295 		if (!mce->is_active) {
3296 			if (idx == elem->mce_start_index) {
3297 				idx++;
3298 				prev_mce = mce;
3299 				elem->mce_start_index = idx;
3300 				continue;
3301 			} else if (idx == last_idx) {
3302 				regval = (next_idx << 16) | (1 << 12) | prev_mce->channel;
3303 				rvu_write64(rvu, nix_hw->blkaddr,
3304 					    NIX_AF_TX_MCASTX(idx - 1),
3305 					    regval);
3306 				break;
3307 			}
3308 		}
3309 
3310 		eol = 0;
3311 		next_idx = idx + 1;
3312 		/* EOL should be set in last MCE */
3313 		if (next_idx > last_idx)
3314 			eol = 1;
3315 
3316 		regval = (next_idx << 16) | (eol << 12) | mce->channel;
3317 		rvu_write64(rvu, nix_hw->blkaddr,
3318 			    NIX_AF_TX_MCASTX(idx),
3319 			    regval);
3320 		idx++;
3321 		prev_mce = mce;
3322 	}
3323 }
3324 
nix_del_mce_list_entry(struct rvu * rvu,struct nix_hw * nix_hw,struct nix_mcast_grp_elem * elem,struct nix_mcast_grp_update_req * req)3325 static int nix_del_mce_list_entry(struct rvu *rvu,
3326 				  struct nix_hw *nix_hw,
3327 				  struct nix_mcast_grp_elem *elem,
3328 				  struct nix_mcast_grp_update_req *req)
3329 {
3330 	u32 num_entry = req->num_mce_entry;
3331 	struct nix_mce_list *mce_list;
3332 	struct mce *mce;
3333 	bool is_found;
3334 	int i;
3335 
3336 	mce_list = &elem->mcast_mce_list;
3337 	for (i = 0; i < num_entry; i++) {
3338 		is_found = false;
3339 		hlist_for_each_entry(mce, &mce_list->head, node) {
3340 			/* If an entry for this pcifunc exists, delete it */
3341 			if (mce->pcifunc == req->pcifunc[i]) {
3342 				hlist_del(&mce->node);
3343 				kfree(mce);
3344 				mce_list->count--;
3345 				is_found = true;
3346 				break;
3347 			}
3348 		}
3349 
3350 		if (!is_found)
3351 			return NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
3352 	}
3353 
3354 	mce_list->max = mce_list->count;
3355 	/* Dump the updated list to HW */
3356 	if (elem->dir == NIX_MCAST_INGRESS)
3357 		return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
3358 
3359 	nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
3360 	return 0;
3361 }
3362 
nix_add_mce_list_entry(struct rvu * rvu,struct nix_hw * nix_hw,struct nix_mcast_grp_elem * elem,struct nix_mcast_grp_update_req * req)3363 static int nix_add_mce_list_entry(struct rvu *rvu,
3364 				  struct nix_hw *nix_hw,
3365 				  struct nix_mcast_grp_elem *elem,
3366 				  struct nix_mcast_grp_update_req *req)
3367 {
3368 	u32 num_entry = req->num_mce_entry;
3369 	struct nix_mce_list *mce_list;
3370 	struct hlist_node *tmp;
3371 	struct mce *mce;
3372 	int i;
3373 
3374 	mce_list = &elem->mcast_mce_list;
3375 	for (i = 0; i < num_entry; i++) {
3376 		mce = kzalloc_obj(*mce);
3377 		if (!mce)
3378 			goto free_mce;
3379 
3380 		mce->pcifunc = req->pcifunc[i];
3381 		mce->channel = req->channel[i];
3382 		mce->rq_rss_index = req->rq_rss_index[i];
3383 		mce->dest_type = req->dest_type[i];
3384 		mce->is_active = 1;
3385 		hlist_add_head(&mce->node, &mce_list->head);
3386 		mce_list->count++;
3387 	}
3388 
3389 	mce_list->max += num_entry;
3390 
3391 	/* Dump the updated list to HW */
3392 	if (elem->dir == NIX_MCAST_INGRESS)
3393 		return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
3394 
3395 	nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
3396 	return 0;
3397 
3398 free_mce:
3399 	hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
3400 		hlist_del(&mce->node);
3401 		kfree(mce);
3402 		mce_list->count--;
3403 	}
3404 
3405 	return -ENOMEM;
3406 }
3407 
3408 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
3409 				     u16 pcifunc, bool add)
3410 {
3411 	struct mce *mce, *tail = NULL;
3412 	bool delete = false;
3413 
3414 	/* Scan through the current list */
3415 	hlist_for_each_entry(mce, &mce_list->head, node) {
3416 		/* If already exists, then delete */
3417 		if (mce->pcifunc == pcifunc && !add) {
3418 			delete = true;
3419 			break;
3420 		} else if (mce->pcifunc == pcifunc && add) {
3421 			/* entry already exists */
3422 			return 0;
3423 		}
3424 		tail = mce;
3425 	}
3426 
3427 	if (delete) {
3428 		hlist_del(&mce->node);
3429 		kfree(mce);
3430 		mce_list->count--;
3431 		return 0;
3432 	}
3433 
3434 	if (!add)
3435 		return 0;
3436 
3437 	/* Add a new one to the list, at the tail */
3438 	mce = kzalloc_obj(*mce);
3439 	if (!mce)
3440 		return -ENOMEM;
3441 	mce->pcifunc = pcifunc;
3442 	if (!tail)
3443 		hlist_add_head(&mce->node, &mce_list->head);
3444 	else
3445 		hlist_add_behind(&mce->node, &tail->node);
3446 	mce_list->count++;
3447 	return 0;
3448 }
3449 
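/* Add or remove @pcifunc in the given MCE list and rewrite the affected HW
 * MCE entries; the NPC MCAM entry is disabled once the list becomes empty.
 */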
3450 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
3451 			struct nix_mce_list *mce_list,
3452 			int mce_idx, int mcam_index, bool add)
3453 {
3454 	int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
3455 	struct npc_mcam *mcam = &rvu->hw->mcam;
3456 	struct nix_mcast *mcast;
3457 	struct nix_hw *nix_hw;
3458 	struct mce *mce;
3459 
3460 	if (!mce_list)
3461 		return -EINVAL;
3462 
3463 	/* Get this PF/VF func's MCE index */
3464 	idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
3465 
3466 	if (idx > (mce_idx + mce_list->max)) {
3467 		dev_err(rvu->dev,
3468 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
3469 			__func__, idx, mce_list->max,
3470 			rvu_get_pf(rvu->pdev, pcifunc));
3471 		return -EINVAL;
3472 	}
3473 
3474 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
3475 	if (err)
3476 		return err;
3477 
3478 	mcast = &nix_hw->mcast;
3479 	mutex_lock(&mcast->mce_lock);
3480 
3481 	err = nix_update_mce_list_entry(mce_list, pcifunc, add);
3482 	if (err)
3483 		goto end;
3484 
3485 	/* Disable MCAM entry in NPC */
3486 	if (!mce_list->count) {
3487 		npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3488 		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
3489 		goto end;
3490 	}
3491 
3492 	/* Dump the updated list to HW */
3493 	idx = mce_idx;
3494 	last_idx = idx + mce_list->count - 1;
3495 	hlist_for_each_entry(mce, &mce_list->head, node) {
3496 		if (idx > last_idx)
3497 			break;
3498 
3499 		next_idx = idx + 1;
3500 		/* EOL should be set in last MCE */
3501 		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
3502 					mce->pcifunc, next_idx,
3503 					0, 1,
3504 					next_idx > last_idx);
3505 		if (err)
3506 			goto end;
3507 		idx++;
3508 	}
3509 
3510 end:
3511 	mutex_unlock(&mcast->mce_lock);
3512 	return err;
3513 }
3514 
3515 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
3516 		      struct nix_mce_list **mce_list, int *mce_idx)
3517 {
3518 	struct rvu_hwinfo *hw = rvu->hw;
3519 	struct rvu_pfvf *pfvf;
3520 
3521 	if (!hw->cap.nix_rx_multicast ||
3522 	    !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev,
3523 			     pcifunc & ~RVU_PFVF_FUNC_MASK))) {
3524 		*mce_list = NULL;
3525 		*mce_idx = 0;
3526 		return;
3527 	}
3528 
3529 	/* Get this PF/VF func's MCE index */
3530 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
3531 
3532 	if (type == NIXLF_BCAST_ENTRY) {
3533 		*mce_list = &pfvf->bcast_mce_list;
3534 		*mce_idx = pfvf->bcast_mce_idx;
3535 	} else if (type == NIXLF_ALLMULTI_ENTRY) {
3536 		*mce_list = &pfvf->mcast_mce_list;
3537 		*mce_idx = pfvf->mcast_mce_idx;
3538 	} else if (type == NIXLF_PROMISC_ENTRY) {
3539 		*mce_list = &pfvf->promisc_mce_list;
3540 		*mce_idx = pfvf->promisc_mce_idx;
3541 	} else {
3542 		*mce_list = NULL;
3543 		*mce_idx = 0;
3544 	}
3545 }
3546 
3547 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
3548 			       int type, bool add)
3549 {
3550 	int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
3551 	struct npc_mcam *mcam = &rvu->hw->mcam;
3552 	struct rvu_hwinfo *hw = rvu->hw;
3553 	struct nix_mce_list *mce_list;
3554 	int pf;
3555 
3556 	/* skip multicast pkt replication for AF's VFs & SDP links */
3557 	if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(rvu, pcifunc))
3558 		return 0;
3559 
3560 	if (!hw->cap.nix_rx_multicast)
3561 		return 0;
3562 
3563 	pf = rvu_get_pf(rvu->pdev, pcifunc);
3564 	if (!is_pf_cgxmapped(rvu, pf))
3565 		return 0;
3566 
3567 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3568 	if (blkaddr < 0)
3569 		return -EINVAL;
3570 
3571 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
3572 	if (nixlf < 0)
3573 		return -EINVAL;
3574 
3575 	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
3576 
3577 	mcam_index = npc_get_nixlf_mcam_index(mcam,
3578 					      pcifunc & ~RVU_PFVF_FUNC_MASK,
3579 					      nixlf, type);
3580 	if (mcam_index < 0)
3581 		return -EINVAL;
3582 
3583 	err = nix_update_mce_list(rvu, pcifunc, mce_list,
3584 				  mce_idx, mcam_index, add);
3585 	return err;
3586 }
3587 
3588 static void nix_setup_mcast_grp(struct nix_hw *nix_hw)
3589 {
3590 	struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp;
3591 
3592 	INIT_LIST_HEAD(&mcast_grp->mcast_grp_head);
3593 	mutex_init(&mcast_grp->mcast_grp_lock);
3594 	mcast_grp->next_grp_index = 1;
3595 	mcast_grp->count = 0;
3596 }
3597 
3598 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
3599 {
3600 	struct nix_mcast *mcast = &nix_hw->mcast;
3601 	int err, pf, numvfs, idx;
3602 	struct rvu_pfvf *pfvf;
3603 	u16 pcifunc;
3604 	u64 cfg;
3605 
3606 	/* Skip PF0 (i.e. AF) */
3607 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
3608 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3609 		/* If PF is not enabled, nothing to do */
3610 		if (!((cfg >> 20) & 0x01))
3611 			continue;
3612 		/* Get numVFs attached to this PF */
3613 		numvfs = (cfg >> 12) & 0xFF;
3614 
3615 		pfvf = &rvu->pf[pf];
3616 
3617 		/* Is this NIX0/1 block mapped to this PF? */
3618 		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
3619 			continue;
3620 
3621 		/* save start idx of broadcast mce list */
3622 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3623 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
3624 
3625 		/* save start idx of multicast mce list */
3626 		pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3627 		nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
3628 
3629 		/* save the start idx of promisc mce list */
3630 		pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3631 		nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
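		/* Example: a PF with 8 VFs gets three MCE lists (bcast,
		 * mcast, promisc) of nine entries each: index 0 for the PF
		 * itself, indices 1..8 for its VFs.
		 */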
3632 
3633 		for (idx = 0; idx < (numvfs + 1); idx++) {
3634 			/* idx-0 is for PF, followed by VFs */
3635 			pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
3636 			pcifunc |= idx;
3637 			/* Add dummy entries now, so that we don't have to
3638 			 * check whether AQ_OP should be INIT/WRITE later on.
3639 			 * Will be updated when a NIXLF is attached/detached to
3640 			 * these PF/VFs.
3641 			 */
3642 			err = nix_blk_setup_mce(rvu, nix_hw,
3643 						pfvf->bcast_mce_idx + idx,
3644 						NIX_AQ_INSTOP_INIT,
3645 						pcifunc, 0, 0, 1, true);
3646 			if (err)
3647 				return err;
3648 
3649 			/* add dummy entries to multicast mce list */
3650 			err = nix_blk_setup_mce(rvu, nix_hw,
3651 						pfvf->mcast_mce_idx + idx,
3652 						NIX_AQ_INSTOP_INIT,
3653 						pcifunc, 0, 0, 1, true);
3654 			if (err)
3655 				return err;
3656 
3657 			/* add dummy entries to promisc mce list */
3658 			err = nix_blk_setup_mce(rvu, nix_hw,
3659 						pfvf->promisc_mce_idx + idx,
3660 						NIX_AQ_INSTOP_INIT,
3661 						pcifunc, 0, 0, 1, true);
3662 			if (err)
3663 				return err;
3664 		}
3665 	}
3666 	return 0;
3667 }
3668 
3669 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3670 {
3671 	struct nix_mcast *mcast = &nix_hw->mcast;
3672 	struct rvu_hwinfo *hw = rvu->hw;
3673 	int err, size;
3674 
3675 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
3676 	size = BIT_ULL(size);
3677 
3678 	/* Allocate bitmap for rx mce entries */
3679 	mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE;
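	/* The ingress table scales a 256-entry base by the MC_TBL_SIZE
	 * enum step; each step doubles the number of MCE entries.
	 */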
3680 	err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3681 	if (err)
3682 		return -ENOMEM;
3683 
3684 	/* Allocate bitmap for tx mce entries */
3685 	mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX;
3686 	err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3687 	if (err) {
3688 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3689 		return -ENOMEM;
3690 	}
3691 
3692 	/* Alloc memory for multicast/mirror replication entries */
3693 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
3694 			 mcast->mce_counter[NIX_MCAST_INGRESS].max, size);
3695 	if (err) {
3696 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3697 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3698 		return -ENOMEM;
3699 	}
3700 
3701 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
3702 		    (u64)mcast->mce_ctx->iova);
3703 
3704 	/* Set max list length equal to max no of VFs per PF + PF itself */
3705 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
3706 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
3707 
3708 	/* Alloc memory for multicast replication buffers */
3709 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
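	/* The buffer count below scales a base of eight buffers by the
	 * MC_BUF_CNT enum step, each buffer of the HW-reported size.
	 */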
3710 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
3711 			 (8UL << MC_BUF_CNT), size);
3712 	if (err) {
3713 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3714 		rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3715 		return -ENOMEM;
3716 	}
3717 
3718 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
3719 		    (u64)mcast->mcast_buf->iova);
3720 
3721 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
3722 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
3723 
3724 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
3725 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
3726 		    BIT_ULL(20) | MC_BUF_CNT);
3727 
3728 	mutex_init(&mcast->mce_lock);
3729 
3730 	nix_setup_mcast_grp(nix_hw);
3731 
3732 	return nix_setup_mce_tables(rvu, nix_hw);
3733 }
3734 
3735 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
3736 {
3737 	struct nix_txvlan *vlan = &nix_hw->txvlan;
3738 	int err;
3739 
3740 	/* Allocate resource bitmap for tx vtag def registers */
3741 	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
3742 	err = rvu_alloc_bitmap(&vlan->rsrc);
3743 	if (err)
3744 		return -ENOMEM;
3745 
3746 	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
3747 	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
3748 					    sizeof(u16), GFP_KERNEL);
3749 	if (!vlan->entry2pfvf_map)
3750 		goto free_mem;
3751 
3752 	mutex_init(&vlan->rsrc_lock);
3753 	return 0;
3754 
3755 free_mem:
3756 	kfree(vlan->rsrc.bmap);
3757 	return -ENOMEM;
3758 }
3759 
3760 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3761 {
3762 	struct nix_txsch *txsch;
3763 	int err, lvl, schq;
3764 	u64 cfg, reg;
3765 
3766 	/* Get the scheduler queue count of each type and allocate a
3767 	 * bitmap for each, for alloc/free/attach operations.
3768 	 */
3769 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3770 		txsch = &nix_hw->txsch[lvl];
3771 		txsch->lvl = lvl;
3772 		switch (lvl) {
3773 		case NIX_TXSCH_LVL_SMQ:
3774 			reg = NIX_AF_MDQ_CONST;
3775 			break;
3776 		case NIX_TXSCH_LVL_TL4:
3777 			reg = NIX_AF_TL4_CONST;
3778 			break;
3779 		case NIX_TXSCH_LVL_TL3:
3780 			reg = NIX_AF_TL3_CONST;
3781 			break;
3782 		case NIX_TXSCH_LVL_TL2:
3783 			reg = NIX_AF_TL2_CONST;
3784 			break;
3785 		case NIX_TXSCH_LVL_TL1:
3786 			reg = NIX_AF_TL1_CONST;
3787 			break;
3788 		}
3789 		cfg = rvu_read64(rvu, blkaddr, reg);
3790 		txsch->schq.max = cfg & 0xFFFF;
3791 		err = rvu_alloc_bitmap(&txsch->schq);
3792 		if (err)
3793 			return err;
3794 
3795 		/* Allocate memory for scheduler queues to
3796 		 * PF/VF pcifunc mapping info.
3797 		 */
3798 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
3799 					       sizeof(u32), GFP_KERNEL);
3800 		if (!txsch->pfvf_map)
3801 			return -ENOMEM;
3802 		for (schq = 0; schq < txsch->schq.max; schq++)
3803 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
3804 	}
3805 
3806 	/* Setup a default value of 8192 as DWRR MTU */
3807 	if (rvu->hw->cap.nix_common_dwrr_mtu ||
3808 	    rvu->hw->cap.nix_multiple_dwrr_mtu) {
3809 		rvu_write64(rvu, blkaddr,
3810 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
3811 			    convert_bytes_to_dwrr_mtu(8192));
3812 		rvu_write64(rvu, blkaddr,
3813 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
3814 			    convert_bytes_to_dwrr_mtu(8192));
3815 		rvu_write64(rvu, blkaddr,
3816 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
3817 			    convert_bytes_to_dwrr_mtu(8192));
3818 	}
3819 
3820 	return 0;
3821 }
3822 
3823 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
3824 				int blkaddr, u32 cfg)
3825 {
3826 	int fmt_idx;
3827 
3828 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
3829 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
3830 			return fmt_idx;
3831 	}
3832 	if (fmt_idx >= nix_hw->mark_format.total)
3833 		return -ERANGE;
3834 
3835 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
3836 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
3837 	nix_hw->mark_format.in_use++;
3838 	return fmt_idx;
3839 }
3840 
3841 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
3842 				    int blkaddr)
3843 {
3844 	u64 cfgs[] = {
3845 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
3846 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
3847 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
3848 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
3849 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
3850 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
3851 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
3852 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
3853 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
3854 	};
3855 	int i, rc;
3856 	u64 total;
3857 
3858 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
3859 	nix_hw->mark_format.total = (u8)total;
3860 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
3861 					       GFP_KERNEL);
3862 	if (!nix_hw->mark_format.cfg)
3863 		return -ENOMEM;
3864 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
3865 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
3866 		if (rc < 0)
3867 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
3868 				i, rc);
3869 	}
3870 
3871 	return 0;
3872 }
3873 
3874 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3875 {
3876 	/* CN10K supports LBK FIFO size 72 KB */
3877 	if (rvu->hw->lbk_bufsize == 0x12000)
3878 		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
3879 	else
3880 		*max_mtu = NIC_HW_MAX_FRS;
3881 }
3882 
3883 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3884 {
3885 	int fifo_size = rvu_cgx_get_fifolen(rvu);
3886 
3887 	/* RPM supports FIFO len 128 KB and RPM2 supports double the
3888 	 * FIFO len to accommodate 8 LMACs
3889 	 */
3890 	if (fifo_size == 0x20000 || fifo_size == 0x40000)
3891 		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
3892 	else
3893 		*max_mtu = NIC_HW_MAX_FRS;
3894 }
3895 
3896 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
3897 				     struct nix_hw_info *rsp)
3898 {
3899 	u16 pcifunc = req->hdr.pcifunc;
3900 	u64 dwrr_mtu;
3901 	int blkaddr;
3902 
3903 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3904 	if (blkaddr < 0)
3905 		return NIX_AF_ERR_AF_LF_INVALID;
3906 
3907 	if (is_lbk_vf(rvu, pcifunc))
3908 		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
3909 	else
3910 		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
3911 
3912 	rsp->min_mtu = NIC_HW_MIN_FRS;
3913 
3914 	if (!rvu->hw->cap.nix_common_dwrr_mtu &&
3915 	    !rvu->hw->cap.nix_multiple_dwrr_mtu) {
3916 		/* Return '1' on OTx2 */
3917 		rsp->rpm_dwrr_mtu = 1;
3918 		rsp->sdp_dwrr_mtu = 1;
3919 		rsp->lbk_dwrr_mtu = 1;
3920 		return 0;
3921 	}
3922 
3923 	/* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
3924 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3925 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
3926 	rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3927 
3928 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3929 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
3930 	rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3931 
3932 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3933 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
3934 	rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3935 
3936 	return 0;
3937 }
3938 
3939 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
3940 				   struct msg_rsp *rsp)
3941 {
3942 	u16 pcifunc = req->hdr.pcifunc;
3943 	int i, nixlf, blkaddr, err;
3944 	u64 stats;
3945 
3946 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3947 	if (err)
3948 		return err;
3949 
3950 	/* Get stats count supported by HW */
3951 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
3952 
3953 	/* Reset tx stats */
3954 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
3955 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
3956 
3957 	/* Reset rx stats */
3958 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
3959 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
3960 
3961 	return 0;
3962 }
3963 
3964 /* Returns the ALG index to be set into NPC_RX_ACTION */
3965 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
3966 {
3967 	int i;
3968 
3969 	/* Scan over existing algo entries to find a match */
3970 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
3971 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
3972 			return i;
3973 
3974 	return -ERANGE;
3975 }
3976 
3977 /* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */
3978 #define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf)
3979 /* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */
3980 #define NPC_LT_LC_IP_MATCH_MSK  ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf)
3981 
3982 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
3983 {
3984 	int idx, nr_field, key_off, field_marker, keyoff_marker;
3985 	int max_key_off, max_bit_pos, group_member;
3986 	struct nix_rx_flowkey_alg *field;
3987 	struct nix_rx_flowkey_alg tmp;
3988 	u32 key_type, valid_key;
3989 	u32 l3_l4_src_dst;
3990 	int l4_key_offset = 0;
3991 
3992 	if (!alg)
3993 		return -EINVAL;
3994 
3995 #define FIELDS_PER_ALG  5
3996 #define MAX_KEY_OFF	40
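/* Each RSS algorithm is composed of up to five extractor fields that
 * together fill at most a 40-byte flow key.
 */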
3997 	/* Clear all fields */
3998 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
3999 
4000 	/* Each of the 32 possible flow key algorithm definitions should
4001 	 * fall into the above incremental config (except ALG0). Otherwise a
4002 	 * single NPC MCAM entry is not sufficient for supporting RSS.
4003 	 *
4004 	 * If a different definition or combination is needed then the NPC
4005 	 * MCAM has to be programmed to filter such pkts and its action
4006 	 * should point to this definition to calculate the flowtag or hash.
4007 	 *
4008 	 * The `for` loop goes over _all_ protocol fields and the following
4009 	 * variables depict the state machine's forward-progress logic.
4010 	 *
4011 	 * keyoff_marker - Enabled when hash byte length needs to be accounted
4012 	 * in field->key_offset update.
4013 	 * field_marker - Enabled when a new field needs to be selected.
4014 	 * group_member - Enabled when protocol is part of a group.
4015 	 */
4016 
4017 	/* Last 4 bits (31:28) are reserved to specify SRC, DST
4018 	 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST,
4019 	 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
4020 	 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
4021 	 */
4022 	l3_l4_src_dst = flow_cfg;
4023 	/* Reset these 4 bits, so that these won't be part of key */
4024 	flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
4025 
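	/* Example (a hypothetical caller's config, for illustration):
	 *
	 *   u32 cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_TCP |
	 *	       NIX_FLOW_KEY_TYPE_L4_DST_ONLY;
	 *
	 * hashes SIP + DIP (8 bytes) and only the TCP destination port
	 * (2 bytes) into the flow key.
	 */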
4026 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
4027 	nr_field = 0; key_off = 0; field_marker = 1;
4028 	field = &tmp; max_bit_pos = fls(flow_cfg);
4029 	for (idx = 0;
4030 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
4031 	     key_off < MAX_KEY_OFF; idx++) {
4032 		key_type = BIT(idx);
4033 		valid_key = flow_cfg & key_type;
4034 		/* Found a field marker, reset the field values */
4035 		if (field_marker)
4036 			memset(&tmp, 0, sizeof(tmp));
4037 
4038 		field_marker = true;
4039 		keyoff_marker = true;
4040 		switch (key_type) {
4041 		case NIX_FLOW_KEY_TYPE_PORT:
4042 			field->sel_chan = true;
4043 			/* This should be set to 1 when SEL_CHAN is set */
4044 			field->bytesm1 = 1;
4045 			break;
4046 		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
4047 			field->lid = NPC_LID_LC;
4048 			field->hdr_offset = 9; /* offset */
4049 			field->bytesm1 = 0; /* 1 byte */
4050 			field->ltype_match = NPC_LT_LC_IP;
4051 			field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
4052 			break;
4053 		case NIX_FLOW_KEY_TYPE_IPV4:
4054 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
4055 			field->lid = NPC_LID_LC;
4056 			field->ltype_match = NPC_LT_LC_IP;
4057 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
4058 				field->lid = NPC_LID_LG;
4059 				field->ltype_match = NPC_LT_LG_TU_IP;
4060 			}
4061 			field->hdr_offset = 12; /* SIP offset */
4062 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
4063 
4064 			/* Only SIP */
4065 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
4066 				field->bytesm1 = 3; /* SIP, 4 bytes */
4067 
4068 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
4069 				/* Both SIP + DIP */
4070 				if (field->bytesm1 == 3) {
4071 					field->bytesm1 = 7; /* SIP + DIP, 8B */
4072 				} else {
4073 					/* Only DIP */
4074 					field->hdr_offset = 16; /* DIP off */
4075 					field->bytesm1 = 3; /* DIP, 4 bytes */
4076 				}
4077 			}
4078 			field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
4079 			keyoff_marker = false;
4080 			break;
4081 		case NIX_FLOW_KEY_TYPE_IPV6:
4082 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
4083 			field->lid = NPC_LID_LC;
4084 			field->ltype_match = NPC_LT_LC_IP6;
4085 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
4086 				field->lid = NPC_LID_LG;
4087 				field->ltype_match = NPC_LT_LG_TU_IP6;
4088 			}
4089 			field->hdr_offset = 8; /* SIP offset */
4090 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
4091 
4092 			/* Only SIP */
4093 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
4094 				field->bytesm1 = 15; /* SIP, 16 bytes */
4095 
4096 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
4097 				/* Both SIP + DIP */
4098 				if (field->bytesm1 == 15) {
4099 					/* SIP + DIP, 32 bytes */
4100 					field->bytesm1 = 31;
4101 				} else {
4102 					/* Only DIP */
4103 					field->hdr_offset = 24; /* DIP off */
4104 					field->bytesm1 = 15; /* DIP,16 bytes */
4105 				}
4106 			}
4107 			field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK;
4108 			break;
4109 		case NIX_FLOW_KEY_TYPE_TCP:
4110 		case NIX_FLOW_KEY_TYPE_UDP:
4111 		case NIX_FLOW_KEY_TYPE_SCTP:
4112 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
4113 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
4114 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
4115 			field->lid = NPC_LID_LD;
4116 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
4117 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
4118 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
4119 				field->lid = NPC_LID_LH;
4120 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
4121 
4122 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
4123 				field->bytesm1 = 1; /* SRC, 2 bytes */
4124 
4125 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
4126 				/* Both SRC + DST */
4127 				if (field->bytesm1 == 1) {
4128 					/* SRC + DST, 4 bytes */
4129 					field->bytesm1 = 3;
4130 				} else {
4131 					/* Only DST port */
4132 					field->hdr_offset = 2; /* DST off */
4133 					field->bytesm1 = 1; /* DST, 2 bytes */
4134 				}
4135 			}
4136 
4137 			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
4138 			 * so no need to change the ltype_match, just change
4139 			 * the lid for inner protocols
4140 			 */
4141 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
4142 				     (int)NPC_LT_LH_TU_TCP);
4143 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
4144 				     (int)NPC_LT_LH_TU_UDP);
4145 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
4146 				     (int)NPC_LT_LH_TU_SCTP);
4147 
4148 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
4149 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
4150 			    valid_key) {
4151 				field->ltype_match |= NPC_LT_LD_TCP;
4152 				group_member = true;
4153 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
4154 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
4155 				   valid_key) {
4156 				field->ltype_match |= NPC_LT_LD_UDP;
4157 				group_member = true;
4158 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
4159 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
4160 				   valid_key) {
4161 				field->ltype_match |= NPC_LT_LD_SCTP;
4162 				group_member = true;
4163 			}
4164 			field->ltype_mask = ~field->ltype_match;
4165 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
4166 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
4167 				/* Handle the case where any of the group item
4168 				 * is enabled in the group but not the final one
4169 				 */
4170 				if (group_member) {
4171 					valid_key = true;
4172 					group_member = false;
4173 				}
4174 			} else {
4175 				field_marker = false;
4176 				keyoff_marker = false;
4177 			}
4178 
4179 			/* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
4180 			 * remember the TCP key offset within the 40-byte hash key.
4181 			 */
4182 			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
4183 				l4_key_offset = key_off;
4184 			break;
4185 		case NIX_FLOW_KEY_TYPE_NVGRE:
4186 			field->lid = NPC_LID_LD;
4187 			field->hdr_offset = 4; /* VSID offset */
4188 			field->bytesm1 = 2;
4189 			field->ltype_match = NPC_LT_LD_NVGRE;
4190 			field->ltype_mask = 0xF;
4191 			break;
4192 		case NIX_FLOW_KEY_TYPE_VXLAN:
4193 		case NIX_FLOW_KEY_TYPE_GENEVE:
4194 			field->lid = NPC_LID_LE;
4195 			field->bytesm1 = 2;
4196 			field->hdr_offset = 4;
4197 			field->ltype_mask = 0xF;
4198 			field_marker = false;
4199 			keyoff_marker = false;
4200 
4201 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
4202 				field->ltype_match |= NPC_LT_LE_VXLAN;
4203 				group_member = true;
4204 			}
4205 
4206 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
4207 				field->ltype_match |= NPC_LT_LE_GENEVE;
4208 				group_member = true;
4209 			}
4210 
4211 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
4212 				if (group_member) {
4213 					field->ltype_mask = ~field->ltype_match;
4214 					field_marker = true;
4215 					keyoff_marker = true;
4216 					valid_key = true;
4217 					group_member = false;
4218 				}
4219 			}
4220 			break;
4221 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
4222 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
4223 			field->lid = NPC_LID_LA;
4224 			field->ltype_match = NPC_LT_LA_ETHER;
4225 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
4226 				field->lid = NPC_LID_LF;
4227 				field->ltype_match = NPC_LT_LF_TU_ETHER;
4228 			}
4229 			field->hdr_offset = 0;
4230 			field->bytesm1 = 5; /* DMAC 6 Byte */
4231 			field->ltype_mask = 0xF;
4232 			break;
4233 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
4234 			field->lid = NPC_LID_LC;
4235 			field->hdr_offset = 40; /* IPV6 hdr */
4236 			field->bytesm1 = 0; /* 1 Byte ext hdr */
4237 			field->ltype_match = NPC_LT_LC_IP6_EXT;
4238 			field->ltype_mask = 0xF;
4239 			break;
4240 		case NIX_FLOW_KEY_TYPE_GTPU:
4241 			field->lid = NPC_LID_LE;
4242 			field->hdr_offset = 4;
4243 			field->bytesm1 = 3; /* 4 bytes TID */
4244 			field->ltype_match = NPC_LT_LE_GTPU;
4245 			field->ltype_mask = 0xF;
4246 			break;
4247 		case NIX_FLOW_KEY_TYPE_CUSTOM0:
4248 			field->lid = NPC_LID_LC;
4249 			field->hdr_offset = 6;
4250 			field->bytesm1 = 1; /* 2 Bytes */
4251 			field->ltype_match = NPC_LT_LC_CUSTOM0;
4252 			field->ltype_mask = 0xF;
4253 			break;
4254 		case NIX_FLOW_KEY_TYPE_VLAN:
4255 			field->lid = NPC_LID_LB;
4256 			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
4257 			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
4258 			field->ltype_match = NPC_LT_LB_CTAG;
4259 			field->ltype_mask = 0xF;
4260 			field->fn_mask = 1; /* Mask out the first nibble */
4261 			break;
4262 		case NIX_FLOW_KEY_TYPE_AH:
4263 		case NIX_FLOW_KEY_TYPE_ESP:
4264 			field->hdr_offset = 0;
4265 			field->bytesm1 = 7; /* SPI + sequence number */
4266 			field->ltype_mask = 0xF;
4267 			field->lid = NPC_LID_LE;
4268 			field->ltype_match = NPC_LT_LE_ESP;
4269 			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
4270 				field->lid = NPC_LID_LD;
4271 				field->ltype_match = NPC_LT_LD_AH;
4272 				field->hdr_offset = 4;
4273 				keyoff_marker = false;
4274 			}
4275 			break;
4276 		}
4277 		field->ena = 1;
4278 
4279 		/* Found a valid flow key type */
4280 		if (valid_key) {
4281 			/* Use the key offset of TCP/UDP/SCTP fields
4282 			 * for ESP/AH fields.
4283 			 */
4284 			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
4285 			    key_type == NIX_FLOW_KEY_TYPE_AH)
4286 				key_off = l4_key_offset;
4287 			field->key_offset = key_off;
4288 			memcpy(&alg[nr_field], field, sizeof(*field));
4289 			max_key_off = max(max_key_off, field->bytesm1 + 1);
4290 
4291 			/* Found a field marker, get the next field */
4292 			if (field_marker)
4293 				nr_field++;
4294 		}
4295 
4296 		/* Found a keyoff marker, update the new key_off */
4297 		if (keyoff_marker) {
4298 			key_off += max_key_off;
4299 			max_key_off = 0;
4300 		}
4301 	}
4302 	/* Processed all the flow key types */
4303 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
4304 		return 0;
4305 	else
4306 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
4307 }
4308 
4309 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
4310 {
4311 	u64 field[FIELDS_PER_ALG];
4312 	struct nix_hw *hw;
4313 	int fid, rc;
4314 
4315 	hw = get_nix_hw(rvu->hw, blkaddr);
4316 	if (!hw)
4317 		return NIX_AF_ERR_INVALID_NIXBLK;
4318 
4319 	/* No room to add a new flow hash algorithm */
4320 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
4321 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
4322 
4323 	/* Generate algo fields for the given flow_cfg */
4324 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
4325 	if (rc)
4326 		return rc;
4327 
4328 	/* Update ALGX_FIELDX register with generated fields */
4329 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
4330 		rvu_write64(rvu, blkaddr,
4331 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
4332 							   fid), field[fid]);
4333 
4334 	/* Store the flow_cfg for further lookup */
4335 	rc = hw->flowkey.in_use;
4336 	hw->flowkey.flowkey[rc] = flow_cfg;
4337 	hw->flowkey.in_use++;
4338 
4339 	return rc;
4340 }
4341 
4342 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
4343 					 struct nix_rss_flowkey_cfg *req,
4344 					 struct nix_rss_flowkey_cfg_rsp *rsp)
4345 {
4346 	u16 pcifunc = req->hdr.pcifunc;
4347 	int alg_idx, nixlf, blkaddr;
4348 	struct nix_hw *nix_hw;
4349 	int err;
4350 
4351 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
4352 	if (err)
4353 		return err;
4354 
4355 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4356 	if (!nix_hw)
4357 		return NIX_AF_ERR_INVALID_NIXBLK;
4358 
4359 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
4360 	/* Failed to get algo index from the existing list, reserve a new one */
4361 	if (alg_idx < 0) {
4362 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
4363 						  req->flowkey_cfg);
4364 		if (alg_idx < 0)
4365 			return alg_idx;
4366 	}
4367 	rsp->alg_idx = alg_idx;
4368 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
4369 				       alg_idx, req->mcam_index);
4370 	return 0;
4371 }
4372 
4373 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
4374 {
4375 	u32 flowkey_cfg, minkey_cfg;
4376 	int alg, fid, rc;
4377 
4378 	/* Disable all flow key algx fieldx */
4379 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
4380 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
4381 			rvu_write64(rvu, blkaddr,
4382 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
4383 				    0);
4384 	}
4385 
4386 	/* IPv4/IPv6 SIP/DIPs */
4387 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
4388 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4389 	if (rc < 0)
4390 		return rc;
4391 
4392 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4393 	minkey_cfg = flowkey_cfg;
4394 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
4395 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4396 	if (rc < 0)
4397 		return rc;
4398 
4399 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4400 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
4401 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4402 	if (rc < 0)
4403 		return rc;
4404 
4405 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4406 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
4407 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4408 	if (rc < 0)
4409 		return rc;
4410 
4411 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
4412 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4413 			NIX_FLOW_KEY_TYPE_UDP;
4414 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4415 	if (rc < 0)
4416 		return rc;
4417 
4418 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4419 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4420 			NIX_FLOW_KEY_TYPE_SCTP;
4421 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4422 	if (rc < 0)
4423 		return rc;
4424 
4425 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4426 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
4427 			NIX_FLOW_KEY_TYPE_SCTP;
4428 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4429 	if (rc < 0)
4430 		return rc;
4431 
4432 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4433 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4434 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
4435 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4436 	if (rc < 0)
4437 		return rc;
4438 
4439 	return 0;
4440 }
4441 
4442 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
4443 				      struct nix_set_mac_addr *req,
4444 				      struct msg_rsp *rsp)
4445 {
4446 	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
4447 	u16 pcifunc = req->hdr.pcifunc;
4448 	int blkaddr, nixlf, err;
4449 	struct rvu_pfvf *pfvf;
4450 
4451 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
4452 	if (err)
4453 		return err;
4454 
4455 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4456 
4457 	/* untrusted VF can't overwrite admin(PF) changes */
4458 	if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4459 	    (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
4460 		dev_warn(rvu->dev,
4461 			 "MAC address set by admin(PF) cannot be overwritten by untrusted VF\n");
4462 		return -EPERM;
4463 	}
4464 
4465 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
4466 
4467 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
4468 				    pfvf->rx_chan_base, req->mac_addr);
4469 
4470 	if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
4471 		ether_addr_copy(pfvf->default_mac, req->mac_addr);
4472 
4473 	return 0;
4474 }
4475 
4476 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
4477 				      struct msg_req *req,
4478 				      struct nix_get_mac_addr_rsp *rsp)
4479 {
4480 	u16 pcifunc = req->hdr.pcifunc;
4481 	struct rvu_pfvf *pfvf;
4482 
4483 	if (!is_nixlf_attached(rvu, pcifunc))
4484 		return NIX_AF_ERR_AF_LF_INVALID;
4485 
4486 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4487 
4488 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
4489 
4490 	return 0;
4491 }
4492 
4493 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
4494 				     struct msg_rsp *rsp)
4495 {
4496 	bool allmulti, promisc, nix_rx_multicast;
4497 	u16 pcifunc = req->hdr.pcifunc;
4498 	struct rvu_pfvf *pfvf;
4499 	int nixlf, err;
4500 
4501 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4502 	promisc = !!(req->mode & NIX_RX_MODE_PROMISC);
4503 	allmulti = !!(req->mode & NIX_RX_MODE_ALLMULTI);
4504 	pfvf->use_mce_list = !!(req->mode & NIX_RX_MODE_USE_MCE);
4505 
4506 	nix_rx_multicast = rvu->hw->cap.nix_rx_multicast && pfvf->use_mce_list;
4507 
4508 	if (is_vf(pcifunc) && !nix_rx_multicast &&
4509 	    (promisc || allmulti)) {
4510 		dev_warn_ratelimited(rvu->dev,
4511 				     "VF promisc/multicast not supported\n");
4512 		return 0;
4513 	}
4514 
4515 	/* untrusted VF can't configure promisc/allmulti */
4516 	if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4517 	    (promisc || allmulti))
4518 		return 0;
4519 
4520 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4521 	if (err)
4522 		return err;
4523 
4524 	if (nix_rx_multicast) {
4525 		/* add/del this PF_FUNC to/from mcast pkt replication list */
4526 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
4527 					  allmulti);
4528 		if (err) {
4529 			dev_err(rvu->dev,
4530 				"Failed to update pcifunc 0x%x to multicast list\n",
4531 				pcifunc);
4532 			return err;
4533 		}
4534 
4535 		/* add/del this PF_FUNC to/from promisc pkt replication list */
4536 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
4537 					  promisc);
4538 		if (err) {
4539 			dev_err(rvu->dev,
4540 				"Failed to update pcifunc 0x%x to promisc list\n",
4541 				pcifunc);
4542 			return err;
4543 		}
4544 	}
4545 
4546 	/* install/uninstall allmulti entry */
4547 	if (allmulti) {
4548 		rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
4549 					       pfvf->rx_chan_base);
4550 	} else {
4551 		if (!nix_rx_multicast)
4552 			rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
4553 	}
4554 
4555 	/* install/uninstall promisc entry */
4556 	if (promisc)
4557 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
4558 					      pfvf->rx_chan_base,
4559 					      pfvf->rx_chan_cnt);
4560 	else
4561 		if (!nix_rx_multicast)
4562 			rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
4563 
4564 	return 0;
4565 }
4566 
4567 static void nix_find_link_frs(struct rvu *rvu,
4568 			      struct nix_frs_cfg *req, u16 pcifunc)
4569 {
4570 	int pf = rvu_get_pf(rvu->pdev, pcifunc);
4571 	struct rvu_pfvf *pfvf;
4572 	int maxlen, minlen;
4573 	int numvfs, hwvf;
4574 	int vf;
4575 
4576 	/* Update with requester's min/max lengths */
4577 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4578 	pfvf->maxlen = req->maxlen;
4579 	if (req->update_minlen)
4580 		pfvf->minlen = req->minlen;
4581 
4582 	maxlen = req->maxlen;
4583 	minlen = req->update_minlen ? req->minlen : 0;
4584 
4585 	/* Get this PF's numVFs and starting hwvf */
4586 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
4587 
4588 	/* For each VF, compare requested max/minlen */
4589 	for (vf = 0; vf < numvfs; vf++) {
4590 		pfvf =  &rvu->hwvf[hwvf + vf];
4591 		if (pfvf->maxlen > maxlen)
4592 			maxlen = pfvf->maxlen;
4593 		if (req->update_minlen &&
4594 		    pfvf->minlen && pfvf->minlen < minlen)
4595 			minlen = pfvf->minlen;
4596 	}
4597 
4598 	/* Compare requested max/minlen with PF's max/minlen */
4599 	pfvf = &rvu->pf[pf];
4600 	if (pfvf->maxlen > maxlen)
4601 		maxlen = pfvf->maxlen;
4602 	if (req->update_minlen &&
4603 	    pfvf->minlen && pfvf->minlen < minlen)
4604 		minlen = pfvf->minlen;
4605 
4606 	/* Update the request with the max/min of this PF and its VFs */
4607 	req->maxlen = maxlen;
4608 	if (req->update_minlen)
4609 		req->minlen = minlen;
4610 }
4611 
4612 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
4613 				    struct msg_rsp *rsp)
4614 {
4615 	struct rvu_hwinfo *hw = rvu->hw;
4616 	u16 pcifunc = req->hdr.pcifunc;
4617 	int pf = rvu_get_pf(rvu->pdev, pcifunc);
4618 	int blkaddr, link = -1;
4619 	struct nix_hw *nix_hw;
4620 	struct rvu_pfvf *pfvf;
4621 	u8 cgx = 0, lmac = 0;
4622 	u16 max_mtu;
4623 	u64 cfg;
4624 
4625 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4626 	if (blkaddr < 0)
4627 		return NIX_AF_ERR_AF_LF_INVALID;
4628 
4629 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4630 	if (!nix_hw)
4631 		return NIX_AF_ERR_INVALID_NIXBLK;
4632 
4633 	if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc))
4634 		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
4635 	else
4636 		rvu_get_lmac_link_max_frs(rvu, &max_mtu);
4637 
4638 	if (!req->sdp_link && req->maxlen > max_mtu)
4639 		return NIX_AF_ERR_FRS_INVALID;
4640 
4641 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
4642 		return NIX_AF_ERR_FRS_INVALID;
4643 
4644 	/* Check if config is for SDP link */
4645 	if (req->sdp_link) {
4646 		if (!hw->sdp_links)
4647 			return NIX_AF_ERR_RX_LINK_INVALID;
4648 		link = hw->cgx_links + hw->lbk_links;
4649 		goto linkcfg;
4650 	}
4651 
4652 	/* Check if the request is from CGX mapped RVU PF */
4653 	if (is_pf_cgxmapped(rvu, pf)) {
4654 		/* Get CGX and LMAC to which this PF is mapped and find link */
4655 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
4656 		link = (cgx * hw->lmac_per_cgx) + lmac;
4657 	} else if (pf == 0) {
4658 		/* For VFs of PF0 ingress is LBK port, so config LBK link */
4659 		pfvf = rvu_get_pfvf(rvu, pcifunc);
4660 		link = hw->cgx_links + pfvf->lbkid;
4661 	} else if (is_rep_dev(rvu, pcifunc)) {
4662 		link = hw->cgx_links + 0;
4663 	}
4664 
4665 	if (link < 0)
4666 		return NIX_AF_ERR_RX_LINK_INVALID;
4667 
4668 linkcfg:
4669 	nix_find_link_frs(rvu, req, pcifunc);
4670 
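	/* NIX_AF_RX_LINKX_CFG: maxlen lives in bits [31:16] and minlen in
	 * bits [15:0], as updated below.
	 */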
4671 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
4672 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
4673 	if (req->update_minlen)
4674 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
4675 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
4676 
4677 	return 0;
4678 }
4679 
4680 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
4681 				    struct msg_rsp *rsp)
4682 {
4683 	int nixlf, blkaddr, err;
4684 	u64 cfg;
4685 
4686 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
4687 	if (err)
4688 		return err;
4689 
4690 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
4691 	/* Set the interface configuration */
4692 	if (req->len_verify & BIT(0))
4693 		cfg |= BIT_ULL(41);
4694 	else
4695 		cfg &= ~BIT_ULL(41);
4696 
4697 	if (req->len_verify & BIT(1))
4698 		cfg |= BIT_ULL(40);
4699 	else
4700 		cfg &= ~BIT_ULL(40);
4701 
4702 	if (req->len_verify & NIX_RX_DROP_RE)
4703 		cfg |= BIT_ULL(32);
4704 	else
4705 		cfg &= ~BIT_ULL(32);
4706 
4707 	if (req->csum_verify & BIT(0))
4708 		cfg |= BIT_ULL(37);
4709 	else
4710 		cfg &= ~BIT_ULL(37);
4711 
4712 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
4713 
4714 	return 0;
4715 }
4716 
4717 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
4718 {
4719 	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
4720 }
4721 
4722 static void nix_link_config(struct rvu *rvu, int blkaddr,
4723 			    struct nix_hw *nix_hw)
4724 {
4725 	struct rvu_hwinfo *hw = rvu->hw;
4726 	int cgx, lmac_cnt, slink, link;
4727 	u16 lbk_max_frs, lmac_max_frs;
4728 	unsigned long lmac_bmap;
4729 	u64 tx_credits, cfg;
4730 	u64 lmac_fifo_len;
4731 	int iter;
4732 
4733 	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
4734 	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
4735 
4736 	/* Set SDP link credit */
4737 	rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT);
4738 
4739 	/* Set default min/max packet lengths allowed on NIX Rx links.
4740 	 *
4741 	 * With the HW reset minlen value of 60 bytes, HW will treat ARP
4742 	 * pkts as undersized and report them to SW as error pkts; hence
4743 	 * set it to 40 bytes.
4744 	 */
4745 	for (link = 0; link < hw->cgx_links; link++) {
4746 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4747 				((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
4748 	}
4749 
4750 	for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
4751 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4752 			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
4753 	}
4754 	if (hw->sdp_links) {
4755 		link = hw->cgx_links + hw->lbk_links;
4756 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4757 			    SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS);
4758 	}
4759 
4760 	/* Get MCS external bypass status for CN10K-B */
4761 	if (mcs_get_blkcnt() == 1) {
4762 		/* Adjust for 2 credits when external bypass is disabled */
4763 		nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
4764 	}
4765 
4766 	/* Set credits for Tx links assuming max packet length allowed.
4767 	 * This will be reconfigured based on MTU set for PF/VF.
4768 	 */
4769 	for (cgx = 0; cgx < hw->cgx; cgx++) {
4770 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
4771 		/* Skip when cgx is not available or lmac cnt is zero */
4772 		if (lmac_cnt <= 0)
4773 			continue;
4774 		slink = cgx * hw->lmac_per_cgx;
4775 
4776 		/* Get LMAC id's from bitmap */
4777 		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
4778 		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
4779 			lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
4780 			if (!lmac_fifo_len) {
4781 				dev_err(rvu->dev,
4782 					"%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4783 					__func__, cgx, iter);
4784 				continue;
4785 			}
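			/* Credits are in units of 16 bytes: reserve one
			 * max-sized frame worth of FIFO and grant the rest.
			 */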
4786 			tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
4787 			/* Enable credits and set credit pkt count to max allowed */
4788 			cfg =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4789 			cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
4790 
4791 			link = iter + slink;
4792 			nix_hw->tx_credits[link] = tx_credits;
4793 			rvu_write64(rvu, blkaddr,
4794 				    NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4795 		}
4796 	}
4797 
4798 	/* Set Tx credits for LBK link */
4799 	slink = hw->cgx_links;
4800 	for (link = slink; link < (slink + hw->lbk_links); link++) {
4801 		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
4802 		nix_hw->tx_credits[link] = tx_credits;
4803 		/* Enable credits and set credit pkt count to max allowed */
4804 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4805 		rvu_write64(rvu, blkaddr,
4806 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
4807 	}
4808 }
4809 
4810 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
4811 {
4812 	int idx, err;
4813 	u64 status;
4814 
4815 	/* Start X2P bus calibration */
4816 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4817 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
4818 	/* Wait for calibration to complete */
4819 	err = rvu_poll_reg(rvu, blkaddr,
4820 			   NIX_AF_STATUS, BIT_ULL(10), false);
4821 	if (err) {
4822 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
4823 		return err;
4824 	}
4825 
4826 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
4827 	/* Check if CGX devices are ready */
4828 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
4829 		/* Skip when cgx port is not available */
4830 		if (!rvu_cgx_pdata(idx, rvu) ||
4831 		    (status & (BIT_ULL(16 + idx))))
4832 			continue;
4833 		dev_err(rvu->dev,
4834 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
4835 		err = -EBUSY;
4836 	}
4837 
4838 	/* Check if LBK is ready */
4839 	if (!(status & BIT_ULL(19))) {
4840 		dev_err(rvu->dev,
4841 			"LBK didn't respond to NIX X2P calibration\n");
4842 		err = -EBUSY;
4843 	}
4844 
4845 	/* Clear 'calibrate_x2p' bit */
4846 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4847 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
4848 	if (err || (status & 0x3FFULL))
4849 		dev_err(rvu->dev,
4850 			"NIX X2P calibration failed, status 0x%llx\n", status);
4851 	if (err)
4852 		return err;
4853 	return 0;
4854 }
4855 
4856 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
4857 {
4858 	u64 cfg;
4859 	int err;
4860 
4861 	/* Set admin queue endianness */
4862 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
4863 #ifdef __BIG_ENDIAN
4864 	cfg |= BIT_ULL(8);
4865 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4866 #else
4867 	cfg &= ~BIT_ULL(8);
4868 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4869 #endif
4870 
4871 	/* Do not bypass NDC cache */
4872 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
4873 	cfg &= ~0x3FFEULL;
4874 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
4875 	/* Disable caching of SQB aka SQEs */
4876 	cfg |= 0x04ULL;
4877 #endif
4878 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
4879 
4880 	/* Result structure can be followed by RQ/SQ/CQ context at
4881 	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
4882 	 * operation type. Alloc sufficient result memory for all operations.
4883 	 */
4884 	err = rvu_aq_alloc(rvu, &block->aq,
4885 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
4886 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
4887 	if (err)
4888 		return err;
4889 
4890 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
4891 	rvu_write64(rvu, block->addr,
4892 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
4893 	return 0;
4894 }
4895 
4896 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
4897 {
4898 	struct rvu_hwinfo *hw = rvu->hw;
4899 	u64 hw_const;
4900 
4901 	hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
4902 
4903 	/* On OcteonTx2 DWRR quantum is directly configured into each of
4904 	 * the transmit scheduler queues, and PF/VF drivers were free to
4905 	 * configure any value up to 2^24.
4906 	 * On CN10K, HW is modified, the quantum configuration at scheduler
4907 	 * queues is in terms of weight. And SW needs to setup a base DWRR MTU
4908 	 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
4909 	 * 'DWRR MTU * weight' to get the quantum.
4910 	 *
4911 	 * Check if HW uses a common MTU for all DWRR quantum configs.
4912 	 * On OcteonTx2 this register field is '0'.
4913 	 */
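	/* Example: with a base DWRR MTU of 8192, a PF/VF wanting an
	 * effective quantum of 16384 programs weight 2, since HW computes
	 * quantum = DWRR MTU * weight.
	 */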
4914 	if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
4915 		hw->cap.nix_common_dwrr_mtu = true;
4916 
4917 	if (hw_const & BIT_ULL(61))
4918 		hw->cap.nix_multiple_dwrr_mtu = true;
4919 }
4920 
4921 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
4922 {
4923 	const struct npc_lt_def_cfg *ltdefs;
4924 	struct rvu_hwinfo *hw = rvu->hw;
4925 	int blkaddr = nix_hw->blkaddr;
4926 	struct rvu_block *block;
4927 	int err;
4928 	u64 cfg;
4929 
4930 	block = &hw->block[blkaddr];
4931 
4932 	if (is_rvu_96xx_B0(rvu)) {
4933 		/* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
4934 		 * internal state when conditional clocks are turned off.
4935 		 * Hence enable them.
4936 		 */
4937 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4938 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
4939 	}
4940 
4941 	/* Set chan/link to backpressure TL3 instead of TL2 */
4942 	rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
4943 
4944 	/* Disable SQ manager's sticky mode operation (set TM6 = 0, TM11 = 0)
4945 	 * This sticky mode is known to cause SQ stalls when multiple
4946 	 * SQs are mapped to same SMQ and transmitting pkts simultaneously.
4947 	 * NIX PSE may deadlock when there are any sticky to non-sticky
4948 	 * transmission. Hence disable it (TM5 = 0).
4949 	 */
4950 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
4951 	cfg &= ~(BIT_ULL(15) | BIT_ULL(14) | BIT_ULL(23));
4952 	/* NIX may drop credits when condition clocks are turned off.
4953 	 * Hence enable control flow clk (set TM9 = 1).
4954 	 */
4955 	cfg |= BIT_ULL(21);
4956 	rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
4957 
4958 	ltdefs = rvu->kpu.lt_def;
4959 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
4960 	err = nix_calibrate_x2p(rvu, blkaddr);
4961 	if (err)
4962 		return err;
4963 
4964 	/* Setup capabilities of the NIX block */
4965 	rvu_nix_setup_capabilities(rvu, blkaddr);
4966 
4967 	/* Initialize admin queue */
4968 	err = nix_aq_init(rvu, block);
4969 	if (err)
4970 		return err;
4971 
4972 	/* Restore CINT timer delay to HW reset values */
4973 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
4974 
4975 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
4976 
4977 	/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
4978 	cfg |= 1ULL;
4979 	if (!is_rvu_otx2(rvu))
4980 		cfg |= NIX_PTP_1STEP_EN;
4981 
4982 	rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
4983 
4984 	if (!is_rvu_otx2(rvu))
4985 		rvu_nix_block_cn10k_init(rvu, nix_hw);
4986 
4987 	if (is_block_implemented(hw, blkaddr)) {
4988 		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
4989 		if (err)
4990 			return err;
4991 
4992 		err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
4993 		if (err)
4994 			return err;
4995 
4996 		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
4997 		if (err)
4998 			return err;
4999 
5000 		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
5001 		if (err)
5002 			return err;
5003 
5004 		err = nix_setup_txvlan(rvu, nix_hw);
5005 		if (err)
5006 			return err;
5007 
5008 		err = nix_setup_bpids(rvu, nix_hw, blkaddr);
5009 		if (err)
5010 			return err;
5011 
5012 		/* Configure segmentation offload formats */
5013 		nix_setup_lso(rvu, nix_hw, blkaddr);
5014 
5015 		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
5016 		 * This helps HW protocol checker to identify headers
5017 		 * and validate length and checksums.
5018 		 */
5019 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
5020 			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
5021 			    ltdefs->rx_ol2.ltype_mask);
5022 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
5023 			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
5024 			    ltdefs->rx_oip4.ltype_mask);
5025 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
5026 			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
5027 			    ltdefs->rx_iip4.ltype_mask);
5028 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
5029 			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
5030 			    ltdefs->rx_oip6.ltype_mask);
5031 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
5032 			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
5033 			    ltdefs->rx_iip6.ltype_mask);
5034 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
5035 			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
5036 			    ltdefs->rx_otcp.ltype_mask);
5037 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
5038 			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
5039 			    ltdefs->rx_itcp.ltype_mask);
5040 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
5041 			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
5042 			    ltdefs->rx_oudp.ltype_mask);
5043 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
5044 			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
5045 			    ltdefs->rx_iudp.ltype_mask);
5046 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
5047 			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
5048 			    ltdefs->rx_osctp.ltype_mask);
5049 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
5050 			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
5051 			    ltdefs->rx_isctp.ltype_mask);
5052 
5053 		if (!is_rvu_otx2(rvu)) {
5054 			/* Enable APAD calculation for other protocols
5055 			 * matching APAD0 and APAD1 lt def registers.
5056 			 */
5057 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
5058 				    (ltdefs->rx_apad0.valid << 11) |
5059 				    (ltdefs->rx_apad0.lid << 8) |
5060 				    (ltdefs->rx_apad0.ltype_match << 4) |
5061 				    ltdefs->rx_apad0.ltype_mask);
5062 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
5063 				    (ltdefs->rx_apad1.valid << 11) |
5064 				    (ltdefs->rx_apad1.lid << 8) |
5065 				    (ltdefs->rx_apad1.ltype_match << 4) |
5066 				    ltdefs->rx_apad1.ltype_mask);
5067 
5068 			/* Receive ethertype definition register defines layer
5069 			 * information in NPC_RESULT_S to identify the Ethertype
5070 			 * location in L2 header. Used for Ethertype overwriting
5071 			 * in inline IPsec flow.
5072 			 */
5073 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
5074 				    (ltdefs->rx_et[0].offset << 12) |
5075 				    (ltdefs->rx_et[0].valid << 11) |
5076 				    (ltdefs->rx_et[0].lid << 8) |
5077 				    (ltdefs->rx_et[0].ltype_match << 4) |
5078 				    ltdefs->rx_et[0].ltype_mask);
5079 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
5080 				    (ltdefs->rx_et[1].offset << 12) |
5081 				    (ltdefs->rx_et[1].valid << 11) |
5082 				    (ltdefs->rx_et[1].lid << 8) |
5083 				    (ltdefs->rx_et[1].ltype_match << 4) |
5084 				    ltdefs->rx_et[1].ltype_mask);
5085 		}
5086 
5087 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
5088 		if (err)
5089 			return err;
5090 
5091 		nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
5092 					     sizeof(u64), GFP_KERNEL);
5093 		if (!nix_hw->tx_credits)
5094 			return -ENOMEM;
5095 
5096 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
5097 		nix_link_config(rvu, blkaddr, nix_hw);
5098 
5099 		/* Enable Channel backpressure */
5100 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
5101 	}
5102 	return 0;
5103 }
5104 
5105 int rvu_nix_init(struct rvu *rvu)
5106 {
5107 	struct rvu_hwinfo *hw = rvu->hw;
5108 	struct nix_hw *nix_hw;
5109 	int blkaddr = 0, err;
5110 	int i = 0;
5111 
5112 	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
5113 			       GFP_KERNEL);
5114 	if (!hw->nix)
5115 		return -ENOMEM;
5116 
5117 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5118 	while (blkaddr) {
5119 		nix_hw = &hw->nix[i];
5120 		nix_hw->rvu = rvu;
5121 		nix_hw->blkaddr = blkaddr;
5122 		err = rvu_nix_block_init(rvu, nix_hw);
5123 		if (err)
5124 			return err;
5125 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5126 		i++;
5127 	}
5128 
5129 	return 0;
5130 }
5131 
5132 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
5133 				  struct rvu_block *block)
5134 {
5135 	struct nix_txsch *txsch;
5136 	struct nix_mcast *mcast;
5137 	struct nix_txvlan *vlan;
5138 	struct nix_hw *nix_hw;
5139 	int lvl;
5140 
5141 	rvu_aq_free(rvu, block->aq);
5142 
5143 	if (is_block_implemented(rvu->hw, blkaddr)) {
5144 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
5145 		if (!nix_hw)
5146 			return;
5147 
5148 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
5149 			txsch = &nix_hw->txsch[lvl];
5150 			kfree(txsch->schq.bmap);
5151 		}
5152 
5153 		kfree(nix_hw->tx_credits);
5154 
5155 		nix_ipolicer_freemem(rvu, nix_hw);
5156 
5157 		vlan = &nix_hw->txvlan;
5158 		kfree(vlan->rsrc.bmap);
5159 		mutex_destroy(&vlan->rsrc_lock);
5160 
5161 		mcast = &nix_hw->mcast;
5162 		qmem_free(rvu->dev, mcast->mce_ctx);
5163 		qmem_free(rvu->dev, mcast->mcast_buf);
5164 		mutex_destroy(&mcast->mce_lock);
5165 	}
5166 }
5167 
5168 void rvu_nix_freemem(struct rvu *rvu)
5169 {
5170 	struct rvu_hwinfo *hw = rvu->hw;
5171 	struct rvu_block *block;
5172 	int blkaddr = 0;
5173 
5174 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5175 	while (blkaddr) {
5176 		block = &hw->block[blkaddr];
5177 		rvu_nix_block_freemem(rvu, blkaddr, block);
5178 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5179 	}
5180 }
5181 
5182 static void nix_mcast_update_action(struct rvu *rvu,
5183 				    struct nix_mcast_grp_elem *elem)
5184 {
5185 	struct npc_mcam *mcam = &rvu->hw->mcam;
5186 	struct nix_rx_action rx_action = { 0 };
5187 	struct nix_tx_action tx_action = { 0 };
5188 	int npc_blkaddr;
5189 
5190 	npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
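	/* MCAM actions are stored as raw u64s; view them through the
	 * rx/tx action structs so that only the MCE start index is patched.
	 */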
5191 	if (elem->dir == NIX_MCAST_INGRESS) {
5192 		*(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam,
5193 							 npc_blkaddr,
5194 							 elem->mcam_index);
5195 		rx_action.index = elem->mce_start_index;
5196 		npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
5197 				    *(u64 *)&rx_action);
5198 	} else {
5199 		*(u64 *)&tx_action = npc_get_mcam_action(rvu, mcam,
5200 							 npc_blkaddr,
5201 							 elem->mcam_index);
5202 		tx_action.index = elem->mce_start_index;
5203 		npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
5204 				    *(u64 *)&tx_action);
5205 	}
5206 }
5207 
5208 static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active)
5209 {
5210 	struct nix_mcast_grp_elem *elem;
5211 	struct nix_mcast_grp *mcast_grp;
5212 	struct nix_hw *nix_hw;
5213 	int blkaddr;
5214 
5215 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5216 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
5217 	if (!nix_hw)
5218 		return;
5219 
5220 	mcast_grp = &nix_hw->mcast_grp;
5221 
5222 	mutex_lock(&mcast_grp->mcast_grp_lock);
5223 	list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) {
5224 		struct nix_mce_list *mce_list;
5225 		struct mce *mce;
5226 
5227 		/* Iterate the group elements and update (enable/disable)
5228 		 * the element belonging to the pcifunc in this request.
5229 		 */
5230 		mce_list = &elem->mcast_mce_list;
5231 		hlist_for_each_entry(mce, &mce_list->head, node) {
5232 			if (mce->pcifunc == pcifunc) {
5233 				mce->is_active = is_active;
5234 				break;
5235 			}
5236 		}
5237 
5238 		/* Dump the updated list to HW */
5239 		if (elem->dir == NIX_MCAST_INGRESS)
5240 			nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
5241 		else
5242 			nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
5243 
5244 		/* Update the multicast index in NPC rule */
5245 		nix_mcast_update_action(rvu, elem);
5246 	}
5247 	mutex_unlock(&mcast_grp->mcast_grp_lock);
5248 }
5249 
5250 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
5251 				     struct msg_rsp *rsp)
5252 {
5253 	u16 pcifunc = req->hdr.pcifunc;
5254 	struct rvu_pfvf *pfvf;
5255 	int nixlf, err, pf;
5256 
5257 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
5258 	if (err)
5259 		return err;
5260 
5261 	/* Enable the interface if it is in any multicast list */
5262 	nix_mcast_update_mce_entry(rvu, pcifunc, 1);
5263 
5264 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
5265 
5266 	npc_mcam_enable_flows(rvu, pcifunc);
5267 
5268 	pfvf = rvu_get_pfvf(rvu, pcifunc);
5269 	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
5270 
5271 	rvu_switch_update_rules(rvu, pcifunc, true);
5272 
5273 	pf = rvu_get_pf(rvu->pdev, pcifunc);
5274 	if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
5275 		rvu_rep_notify_pfvf_state(rvu, pcifunc, true);
5276 
5277 	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
5278 }
5279 
5280 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
5281 				    struct msg_rsp *rsp)
5282 {
5283 	u16 pcifunc = req->hdr.pcifunc;
5284 	struct rvu_pfvf *pfvf;
5285 	int nixlf, err, pf;
5286 
5287 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
5288 	if (err)
5289 		return err;
5290 
5291 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
5292 	/* Disable the interface if it is in any multicast list */
5293 	nix_mcast_update_mce_entry(rvu, pcifunc, 0);
5294 
5295 	pfvf = rvu_get_pfvf(rvu, pcifunc);
5296 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
5297 
5298 	err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
5299 	if (err)
5300 		return err;
5301 
5302 	rvu_switch_update_rules(rvu, pcifunc, false);
5303 	rvu_cgx_tx_enable(rvu, pcifunc, true);
5304 
5305 	pf = rvu_get_pf(rvu->pdev, pcifunc);
5306 	if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
5307 		rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
5308 	return 0;
5309 }
5310 
5311 #define RX_SA_BASE  GENMASK_ULL(52, 7)
5312 
5313 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
5314 {
5315 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
5316 	struct hwctx_disable_req ctx_req;
5317 	int pf = rvu_get_pf(rvu->pdev, pcifunc);
5318 	struct mac_ops *mac_ops;
5319 	u8 cgx_id, lmac_id;
5320 	u64 sa_base;
5321 	void *cgxd;
5322 	int err;
5323 
5324 	ctx_req.hdr.pcifunc = pcifunc;
5325 
5326 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
5327 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
5328 	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
5329 	nix_interface_deinit(rvu, pcifunc, nixlf);
5330 	nix_rx_sync(rvu, blkaddr);
5331 	nix_txschq_free(rvu, pcifunc);
5332 
5333 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
5334 
5335 	if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
5336 		rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
5337 
5338 	rvu_cgx_start_stop_io(rvu, pcifunc, false);
5339 
5340 	if (pfvf->sq_ctx) {
5341 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
5342 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
5343 		if (err)
5344 			dev_err(rvu->dev, "SQ ctx disable failed\n");
5345 	}
5346 
5347 	if (pfvf->rq_ctx) {
5348 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
5349 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
5350 		if (err)
5351 			dev_err(rvu->dev, "RQ ctx disable failed\n");
5352 	}
5353 
5354 	if (pfvf->cq_ctx) {
5355 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
5356 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
5357 		if (err)
5358 			dev_err(rvu->dev, "CQ ctx disable failed\n");
5359 	}
5360 
5361 	/* reset HW config done for Switch headers */
5362 	rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
5363 			       (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
5364 
5365 	/* Disabling CGX and NPC config done for PTP */
5366 	if (pfvf->hw_rx_tstamp_en) {
5367 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
5368 		cgxd = rvu_cgx_pdata(cgx_id, rvu);
5369 		mac_ops = get_mac_ops(cgxd);
5370 		mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
5371 		/* Undo NPC config done for PTP */
5372 		if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
5373 			dev_err(rvu->dev, "NPC config for PTP failed\n");
5374 		pfvf->hw_rx_tstamp_en = false;
5375 	}
5376 
5377 	/* reset priority flow control config */
5378 	rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
5379 
5380 	/* reset 802.3x flow control config */
5381 	rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
5382 
5383 	nix_ctx_free(rvu, pfvf);
5384 
5385 	nix_free_all_bandprof(rvu, pcifunc);
5386 
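	/* Flush CPT contexts only if an inline-IPsec SA base was ever
	 * programmed for this LF (SA base bits [52:7] are non-zero).
	 */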
5387 	sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
5388 	if (FIELD_GET(RX_SA_BASE, sa_base)) {
5389 		err = rvu_cpt_ctx_flush(rvu, pcifunc);
5390 		if (err)
5391 			dev_err(rvu->dev,
5392 				"CPT ctx flush failed with error: %d\n", err);
5393 	}
5394 }
5395 
5396 #define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)
5397 
5398 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
5399 {
5400 	struct rvu_hwinfo *hw = rvu->hw;
5401 	struct rvu_block *block;
5402 	int blkaddr, pf;
5403 	int nixlf;
5404 	u64 cfg;
5405 
5406 	pf = rvu_get_pf(rvu->pdev, pcifunc);
5407 	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
5408 		return 0;
5409 
5410 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5411 	if (blkaddr < 0)
5412 		return NIX_AF_ERR_AF_LF_INVALID;
5413 
5414 	block = &hw->block[blkaddr];
5415 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
5416 	if (nixlf < 0)
5417 		return NIX_AF_ERR_AF_LF_INVALID;
5418 
5419 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
5420 
5421 	if (enable)
5422 		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
5423 	else
5424 		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
5425 
5426 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
5427 
5428 	return 0;
5429 }
5430 
5431 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
5432 					  struct msg_rsp *rsp)
5433 {
5434 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
5435 }
5436 
5437 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
5438 					   struct msg_rsp *rsp)
5439 {
5440 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
5441 }
5442 
5443 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
5444 					struct nix_lso_format_cfg *req,
5445 					struct nix_lso_format_cfg_rsp *rsp)
5446 {
5447 	u16 pcifunc = req->hdr.pcifunc;
5448 	struct nix_hw *nix_hw;
5449 	struct rvu_pfvf *pfvf;
5450 	int blkaddr, idx, f;
5451 	u64 reg;
5452 
5453 	pfvf = rvu_get_pfvf(rvu, pcifunc);
5454 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5455 	if (!pfvf->nixlf || blkaddr < 0)
5456 		return NIX_AF_ERR_AF_LF_INVALID;
5457 
5458 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
5459 	if (!nix_hw)
5460 		return NIX_AF_ERR_INVALID_NIXBLK;
5461 
5462 	/* Find existing matching LSO format, if any */
5463 	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
5464 		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
5465 			reg = rvu_read64(rvu, blkaddr,
5466 					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
5467 			if (req->fields[f] != (reg & req->field_mask))
5468 				break;
5469 		}
5470 
5471 		if (f == NIX_LSO_FIELD_MAX)
5472 			break;
5473 	}
5474 
5475 	if (idx < nix_hw->lso.in_use) {
5476 		/* Match found */
5477 		rsp->lso_format_idx = idx;
5478 		return 0;
5479 	}
5480 
5481 	if (nix_hw->lso.in_use == nix_hw->lso.total)
5482 		return NIX_AF_ERR_LSO_CFG_FAIL;
5483 
5484 	rsp->lso_format_idx = nix_hw->lso.in_use++;
5485 
5486 	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
5487 		rvu_write64(rvu, blkaddr,
5488 			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
5489 			    req->fields[f]);
5490 
5491 	return 0;
5492 }
5493 
5494 #define IPSEC_GEN_CFG_EGRP    GENMASK_ULL(50, 48)
5495 #define IPSEC_GEN_CFG_OPCODE  GENMASK_ULL(47, 32)
5496 #define IPSEC_GEN_CFG_PARAM1  GENMASK_ULL(31, 16)
5497 #define IPSEC_GEN_CFG_PARAM2  GENMASK_ULL(15, 0)
5498 
5499 #define CPT_INST_QSEL_BLOCK   GENMASK_ULL(28, 24)
5500 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
5501 #define CPT_INST_QSEL_SLOT    GENMASK_ULL(7, 0)
5502 
5503 #define CPT_INST_CREDIT_TH    GENMASK_ULL(53, 32)
5504 #define CPT_INST_CREDIT_BPID  GENMASK_ULL(30, 22)
5505 #define CPT_INST_CREDIT_CNT   GENMASK_ULL(21, 0)
5506 
5507 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
5508 				 int blkaddr)
5509 {
5510 	u8 cpt_idx, cpt_blkaddr;
5511 	u64 val;
5512 
5513 	cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
5514 	if (req->enable) {
5515 		val = 0;
5516 		/* Enable context prefetching */
5517 		if (!is_rvu_otx2(rvu))
5518 			val |= BIT_ULL(51);
5519 
5520 		/* Set OPCODE and EGRP */
5521 		val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
5522 		val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
5523 		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
5524 		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
5525 
5526 		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
5527 
5528 		/* Set CPT queue for inline IPSec */
5529 		val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
5530 		val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
5531 				  req->inst_qsel.cpt_pf_func);
5532 
5533 		if (!is_rvu_otx2(rvu)) {
5534 			cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
5535 						       BLKADDR_CPT1;
5536 			val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
5537 		}
5538 
5539 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5540 			    val);
5541 
5542 		/* Set CPT credit */
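		/* The credit register appears to accumulate written values,
		 * so writing (0x3FFFFF - current) below first restores the
		 * counter to its 22-bit maximum before the new threshold,
		 * BPID and count are programmed.
		 */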
5543 		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5544 		if ((val & 0x3FFFFF) != 0x3FFFFF)
5545 			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5546 				    0x3FFFFF - val);
5547 
5548 		val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
5549 		val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
5550 		val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
5551 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
5552 	} else {
5553 		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
5554 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5555 			    0x0);
5556 		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5557 		if ((val & 0x3FFFFF) != 0x3FFFFF)
5558 			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5559 				    0x3FFFFF - val);
5560 	}
5561 }
5562 
5563 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
5564 					  struct nix_inline_ipsec_cfg *req,
5565 					  struct msg_rsp *rsp)
5566 {
5567 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5568 		return 0;
5569 
5570 	nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
5571 	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
5572 		nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
5573 
5574 	return 0;
5575 }
5576 
5577 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
5578 					       struct msg_req *req,
5579 					       struct nix_inline_ipsec_cfg *rsp)
5580 
5581 {
5582 	u64 val;
5583 
5584 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5585 		return 0;
5586 
5587 	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
5588 	rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
5589 	rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
5590 	rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
5591 	rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
5592 
5593 	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
5594 	rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
5595 	rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
5596 	rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
5597 
5598 	return 0;
5599 }
5600 
5601 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
5602 					     struct nix_inline_ipsec_lf_cfg *req,
5603 					     struct msg_rsp *rsp)
5604 {
5605 	int lf, blkaddr, err;
5606 	u64 val;
5607 
5608 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5609 		return 0;
5610 
5611 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
5612 	if (err)
5613 		return err;
5614 
5615 	if (req->enable) {
5616 		/* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
5617 		val = (u64)req->ipsec_cfg0.tt << 44 |
5618 		      (u64)req->ipsec_cfg0.tag_const << 20 |
5619 		      (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
5620 		      req->ipsec_cfg0.lenm1_max;
5621 
5622 		if (blkaddr == BLKADDR_NIX1)
5623 			val |= BIT_ULL(46);
5624 
5625 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
5626 
5627 		/* Set SA_IDX_W and SA_IDX_MAX */
5628 		val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
5629 		      req->ipsec_cfg1.sa_idx_max;
5630 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
5631 
5632 		/* Set SA base address */
5633 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5634 			    req->sa_base_addr);
5635 	} else {
5636 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
5637 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
5638 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5639 			    0x0);
5640 	}
5641 
5642 	return 0;
5643 }
5644 
5645 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
5646 {
5647 	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
5648 
5649 	/* Overwrite VF MAC address with default_mac */
5650 	if (from_vf)
5651 		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
5652 }
5653 
5654 /* NIX ingress policers or bandwidth profiles APIs */
5655 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
5656 {
5657 	struct npc_lt_def_cfg defs, *ltdefs;
5658 
5659 	ltdefs = &defs;
5660 	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
5661 
5662 	/* Extract PCP and DEI fields from the outer VLAN at byte offset
5663 	 * 2 from the start of LB_PTR (i.e. the TAG).
5664 	 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
5665 	 * fields are considered when 'Tunnel enable' is set in profile.
5666 	 */
5667 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
5668 		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
5669 		    (ltdefs->ovlan.ltype_match << 4) |
5670 		    ltdefs->ovlan.ltype_mask);
5671 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
5672 		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
5673 		    (ltdefs->ivlan.ltype_match << 4) |
5674 		    ltdefs->ivlan.ltype_mask);
5675 
5676 	/* DSCP field in outer and tunneled IPv4 packets */
5677 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
5678 		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
5679 		    (ltdefs->rx_oip4.ltype_match << 4) |
5680 		    ltdefs->rx_oip4.ltype_mask);
5681 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
5682 		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
5683 		    (ltdefs->rx_iip4.ltype_match << 4) |
5684 		    ltdefs->rx_iip4.ltype_mask);
5685 
5686 	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
5687 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
5688 		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
5689 		    (ltdefs->rx_oip6.ltype_match << 4) |
5690 		    ltdefs->rx_oip6.ltype_mask);
5691 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
5692 		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
5693 		    (ltdefs->rx_iip6.ltype_match << 4) |
5694 		    ltdefs->rx_iip6.ltype_mask);
5695 }
5696 
5697 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
5698 				    int layer, int prof_idx)
5699 {
5700 	struct nix_cn10k_aq_enq_req aq_req;
5701 	int rc;
5702 
5703 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5704 
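	/* The bandwidth profile AQ index encodes the layer in bits [15:14]
	 * and the profile index in bits [13:0] (see nix_verify_bandprof).
	 */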
5705 	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
5706 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5707 	aq_req.op = NIX_AQ_INSTOP_INIT;
5708 
5709 	/* Context is all zeros, submit to AQ */
5710 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5711 				     (struct nix_aq_enq_req *)&aq_req, NULL);
5712 	if (rc)
5713 		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
5714 			layer, prof_idx);
5715 	return rc;
5716 }
5717 
5718 static int nix_setup_ipolicers(struct rvu *rvu,
5719 			       struct nix_hw *nix_hw, int blkaddr)
5720 {
5721 	struct rvu_hwinfo *hw = rvu->hw;
5722 	struct nix_ipolicer *ipolicer;
5723 	int err, layer, prof_idx;
5724 	u64 cfg;
5725 
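	/* NIX_AF_CONST bit 61 advertises ingress policer (bandwidth
	 * profile) support; bail out early when it is absent.
	 */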
5726 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
5727 	if (!(cfg & BIT_ULL(61))) {
5728 		hw->cap.ipolicer = false;
5729 		return 0;
5730 	}
5731 
5732 	hw->cap.ipolicer = true;
5733 	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
5734 					sizeof(*ipolicer), GFP_KERNEL);
5735 	if (!nix_hw->ipolicer)
5736 		return -ENOMEM;
5737 
5738 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
5739 
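	/* NIX_AF_PL_CONST reports the number of profiles per layer:
	 * leaf in bits [15:0], mid in [31:16], top in [47:32], as
	 * decoded below.
	 */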
5740 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5741 		ipolicer = &nix_hw->ipolicer[layer];
5742 		switch (layer) {
5743 		case BAND_PROF_LEAF_LAYER:
5744 			ipolicer->band_prof.max = cfg & 0XFFFF;
5745 			break;
5746 		case BAND_PROF_MID_LAYER:
5747 			ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
5748 			break;
5749 		case BAND_PROF_TOP_LAYER:
5750 			ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
5751 			break;
5752 		}
5753 
5754 		if (!ipolicer->band_prof.max)
5755 			continue;
5756 
5757 		err = rvu_alloc_bitmap(&ipolicer->band_prof);
5758 		if (err)
5759 			return err;
5760 
5761 		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
5762 						  ipolicer->band_prof.max,
5763 						  sizeof(u16), GFP_KERNEL);
5764 		if (!ipolicer->pfvf_map)
5765 			return -ENOMEM;
5766 
5767 		ipolicer->match_id = devm_kcalloc(rvu->dev,
5768 						  ipolicer->band_prof.max,
5769 						  sizeof(u16), GFP_KERNEL);
5770 		if (!ipolicer->match_id)
5771 			return -ENOMEM;
5772 
5773 		for (prof_idx = 0;
5774 		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
5775 			/* Set AF as current owner for INIT ops to succeed */
5776 			ipolicer->pfvf_map[prof_idx] = 0x00;
5777 
5778 			/* There is no enable bit in the profile context,
5779 			 * hence no context disable op. INIT them all here so
5780 			 * that later on a PF/VF only needs a WRITE op to set
5781 			 * up policer rates and config.
5782 			 */
5783 			err = nix_init_policer_context(rvu, nix_hw,
5784 						       layer, prof_idx);
5785 			if (err)
5786 				return err;
5787 		}
5788 
5789 		/* Allocate memory for maintaining ref_counts for MID level
5790 		 * profiles, this will be needed for leaf layer profiles'
5791 		 * aggregation.
5792 		 */
5793 		if (layer != BAND_PROF_MID_LAYER)
5794 			continue;
5795 
5796 		ipolicer->ref_count = devm_kcalloc(rvu->dev,
5797 						   ipolicer->band_prof.max,
5798 						   sizeof(u16), GFP_KERNEL);
5799 		if (!ipolicer->ref_count)
5800 			return -ENOMEM;
5801 	}
5802 
5803 	/* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
5804 	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
5805 
5806 	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
5807 
5808 	return 0;
5809 }
5810 
5811 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
5812 {
5813 	struct nix_ipolicer *ipolicer;
5814 	int layer;
5815 
5816 	if (!rvu->hw->cap.ipolicer)
5817 		return;
5818 
5819 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5820 		ipolicer = &nix_hw->ipolicer[layer];
5821 
5822 		if (!ipolicer->band_prof.max)
5823 			continue;
5824 
5825 		kfree(ipolicer->band_prof.bmap);
5826 	}
5827 }
5828 
5829 #define NIX_BW_PROF_HI_MASK	GENMASK(10, 7)
5830 
5831 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
5832 			       struct nix_hw *nix_hw, u16 pcifunc)
5833 {
5834 	struct nix_ipolicer *ipolicer;
5835 	int layer, hi_layer, prof_idx;
5836 
5837 	/* Bits [15:14] in profile index represent layer */
5838 	layer = (req->qidx >> 14) & 0x03;
5839 	prof_idx = req->qidx & 0x3FFF;
5840 
5841 	ipolicer = &nix_hw->ipolicer[layer];
5842 	if (prof_idx >= ipolicer->band_prof.max)
5843 		return -EINVAL;
5844 
5845 	/* Check if the profile is allocated to the requesting PCIFUNC,
5846 	 * with the exception of AF. AF is allowed to read and update contexts.
5847 	 */
5848 	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
5849 		return -EINVAL;
5850 
5851 	/* If this profile is linked to higher layer profile then check
5852 	 * if that profile is also allocated to the requesting PCIFUNC
5853 	 * or not.
5854 	 */
5855 	if (!req->prof.hl_en)
5856 		return 0;
5857 
5858 	/* Leaf layer profile can link only to mid layer and
5859 	 * mid layer to top layer.
5860 	 */
5861 	if (layer == BAND_PROF_LEAF_LAYER)
5862 		hi_layer = BAND_PROF_MID_LAYER;
5863 	else if (layer == BAND_PROF_MID_LAYER)
5864 		hi_layer = BAND_PROF_TOP_LAYER;
5865 	else
5866 		return -EINVAL;
5867 
5868 	ipolicer = &nix_hw->ipolicer[hi_layer];
5869 	prof_idx = FIELD_PREP(NIX_BW_PROF_HI_MASK, req->prof.band_prof_id_h);
5870 	prof_idx |= req->prof.band_prof_id;
5871 	if (prof_idx >= ipolicer->band_prof.max ||
5872 	    ipolicer->pfvf_map[prof_idx] != pcifunc)
5873 		return -EINVAL;
5874 
5875 	return 0;
5876 }
5877 
5878 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
5879 					struct nix_bandprof_alloc_req *req,
5880 					struct nix_bandprof_alloc_rsp *rsp)
5881 {
5882 	int blkaddr, layer, prof, idx, err;
5883 	u16 pcifunc = req->hdr.pcifunc;
5884 	struct nix_ipolicer *ipolicer;
5885 	struct nix_hw *nix_hw;
5886 
5887 	if (!rvu->hw->cap.ipolicer)
5888 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5889 
5890 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5891 	if (err)
5892 		return err;
5893 
5894 	mutex_lock(&rvu->rsrc_lock);
5895 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5896 		if (layer == BAND_PROF_INVAL_LAYER)
5897 			continue;
5898 		if (!req->prof_count[layer])
5899 			continue;
5900 
5901 		ipolicer = &nix_hw->ipolicer[layer];
5902 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
5903 			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
5904 			if (idx == MAX_BANDPROF_PER_PFFUNC)
5905 				break;
5906 
5907 			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5908 			if (prof < 0)
5909 				break;
5910 			rsp->prof_count[layer]++;
5911 			rsp->prof_idx[layer][idx] = prof;
5912 			ipolicer->pfvf_map[prof] = pcifunc;
5913 		}
5914 	}
5915 	mutex_unlock(&rvu->rsrc_lock);
5916 	return 0;
5917 }
5918 
5919 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
5920 {
5921 	int blkaddr, layer, prof_idx, err;
5922 	struct nix_ipolicer *ipolicer;
5923 	struct nix_hw *nix_hw;
5924 
5925 	if (!rvu->hw->cap.ipolicer)
5926 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5927 
5928 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5929 	if (err)
5930 		return err;
5931 
5932 	mutex_lock(&rvu->rsrc_lock);
5933 	/* Free all the profiles allocated to the PCIFUNC */
5934 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5935 		if (layer == BAND_PROF_INVAL_LAYER)
5936 			continue;
5937 		ipolicer = &nix_hw->ipolicer[layer];
5938 
5939 		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
5940 			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
5941 				continue;
5942 
5943 			/* Clear ratelimit aggregation, if any */
5944 			if (layer == BAND_PROF_LEAF_LAYER &&
5945 			    ipolicer->match_id[prof_idx])
5946 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5947 
5948 			ipolicer->pfvf_map[prof_idx] = 0x00;
5949 			ipolicer->match_id[prof_idx] = 0;
5950 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5951 		}
5952 	}
5953 	mutex_unlock(&rvu->rsrc_lock);
5954 	return 0;
5955 }
5956 
5957 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
5958 				       struct nix_bandprof_free_req *req,
5959 				       struct msg_rsp *rsp)
5960 {
5961 	int blkaddr, layer, prof_idx, idx, err;
5962 	u16 pcifunc = req->hdr.pcifunc;
5963 	struct nix_ipolicer *ipolicer;
5964 	struct nix_hw *nix_hw;
5965 
5966 	if (req->free_all)
5967 		return nix_free_all_bandprof(rvu, pcifunc);
5968 
5969 	if (!rvu->hw->cap.ipolicer)
5970 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5971 
5972 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5973 	if (err)
5974 		return err;
5975 
5976 	mutex_lock(&rvu->rsrc_lock);
5977 	/* Free the requested profile indices */
5978 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5979 		if (layer == BAND_PROF_INVAL_LAYER)
5980 			continue;
5981 		if (!req->prof_count[layer])
5982 			continue;
5983 
5984 		ipolicer = &nix_hw->ipolicer[layer];
5985 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
5986 			if (idx == MAX_BANDPROF_PER_PFFUNC)
5987 				break;
5988 			prof_idx = req->prof_idx[layer][idx];
5989 			if (prof_idx >= ipolicer->band_prof.max ||
5990 			    ipolicer->pfvf_map[prof_idx] != pcifunc)
5991 				continue;
5992 
5993 			/* Clear ratelimit aggregation, if any */
5994 			if (layer == BAND_PROF_LEAF_LAYER &&
5995 			    ipolicer->match_id[prof_idx])
5996 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5997 
5998 			ipolicer->pfvf_map[prof_idx] = 0x00;
5999 			ipolicer->match_id[prof_idx] = 0;
6000 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
6001 		}
6002 	}
6003 	mutex_unlock(&rvu->rsrc_lock);
6004 	return 0;
6005 }
6006 
6007 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
6008 			struct nix_cn10k_aq_enq_req *aq_req,
6009 			struct nix_cn10k_aq_enq_rsp *aq_rsp,
6010 			u16 pcifunc, u8 ctype, u32 qidx)
6011 {
6012 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
6013 	aq_req->hdr.pcifunc = pcifunc;
6014 	aq_req->ctype = ctype;
6015 	aq_req->op = NIX_AQ_INSTOP_READ;
6016 	aq_req->qidx = qidx;
6017 
6018 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
6019 				       (struct nix_aq_enq_req *)aq_req,
6020 				       (struct nix_aq_enq_rsp *)aq_rsp);
6021 }
6022 
6023 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
6024 					  struct nix_hw *nix_hw,
6025 					  struct nix_cn10k_aq_enq_req *aq_req,
6026 					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
6027 					  u32 leaf_prof, u16 mid_prof)
6028 {
6029 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
6030 	aq_req->hdr.pcifunc = 0x00;
6031 	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
6032 	aq_req->op = NIX_AQ_INSTOP_WRITE;
6033 	aq_req->qidx = leaf_prof;
6034 
6035 	aq_req->prof.band_prof_id = mid_prof & 0x7F;
6036 	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
6037 	aq_req->prof.band_prof_id_h = FIELD_GET(NIX_BW_PROF_HI_MASK, mid_prof);
6038 	aq_req->prof_mask.band_prof_id_h = GENMASK(3, 0);
6039 	aq_req->prof.hl_en = 1;
6040 	aq_req->prof_mask.hl_en = 1;
6041 
6042 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
6043 				       (struct nix_aq_enq_req *)aq_req,
6044 				       (struct nix_aq_enq_rsp *)aq_rsp);
6045 }
6046 
6047 #define NIX_RQ_PROF_HI_MASK	GENMASK(13, 10)
6048 
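/* Tie flows that carry the same match_id but land on different RQs to a
 * common mid layer profile, so that they are policed in aggregate. A mid
 * layer profile is allocated on demand and tracked via ref_count.
 */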
6049 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
6050 				 u16 rq_idx, u16 match_id)
6051 {
6052 	int leaf_prof, mid_prof, leaf_match;
6053 	struct nix_cn10k_aq_enq_req aq_req;
6054 	struct nix_cn10k_aq_enq_rsp aq_rsp;
6055 	struct nix_ipolicer *ipolicer;
6056 	struct nix_hw *nix_hw;
6057 	int blkaddr, idx, rc;
6058 
6059 	if (!rvu->hw->cap.ipolicer)
6060 		return 0;
6061 
6062 	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
6063 	if (rc)
6064 		return rc;
6065 
6066 	/* Fetch the RQ's context to see if policing is enabled */
6067 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
6068 				 NIX_AQ_CTYPE_RQ, rq_idx);
6069 	if (rc) {
6070 		dev_err(rvu->dev,
6071 			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
6072 			__func__, rq_idx, pcifunc);
6073 		return rc;
6074 	}
6075 
6076 	if (!aq_rsp.rq.policer_ena)
6077 		return 0;
6078 
6079 	/* Get the bandwidth profile ID mapped to this RQ */
6080 	leaf_prof = FIELD_PREP(NIX_RQ_PROF_HI_MASK, aq_rsp.rq.band_prof_id_h);
6081 	leaf_prof |= aq_rsp.rq.band_prof_id;
6082 
6083 	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
6084 	ipolicer->match_id[leaf_prof] = match_id;
6085 
6086 	/* Check if any other leaf profile is marked with same match_id */
6087 	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
6088 		if (idx == leaf_prof)
6089 			continue;
6090 		if (ipolicer->match_id[idx] != match_id)
6091 			continue;
6092 
6093 		leaf_match = idx;
6094 		break;
6095 	}
6096 
6097 	if (idx == ipolicer->band_prof.max)
6098 		return 0;
6099 
6100 	/* Fetch the matching profile's context to check if it's already
6101 	 * mapped to a mid level profile.
6102 	 */
6103 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
6104 				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
6105 	if (rc) {
6106 		dev_err(rvu->dev,
6107 			"%s: Failed to fetch context of leaf profile %d\n",
6108 			__func__, leaf_match);
6109 		return rc;
6110 	}
6111 
6112 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
6113 	if (aq_rsp.prof.hl_en) {
6114 		/* Get the mid layer profile index and also map leaf_prof
6115 		 * to it, so that flows which are steered to different RQs
6116 		 * but marked with the same match_id are rate limited in
6117 		 * an aggregate fashion.
6118 		 */
6119 		mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK,
6120 				      aq_rsp.prof.band_prof_id_h);
6121 		mid_prof |= aq_rsp.prof.band_prof_id;
6122 
6123 		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
6124 						    &aq_req, &aq_rsp,
6125 						    leaf_prof, mid_prof);
6126 		if (rc) {
6127 			dev_err(rvu->dev,
6128 				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
6129 				__func__, leaf_prof, mid_prof);
6130 			goto exit;
6131 		}
6132 
6133 		mutex_lock(&rvu->rsrc_lock);
6134 		ipolicer->ref_count[mid_prof]++;
6135 		mutex_unlock(&rvu->rsrc_lock);
6136 		goto exit;
6137 	}
6138 
6139 	/* Allocate a mid layer profile and
6140 	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
6141 	 */
6142 	mutex_lock(&rvu->rsrc_lock);
6143 	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
6144 	if (mid_prof < 0) {
6145 		dev_err(rvu->dev,
6146 			"%s: Unable to allocate mid layer profile\n", __func__);
6147 		mutex_unlock(&rvu->rsrc_lock);
6148 		goto exit;
6149 	}
6150 	mutex_unlock(&rvu->rsrc_lock);
6151 	ipolicer->pfvf_map[mid_prof] = 0x00;
6152 	ipolicer->ref_count[mid_prof] = 0;
6153 
6154 	/* Initialize mid layer profile same as 'leaf_prof' */
6155 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
6156 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
6157 	if (rc) {
6158 		dev_err(rvu->dev,
6159 			"%s: Failed to fetch context of leaf profile %d\n",
6160 			__func__, leaf_prof);
6161 		goto exit;
6162 	}
6163 
6164 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
6165 	aq_req.hdr.pcifunc = 0x00;
6166 	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
6167 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
6168 	aq_req.op = NIX_AQ_INSTOP_WRITE;
6169 	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
6170 	memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
6171 	/* Clear higher layer enable bit in the mid profile, just in case */
6172 	aq_req.prof.hl_en = 0;
6173 	aq_req.prof_mask.hl_en = 1;
6174 
6175 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
6176 				     (struct nix_aq_enq_req *)&aq_req, NULL);
6177 	if (rc) {
6178 		dev_err(rvu->dev,
6179 			"%s: Failed to INIT context of mid layer profile %d\n",
6180 			__func__, mid_prof);
6181 		goto exit;
6182 	}
6183 
6184 	/* Map both leaf profiles to this mid layer profile */
6185 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
6186 					    &aq_req, &aq_rsp,
6187 					    leaf_prof, mid_prof);
6188 	if (rc) {
6189 		dev_err(rvu->dev,
6190 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
6191 			__func__, leaf_prof, mid_prof);
6192 		goto exit;
6193 	}
6194 
6195 	mutex_lock(&rvu->rsrc_lock);
6196 	ipolicer->ref_count[mid_prof]++;
6197 	mutex_unlock(&rvu->rsrc_lock);
6198 
6199 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
6200 					    &aq_req, &aq_rsp,
6201 					    leaf_match, mid_prof);
6202 	if (rc) {
6203 		dev_err(rvu->dev,
6204 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
6205 			__func__, leaf_match, mid_prof);
6206 		ipolicer->ref_count[mid_prof]--;
6207 		goto exit;
6208 	}
6209 
6210 	mutex_lock(&rvu->rsrc_lock);
6211 	ipolicer->ref_count[mid_prof]++;
6212 	mutex_unlock(&rvu->rsrc_lock);
6213 
6214 exit:
6215 	return rc;
6216 }
6217 
6218 /* Called with mutex rsrc_lock */
6219 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
6220 				     u32 leaf_prof)
6221 {
6222 	struct nix_cn10k_aq_enq_req aq_req;
6223 	struct nix_cn10k_aq_enq_rsp aq_rsp;
6224 	struct nix_ipolicer *ipolicer;
6225 	u16 mid_prof;
6226 	int rc;
6227 
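	/* Caller holds rsrc_lock (see note above); drop it across the AQ
	 * context read and re-acquire before touching policer state.
	 */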
6228 	mutex_unlock(&rvu->rsrc_lock);
6229 
6230 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
6231 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
6232 
6233 	mutex_lock(&rvu->rsrc_lock);
6234 	if (rc) {
6235 		dev_err(rvu->dev,
6236 			"%s: Failed to fetch context of leaf profile %d\n",
6237 			__func__, leaf_prof);
6238 		return;
6239 	}
6240 
6241 	if (!aq_rsp.prof.hl_en)
6242 		return;
6243 
6244 	mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, aq_rsp.prof.band_prof_id_h);
6245 	mid_prof |= aq_rsp.prof.band_prof_id;
6246 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
6247 	ipolicer->ref_count[mid_prof]--;
6248 	/* If ref_count is zero, free mid layer profile */
6249 	if (!ipolicer->ref_count[mid_prof]) {
6250 		ipolicer->pfvf_map[mid_prof] = 0x00;
6251 		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
6252 	}
6253 }
6254 
6255 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
6256 					     struct nix_bandprof_get_hwinfo_rsp *rsp)
6257 {
6258 	struct nix_ipolicer *ipolicer;
6259 	int blkaddr, layer, err;
6260 	struct nix_hw *nix_hw;
6261 	u64 tu;
6262 
6263 	if (!rvu->hw->cap.ipolicer)
6264 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
6265 
6266 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6267 	if (err)
6268 		return err;
6269 
6270 	/* Return number of bandwidth profiles free at each layer */
6271 	mutex_lock(&rvu->rsrc_lock);
6272 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
6273 		if (layer == BAND_PROF_INVAL_LAYER)
6274 			continue;
6275 
6276 		ipolicer = &nix_hw->ipolicer[layer];
6277 		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
6278 	}
6279 	mutex_unlock(&rvu->rsrc_lock);
6280 
6281 	/* Set the policer timeunit in nanosec */
6282 	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
6283 	rsp->policer_timeunit = (tu + 1) * 100;
6284 
6285 	return 0;
6286 }
6287 
6288 static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp,
6289 							      u32 mcast_grp_idx)
6290 {
6291 	struct nix_mcast_grp_elem *iter;
6292 	bool is_found = false;
6293 
6294 	list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
6295 		if (iter->mcast_grp_idx == mcast_grp_idx) {
6296 			is_found = true;
6297 			break;
6298 		}
6299 	}
6300 
6301 	if (is_found)
6302 		return iter;
6303 
6304 	return NULL;
6305 }
6306 
6307 int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
6308 {
6309 	struct nix_mcast_grp_elem *elem;
6310 	struct nix_mcast_grp *mcast_grp;
6311 	struct nix_hw *nix_hw;
6312 	int blkaddr, ret;
6313 
6314 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6315 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
6316 	if (!nix_hw)
6317 		return NIX_AF_ERR_INVALID_NIXBLK;
6318 
6319 	mcast_grp = &nix_hw->mcast_grp;
6320 	mutex_lock(&mcast_grp->mcast_grp_lock);
6321 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
6322 	if (!elem)
6323 		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6324 	else
6325 		ret = elem->mce_start_index;
6326 
6327 	mutex_unlock(&mcast_grp->mcast_grp_lock);
6328 	return ret;
6329 }
6330 
6331 void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
6332 {
6333 	struct nix_mcast_grp_destroy_req dreq = { 0 };
6334 	struct nix_mcast_grp_update_req ureq = { 0 };
6335 	struct nix_mcast_grp_update_rsp ursp = { 0 };
6336 	struct nix_mcast_grp_elem *elem, *tmp;
6337 	struct nix_mcast_grp *mcast_grp;
6338 	struct nix_hw *nix_hw;
6339 	int blkaddr;
6340 
6341 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6342 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
6343 	if (!nix_hw)
6344 		return;
6345 
6346 	mcast_grp = &nix_hw->mcast_grp;
6347 
6348 	mutex_lock(&mcast_grp->mcast_grp_lock);
6349 	list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) {
6350 		struct nix_mce_list *mce_list;
6351 		struct hlist_node *tmp;
6352 		struct mce *mce;
6353 
6354 		/* If the pcifunc which created the multicast/mirror
6355 		 * group received an FLR, then delete the entire group.
6356 		 */
6357 		if (elem->pcifunc == pcifunc) {
6358 			/* Delete group */
6359 			dreq.hdr.pcifunc = elem->pcifunc;
6360 			dreq.mcast_grp_idx = elem->mcast_grp_idx;
6361 			dreq.is_af = 1;
6362 			rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
6363 			continue;
6364 		}
6365 
6366 		/* Iterate the group elements and delete the element which
6367 		 * received the FLR.
6368 		 */
6369 		mce_list = &elem->mcast_mce_list;
6370 		hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
6371 			if (mce->pcifunc == pcifunc) {
6372 				ureq.hdr.pcifunc = pcifunc;
6373 				ureq.num_mce_entry = 1;
6374 				ureq.mcast_grp_idx = elem->mcast_grp_idx;
6375 				ureq.op = NIX_MCAST_OP_DEL_ENTRY;
6376 				ureq.pcifunc[0] = pcifunc;
6377 				ureq.is_af = 1;
6378 				rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp);
6379 				break;
6380 			}
6381 		}
6382 	}
6383 	mutex_unlock(&mcast_grp->mcast_grp_lock);
6384 }
6385 
6386 int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
6387 				    u32 mcast_grp_idx, u16 mcam_index)
6388 {
6389 	struct nix_mcast_grp_elem *elem;
6390 	struct nix_mcast_grp *mcast_grp;
6391 	struct nix_hw *nix_hw;
6392 	int blkaddr, ret = 0;
6393 
6394 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6395 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
6396 	if (!nix_hw)
6397 		return NIX_AF_ERR_INVALID_NIXBLK;
6398 
6399 	mcast_grp = &nix_hw->mcast_grp;
6400 	mutex_lock(&mcast_grp->mcast_grp_lock);
6401 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
6402 	if (!elem)
6403 		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6404 	else
6405 		elem->mcam_index = mcam_index;
6406 
6407 	mutex_unlock(&mcast_grp->mcast_grp_lock);
6408 	return ret;
6409 }
6410 
6411 int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
6412 					  struct nix_mcast_grp_create_req *req,
6413 					  struct nix_mcast_grp_create_rsp *rsp)
6414 {
6415 	struct nix_mcast_grp_elem *elem;
6416 	struct nix_mcast_grp *mcast_grp;
6417 	struct nix_hw *nix_hw;
6418 	int blkaddr, err;
6419 
6420 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6421 	if (err)
6422 		return err;
6423 
6424 	mcast_grp = &nix_hw->mcast_grp;
6425 	elem = kzalloc(sizeof(*elem), GFP_KERNEL);
6426 	if (!elem)
6427 		return -ENOMEM;
6428 
6429 	INIT_HLIST_HEAD(&elem->mcast_mce_list.head);
6430 	elem->mcam_index = -1;
6431 	elem->mce_start_index = -1;
6432 	elem->pcifunc = req->hdr.pcifunc;
6433 	elem->dir = req->dir;
6434 	elem->mcast_grp_idx = mcast_grp->next_grp_index++;
6435 
6436 	mutex_lock(&mcast_grp->mcast_grp_lock);
6437 	list_add_tail(&elem->list, &mcast_grp->mcast_grp_head);
6438 	mcast_grp->count++;
6439 	mutex_unlock(&mcast_grp->mcast_grp_lock);
6440 
6441 	rsp->mcast_grp_idx = elem->mcast_grp_idx;
6442 	return 0;
6443 }
6444 
6445 int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
6446 					   struct nix_mcast_grp_destroy_req *req,
6447 					   struct msg_rsp *rsp)
6448 {
6449 	struct npc_delete_flow_req uninstall_req = { 0 };
6450 	struct npc_delete_flow_rsp uninstall_rsp = { 0 };
6451 	struct nix_mcast_grp_elem *elem;
6452 	struct nix_mcast_grp *mcast_grp;
6453 	int blkaddr, err, ret = 0;
6454 	struct nix_mcast *mcast;
6455 	struct nix_hw *nix_hw;
6456 
6457 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6458 	if (err)
6459 		return err;
6460 
6461 	mcast_grp = &nix_hw->mcast_grp;
6462 
6463 	/* If AF is requesting the deletion,
6464 	 * then AF already holds the lock.
6465 	 */
6466 	if (!req->is_af)
6467 		mutex_lock(&mcast_grp->mcast_grp_lock);
6468 
6469 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
6470 	if (!elem) {
6471 		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6472 		goto unlock_grp;
6473 	}
6474 
6475 	/* If no mce entries are associated with the group
6476 	 * then just remove it from the global list.
6477 	 */
6478 	if (!elem->mcast_mce_list.count)
6479 		goto delete_grp;
6480 
6481 	/* Delete the associated mcam entry and
6482 	 * remove all mce entries from the group
6483 	 */
6484 	mcast = &nix_hw->mcast;
6485 	mutex_lock(&mcast->mce_lock);
6486 	if (elem->mcam_index != -1) {
6487 		uninstall_req.hdr.pcifunc = req->hdr.pcifunc;
6488 		uninstall_req.entry = elem->mcam_index;
6489 		rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
6490 	}
6491 
6492 	nix_free_mce_list(mcast, elem->mcast_mce_list.count,
6493 			  elem->mce_start_index, elem->dir);
6494 	nix_delete_mcast_mce_list(&elem->mcast_mce_list);
6495 	mutex_unlock(&mcast->mce_lock);
6496 
6497 delete_grp:
6498 	list_del(&elem->list);
6499 	kfree(elem);
6500 	mcast_grp->count--;
6501 
6502 unlock_grp:
6503 	if (!req->is_af)
6504 		mutex_unlock(&mcast_grp->mcast_grp_lock);
6505 
6506 	return ret;
6507 }
6508 
6509 int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
6510 					  struct nix_mcast_grp_update_req *req,
6511 					  struct nix_mcast_grp_update_rsp *rsp)
6512 {
6513 	struct nix_mcast_grp_destroy_req dreq = { 0 };
6514 	struct npc_mcam *mcam = &rvu->hw->mcam;
6515 	struct nix_mcast_grp_elem *elem;
6516 	struct nix_mcast_grp *mcast_grp;
6517 	int blkaddr, err, npc_blkaddr;
6518 	u16 prev_count, new_count;
6519 	struct nix_mcast *mcast;
6520 	struct nix_hw *nix_hw;
6521 	int i, ret;
6522 
6523 	if (!req->num_mce_entry)
6524 		return 0;
6525 
6526 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6527 	if (err)
6528 		return err;
6529 
6530 	mcast_grp = &nix_hw->mcast_grp;
6531 
6532 	/* If AF is requesting the update,
6533 	 * then AF already holds the lock.
6534 	 */
6535 	if (!req->is_af)
6536 		mutex_lock(&mcast_grp->mcast_grp_lock);
6537 
6538 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
6539 	if (!elem) {
6540 		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6541 		goto unlock_grp;
6542 	}
6543 
6544 	/* If any pcifunc matches the group's pcifunc, then we can
6545 	 * delete the entire group.
6546 	 */
6547 	if (req->op == NIX_MCAST_OP_DEL_ENTRY) {
6548 		for (i = 0; i < req->num_mce_entry; i++) {
6549 			if (elem->pcifunc == req->pcifunc[i]) {
6550 				/* Delete group */
6551 				dreq.hdr.pcifunc = elem->pcifunc;
6552 				dreq.mcast_grp_idx = elem->mcast_grp_idx;
6553 				dreq.is_af = 1;
6554 				rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
6555 				ret = 0;
6556 				goto unlock_grp;
6557 			}
6558 		}
6559 	}
6560 
6561 	mcast = &nix_hw->mcast;
6562 	mutex_lock(&mcast->mce_lock);
6563 	npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
6564 	if (elem->mcam_index != -1)
6565 		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);
6566 
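	/* MCE lists must be contiguous, so resizing frees the old range and
	 * allocates a new one. On any failure below, the previous range is
	 * re-allocated and the MCAM entry re-enabled to roll back.
	 */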
6567 	prev_count = elem->mcast_mce_list.count;
6568 	if (req->op == NIX_MCAST_OP_ADD_ENTRY) {
6569 		new_count = prev_count + req->num_mce_entry;
6570 		if (prev_count)
6571 			nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
6572 
6573 		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
6574 
6575 		/* It is possible not to get contiguous memory */
6576 		if (elem->mce_start_index < 0) {
6577 			if (elem->mcam_index != -1) {
6578 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6579 						      elem->mcam_index, true);
6580 				ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
6581 				goto unlock_mce;
6582 			}
6583 		}
6584 
6585 		ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req);
6586 		if (ret) {
6587 			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
6588 			if (prev_count)
6589 				elem->mce_start_index = nix_alloc_mce_list(mcast,
6590 									   prev_count,
6591 									   elem->dir);
6592 
6593 			if (elem->mcam_index != -1)
6594 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6595 						      elem->mcam_index, true);
6596 
6597 			goto unlock_mce;
6598 		}
6599 	} else {
6600 		if (!prev_count || prev_count < req->num_mce_entry) {
6601 			if (elem->mcam_index != -1)
6602 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6603 						      elem->mcam_index, true);
6604 			ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
6605 			goto unlock_mce;
6606 		}
6607 
6608 		nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
6609 		new_count = prev_count - req->num_mce_entry;
6610 		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
6611 		ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
6612 		if (ret) {
6613 			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
6614 			elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
6615 			if (elem->mcam_index != -1)
6616 				npc_enable_mcam_entry(rvu, mcam,
6617 						      npc_blkaddr,
6618 						      elem->mcam_index,
6619 						      true);
6620 
6621 			goto unlock_mce;
6622 		}
6623 	}
6624 
6625 	if (elem->mcam_index == -1) {
6626 		rsp->mce_start_index = elem->mce_start_index;
6627 		ret = 0;
6628 		goto unlock_mce;
6629 	}
6630 
6631 	nix_mcast_update_action(rvu, elem);
6632 	npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
6633 	rsp->mce_start_index = elem->mce_start_index;
6634 	ret = 0;
6635 
6636 unlock_mce:
6637 	mutex_unlock(&mcast->mce_lock);
6638 
6639 unlock_grp:
6640 	if (!req->is_af)
6641 		mutex_unlock(&mcast_grp->mcast_grp_lock);
6642 
6643 	return ret;
6644 }
6645 
6646 /* On CN10k and older series of silicons, hardware may incorrectly
6647  * assert XOFF on certain channels. Issue a write on NIX_AF_RX_CHANX_CFG
6648  * to broadcast XON on the same.
6649  */
6650 void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr)
6651 {
6652 	struct rvu_block *block = &rvu->hw->block[blkaddr];
6653 	u64 cfg;
6654 
6655 	if (!block->implemented || is_cn20k(rvu->pdev))
6656 		return;
6657 
6658 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0));
6659 	rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0), cfg);
6660 }
6661