1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3 *
4 * Copyright (C) 2018 Marvell.
5 *
6 */
7
8 #include <linux/module.h>
9 #include <linux/pci.h>
10
11 #include "rvu_struct.h"
12 #include "rvu_reg.h"
13 #include "rvu.h"
14 #include "npc.h"
15 #include "mcs.h"
16 #include "cgx.h"
17 #include "lmac_common.h"
18 #include "rvu_npc_hash.h"
19
20 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
21 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
22 int type, int chan_id);
23 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
24 int type, bool add);
25 static int nix_setup_ipolicers(struct rvu *rvu,
26 struct nix_hw *nix_hw, int blkaddr);
27 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
28 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
29 struct nix_hw *nix_hw, u16 pcifunc);
30 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
31 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
32 u32 leaf_prof);
33 static const char *nix_get_ctx_name(int ctype);
34 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc);
35
36 enum mc_tbl_sz {
37 MC_TBL_SZ_256,
38 MC_TBL_SZ_512,
39 MC_TBL_SZ_1K,
40 MC_TBL_SZ_2K,
41 MC_TBL_SZ_4K,
42 MC_TBL_SZ_8K,
43 MC_TBL_SZ_16K,
44 MC_TBL_SZ_32K,
45 MC_TBL_SZ_64K,
46 };
47
48 enum mc_buf_cnt {
49 MC_BUF_CNT_8,
50 MC_BUF_CNT_16,
51 MC_BUF_CNT_32,
52 MC_BUF_CNT_64,
53 MC_BUF_CNT_128,
54 MC_BUF_CNT_256,
55 MC_BUF_CNT_512,
56 MC_BUF_CNT_1024,
57 MC_BUF_CNT_2048,
58 };
59
60 enum nix_mark_fmt_indexes {
61 NIX_MARK_CFG_IP_DSCP_RED,
62 NIX_MARK_CFG_IP_DSCP_YELLOW,
63 NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
64 NIX_MARK_CFG_IP_ECN_RED,
65 NIX_MARK_CFG_IP_ECN_YELLOW,
66 NIX_MARK_CFG_IP_ECN_YELLOW_RED,
67 NIX_MARK_CFG_VLAN_DEI_RED,
68 NIX_MARK_CFG_VLAN_DEI_YELLOW,
69 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
70 NIX_MARK_CFG_MAX,
71 };
72
73 /* For now considering MC resources needed for broadcast
74 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
75 */
76 #define MC_TBL_SIZE MC_TBL_SZ_2K
77 #define MC_BUF_CNT MC_BUF_CNT_1024
78
79 #define MC_TX_MAX 2048
80
81 struct mce {
82 struct hlist_node node;
83 u32 rq_rss_index;
84 u16 pcifunc;
85 u16 channel;
86 u8 dest_type;
87 u8 is_active;
88 u8 reserved[2];
89 };
90
91 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
92 {
93 int i = 0;
94
95 /* If blkaddr is 0, return the first NIX block address */
96 if (blkaddr == 0)
97 return rvu->nix_blkaddr[blkaddr];
98
99 while (i + 1 < MAX_NIX_BLKS) {
100 if (rvu->nix_blkaddr[i] == blkaddr)
101 return rvu->nix_blkaddr[i + 1];
102 i++;
103 }
104
105 return 0;
106 }
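/* Usage note: callers typically walk every NIX block by starting from
 * blkaddr 0 and calling this helper until it returns 0, as
 * rvu_get_nixlf_count() below does.
 */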
107
108 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
109 {
110 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
111 int blkaddr;
112
113 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
114 if (!pfvf->nixlf || blkaddr < 0)
115 return false;
116 return true;
117 }
118
119 int rvu_get_nixlf_count(struct rvu *rvu)
120 {
121 int blkaddr = 0, max = 0;
122 struct rvu_block *block;
123
124 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
125 while (blkaddr) {
126 block = &rvu->hw->block[blkaddr];
127 max += block->lf.max;
128 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
129 }
130 return max;
131 }
132
133 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
134 {
135 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
136 struct rvu_hwinfo *hw = rvu->hw;
137 int blkaddr;
138
139 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
140 if (!pfvf->nixlf || blkaddr < 0)
141 return NIX_AF_ERR_AF_LF_INVALID;
142
143 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
144 if (*nixlf < 0)
145 return NIX_AF_ERR_AF_LF_INVALID;
146
147 if (nix_blkaddr)
148 *nix_blkaddr = blkaddr;
149
150 return 0;
151 }
152
153 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
154 struct nix_hw **nix_hw, int *blkaddr)
155 {
156 struct rvu_pfvf *pfvf;
157
158 pfvf = rvu_get_pfvf(rvu, pcifunc);
159 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
160 if (!pfvf->nixlf || *blkaddr < 0)
161 return NIX_AF_ERR_AF_LF_INVALID;
162
163 *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
164 if (!*nix_hw)
165 return NIX_AF_ERR_INVALID_NIXBLK;
166 return 0;
167 }
168
169 static void nix_mce_list_init(struct nix_mce_list *list, int max)
170 {
171 INIT_HLIST_HEAD(&list->head);
172 list->count = 0;
173 list->max = max;
174 }
175
176 static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir)
177 {
178 struct rsrc_bmap *mce_counter;
179 int idx;
180
181 if (!mcast)
182 return -EINVAL;
183
184 mce_counter = &mcast->mce_counter[dir];
185 if (!rvu_rsrc_check_contig(mce_counter, count))
186 return -ENOSPC;
187
188 idx = rvu_alloc_rsrc_contig(mce_counter, count);
189 return idx;
190 }
191
192 static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir)
193 {
194 struct rsrc_bmap *mce_counter;
195
196 if (!mcast)
197 return;
198
199 mce_counter = &mcast->mce_counter[dir];
200 rvu_free_rsrc_contig(mce_counter, count, start);
201 }
202
203 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
204 {
205 int nix_blkaddr = 0, i = 0;
206 struct rvu *rvu = hw->rvu;
207
208 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
209 while (nix_blkaddr) {
210 if (blkaddr == nix_blkaddr && hw->nix)
211 return &hw->nix[i];
212 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
213 i++;
214 }
215 return NULL;
216 }
217
218 int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
219 {
220 if (hw->cap.nix_multiple_dwrr_mtu)
221 return NIX_AF_DWRR_MTUX(smq_link_type);
222
223 if (smq_link_type == SMQ_LINK_TYPE_SDP)
224 return NIX_AF_DWRR_SDP_MTU;
225
226 /* Here it's the same reg for RPM and LBK */
227 return NIX_AF_DWRR_RPM_MTU;
228 }
229
230 u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
231 {
232 dwrr_mtu &= 0x1FULL;
233
234 /* MTU used for DWRR calculation is a power of 2, up to 64K bytes.
235 * Value of 4 is reserved for MTU value of 9728 bytes.
236 * Value of 5 is reserved for MTU value of 10240 bytes.
237 */
238 switch (dwrr_mtu) {
239 case 4:
240 return 9728;
241 case 5:
242 return 10240;
243 default:
244 return BIT_ULL(dwrr_mtu);
245 }
246
247 return 0;
248 }
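/* Worked example of the encoding above (illustrative): a dwrr_mtu field
 * value of 10 decodes to 2^10 = 1024 bytes and 14 decodes to 16384 bytes,
 * while the reserved values 4 and 5 decode to 9728 and 10240 bytes.
 * Going the other way, the inverse helper below returns
 * ilog2(1024) = 10 for 1024 bytes and 4 for 9728 bytes.
 */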
249
250 u32 convert_bytes_to_dwrr_mtu(u32 bytes)
251 {
252 /* MTU used for DWRR calculation is a power of 2, up to 64K bytes.
253 * Value of 4 is reserved for MTU value of 9728 bytes.
254 * Value of 5 is reserved for MTU value of 10240 bytes.
255 */
256 if (bytes > BIT_ULL(16))
257 return 0;
258
259 switch (bytes) {
260 case 9728:
261 return 4;
262 case 10240:
263 return 5;
264 default:
265 return ilog2(bytes);
266 }
267
268 return 0;
269 }
270
271 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
272 {
273 int err;
274
275 /* Sync all in flight RX packets to LLC/DRAM */
276 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
277 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
278 if (err)
279 dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
280
281 /* SW_SYNC ensures all existing transactions are finished and pkts
282 * are written to LLC/DRAM; queues should be torn down only after a
283 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
284 * an existing transaction might end after the SW_SYNC operation. To
285 * ensure the operation is fully done, do the SW_SYNC twice.
286 */
287 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
288 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
289 if (err)
290 dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
291 }
292
293 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
294 int lvl, u16 pcifunc, u16 schq)
295 {
296 struct rvu_hwinfo *hw = rvu->hw;
297 struct nix_txsch *txsch;
298 struct nix_hw *nix_hw;
299 u16 map_func;
300
301 nix_hw = get_nix_hw(rvu->hw, blkaddr);
302 if (!nix_hw)
303 return false;
304
305 txsch = &nix_hw->txsch[lvl];
306 /* Check out of bounds */
307 if (schq >= txsch->schq.max)
308 return false;
309
310 mutex_lock(&rvu->rsrc_lock);
311 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
312 mutex_unlock(&rvu->rsrc_lock);
313
314 /* TLs aggregating traffic are shared across PF and VFs */
315 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
316 if ((nix_get_tx_link(rvu, map_func) !=
317 nix_get_tx_link(rvu, pcifunc)) &&
318 (rvu_get_pf(rvu->pdev, map_func) !=
319 rvu_get_pf(rvu->pdev, pcifunc)))
320 return false;
321 else
322 return true;
323 }
324
325 if (map_func != pcifunc)
326 return false;
327
328 return true;
329 }
330
331 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
332 struct nix_lf_alloc_rsp *rsp, bool loop)
333 {
334 struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
335 u16 req_chan_base, req_chan_end, req_chan_cnt;
336 struct rvu_hwinfo *hw = rvu->hw;
337 struct sdp_node_info *sdp_info;
338 int pkind, pf, vf, lbkid, vfid;
339 u8 cgx_id, lmac_id;
340 bool from_vf;
341 int err;
342
343 pf = rvu_get_pf(rvu->pdev, pcifunc);
344 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
345 type != NIX_INTF_TYPE_SDP)
346 return 0;
347
348 switch (type) {
349 case NIX_INTF_TYPE_CGX:
350 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
351 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
352
353 pkind = rvu_npc_get_pkind(rvu, pf);
354 if (pkind < 0) {
355 dev_err(rvu->dev,
356 "PF_Func 0x%x: Invalid pkind\n", pcifunc);
357 return -EINVAL;
358 }
359 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
360 pfvf->tx_chan_base = pfvf->rx_chan_base;
361 pfvf->rx_chan_cnt = 1;
362 pfvf->tx_chan_cnt = 1;
363 rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
364
365 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
366 rvu_npc_set_pkind(rvu, pkind, pfvf);
367 break;
368 case NIX_INTF_TYPE_LBK:
369 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
370
371 /* If NIX1 block is present on the silicon then NIXes are
372 * assigned alternately to lbk interfaces. NIX0 should
373 * send packets on lbk link 1 channels and NIX1 should send
374 * on lbk link 0 channels for the communication between
375 * NIX0 and NIX1.
376 */
377 lbkid = 0;
378 if (rvu->hw->lbk_links > 1)
379 lbkid = vf & 0x1 ? 0 : 1;
380
381 /* By default NIX0 is configured to send packet on lbk link 1
382 * (which corresponds to LBK1), same packet will receive on
383 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
384 * (which corresponds to LBK2) packet will receive on NIX0 lbk
385 * link 1.
386 * But if lbk links for NIX0 and NIX1 are negated, i.e. NIX0
387 * transmits and receives on lbk link 0, which corresponds
388 * to the LBK1 block, back-to-back connectivity between NIX and
389 * LBK can be achieved (which is similar to 96xx)
390 *
391 * RX TX
392 * NIX0 lbk link 1 (LBK2) 1 (LBK1)
393 * NIX0 lbk link 0 (LBK0) 0 (LBK0)
394 * NIX1 lbk link 0 (LBK1) 0 (LBK2)
395 * NIX1 lbk link 1 (LBK3) 1 (LBK3)
396 */
397 if (loop)
398 lbkid = !lbkid;
399
400 /* Note that AF's VFs work in pairs and talk over consecutive
401 * loopback channels. Therefore, if an odd number of AF VFs is
402 * enabled, the last VF remains with no pair.
403 */
404 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
405 pfvf->tx_chan_base = vf & 0x1 ?
406 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
407 rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
408 pfvf->rx_chan_cnt = 1;
409 pfvf->tx_chan_cnt = 1;
410 rsp->tx_link = hw->cgx_links + lbkid;
411 pfvf->lbkid = lbkid;
412 rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
413 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
414 pfvf->rx_chan_base,
415 pfvf->rx_chan_cnt);
416
417 break;
418 case NIX_INTF_TYPE_SDP:
419 from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
420 parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
421 sdp_info = parent_pf->sdp_info;
422 if (!sdp_info) {
423 dev_err(rvu->dev, "Invalid sdp_info pointer\n");
424 return -EINVAL;
425 }
426 if (from_vf) {
427 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
428 sdp_info->num_pf_rings;
429 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
430 for (vfid = 0; vfid < vf; vfid++)
431 req_chan_base += sdp_info->vf_rings[vfid];
432 req_chan_cnt = sdp_info->vf_rings[vf];
433 req_chan_end = req_chan_base + req_chan_cnt - 1;
434 if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
435 req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
436 dev_err(rvu->dev,
437 "PF_Func 0x%x: Invalid channel base and count\n",
438 pcifunc);
439 return -EINVAL;
440 }
441 } else {
442 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
443 req_chan_cnt = sdp_info->num_pf_rings;
444 }
445
446 pfvf->rx_chan_base = req_chan_base;
447 pfvf->rx_chan_cnt = req_chan_cnt;
448 pfvf->tx_chan_base = pfvf->rx_chan_base;
449 pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
450
451 rsp->tx_link = hw->cgx_links + hw->lbk_links;
452 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
453 pfvf->rx_chan_base,
454 pfvf->rx_chan_cnt);
455 break;
456 }
457
458 /* Add a UCAST forwarding rule in MCAM with this NIXLF-attached
459 * RVU PF/VF's MAC address.
460 */
461 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
462 pfvf->rx_chan_base, pfvf->mac_addr);
463
464 /* Add this PF_FUNC to bcast pkt replication list */
465 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
466 if (err) {
467 dev_err(rvu->dev,
468 "Bcast list, failed to enable PF_FUNC 0x%x\n",
469 pcifunc);
470 return err;
471 }
472 /* Install MCAM rule matching Ethernet broadcast mac address */
473 rvu_npc_install_bcast_match_entry(rvu, pcifunc,
474 nixlf, pfvf->rx_chan_base);
475
476 pfvf->maxlen = NIC_HW_MIN_FRS;
477 pfvf->minlen = NIC_HW_MIN_FRS;
478
479 return 0;
480 }
481
482 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
483 {
484 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
485 int err;
486
487 pfvf->maxlen = 0;
488 pfvf->minlen = 0;
489
490 /* Remove this PF_FUNC from bcast pkt replication list */
491 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
492 if (err) {
493 dev_err(rvu->dev,
494 "Bcast list, failed to disable PF_FUNC 0x%x\n",
495 pcifunc);
496 }
497
498 /* Free and disable any MCAM entries used by this NIX LF */
499 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
500
501 /* Disable DMAC filters used */
502 rvu_cgx_disable_dmac_entries(rvu, pcifunc);
503 }
504
505 #define NIX_BPIDS_PER_LMAC 8
506 #define NIX_BPIDS_PER_CPT 1
507 static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
508 {
509 struct nix_bp *bp = &hw->bp;
510 int err, max_bpids;
511 u64 cfg;
512
513 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
514 max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg);
515
516 /* Reserve the BPIDs for CGX and SDP */
517 bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
518 bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg);
519 bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
520 NIX_BPIDS_PER_CPT;
521 bp->bpids.max = max_bpids - bp->free_pool_base;
522
523 err = rvu_alloc_bitmap(&bp->bpids);
524 if (err)
525 return err;
526
527 bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
528 sizeof(u16), GFP_KERNEL);
529 if (!bp->fn_map)
530 return -ENOMEM;
531
532 bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
533 sizeof(u8), GFP_KERNEL);
534 if (!bp->intf_map)
535 return -ENOMEM;
536
537 bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
538 sizeof(u8), GFP_KERNEL);
539 if (!bp->ref_cnt)
540 return -ENOMEM;
541
542 return 0;
543 }
544
545 void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
546 {
547 int blkaddr, bpid, err;
548 struct nix_hw *nix_hw;
549 struct nix_bp *bp;
550
551 if (!is_lbk_vf(rvu, pcifunc))
552 return;
553
554 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
555 if (err)
556 return;
557
558 bp = &nix_hw->bp;
559
560 mutex_lock(&rvu->rsrc_lock);
561 for (bpid = 0; bpid < bp->bpids.max; bpid++) {
562 if (bp->fn_map[bpid] == pcifunc) {
563 bp->ref_cnt[bpid]--;
564 if (bp->ref_cnt[bpid])
565 continue;
566 rvu_free_rsrc(&bp->bpids, bpid);
567 bp->fn_map[bpid] = 0;
568 }
569 }
570 mutex_unlock(&rvu->rsrc_lock);
571 }
572
573 static u16 nix_get_channel(u16 chan, bool cpt_link)
574 {
575 /* The CPT channel for a given link channel is always
576 * assumed to be the link channel with BIT(11) set.
577 */
578 return cpt_link ? chan | BIT(11) : chan;
579 }
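/* For example, under the assumption above, link channel 0x100 maps to
 * CPT channel 0x900 (0x100 | BIT(11)), while the link channel itself is
 * returned unchanged when cpt_link is false.
 */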
580
581 static int nix_bp_disable(struct rvu *rvu,
582 struct nix_bp_cfg_req *req,
583 struct msg_rsp *rsp, bool cpt_link)
584 {
585 u16 pcifunc = req->hdr.pcifunc;
586 int blkaddr, pf, type, err;
587 u16 chan_base, chan, bpid;
588 struct rvu_pfvf *pfvf;
589 struct nix_hw *nix_hw;
590 struct nix_bp *bp;
591 u16 chan_v;
592 u64 cfg;
593
594 pf = rvu_get_pf(rvu->pdev, pcifunc);
595 type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
596 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
597 return 0;
598
599 if (is_sdp_pfvf(rvu, pcifunc))
600 type = NIX_INTF_TYPE_SDP;
601
602 if (cpt_link && !rvu->hw->cpt_links)
603 return 0;
604
605 pfvf = rvu_get_pfvf(rvu, pcifunc);
606 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
607 if (err)
608 return err;
609
610 bp = &nix_hw->bp;
611 chan_base = pfvf->rx_chan_base + req->chan_base;
612 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
613 chan_v = nix_get_channel(chan, cpt_link);
614 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
615 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
616 cfg & ~BIT_ULL(16));
617
618 if (type == NIX_INTF_TYPE_LBK) {
619 bpid = cfg & GENMASK(8, 0);
620 mutex_lock(&rvu->rsrc_lock);
621 rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
622 for (bpid = 0; bpid < bp->bpids.max; bpid++) {
623 if (bp->fn_map[bpid] == pcifunc) {
624 bp->fn_map[bpid] = 0;
625 bp->ref_cnt[bpid] = 0;
626 }
627 }
628 mutex_unlock(&rvu->rsrc_lock);
629 }
630 }
631 return 0;
632 }
633
634 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
635 struct nix_bp_cfg_req *req,
636 struct msg_rsp *rsp)
637 {
638 return nix_bp_disable(rvu, req, rsp, false);
639 }
640
641 int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
642 struct nix_bp_cfg_req *req,
643 struct msg_rsp *rsp)
644 {
645 return nix_bp_disable(rvu, req, rsp, true);
646 }
647
648 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
649 int type, int chan_id)
650 {
651 int bpid, blkaddr, sdp_chan_base, err;
652 struct rvu_hwinfo *hw = rvu->hw;
653 struct rvu_pfvf *pfvf;
654 struct nix_hw *nix_hw;
655 u8 cgx_id, lmac_id;
656 struct nix_bp *bp;
657
658 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
659
660 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
661 if (err)
662 return err;
663
664 bp = &nix_hw->bp;
665
666 /* Backpressure IDs range division
667 * CGX channels are mapped to (0 - 191) BPIDs
668 * LBK channels are mapped to (192 - 255) BPIDs
669 * SDP channels are mapped to (256 - 511) BPIDs
670 *
671 * LMAC channels and BPIDs are mapped as follows
672 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
673 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
674 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
675 */
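/* Illustrative arithmetic for the CGX case below (hw->lmac_per_cgx is
 * hardware dependent; 8 is assumed here purely for the example):
 * cgx_id = 1, lmac_id = 2, chan_base = 0 gives
 * bpid = 1 * 8 * NIX_BPIDS_PER_LMAC + 2 * NIX_BPIDS_PER_LMAC + 0 = 80.
 */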
676 switch (type) {
677 case NIX_INTF_TYPE_CGX:
678 if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
679 return NIX_AF_ERR_INVALID_BPID_REQ;
680 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
681 /* Assign bpid based on cgx, lmac and chan id */
682 bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
683 (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;
684
685 if (req->bpid_per_chan)
686 bpid += chan_id;
687 if (bpid > bp->cgx_bpid_cnt)
688 return NIX_AF_ERR_INVALID_BPID;
689 break;
690
691 case NIX_INTF_TYPE_LBK:
692 /* Alloc bpid from the free pool */
693 mutex_lock(&rvu->rsrc_lock);
694 bpid = rvu_alloc_rsrc(&bp->bpids);
695 if (bpid < 0) {
696 mutex_unlock(&rvu->rsrc_lock);
697 return NIX_AF_ERR_INVALID_BPID;
698 }
699 bp->fn_map[bpid] = req->hdr.pcifunc;
700 bp->ref_cnt[bpid]++;
701 bpid += bp->free_pool_base;
702 mutex_unlock(&rvu->rsrc_lock);
703 break;
704 case NIX_INTF_TYPE_SDP:
705 if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
706 return NIX_AF_ERR_INVALID_BPID_REQ;
707
708 /* Handle the use case of 2 SDP blocks */
709 if (!hw->cap.programmable_chans)
710 sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
711 else
712 sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;
713
714 bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
715 if (req->bpid_per_chan)
716 bpid += chan_id;
717
718 if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
719 return NIX_AF_ERR_INVALID_BPID;
720 break;
721 default:
722 return -EINVAL;
723 }
724 return bpid;
725 }
726
727 static int nix_bp_enable(struct rvu *rvu,
728 struct nix_bp_cfg_req *req,
729 struct nix_bp_cfg_rsp *rsp,
730 bool cpt_link)
731 {
732 int blkaddr, pf, type, chan_id = 0;
733 u16 pcifunc = req->hdr.pcifunc;
734 struct rvu_pfvf *pfvf;
735 u16 chan_base, chan;
736 s16 bpid, bpid_base;
737 u16 chan_v;
738 u64 cfg;
739
740 pf = rvu_get_pf(rvu->pdev, pcifunc);
741 type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
742 if (is_sdp_pfvf(rvu, pcifunc))
743 type = NIX_INTF_TYPE_SDP;
744
745 /* Enable backpressure only for CGX mapped PFs and LBK/SDP interfaces */
746 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
747 type != NIX_INTF_TYPE_SDP)
748 return 0;
749
750 if (cpt_link && !rvu->hw->cpt_links)
751 return 0;
752
753 pfvf = rvu_get_pfvf(rvu, pcifunc);
754 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
755
756 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
757 chan_base = pfvf->rx_chan_base + req->chan_base;
758 bpid = bpid_base;
759
760 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
761 if (bpid < 0) {
762 dev_warn(rvu->dev, "Fail to enable backpressure\n");
763 return -EINVAL;
764 }
765
766 chan_v = nix_get_channel(chan, cpt_link);
767
768 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
769 cfg &= ~GENMASK_ULL(8, 0);
770 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
771 cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
772 chan_id++;
773 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
774 }
775
776 for (chan = 0; chan < req->chan_cnt; chan++) {
777 /* Report each channel and the BPID assigned to it: chan in bits [16:10], bpid in bits [9:0] */
778 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
779 (bpid_base & 0x3FF);
780 if (req->bpid_per_chan)
781 bpid_base++;
782 }
783 rsp->chan_cnt = req->chan_cnt;
784
785 return 0;
786 }
787
788 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
789 struct nix_bp_cfg_req *req,
790 struct nix_bp_cfg_rsp *rsp)
791 {
792 return nix_bp_enable(rvu, req, rsp, false);
793 }
794
795 int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
796 struct nix_bp_cfg_req *req,
797 struct nix_bp_cfg_rsp *rsp)
798 {
799 return nix_bp_enable(rvu, req, rsp, true);
800 }
801
802 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
803 u64 format, bool v4, u64 *fidx)
804 {
805 struct nix_lso_format field = {0};
806
807 /* IP's Length field */
808 field.layer = NIX_TXLAYER_OL3;
809 /* In IPv4, the length field is at byte offset 2; for IPv6 it's at offset 4 */
810 field.offset = v4 ? 2 : 4;
811 field.sizem1 = 1; /* i.e. 2 bytes */
812 field.alg = NIX_LSOALG_ADD_PAYLEN;
813 rvu_write64(rvu, blkaddr,
814 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
815 *(u64 *)&field);
816
817 /* No ID field in IPv6 header */
818 if (!v4)
819 return;
820
821 /* IP's ID field */
822 field.layer = NIX_TXLAYER_OL3;
823 field.offset = 4;
824 field.sizem1 = 1; /* i.e. 2 bytes */
825 field.alg = NIX_LSOALG_ADD_SEGNUM;
826 rvu_write64(rvu, blkaddr,
827 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
828 *(u64 *)&field);
829 }
830
831 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
832 u64 format, u64 *fidx)
833 {
834 struct nix_lso_format field = {0};
835
836 /* TCP's sequence number field */
837 field.layer = NIX_TXLAYER_OL4;
838 field.offset = 4;
839 field.sizem1 = 3; /* i.e. 4 bytes */
840 field.alg = NIX_LSOALG_ADD_OFFSET;
841 rvu_write64(rvu, blkaddr,
842 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
843 *(u64 *)&field);
844
845 /* TCP's flags field */
846 field.layer = NIX_TXLAYER_OL4;
847 field.offset = 12;
848 field.sizem1 = 1; /* 2 bytes */
849 field.alg = NIX_LSOALG_TCP_FLAGS;
850 rvu_write64(rvu, blkaddr,
851 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
852 *(u64 *)&field);
853 }
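/* Taken together, nix_setup_lso_tso_l3() and nix_setup_lso_tso_l4()
 * program the static TSO format as: OL3 length with ADD_PAYLEN, the OL3
 * ID field with ADD_SEGNUM (IPv4 only), the OL4 TCP sequence number with
 * ADD_OFFSET, and the OL4 TCP flags with TCP_FLAGS. Any remaining field
 * slots are cleared to NOP by nix_setup_lso() below.
 */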
854
855 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
856 {
857 u64 cfg, idx, fidx = 0;
858
859 /* Get max HW supported format indices */
860 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
861 nix_hw->lso.total = cfg;
862
863 /* Enable LSO */
864 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
865 /* For TSO, set first and middle segment flags to
866 * mask out PSH, RST & FIN flags in TCP packet
867 */
868 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
869 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
870 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
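/* 0xFFF2 keeps every TCP flag bit except FIN (bit 0), RST (bit 2) and
 * PSH (bit 3), matching the comment above; SYN (bit 1) and ACK (bit 4)
 * pass through unchanged.
 */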
871
872 /* Setup default static LSO formats
873 *
874 * Configure format fields for TCPv4 segmentation offload
875 */
876 idx = NIX_LSO_FORMAT_IDX_TSOV4;
877 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
878 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
879
880 /* Set rest of the fields to NOP */
881 for (; fidx < 8; fidx++) {
882 rvu_write64(rvu, blkaddr,
883 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
884 }
885 nix_hw->lso.in_use++;
886
887 /* Configure format fields for TCPv6 segmentation offload */
888 idx = NIX_LSO_FORMAT_IDX_TSOV6;
889 fidx = 0;
890 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
891 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
892
893 /* Set rest of the fields to NOP */
894 for (; fidx < 8; fidx++) {
895 rvu_write64(rvu, blkaddr,
896 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
897 }
898 nix_hw->lso.in_use++;
899 }
900
901 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
902 {
903 kfree(pfvf->rq_bmap);
904 kfree(pfvf->sq_bmap);
905 kfree(pfvf->cq_bmap);
906 if (pfvf->rq_ctx)
907 qmem_free(rvu->dev, pfvf->rq_ctx);
908 if (pfvf->sq_ctx)
909 qmem_free(rvu->dev, pfvf->sq_ctx);
910 if (pfvf->cq_ctx)
911 qmem_free(rvu->dev, pfvf->cq_ctx);
912 if (pfvf->rss_ctx)
913 qmem_free(rvu->dev, pfvf->rss_ctx);
914 if (pfvf->nix_qints_ctx)
915 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
916 if (pfvf->cq_ints_ctx)
917 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
918
919 pfvf->rq_bmap = NULL;
920 pfvf->cq_bmap = NULL;
921 pfvf->sq_bmap = NULL;
922 pfvf->rq_ctx = NULL;
923 pfvf->sq_ctx = NULL;
924 pfvf->cq_ctx = NULL;
925 pfvf->rss_ctx = NULL;
926 pfvf->nix_qints_ctx = NULL;
927 pfvf->cq_ints_ctx = NULL;
928 }
929
930 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
931 struct rvu_pfvf *pfvf, int nixlf,
932 int rss_sz, int rss_grps, int hwctx_size,
933 u64 way_mask, bool tag_lsb_as_adder)
934 {
935 int err, grp, num_indices;
936 u64 val;
937
938 /* RSS is not requested for this NIXLF */
939 if (!rss_sz)
940 return 0;
941 num_indices = rss_sz * rss_grps;
942
943 /* Alloc NIX RSS HW context memory and config the base */
944 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
945 if (err)
946 return err;
947
948 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
949 (u64)pfvf->rss_ctx->iova);
950
951 /* Config full RSS table size, enable RSS and caching */
952 val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
953 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
954
955 if (tag_lsb_as_adder)
956 val |= BIT_ULL(5);
957
958 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
959 /* Config RSS group offset and sizes */
960 for (grp = 0; grp < rss_grps; grp++)
961 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
962 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
963 return 0;
964 }
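/* Worked example of the sizing above, assuming MAX_RSS_INDIR_TBL_SIZE is
 * 256: rss_sz = 256 and rss_grps = 8 give num_indices = 2048, so the size
 * field written to NIX_AF_LFX_RSS_CFG is ilog2(2048 / 256) = 3, and each
 * group gets offset grp * 256 with a per-group size field of
 * ilog2(256) - 1 = 7.
 */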
965
966 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
967 struct nix_aq_inst_s *inst)
968 {
969 struct admin_queue *aq = block->aq;
970 struct nix_aq_res_s *result;
971 int timeout = 1000;
972 u64 reg, head;
973 int ret;
974
975 result = (struct nix_aq_res_s *)aq->res->base;
976
977 /* Get current head pointer where to append this instruction */
978 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
979 head = (reg >> 4) & AQ_PTR_MASK;
980
981 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
982 (void *)inst, aq->inst->entry_sz);
983 memset(result, 0, sizeof(*result));
984 /* sync into memory */
985 wmb();
986
987 /* Ring the doorbell and wait for result */
988 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
989 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
990 cpu_relax();
991 udelay(1);
992 timeout--;
993 if (!timeout)
994 return -EBUSY;
995 }
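/* With the 1 usec delay per iteration and a timeout of 1000 iterations,
 * the loop above gives the AQ roughly 1 msec to post a completion before
 * bailing out with -EBUSY.
 */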
996
997 if (result->compcode != NIX_AQ_COMP_GOOD) {
998 /* TODO: Replace this with some error code */
999 if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
1000 result->compcode == NIX_AQ_COMP_LOCKERR ||
1001 result->compcode == NIX_AQ_COMP_CTX_POISON) {
1002 ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
1003 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
1004 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
1005 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
1006 if (ret)
1007 dev_err(rvu->dev,
1008 "%s: Not able to unlock cachelines\n", __func__);
1009 }
1010
1011 return -EBUSY;
1012 }
1013
1014 return 0;
1015 }
1016
1017 static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
1018 u16 *smq, u16 *smq_mask)
1019 {
1020 struct nix_cn10k_aq_enq_req *aq_req;
1021
1022 if (!is_rvu_otx2(rvu)) {
1023 aq_req = (struct nix_cn10k_aq_enq_req *)req;
1024 *smq = aq_req->sq.smq;
1025 *smq_mask = aq_req->sq_mask.smq;
1026 } else {
1027 *smq = req->sq.smq;
1028 *smq_mask = req->sq_mask.smq;
1029 }
1030 }
1031
1032 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
1033 struct nix_aq_enq_req *req,
1034 struct nix_aq_enq_rsp *rsp)
1035 {
1036 struct rvu_hwinfo *hw = rvu->hw;
1037 u16 pcifunc = req->hdr.pcifunc;
1038 int nixlf, blkaddr, rc = 0;
1039 struct nix_aq_inst_s inst;
1040 struct rvu_block *block;
1041 struct admin_queue *aq;
1042 struct rvu_pfvf *pfvf;
1043 u16 smq, smq_mask;
1044 void *ctx, *mask;
1045 bool ena;
1046 u64 cfg;
1047
1048 blkaddr = nix_hw->blkaddr;
1049 block = &hw->block[blkaddr];
1050 aq = block->aq;
1051 if (!aq) {
1052 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
1053 return NIX_AF_ERR_AQ_ENQUEUE;
1054 }
1055
1056 pfvf = rvu_get_pfvf(rvu, pcifunc);
1057 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1058
1059 /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
1060 * operations done by AF itself.
1061 */
1062 if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
1063 (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
1064 if (!pfvf->nixlf || nixlf < 0)
1065 return NIX_AF_ERR_AF_LF_INVALID;
1066 }
1067
1068 switch (req->ctype) {
1069 case NIX_AQ_CTYPE_RQ:
1070 /* Check if index exceeds max no of queues */
1071 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
1072 rc = NIX_AF_ERR_AQ_ENQUEUE;
1073 break;
1074 case NIX_AQ_CTYPE_SQ:
1075 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
1076 rc = NIX_AF_ERR_AQ_ENQUEUE;
1077 break;
1078 case NIX_AQ_CTYPE_CQ:
1079 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
1080 rc = NIX_AF_ERR_AQ_ENQUEUE;
1081 break;
1082 case NIX_AQ_CTYPE_RSS:
1083 /* Check if RSS is enabled and qidx is within range */
1084 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
1085 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
1086 (req->qidx >= (256UL << (cfg & 0xF))))
1087 rc = NIX_AF_ERR_AQ_ENQUEUE;
1088 break;
1089 case NIX_AQ_CTYPE_MCE:
1090 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
1091
1092 /* Check if index exceeds MCE list length */
1093 if (!nix_hw->mcast.mce_ctx ||
1094 (req->qidx >= (256UL << (cfg & 0xF))))
1095 rc = NIX_AF_ERR_AQ_ENQUEUE;
1096
1097 /* Adding multicast lists for requests from PF/VFs is not
1098 * yet supported, so ignore this.
1099 */
1100 if (rsp)
1101 rc = NIX_AF_ERR_AQ_ENQUEUE;
1102 break;
1103 case NIX_AQ_CTYPE_BANDPROF:
1104 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
1105 nix_hw, pcifunc))
1106 rc = NIX_AF_ERR_INVALID_BANDPROF;
1107 break;
1108 default:
1109 rc = NIX_AF_ERR_AQ_ENQUEUE;
1110 }
1111
1112 if (rc)
1113 return rc;
1114
1115 nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
1116 /* Check if SQ pointed SMQ belongs to this PF/VF or not */
1117 if (req->ctype == NIX_AQ_CTYPE_SQ &&
1118 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
1119 (req->op == NIX_AQ_INSTOP_WRITE &&
1120 req->sq_mask.ena && req->sq.ena && smq_mask))) {
1121 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
1122 pcifunc, smq))
1123 return NIX_AF_ERR_AQ_ENQUEUE;
1124 }
1125
1126 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
1127 inst.lf = nixlf;
1128 inst.cindex = req->qidx;
1129 inst.ctype = req->ctype;
1130 inst.op = req->op;
1131 /* Currently we are not supporting enqueuing multiple instructions,
1132 * so always choose the first entry in result memory.
1133 */
1134 inst.res_addr = (u64)aq->res->iova;
1135
1136 /* Hardware uses the same aq->res->base for updating the result of
1137 * the previous instruction, hence wait here till it is done.
1138 */
1139 spin_lock(&aq->lock);
1140
1141 /* Clean result + context memory */
1142 memset(aq->res->base, 0, aq->res->entry_sz);
1143 /* Context needs to be written at RES_ADDR + 128 */
1144 ctx = aq->res->base + 128;
1145 /* Mask needs to be written at RES_ADDR + 256 */
1146 mask = aq->res->base + 256;
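/* Layout of the shared result area as used here: the nix_aq_res_s result
 * written back by hardware lives at aq->res->base, the context image for
 * INIT/WRITE goes at offset 128, and the write mask at offset 256,
 * matching the RES_ADDR offsets noted above.
 */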
1147
1148 switch (req->op) {
1149 case NIX_AQ_INSTOP_WRITE:
1150 if (req->ctype == NIX_AQ_CTYPE_RQ)
1151 memcpy(mask, &req->rq_mask,
1152 sizeof(struct nix_rq_ctx_s));
1153 else if (req->ctype == NIX_AQ_CTYPE_SQ)
1154 memcpy(mask, &req->sq_mask,
1155 sizeof(struct nix_sq_ctx_s));
1156 else if (req->ctype == NIX_AQ_CTYPE_CQ)
1157 memcpy(mask, &req->cq_mask,
1158 sizeof(struct nix_cq_ctx_s));
1159 else if (req->ctype == NIX_AQ_CTYPE_RSS)
1160 memcpy(mask, &req->rss_mask,
1161 sizeof(struct nix_rsse_s));
1162 else if (req->ctype == NIX_AQ_CTYPE_MCE)
1163 memcpy(mask, &req->mce_mask,
1164 sizeof(struct nix_rx_mce_s));
1165 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1166 memcpy(mask, &req->prof_mask,
1167 sizeof(struct nix_bandprof_s));
1168 fallthrough;
1169 case NIX_AQ_INSTOP_INIT:
1170 if (req->ctype == NIX_AQ_CTYPE_RQ)
1171 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
1172 else if (req->ctype == NIX_AQ_CTYPE_SQ)
1173 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
1174 else if (req->ctype == NIX_AQ_CTYPE_CQ)
1175 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
1176 else if (req->ctype == NIX_AQ_CTYPE_RSS)
1177 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
1178 else if (req->ctype == NIX_AQ_CTYPE_MCE)
1179 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
1180 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1181 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
1182 break;
1183 case NIX_AQ_INSTOP_NOP:
1184 case NIX_AQ_INSTOP_READ:
1185 case NIX_AQ_INSTOP_LOCK:
1186 case NIX_AQ_INSTOP_UNLOCK:
1187 break;
1188 default:
1189 rc = NIX_AF_ERR_AQ_ENQUEUE;
1190 spin_unlock(&aq->lock);
1191 return rc;
1192 }
1193
1194 /* Submit the instruction to AQ */
1195 rc = nix_aq_enqueue_wait(rvu, block, &inst);
1196 if (rc) {
1197 spin_unlock(&aq->lock);
1198 return rc;
1199 }
1200
1201 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
1202 if (req->op == NIX_AQ_INSTOP_INIT) {
1203 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
1204 __set_bit(req->qidx, pfvf->rq_bmap);
1205 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
1206 __set_bit(req->qidx, pfvf->sq_bmap);
1207 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
1208 __set_bit(req->qidx, pfvf->cq_bmap);
1209 }
1210
1211 if (req->op == NIX_AQ_INSTOP_WRITE) {
1212 if (req->ctype == NIX_AQ_CTYPE_RQ) {
1213 ena = (req->rq.ena & req->rq_mask.ena) |
1214 (test_bit(req->qidx, pfvf->rq_bmap) &
1215 ~req->rq_mask.ena);
1216 if (ena)
1217 __set_bit(req->qidx, pfvf->rq_bmap);
1218 else
1219 __clear_bit(req->qidx, pfvf->rq_bmap);
1220 }
1221 if (req->ctype == NIX_AQ_CTYPE_SQ) {
1222 ena = (req->rq.ena & req->sq_mask.ena) |
1223 (test_bit(req->qidx, pfvf->sq_bmap) &
1224 ~req->sq_mask.ena);
1225 if (ena)
1226 __set_bit(req->qidx, pfvf->sq_bmap);
1227 else
1228 __clear_bit(req->qidx, pfvf->sq_bmap);
1229 }
1230 if (req->ctype == NIX_AQ_CTYPE_CQ) {
1231 ena = (req->rq.ena & req->cq_mask.ena) |
1232 (test_bit(req->qidx, pfvf->cq_bmap) &
1233 ~req->cq_mask.ena);
1234 if (ena)
1235 __set_bit(req->qidx, pfvf->cq_bmap);
1236 else
1237 __clear_bit(req->qidx, pfvf->cq_bmap);
1238 }
1239 }
1240
1241 if (rsp) {
1242 /* Copy read context into mailbox */
1243 if (req->op == NIX_AQ_INSTOP_READ) {
1244 if (req->ctype == NIX_AQ_CTYPE_RQ)
1245 memcpy(&rsp->rq, ctx,
1246 sizeof(struct nix_rq_ctx_s));
1247 else if (req->ctype == NIX_AQ_CTYPE_SQ)
1248 memcpy(&rsp->sq, ctx,
1249 sizeof(struct nix_sq_ctx_s));
1250 else if (req->ctype == NIX_AQ_CTYPE_CQ)
1251 memcpy(&rsp->cq, ctx,
1252 sizeof(struct nix_cq_ctx_s));
1253 else if (req->ctype == NIX_AQ_CTYPE_RSS)
1254 memcpy(&rsp->rss, ctx,
1255 sizeof(struct nix_rsse_s));
1256 else if (req->ctype == NIX_AQ_CTYPE_MCE)
1257 memcpy(&rsp->mce, ctx,
1258 sizeof(struct nix_rx_mce_s));
1259 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1260 memcpy(&rsp->prof, ctx,
1261 sizeof(struct nix_bandprof_s));
1262 }
1263 }
1264
1265 spin_unlock(&aq->lock);
1266 return 0;
1267 }
1268
1269 static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
1270 struct nix_aq_enq_req *req, u8 ctype)
1271 {
1272 struct nix_cn10k_aq_enq_req aq_req;
1273 struct nix_cn10k_aq_enq_rsp aq_rsp;
1274 int rc, word;
1275
1276 if (req->ctype != NIX_AQ_CTYPE_CQ)
1277 return 0;
1278
1279 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
1280 req->hdr.pcifunc, ctype, req->qidx);
1281 if (rc) {
1282 dev_err(rvu->dev,
1283 "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
1284 __func__, nix_get_ctx_name(ctype), req->qidx,
1285 req->hdr.pcifunc);
1286 return rc;
1287 }
1288
1289 /* Make copy of original context & mask which are required
1290 * for resubmission
1291 */
1292 memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
1293 memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
1294
1295 /* exclude fields which HW can update */
1296 aq_req.cq_mask.cq_err = 0;
1297 aq_req.cq_mask.wrptr = 0;
1298 aq_req.cq_mask.tail = 0;
1299 aq_req.cq_mask.head = 0;
1300 aq_req.cq_mask.avg_level = 0;
1301 aq_req.cq_mask.update_time = 0;
1302 aq_req.cq_mask.substream = 0;
1303
1304 /* Context mask (cq_mask) holds mask value of fields which
1305 * are changed in AQ WRITE operation.
1306 * for example cq.drop = 0xa;
1307 * cq_mask.drop = 0xff;
1308 * The logic below performs '&' between cq and cq_mask so that
1309 * non-updated fields are masked out for the request and response
1310 * comparison
1311 */
1312 for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
1313 word++) {
1314 *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
1315 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1316 *(u64 *)((u8 *)&aq_req.cq + word * 8) &=
1317 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1318 }
1319
1320 if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
1321 return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
1322
1323 return 0;
1324 }
1325
1326 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
1327 struct nix_aq_enq_rsp *rsp)
1328 {
1329 struct nix_hw *nix_hw;
1330 int err, retries = 5;
1331 int blkaddr;
1332
1333 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
1334 if (blkaddr < 0)
1335 return NIX_AF_ERR_AF_LF_INVALID;
1336
1337 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1338 if (!nix_hw)
1339 return NIX_AF_ERR_INVALID_NIXBLK;
1340
1341 retry:
1342 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
1343
1344 /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'.
1345 * As a workaround, perform a CQ context read after each AQ write. If the
1346 * read shows the AQ write was not applied, perform the AQ write again.
1347 */
1348 if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
1349 err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
1350 if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
1351 if (retries--)
1352 goto retry;
1353 else
1354 return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
1355 }
1356 }
1357
1358 return err;
1359 }
1360
1361 static const char *nix_get_ctx_name(int ctype)
1362 {
1363 switch (ctype) {
1364 case NIX_AQ_CTYPE_CQ:
1365 return "CQ";
1366 case NIX_AQ_CTYPE_SQ:
1367 return "SQ";
1368 case NIX_AQ_CTYPE_RQ:
1369 return "RQ";
1370 case NIX_AQ_CTYPE_RSS:
1371 return "RSS";
1372 }
1373 return "";
1374 }
1375
1376 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
1377 {
1378 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1379 struct nix_aq_enq_req aq_req;
1380 unsigned long *bmap;
1381 int qidx, q_cnt = 0;
1382 int err = 0, rc;
1383
1384 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
1385 return NIX_AF_ERR_AQ_ENQUEUE;
1386
1387 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1388 aq_req.hdr.pcifunc = req->hdr.pcifunc;
1389
1390 if (req->ctype == NIX_AQ_CTYPE_CQ) {
1391 aq_req.cq.ena = 0;
1392 aq_req.cq_mask.ena = 1;
1393 aq_req.cq.bp_ena = 0;
1394 aq_req.cq_mask.bp_ena = 1;
1395 q_cnt = pfvf->cq_ctx->qsize;
1396 bmap = pfvf->cq_bmap;
1397 }
1398 if (req->ctype == NIX_AQ_CTYPE_SQ) {
1399 aq_req.sq.ena = 0;
1400 aq_req.sq_mask.ena = 1;
1401 q_cnt = pfvf->sq_ctx->qsize;
1402 bmap = pfvf->sq_bmap;
1403 }
1404 if (req->ctype == NIX_AQ_CTYPE_RQ) {
1405 aq_req.rq.ena = 0;
1406 aq_req.rq_mask.ena = 1;
1407 q_cnt = pfvf->rq_ctx->qsize;
1408 bmap = pfvf->rq_bmap;
1409 }
1410
1411 aq_req.ctype = req->ctype;
1412 aq_req.op = NIX_AQ_INSTOP_WRITE;
1413
1414 for (qidx = 0; qidx < q_cnt; qidx++) {
1415 if (!test_bit(qidx, bmap))
1416 continue;
1417 aq_req.qidx = qidx;
1418 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1419 if (rc) {
1420 err = rc;
1421 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1422 nix_get_ctx_name(req->ctype), qidx);
1423 }
1424 }
1425
1426 return err;
1427 }
1428
1429 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1430 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1431 {
1432 struct nix_aq_enq_req lock_ctx_req;
1433 int err;
1434
1435 if (req->op != NIX_AQ_INSTOP_INIT)
1436 return 0;
1437
1438 if (req->ctype == NIX_AQ_CTYPE_MCE ||
1439 req->ctype == NIX_AQ_CTYPE_DYNO)
1440 return 0;
1441
1442 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1443 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1444 lock_ctx_req.ctype = req->ctype;
1445 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1446 lock_ctx_req.qidx = req->qidx;
1447 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1448 if (err)
1449 dev_err(rvu->dev,
1450 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1451 req->hdr.pcifunc,
1452 nix_get_ctx_name(req->ctype), req->qidx);
1453 return err;
1454 }
1455
1456 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1457 struct nix_aq_enq_req *req,
1458 struct nix_aq_enq_rsp *rsp)
1459 {
1460 int err;
1461
1462 err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1463 if (!err)
1464 err = nix_lf_hwctx_lockdown(rvu, req);
1465 return err;
1466 }
1467 #else
1468
1469 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1470 struct nix_aq_enq_req *req,
1471 struct nix_aq_enq_rsp *rsp)
1472 {
1473 return rvu_nix_aq_enq_inst(rvu, req, rsp);
1474 }
1475 #endif
1476 /* CN10K mbox handler */
1477 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1478 struct nix_cn10k_aq_enq_req *req,
1479 struct nix_cn10k_aq_enq_rsp *rsp)
1480 {
1481 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1482 (struct nix_aq_enq_rsp *)rsp);
1483 }
1484
1485 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1486 struct hwctx_disable_req *req,
1487 struct msg_rsp *rsp)
1488 {
1489 return nix_lf_hwctx_disable(rvu, req);
1490 }
1491
1492 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1493 struct nix_lf_alloc_req *req,
1494 struct nix_lf_alloc_rsp *rsp)
1495 {
1496 int nixlf, qints, hwctx_size, intf, err, rc = 0;
1497 struct rvu_hwinfo *hw = rvu->hw;
1498 u16 pcifunc = req->hdr.pcifunc;
1499 struct rvu_block *block;
1500 struct rvu_pfvf *pfvf;
1501 u64 cfg, ctx_cfg;
1502 int blkaddr;
1503
1504 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1505 return NIX_AF_ERR_PARAM;
1506
1507 if (req->way_mask)
1508 req->way_mask &= 0xFFFF;
1509
1510 pfvf = rvu_get_pfvf(rvu, pcifunc);
1511 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1512 if (!pfvf->nixlf || blkaddr < 0)
1513 return NIX_AF_ERR_AF_LF_INVALID;
1514
1515 block = &hw->block[blkaddr];
1516 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1517 if (nixlf < 0)
1518 return NIX_AF_ERR_AF_LF_INVALID;
1519
1520 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1521 if (req->npa_func) {
1522 /* If default, use 'this' NIXLF's PFFUNC */
1523 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1524 req->npa_func = pcifunc;
1525 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1526 return NIX_AF_INVAL_NPA_PF_FUNC;
1527 }
1528
1529 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1530 if (req->sso_func) {
1531 /* If default, use 'this' NIXLF's PFFUNC */
1532 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1533 req->sso_func = pcifunc;
1534 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1535 return NIX_AF_INVAL_SSO_PF_FUNC;
1536 }
1537
1538 /* If RSS is being enabled, check if the requested config is valid.
1539 * RSS table size should be a power of two, otherwise
1540 * RSS_GRP::OFFSET + adder might go beyond that group or
1541 * the entire table won't be usable.
1542 */
1543 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1544 !is_power_of_2(req->rss_sz)))
1545 return NIX_AF_ERR_RSS_SIZE_INVALID;
1546
1547 if (req->rss_sz &&
1548 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1549 return NIX_AF_ERR_RSS_GRPS_INVALID;
1550
1551 /* Reset this NIX LF */
1552 err = rvu_lf_reset(rvu, block, nixlf);
1553 if (err) {
1554 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1555 block->addr - BLKADDR_NIX0, nixlf);
1556 return NIX_AF_ERR_LF_RESET;
1557 }
1558
1559 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
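/* As used below, NIX_AF_CONST3 encodes log2 of the per-entry HW context
 * sizes: bits [3:0] SQ, [7:4] RQ, [11:8] CQ, [15:12] RSSE, [23:20] QINT
 * and [27:24] CINT.
 */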
1560
1561 /* Alloc NIX RQ HW context memory and config the base */
1562 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1563 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1564 if (err)
1565 goto free_mem;
1566
1567 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1568 if (!pfvf->rq_bmap)
1569 goto free_mem;
1570
1571 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1572 (u64)pfvf->rq_ctx->iova);
1573
1574 /* Set caching and queue count in HW */
1575 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1576 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1577
1578 /* Alloc NIX SQ HW context memory and config the base */
1579 hwctx_size = 1UL << (ctx_cfg & 0xF);
1580 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1581 if (err)
1582 goto free_mem;
1583
1584 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1585 if (!pfvf->sq_bmap)
1586 goto free_mem;
1587
1588 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1589 (u64)pfvf->sq_ctx->iova);
1590
1591 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1592 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1593
1594 /* Alloc NIX CQ HW context memory and config the base */
1595 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1596 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1597 if (err)
1598 goto free_mem;
1599
1600 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1601 if (!pfvf->cq_bmap)
1602 goto free_mem;
1603
1604 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1605 (u64)pfvf->cq_ctx->iova);
1606
1607 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1608 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1609
1610 /* Initialize receive side scaling (RSS) */
1611 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1612 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1613 req->rss_grps, hwctx_size, req->way_mask,
1614 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
1615 if (err)
1616 goto free_mem;
1617
1618 /* Alloc memory for CQINT's HW contexts */
1619 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1620 qints = (cfg >> 24) & 0xFFF;
1621 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1622 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1623 if (err)
1624 goto free_mem;
1625
1626 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1627 (u64)pfvf->cq_ints_ctx->iova);
1628
1629 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1630 BIT_ULL(36) | req->way_mask << 20);
1631
1632 /* Alloc memory for QINT's HW contexts */
1633 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1634 qints = (cfg >> 12) & 0xFFF;
1635 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1636 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1637 if (err)
1638 goto free_mem;
1639
1640 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1641 (u64)pfvf->nix_qints_ctx->iova);
1642 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1643 BIT_ULL(36) | req->way_mask << 20);
1644
1645 /* Setup VLANX TPIDs.
1646 * Use VLAN1 for 802.1Q
1647 * and VLAN0 for 802.1AD.
1648 */
1649 cfg = (0x8100ULL << 16) | 0x88A8ULL;
1650 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1651
1652 /* Enable LMTST for this NIX LF */
1653 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1654
1655 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1656 if (req->npa_func)
1657 cfg = req->npa_func;
1658 if (req->sso_func)
1659 cfg |= (u64)req->sso_func << 16;
1660
1661 cfg |= (u64)req->xqe_sz << 33;
1662 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1663
1664 /* Config Rx pkt length, csum checks and apad enable / disable */
1665 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1666
1667 /* Configure pkind for TX parse config */
1668 cfg = NPC_TX_DEF_PKIND;
1669 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1670
1671 if (is_rep_dev(rvu, pcifunc)) {
1672 pfvf->tx_chan_base = RVU_SWITCH_LBK_CHAN;
1673 pfvf->tx_chan_cnt = 1;
1674 goto exit;
1675 }
1676
1677 intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1678 if (is_sdp_pfvf(rvu, pcifunc))
1679 intf = NIX_INTF_TYPE_SDP;
1680
1681 err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
1682 !!(req->flags & NIX_LF_LBK_BLK_SEL));
1683 if (err)
1684 goto free_mem;
1685
1686 /* Disable NPC entries as NIXLF's contexts are not initialized yet */
1687 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1688
1689 /* Configure RX VTAG Type 7 (strip) for vf vlan */
1690 rvu_write64(rvu, blkaddr,
1691 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1692 VTAGSIZE_T4 | VTAG_STRIP);
1693
1694 goto exit;
1695
1696 free_mem:
1697 nix_ctx_free(rvu, pfvf);
1698 rc = -ENOMEM;
1699
1700 exit:
1701 /* Set macaddr of this PF/VF */
1702 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1703
1704 /* set SQB size info */
1705 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1706 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1707 rsp->rx_chan_base = pfvf->rx_chan_base;
1708 rsp->tx_chan_base = pfvf->tx_chan_base;
1709 rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1710 rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1711 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1712 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1713 /* Get HW supported stat count */
1714 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1715 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1716 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1717 /* Get count of CQ IRQs and error IRQs supported per LF */
1718 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1719 rsp->qints = ((cfg >> 12) & 0xFFF);
1720 rsp->cints = ((cfg >> 24) & 0xFFF);
1721 rsp->cgx_links = hw->cgx_links;
1722 rsp->lbk_links = hw->lbk_links;
1723 rsp->sdp_links = hw->sdp_links;
1724
1725 return rc;
1726 }
1727
1728 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1729 struct msg_rsp *rsp)
1730 {
1731 struct rvu_hwinfo *hw = rvu->hw;
1732 u16 pcifunc = req->hdr.pcifunc;
1733 struct rvu_block *block;
1734 int blkaddr, nixlf, err;
1735 struct rvu_pfvf *pfvf;
1736
1737 pfvf = rvu_get_pfvf(rvu, pcifunc);
1738 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1739 if (!pfvf->nixlf || blkaddr < 0)
1740 return NIX_AF_ERR_AF_LF_INVALID;
1741
1742 block = &hw->block[blkaddr];
1743 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1744 if (nixlf < 0)
1745 return NIX_AF_ERR_AF_LF_INVALID;
1746
1747 if (is_rep_dev(rvu, pcifunc))
1748 goto free_lf;
1749
1750 if (req->flags & NIX_LF_DISABLE_FLOWS)
1751 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1752 else
1753 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1754
1755 /* Free any tx vtag def entries used by this NIX LF */
1756 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1757 nix_free_tx_vtag_entries(rvu, pcifunc);
1758
1759 nix_interface_deinit(rvu, pcifunc, nixlf);
1760
1761 free_lf:
1762 /* Reset this NIX LF */
1763 err = rvu_lf_reset(rvu, block, nixlf);
1764 if (err) {
1765 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1766 block->addr - BLKADDR_NIX0, nixlf);
1767 return NIX_AF_ERR_LF_RESET;
1768 }
1769
1770 nix_ctx_free(rvu, pfvf);
1771
1772 return 0;
1773 }
1774
1775 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1776 struct nix_mark_format_cfg *req,
1777 struct nix_mark_format_cfg_rsp *rsp)
1778 {
1779 u16 pcifunc = req->hdr.pcifunc;
1780 struct nix_hw *nix_hw;
1781 struct rvu_pfvf *pfvf;
1782 int blkaddr, rc;
1783 u32 cfg;
1784
1785 pfvf = rvu_get_pfvf(rvu, pcifunc);
1786 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1787 if (!pfvf->nixlf || blkaddr < 0)
1788 return NIX_AF_ERR_AF_LF_INVALID;
1789
1790 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1791 if (!nix_hw)
1792 return NIX_AF_ERR_INVALID_NIXBLK;
1793
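	/* Pack the mark format control value: [18:16] byte offset,
	 * [15:12] Y mask, [11:8] Y value, [7:4] R mask, [3:0] R value.
	 * Purely as an illustration, offset=2, y_mask=0xF, y_val=0xA,
	 * r_mask=0xF, r_val=0x3 would give cfg = 0x2FAF3.
	 */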
1794 cfg = (((u32)req->offset & 0x7) << 16) |
1795 (((u32)req->y_mask & 0xF) << 12) |
1796 (((u32)req->y_val & 0xF) << 8) |
1797 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1798
1799 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1800 if (rc < 0) {
1801 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1802 rvu_get_pf(rvu->pdev, pcifunc),
1803 pcifunc & RVU_PFVF_FUNC_MASK);
1804 return NIX_AF_ERR_MARK_CFG_FAIL;
1805 }
1806
1807 rsp->mark_format_idx = rc;
1808 return 0;
1809 }
1810
1811 /* Handle shaper update specially for a few silicon revisions */
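/* On revisions where nix_shaper_toggle_wait is set, toggling the CIR/PIR
 * enable bit needs SW_XOFF protection: to disable, the queue is XOFF'd,
 * the rate register is cleared and XOFF is released after a short delay;
 * to enable, the queue is XOFF'd and MD_DEBUG0 is polled for VLD (bit 32)
 * set or C_CON (bit 48) clear before the new value is written.
 * Returns true when the register write was handled here.
 */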
1812 static bool
1813 handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
1814 int lvl, u64 reg, u64 regval)
1815 {
1816 u64 regbase, oldval, sw_xoff = 0;
1817 u64 dbgval, md_debug0 = 0;
1818 unsigned long poll_tmo;
1819 bool rate_reg = 0;
1820 u32 schq;
1821
1822 regbase = reg & 0xFFFF;
1823 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1824
1825 /* Check for rate register */
1826 switch (lvl) {
1827 case NIX_TXSCH_LVL_TL1:
1828 md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
1829 sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
1830
1831 rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
1832 break;
1833 case NIX_TXSCH_LVL_TL2:
1834 md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
1835 sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
1836
1837 rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
1838 regbase == NIX_AF_TL2X_PIR(0));
1839 break;
1840 case NIX_TXSCH_LVL_TL3:
1841 md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
1842 sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
1843
1844 rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
1845 regbase == NIX_AF_TL3X_PIR(0));
1846 break;
1847 case NIX_TXSCH_LVL_TL4:
1848 md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
1849 sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
1850
1851 rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
1852 regbase == NIX_AF_TL4X_PIR(0));
1853 break;
1854 case NIX_TXSCH_LVL_MDQ:
1855 sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
1856 rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
1857 regbase == NIX_AF_MDQX_PIR(0));
1858 break;
1859 }
1860
1861 if (!rate_reg)
1862 return false;
1863
1864 /* Nothing special to do when state is not toggled */
1865 oldval = rvu_read64(rvu, blkaddr, reg);
1866 if ((oldval & 0x1) == (regval & 0x1)) {
1867 rvu_write64(rvu, blkaddr, reg, regval);
1868 return true;
1869 }
1870
1871 /* PIR/CIR disable */
1872 if (!(regval & 0x1)) {
1873 rvu_write64(rvu, blkaddr, sw_xoff, 1);
1874 rvu_write64(rvu, blkaddr, reg, 0);
1875 udelay(4);
1876 rvu_write64(rvu, blkaddr, sw_xoff, 0);
1877 return true;
1878 }
1879
1880 /* PIR/CIR enable */
1881 rvu_write64(rvu, blkaddr, sw_xoff, 1);
1882 if (md_debug0) {
1883 poll_tmo = jiffies + usecs_to_jiffies(10000);
1884 /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
1885 do {
1886 if (time_after(jiffies, poll_tmo)) {
1887 dev_err(rvu->dev,
1888 "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
1889 nixlf, schq, lvl);
1890 goto exit;
1891 }
1892 usleep_range(1, 5);
1893 dbgval = rvu_read64(rvu, blkaddr, md_debug0);
1894 } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
1895 }
1896 rvu_write64(rvu, blkaddr, reg, regval);
1897 exit:
1898 rvu_write64(rvu, blkaddr, sw_xoff, 0);
1899 return true;
1900 }
1901
1902 static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
1903 int lvl, int schq)
1904 {
1905 u64 tlx_parent = 0, tlx_schedule = 0;
1906
1907 switch (lvl) {
1908 case NIX_TXSCH_LVL_TL2:
1909 tlx_parent = NIX_AF_TL2X_PARENT(schq);
1910 tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
1911 break;
1912 case NIX_TXSCH_LVL_TL3:
1913 tlx_parent = NIX_AF_TL3X_PARENT(schq);
1914 tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
1915 break;
1916 case NIX_TXSCH_LVL_TL4:
1917 tlx_parent = NIX_AF_TL4X_PARENT(schq);
1918 tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
1919 break;
1920 case NIX_TXSCH_LVL_MDQ:
1921 /* no need to reset SMQ_CFG as HW clears this CSR
1922 * on SMQ flush
1923 */
1924 tlx_parent = NIX_AF_MDQX_PARENT(schq);
1925 tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
1926 break;
1927 default:
1928 return;
1929 }
1930
1931 if (tlx_parent)
1932 rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
1933
1934 if (tlx_schedule)
1935 rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
1936 }
1937
1938 /* Disable shaping of pkts by a scheduler queue
1939 * at a given scheduler level.
1940 */
1941 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1942 int nixlf, int lvl, int schq)
1943 {
1944 struct rvu_hwinfo *hw = rvu->hw;
1945 u64 cir_reg = 0, pir_reg = 0;
1946 u64 cfg;
1947
1948 switch (lvl) {
1949 case NIX_TXSCH_LVL_TL1:
1950 cir_reg = NIX_AF_TL1X_CIR(schq);
1951 pir_reg = 0; /* PIR not available at TL1 */
1952 break;
1953 case NIX_TXSCH_LVL_TL2:
1954 cir_reg = NIX_AF_TL2X_CIR(schq);
1955 pir_reg = NIX_AF_TL2X_PIR(schq);
1956 break;
1957 case NIX_TXSCH_LVL_TL3:
1958 cir_reg = NIX_AF_TL3X_CIR(schq);
1959 pir_reg = NIX_AF_TL3X_PIR(schq);
1960 break;
1961 case NIX_TXSCH_LVL_TL4:
1962 cir_reg = NIX_AF_TL4X_CIR(schq);
1963 pir_reg = NIX_AF_TL4X_PIR(schq);
1964 break;
1965 case NIX_TXSCH_LVL_MDQ:
1966 cir_reg = NIX_AF_MDQX_CIR(schq);
1967 pir_reg = NIX_AF_MDQX_PIR(schq);
1968 break;
1969 }
1970
1971 /* Shaper state toggle needs wait/poll */
1972 if (hw->cap.nix_shaper_toggle_wait) {
1973 if (cir_reg)
1974 handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1975 lvl, cir_reg, 0);
1976 if (pir_reg)
1977 handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1978 lvl, pir_reg, 0);
1979 return;
1980 }
1981
1982 if (!cir_reg)
1983 return;
1984 cfg = rvu_read64(rvu, blkaddr, cir_reg);
1985 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1986
1987 if (!pir_reg)
1988 return;
1989 cfg = rvu_read64(rvu, blkaddr, pir_reg);
1990 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1991 }
1992
1993 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1994 int lvl, int schq)
1995 {
1996 struct rvu_hwinfo *hw = rvu->hw;
1997 int link_level;
1998 int link;
1999
2000 if (lvl >= hw->cap.nix_tx_aggr_lvl)
2001 return;
2002
2003 /* Reset TL4's SDP link config */
2004 if (lvl == NIX_TXSCH_LVL_TL4)
2005 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
2006
2007 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2008 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2009 if (lvl != link_level)
2010 return;
2011
2012 /* Reset TL2's CGX or LBK link config */
2013 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
2014 rvu_write64(rvu, blkaddr,
2015 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
2016 }
2017
2018 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
2019 int lvl, int schq)
2020 {
2021 struct rvu_hwinfo *hw = rvu->hw;
2022 u64 reg;
2023
2024 /* Skip this if shaping is not supported */
2025 if (!hw->cap.nix_shaping)
2026 return;
2027
2028 /* Clear level specific SW_XOFF */
2029 switch (lvl) {
2030 case NIX_TXSCH_LVL_TL1:
2031 reg = NIX_AF_TL1X_SW_XOFF(schq);
2032 break;
2033 case NIX_TXSCH_LVL_TL2:
2034 reg = NIX_AF_TL2X_SW_XOFF(schq);
2035 break;
2036 case NIX_TXSCH_LVL_TL3:
2037 reg = NIX_AF_TL3X_SW_XOFF(schq);
2038 break;
2039 case NIX_TXSCH_LVL_TL4:
2040 reg = NIX_AF_TL4X_SW_XOFF(schq);
2041 break;
2042 case NIX_TXSCH_LVL_MDQ:
2043 reg = NIX_AF_MDQX_SW_XOFF(schq);
2044 break;
2045 default:
2046 return;
2047 }
2048
2049 rvu_write64(rvu, blkaddr, reg, 0x0);
2050 }
2051
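/* Map a PF_FUNC to its NIX transmit link index: CGX-mapped PFs use
 * (cgx_id * lmacs_per_cgx + lmac_id), LBK VFs use the first link after
 * the CGX links, and anything else falls through to the SDP link that
 * follows the LBK links.
 */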
2052 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
2053 {
2054 struct rvu_hwinfo *hw = rvu->hw;
2055 int pf = rvu_get_pf(rvu->pdev, pcifunc);
2056 u8 cgx_id = 0, lmac_id = 0;
2057
2058 if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */
2059 return hw->cgx_links;
2060 } else if (is_pf_cgxmapped(rvu, pf)) {
2061 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2062 return (cgx_id * hw->lmac_per_cgx) + lmac_id;
2063 }
2064
2065 /* SDP link */
2066 return hw->cgx_links + hw->lbk_links;
2067 }
2068
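/* For the fixed txschq mapping case, compute the schq index range owned
 * by a transmit link: CGX links get nix_txsch_per_cgx_lmac queues each,
 * LBK links get nix_txsch_per_lbk_lmac, and the SDP range starts after
 * both. For illustration only: if nix_txsch_per_cgx_lmac were 16, CGX
 * link 2 would own schqs [32, 48).
 */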
2069 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
2070 int link, int *start, int *end)
2071 {
2072 struct rvu_hwinfo *hw = rvu->hw;
2073 int pf = rvu_get_pf(rvu->pdev, pcifunc);
2074
2075 /* LBK links */
2076 if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) {
2077 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
2078 *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
2079 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
2080 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
2081 *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
2082 } else { /* SDP link */
2083 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
2084 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
2085 *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
2086 }
2087 }
2088
2089 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
2090 struct nix_hw *nix_hw,
2091 struct nix_txsch_alloc_req *req)
2092 {
2093 struct rvu_hwinfo *hw = rvu->hw;
2094 int schq, req_schq, free_cnt;
2095 struct nix_txsch *txsch;
2096 int link, start, end;
2097
2098 txsch = &nix_hw->txsch[lvl];
2099 req_schq = req->schq_contig[lvl] + req->schq[lvl];
2100
2101 if (!req_schq)
2102 return 0;
2103
2104 link = nix_get_tx_link(rvu, pcifunc);
2105
2106 /* For traffic aggregating scheduler level, one queue is enough */
2107 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2108 if (req_schq != 1)
2109 return NIX_AF_ERR_TLX_ALLOC_FAIL;
2110 return 0;
2111 }
2112
2113 /* Get free SCHQ count and check if request can be accommodated */
2114 if (hw->cap.nix_fixed_txschq_mapping) {
2115 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
2116 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
2117 if (end <= txsch->schq.max && schq < end &&
2118 !test_bit(schq, txsch->schq.bmap))
2119 free_cnt = 1;
2120 else
2121 free_cnt = 0;
2122 } else {
2123 free_cnt = rvu_rsrc_free_count(&txsch->schq);
2124 }
2125
2126 if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
2127 req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
2128 return NIX_AF_ERR_TLX_ALLOC_FAIL;
2129
2130 /* If contiguous queues are needed, check for availability */
2131 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
2132 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
2133 return NIX_AF_ERR_TLX_ALLOC_FAIL;
2134
2135 return 0;
2136 }
2137
2138 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
2139 struct nix_txsch_alloc_rsp *rsp,
2140 int lvl, int start, int end)
2141 {
2142 struct rvu_hwinfo *hw = rvu->hw;
2143 u16 pcifunc = rsp->hdr.pcifunc;
2144 int idx, schq;
2145
2146 /* For traffic aggregating levels, queue alloc is based
2147 * on the transmit link to which the PF_FUNC is mapped.
2148 */
2149 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2150 /* A single TL queue is allocated */
2151 if (rsp->schq_contig[lvl]) {
2152 rsp->schq_contig[lvl] = 1;
2153 rsp->schq_contig_list[lvl][0] = start;
2154 }
2155
2156 /* Requesting both contig and non-contig queues doesn't make sense here */
2157 if (rsp->schq_contig[lvl])
2158 rsp->schq[lvl] = 0;
2159
2160 if (rsp->schq[lvl]) {
2161 rsp->schq[lvl] = 1;
2162 rsp->schq_list[lvl][0] = start;
2163 }
2164 return;
2165 }
2166
2167 /* Adjust the queue request count if HW supports
2168 * only one queue per PF_FUNC at each level (fixed mapping).
2169 */
2170 if (hw->cap.nix_fixed_txschq_mapping) {
2171 idx = pcifunc & RVU_PFVF_FUNC_MASK;
2172 schq = start + idx;
2173 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
2174 rsp->schq_contig[lvl] = 0;
2175 rsp->schq[lvl] = 0;
2176 return;
2177 }
2178
2179 if (rsp->schq_contig[lvl]) {
2180 rsp->schq_contig[lvl] = 1;
2181 set_bit(schq, txsch->schq.bmap);
2182 rsp->schq_contig_list[lvl][0] = schq;
2183 rsp->schq[lvl] = 0;
2184 } else if (rsp->schq[lvl]) {
2185 rsp->schq[lvl] = 1;
2186 set_bit(schq, txsch->schq.bmap);
2187 rsp->schq_list[lvl][0] = schq;
2188 }
2189 return;
2190 }
2191
2192 /* Allocate requested contiguous queue indices first */
2193 if (rsp->schq_contig[lvl]) {
2194 schq = bitmap_find_next_zero_area(txsch->schq.bmap,
2195 txsch->schq.max, start,
2196 rsp->schq_contig[lvl], 0);
2197 if (schq >= end)
2198 rsp->schq_contig[lvl] = 0;
2199 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
2200 set_bit(schq, txsch->schq.bmap);
2201 rsp->schq_contig_list[lvl][idx] = schq;
2202 schq++;
2203 }
2204 }
2205
2206 /* Allocate non-contiguous queue indices */
2207 if (rsp->schq[lvl]) {
2208 idx = 0;
2209 for (schq = start; schq < end; schq++) {
2210 if (!test_bit(schq, txsch->schq.bmap)) {
2211 set_bit(schq, txsch->schq.bmap);
2212 rsp->schq_list[lvl][idx++] = schq;
2213 }
2214 if (idx == rsp->schq[lvl])
2215 break;
2216 }
2217 /* Update how many were allocated */
2218 rsp->schq[lvl] = idx;
2219 }
2220 }
2221
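/* Mbox handler for NIX_TXSCH_ALLOC: validates the per-level queue counts
 * against HW capabilities, allocates the requested scheduler queues under
 * rsrc_lock, and resets the link config, shaping and parent/schedule state
 * of every queue handed out before returning the lists in the response.
 */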
2222 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
2223 struct nix_txsch_alloc_req *req,
2224 struct nix_txsch_alloc_rsp *rsp)
2225 {
2226 struct rvu_hwinfo *hw = rvu->hw;
2227 u16 pcifunc = req->hdr.pcifunc;
2228 int link, blkaddr, rc = 0;
2229 int lvl, idx, start, end;
2230 struct nix_txsch *txsch;
2231 struct nix_hw *nix_hw;
2232 u32 *pfvf_map;
2233 int nixlf;
2234 u16 schq;
2235
2236 rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2237 if (rc)
2238 return rc;
2239
2240 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2241 if (!nix_hw)
2242 return NIX_AF_ERR_INVALID_NIXBLK;
2243
2244 mutex_lock(&rvu->rsrc_lock);
2245
2246 /* Check if request is valid as per HW capabilities
2247 * and can be accommodated.
2248 */
2249 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2250 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
2251 if (rc)
2252 goto err;
2253 }
2254
2255 /* Allocate requested Tx scheduler queues */
2256 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2257 txsch = &nix_hw->txsch[lvl];
2258 pfvf_map = txsch->pfvf_map;
2259
2260 if (!req->schq[lvl] && !req->schq_contig[lvl])
2261 continue;
2262
2263 rsp->schq[lvl] = req->schq[lvl];
2264 rsp->schq_contig[lvl] = req->schq_contig[lvl];
2265
2266 link = nix_get_tx_link(rvu, pcifunc);
2267
2268 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2269 start = link;
2270 end = link;
2271 } else if (hw->cap.nix_fixed_txschq_mapping) {
2272 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
2273 } else {
2274 start = 0;
2275 end = txsch->schq.max;
2276 }
2277
2278 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
2279
2280 /* Reset queue config */
2281 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
2282 schq = rsp->schq_contig_list[lvl][idx];
2283 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2284 NIX_TXSCHQ_CFG_DONE))
2285 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2286 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2287 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2288 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2289 }
2290
2291 for (idx = 0; idx < req->schq[lvl]; idx++) {
2292 schq = rsp->schq_list[lvl][idx];
2293 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2294 NIX_TXSCHQ_CFG_DONE))
2295 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2296 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2297 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2298 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2299 }
2300 }
2301
2302 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
2303 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
2304 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
2305 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2306 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2307 goto exit;
2308 err:
2309 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
2310 exit:
2311 mutex_unlock(&rvu->rsrc_lock);
2312 return rc;
2313 }
2314
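/* Walk the scheduler tree from the given SMQ up to TL1 by following the
 * *_PARENT registers, recording each level's schq and its CIR/PIR values
 * so they can be restored after the flush.
 */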
2315 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
2316 struct nix_smq_flush_ctx *smq_flush_ctx)
2317 {
2318 struct nix_smq_tree_ctx *smq_tree_ctx;
2319 u64 parent_off, regval;
2320 u16 schq;
2321 int lvl;
2322
2323 smq_flush_ctx->smq = smq;
2324
2325 schq = smq;
2326 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2327 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2328 smq_tree_ctx->schq = schq;
2329 if (lvl == NIX_TXSCH_LVL_TL1) {
2330 smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
2331 smq_tree_ctx->pir_off = 0;
2332 smq_tree_ctx->pir_val = 0;
2333 parent_off = 0;
2334 } else if (lvl == NIX_TXSCH_LVL_TL2) {
2335 smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
2336 smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
2337 parent_off = NIX_AF_TL2X_PARENT(schq);
2338 } else if (lvl == NIX_TXSCH_LVL_TL3) {
2339 smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
2340 smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
2341 parent_off = NIX_AF_TL3X_PARENT(schq);
2342 } else if (lvl == NIX_TXSCH_LVL_TL4) {
2343 smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
2344 smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
2345 parent_off = NIX_AF_TL4X_PARENT(schq);
2346 } else if (lvl == NIX_TXSCH_LVL_MDQ) {
2347 smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
2348 smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
2349 parent_off = NIX_AF_MDQX_PARENT(schq);
2350 }
2351 /* save cir/pir register values */
2352 smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
2353 if (smq_tree_ctx->pir_off)
2354 smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
2355
2356 /* get parent txsch node */
2357 if (parent_off) {
2358 regval = rvu_read64(rvu, blkaddr, parent_off);
2359 schq = (regval >> 16) & 0x1FF;
2360 }
2361 }
2362 }
2363
2364 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
2365 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2366 {
2367 struct nix_txsch *txsch;
2368 struct nix_hw *nix_hw;
2369 int tl2, tl2_schq;
2370 u64 regoff;
2371
2372 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2373 if (!nix_hw)
2374 return;
2375
2376 /* loop through all TL2s with matching PF_FUNC */
2377 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
2378 tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
2379 for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
2380 /* skip the smq(flush) TL2 */
2381 if (tl2 == tl2_schq)
2382 continue;
2383 /* skip unused TL2s */
2384 if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
2385 continue;
2386 /* skip if PF_FUNC doesn't match */
2387 if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
2388 (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
2389 ~RVU_PFVF_FUNC_MASK)))
2390 continue;
2391 /* enable/disable XOFF */
2392 regoff = NIX_AF_TL2X_SW_XOFF(tl2);
2393 if (enable)
2394 rvu_write64(rvu, blkaddr, regoff, 0x1);
2395 else
2396 rvu_write64(rvu, blkaddr, regoff, 0x0);
2397 }
2398 }
2399
2400 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
2401 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2402 {
2403 u64 cir_off, pir_off, cir_val, pir_val;
2404 struct nix_smq_tree_ctx *smq_tree_ctx;
2405 int lvl;
2406
2407 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2408 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2409 cir_off = smq_tree_ctx->cir_off;
2410 cir_val = smq_tree_ctx->cir_val;
2411 pir_off = smq_tree_ctx->pir_off;
2412 pir_val = smq_tree_ctx->pir_val;
2413
2414 if (enable) {
2415 rvu_write64(rvu, blkaddr, cir_off, cir_val);
2416 if (lvl != NIX_TXSCH_LVL_TL1)
2417 rvu_write64(rvu, blkaddr, pir_off, pir_val);
2418 } else {
2419 rvu_write64(rvu, blkaddr, cir_off, 0x0);
2420 if (lvl != NIX_TXSCH_LVL_TL1)
2421 rvu_write64(rvu, blkaddr, pir_off, 0x0);
2422 }
2423 }
2424 }
2425
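/* Flush an SMQ. The sequence implemented below: skip if the MDQ packet
 * count is zero (on silicon newer than OcteonTx2), temporarily enable
 * CGX TX if it was disabled, XOFF sibling TL2s and zero the saved CIR/PIR
 * values along the SMQ's tree, disable RX backpressure from the link,
 * clear the TL3/TL2 link enables, trigger the flush (NIX_AF_SMQX_CFG
 * bit 49) and poll for it to complete, then restore everything touched.
 */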
2426 static int nix_smq_flush(struct rvu *rvu, int blkaddr,
2427 int smq, u16 pcifunc, int nixlf)
2428 {
2429 struct nix_smq_flush_ctx *smq_flush_ctx;
2430 int err, restore_tx_en = 0, i;
2431 int pf = rvu_get_pf(rvu->pdev, pcifunc);
2432 u8 cgx_id = 0, lmac_id = 0;
2433 u16 tl2_tl3_link_schq;
2434 u8 link, link_level;
2435 u64 cfg, bmap = 0;
2436
2437 if (!is_rvu_otx2(rvu)) {
2438 /* Skip SMQ flush if pkt count is zero */
2439 cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
2440 if (!cfg)
2441 return 0;
2442 }
2443
2444 /* enable cgx tx if disabled */
2445 if (is_pf_cgxmapped(rvu, pf)) {
2446 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2447 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
2448 lmac_id, true);
2449 }
2450
2451 /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
2452 smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
2453 if (!smq_flush_ctx)
2454 return -ENOMEM;
2455 nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
2456 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
2457 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
2458
2459 /* Disable backpressure from physical link,
2460 * otherwise SMQ flush may stall.
2461 */
2462 rvu_cgx_enadis_rx_bp(rvu, pf, false);
2463
2464 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2465 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2466 tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
2467 link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
2468
2469 /* SMQ set enqueue xoff */
2470 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2471 cfg |= BIT_ULL(50);
2472 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2473
2474 /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
2475 for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
2476 cfg = rvu_read64(rvu, blkaddr,
2477 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
2478 if (!(cfg & BIT_ULL(12)))
2479 continue;
2480 bmap |= BIT_ULL(i);
2481 cfg &= ~BIT_ULL(12);
2482 rvu_write64(rvu, blkaddr,
2483 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
2484 }
2485
2486 /* Do SMQ flush and set enqueue xoff */
2487 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2488 cfg |= BIT_ULL(50) | BIT_ULL(49);
2489 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2490
2491 /* Wait for flush to complete */
2492 err = rvu_poll_reg(rvu, blkaddr,
2493 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
2494 if (err)
2495 dev_info(rvu->dev,
2496 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
2497 nixlf, smq);
2498
2499 /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
2500 for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
2501 if (!(bmap & BIT_ULL(i)))
2502 continue;
2503 cfg = rvu_read64(rvu, blkaddr,
2504 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
2505 cfg |= BIT_ULL(12);
2506 rvu_write64(rvu, blkaddr,
2507 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
2508 }
2509
2510 /* clear XOFF on TL2s */
2511 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
2512 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
2513 kfree(smq_flush_ctx);
2514
2515 rvu_cgx_enadis_rx_bp(rvu, pf, true);
2516 /* restore cgx tx state */
2517 if (restore_tx_en)
2518 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
2519 return err;
2520 }
2521
2522 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
2523 {
2524 int blkaddr, nixlf, lvl, schq, err;
2525 struct rvu_hwinfo *hw = rvu->hw;
2526 struct nix_txsch *txsch;
2527 struct nix_hw *nix_hw;
2528 u16 map_func;
2529
2530 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2531 if (blkaddr < 0)
2532 return NIX_AF_ERR_AF_LF_INVALID;
2533
2534 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2535 if (!nix_hw)
2536 return NIX_AF_ERR_INVALID_NIXBLK;
2537
2538 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2539 if (nixlf < 0)
2540 return NIX_AF_ERR_AF_LF_INVALID;
2541
2542 /* Disable TL2/3 queue links and all XOFF's before SMQ flush */
2543 mutex_lock(&rvu->rsrc_lock);
2544 for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2545 txsch = &nix_hw->txsch[lvl];
2546
2547 if (lvl >= hw->cap.nix_tx_aggr_lvl)
2548 continue;
2549
2550 for (schq = 0; schq < txsch->schq.max; schq++) {
2551 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2552 continue;
2553 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2554 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2555 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2556 }
2557 }
2558 nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
2559 nix_get_tx_link(rvu, pcifunc));
2560
2561 /* On PF cleanup, clear cfg done flag as
2562 * PF would have changed default config.
2563 */
2564 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
2565 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
2566 schq = nix_get_tx_link(rvu, pcifunc);
2567 /* Do not clear pcifunc in txsch->pfvf_map[schq] because
2568 * VF might be using this TL1 queue
2569 */
2570 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
2571 txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
2572 }
2573
2574 /* Flush SMQs */
2575 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2576 for (schq = 0; schq < txsch->schq.max; schq++) {
2577 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2578 continue;
2579 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2580 }
2581
2582 /* Now free scheduler queues to free pool */
2583 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2584 /* TLs above the aggregation level are shared across a PF
2585 * and its VFs, hence skip freeing them.
2586 */
2587 if (lvl >= hw->cap.nix_tx_aggr_lvl)
2588 continue;
2589
2590 txsch = &nix_hw->txsch[lvl];
2591 for (schq = 0; schq < txsch->schq.max; schq++) {
2592 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2593 continue;
2594 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2595 rvu_free_rsrc(&txsch->schq, schq);
2596 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2597 }
2598 }
2599 mutex_unlock(&rvu->rsrc_lock);
2600
2601 err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC);
2602 if (err)
2603 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
2604
2605 return 0;
2606 }
2607
2608 static int nix_txschq_free_one(struct rvu *rvu,
2609 struct nix_txsch_free_req *req)
2610 {
2611 struct rvu_hwinfo *hw = rvu->hw;
2612 u16 pcifunc = req->hdr.pcifunc;
2613 int lvl, schq, nixlf, blkaddr;
2614 struct nix_txsch *txsch;
2615 struct nix_hw *nix_hw;
2616 u32 *pfvf_map;
2617 int rc;
2618
2619 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2620 if (blkaddr < 0)
2621 return NIX_AF_ERR_AF_LF_INVALID;
2622
2623 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2624 if (!nix_hw)
2625 return NIX_AF_ERR_INVALID_NIXBLK;
2626
2627 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2628 if (nixlf < 0)
2629 return NIX_AF_ERR_AF_LF_INVALID;
2630
2631 lvl = req->schq_lvl;
2632 schq = req->schq;
2633 txsch = &nix_hw->txsch[lvl];
2634
2635 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
2636 return 0;
2637
2638 pfvf_map = txsch->pfvf_map;
2639 mutex_lock(&rvu->rsrc_lock);
2640
2641 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
2642 rc = NIX_AF_ERR_TLX_INVALID;
2643 goto err;
2644 }
2645
2646 /* Clear SW_XOFF of this resource only.
2647 * For SMQ level, all path XOFF's
2648 * need to be cleared by the user
2649 */
2650 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2651
2652 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2653 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2654
2655 /* Flush if it is an SMQ. The onus of disabling
2656 * TL2/3 queue links before the SMQ flush is on the user
2657 */
2658 if (lvl == NIX_TXSCH_LVL_SMQ &&
2659 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
2660 rc = NIX_AF_SMQ_FLUSH_FAILED;
2661 goto err;
2662 }
2663
2664 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2665
2666 /* Free the resource */
2667 rvu_free_rsrc(&txsch->schq, schq);
2668 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2669 mutex_unlock(&rvu->rsrc_lock);
2670 return 0;
2671 err:
2672 mutex_unlock(&rvu->rsrc_lock);
2673 return rc;
2674 }
2675
2676 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
2677 struct nix_txsch_free_req *req,
2678 struct msg_rsp *rsp)
2679 {
2680 if (req->flags & TXSCHQ_FREE_ALL)
2681 return nix_txschq_free(rvu, req->hdr.pcifunc);
2682 else
2683 return nix_txschq_free_one(rvu, req);
2684 }
2685
2686 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
2687 int lvl, u64 reg, u64 regval)
2688 {
2689 u64 regbase = reg & 0xFFFF;
2690 u16 schq, parent;
2691
2692 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
2693 return false;
2694
2695 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2696 /* Check if this schq belongs to this PF/VF or not */
2697 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
2698 return false;
2699
2700 parent = (regval >> 16) & 0x1FF;
2701 /* Validate MDQ's TL4 parent */
2702 if (regbase == NIX_AF_MDQX_PARENT(0) &&
2703 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
2704 return false;
2705
2706 /* Validate TL4's TL3 parent */
2707 if (regbase == NIX_AF_TL4X_PARENT(0) &&
2708 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
2709 return false;
2710
2711 /* Validate TL3's TL2 parent */
2712 if (regbase == NIX_AF_TL3X_PARENT(0) &&
2713 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
2714 return false;
2715
2716 /* Validate TL2's TL1 parent */
2717 if (regbase == NIX_AF_TL2X_PARENT(0) &&
2718 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
2719 return false;
2720
2721 return true;
2722 }
2723
2724 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
2725 {
2726 u64 regbase;
2727
2728 if (hw->cap.nix_shaping)
2729 return true;
2730
2731 /* If shaping and coloring are not supported, then
2732 * *_CIR and *_PIR registers should not be configured.
2733 */
2734 regbase = reg & 0xFFFF;
2735
2736 switch (lvl) {
2737 case NIX_TXSCH_LVL_TL1:
2738 if (regbase == NIX_AF_TL1X_CIR(0))
2739 return false;
2740 break;
2741 case NIX_TXSCH_LVL_TL2:
2742 if (regbase == NIX_AF_TL2X_CIR(0) ||
2743 regbase == NIX_AF_TL2X_PIR(0))
2744 return false;
2745 break;
2746 case NIX_TXSCH_LVL_TL3:
2747 if (regbase == NIX_AF_TL3X_CIR(0) ||
2748 regbase == NIX_AF_TL3X_PIR(0))
2749 return false;
2750 break;
2751 case NIX_TXSCH_LVL_TL4:
2752 if (regbase == NIX_AF_TL4X_CIR(0) ||
2753 regbase == NIX_AF_TL4X_PIR(0))
2754 return false;
2755 break;
2756 case NIX_TXSCH_LVL_MDQ:
2757 if (regbase == NIX_AF_MDQX_CIR(0) ||
2758 regbase == NIX_AF_MDQX_PIR(0))
2759 return false;
2760 break;
2761 }
2762 return true;
2763 }
2764
2765 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
2766 u16 pcifunc, int blkaddr)
2767 {
2768 u32 *pfvf_map;
2769 int schq;
2770
2771 schq = nix_get_tx_link(rvu, pcifunc);
2772 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
2773 /* Skip if PF has already done the config */
2774 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
2775 return;
2776 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
2777 (TXSCH_TL1_DFLT_RR_PRIO << 1));
2778
2779 /* On OcteonTx2 the config was in bytes; on newer silicons
2780 * it's changed to weight.
2781 */
2782 if (!rvu->hw->cap.nix_common_dwrr_mtu)
2783 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2784 TXSCH_TL1_DFLT_RR_QTM);
2785 else
2786 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2787 CN10K_MAX_DWRR_WEIGHT);
2788
2789 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
2790 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
2791 }
2792
2793 /* Register offset - [15:0]
2794 * Scheduler Queue number - [25:16]
2795 */
2796 #define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0)
2797
2798 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
2799 int blkaddr, struct nix_txschq_config *req,
2800 struct nix_txschq_config *rsp)
2801 {
2802 u16 pcifunc = req->hdr.pcifunc;
2803 int idx, schq;
2804 u64 reg;
2805
2806 for (idx = 0; idx < req->num_regs; idx++) {
2807 reg = req->reg[idx];
2808 reg &= NIX_TX_SCHQ_MASK;
2809 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2810 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
2811 !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
2812 return NIX_AF_INVAL_TXSCHQ_CFG;
2813 rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
2814 }
2815 rsp->lvl = req->lvl;
2816 rsp->num_regs = req->num_regs;
2817 return 0;
2818 }
2819
2820 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
2821 struct nix_txsch *txsch, bool enable)
2822 {
2823 struct rvu_hwinfo *hw = rvu->hw;
2824 int lbk_link_start, lbk_links;
2825 u8 pf = rvu_get_pf(rvu->pdev, pcifunc);
2826 int schq;
2827 u64 cfg;
2828
2829 if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc))
2830 return;
2831
2832 cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
2833 lbk_link_start = hw->cgx_links;
2834
2835 for (schq = 0; schq < txsch->schq.max; schq++) {
2836 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2837 continue;
2838 /* Enable all LBK links with channel 63 by default so that
2839 * packets can be sent to LBK with an NPC TX MCAM rule
2840 */
2841 lbk_links = hw->lbk_links;
2842 while (lbk_links--)
2843 rvu_write64(rvu, blkaddr,
2844 NIX_AF_TL3_TL2X_LINKX_CFG(schq,
2845 lbk_link_start +
2846 lbk_links), cfg);
2847 }
2848 }
2849
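/* Mbox handler for NIX_TXSCHQ_CFG register accesses from a PF/VF. Reads
 * are serviced via nix_txschq_cfg_read(); for writes each register is
 * checked against the queue hierarchy and shaping capability, masked with
 * regval_mask and then written, with special handling for shaper state
 * toggles, the NIXLF slot in SMQ_CFG, disallowed BP_ENA bits and SMQ
 * flush requests.
 */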
2850 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
2851 struct nix_txschq_config *req,
2852 struct nix_txschq_config *rsp)
2853 {
2854 u64 reg, val, regval, schq_regbase, val_mask;
2855 struct rvu_hwinfo *hw = rvu->hw;
2856 u16 pcifunc = req->hdr.pcifunc;
2857 struct nix_txsch *txsch;
2858 struct nix_hw *nix_hw;
2859 int blkaddr, idx, err;
2860 int nixlf, schq;
2861 u32 *pfvf_map;
2862
2863 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
2864 req->num_regs > MAX_REGS_PER_MBOX_MSG)
2865 return NIX_AF_INVAL_TXSCHQ_CFG;
2866
2867 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2868 if (err)
2869 return err;
2870
2871 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2872 if (!nix_hw)
2873 return NIX_AF_ERR_INVALID_NIXBLK;
2874
2875 if (req->read)
2876 return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
2877
2878 txsch = &nix_hw->txsch[req->lvl];
2879 pfvf_map = txsch->pfvf_map;
2880
2881 if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2882 pcifunc & RVU_PFVF_FUNC_MASK) {
2883 mutex_lock(&rvu->rsrc_lock);
2884 if (req->lvl == NIX_TXSCH_LVL_TL1)
2885 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2886 mutex_unlock(&rvu->rsrc_lock);
2887 return 0;
2888 }
2889
2890 for (idx = 0; idx < req->num_regs; idx++) {
2891 reg = req->reg[idx];
2892 reg &= NIX_TX_SCHQ_MASK;
2893 regval = req->regval[idx];
2894 schq_regbase = reg & 0xFFFF;
2895 val_mask = req->regval_mask[idx];
2896
2897 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2898 txsch->lvl, reg, regval))
2899 return NIX_AF_INVAL_TXSCHQ_CFG;
2900
2901 /* Check if shaping and coloring is supported */
2902 if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2903 continue;
2904
2905 val = rvu_read64(rvu, blkaddr, reg);
2906 regval = (val & val_mask) | (regval & ~val_mask);
2907
2908 /* Handle shaping state toggle specially */
2909 if (hw->cap.nix_shaper_toggle_wait &&
2910 handle_txschq_shaper_update(rvu, blkaddr, nixlf,
2911 req->lvl, reg, regval))
2912 continue;
2913
2914 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2915 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2916 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2917 pcifunc, 0);
2918 regval &= ~(0x7FULL << 24);
2919 regval |= ((u64)nixlf << 24);
2920 }
2921
2922 /* Clear 'BP_ENA' config, if it's not allowed */
2923 if (!hw->cap.nix_tx_link_bp) {
2924 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2925 (schq_regbase & 0xFF00) ==
2926 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2927 regval &= ~BIT_ULL(13);
2928 }
2929
2930 /* Mark config as done for TL1 by PF */
2931 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2932 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2933 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2934 mutex_lock(&rvu->rsrc_lock);
2935 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2936 NIX_TXSCHQ_CFG_DONE);
2937 mutex_unlock(&rvu->rsrc_lock);
2938 }
2939
2940 /* SMQ flush is special hence split register writes such
2941 * that flush first and write rest of the bits later.
2942 */
2943 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2944 (regval & BIT_ULL(49))) {
2945 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2946 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2947 regval &= ~BIT_ULL(49);
2948 }
2949 rvu_write64(rvu, blkaddr, reg, regval);
2950 }
2951
2952 return 0;
2953 }
2954
2955 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2956 struct nix_vtag_config *req)
2957 {
2958 u64 regval = req->vtag_size;
2959
2960 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2961 req->vtag_size > VTAGSIZE_T8)
2962 return -EINVAL;
2963
2964 /* RX VTAG Type 7 reserved for vf vlan */
2965 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2966 return NIX_AF_ERR_RX_VTAG_INUSE;
2967
2968 if (req->rx.capture_vtag)
2969 regval |= BIT_ULL(5);
2970 if (req->rx.strip_vtag)
2971 regval |= BIT_ULL(4);
2972
2973 rvu_write64(rvu, blkaddr,
2974 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2975 return 0;
2976 }
2977
2978 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2979 u16 pcifunc, int index)
2980 {
2981 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2982 struct nix_txvlan *vlan;
2983
2984 if (!nix_hw)
2985 return NIX_AF_ERR_INVALID_NIXBLK;
2986
2987 vlan = &nix_hw->txvlan;
2988 if (vlan->entry2pfvf_map[index] != pcifunc)
2989 return NIX_AF_ERR_PARAM;
2990
2991 rvu_write64(rvu, blkaddr,
2992 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2993 rvu_write64(rvu, blkaddr,
2994 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2995
2996 vlan->entry2pfvf_map[index] = 0;
2997 rvu_free_rsrc(&vlan->rsrc, index);
2998
2999 return 0;
3000 }
3001
3002 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
3003 {
3004 struct nix_txvlan *vlan;
3005 struct nix_hw *nix_hw;
3006 int index, blkaddr;
3007
3008 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3009 if (blkaddr < 0)
3010 return;
3011
3012 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3013 if (!nix_hw)
3014 return;
3015
3016 vlan = &nix_hw->txvlan;
3017
3018 mutex_lock(&vlan->rsrc_lock);
3019 /* Scan all the entries and free the ones mapped to 'pcifunc' */
3020 for (index = 0; index < vlan->rsrc.max; index++) {
3021 if (vlan->entry2pfvf_map[index] == pcifunc)
3022 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
3023 }
3024 mutex_unlock(&vlan->rsrc_lock);
3025 }
3026
3027 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
3028 u64 vtag, u8 size)
3029 {
3030 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
3031 struct nix_txvlan *vlan;
3032 u64 regval;
3033 int index;
3034
3035 if (!nix_hw)
3036 return NIX_AF_ERR_INVALID_NIXBLK;
3037
3038 vlan = &nix_hw->txvlan;
3039
3040 mutex_lock(&vlan->rsrc_lock);
3041
3042 index = rvu_alloc_rsrc(&vlan->rsrc);
3043 if (index < 0) {
3044 mutex_unlock(&vlan->rsrc_lock);
3045 return index;
3046 }
3047
3048 mutex_unlock(&vlan->rsrc_lock);
3049
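	/* Program the allocated entry: a 4-byte vtag (size 0) is placed in
	 * the upper 32 bits of the DATA register, an 8-byte vtag uses the
	 * whole register; CTL holds the vtag size.
	 */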
3050 regval = size ? vtag : vtag << 32;
3051
3052 rvu_write64(rvu, blkaddr,
3053 NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
3054 rvu_write64(rvu, blkaddr,
3055 NIX_AF_TX_VTAG_DEFX_CTL(index), size);
3056
3057 return index;
3058 }
3059
3060 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
3061 struct nix_vtag_config *req)
3062 {
3063 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
3064 u16 pcifunc = req->hdr.pcifunc;
3065 int idx0 = req->tx.vtag0_idx;
3066 int idx1 = req->tx.vtag1_idx;
3067 struct nix_txvlan *vlan;
3068 int err = 0;
3069
3070 if (!nix_hw)
3071 return NIX_AF_ERR_INVALID_NIXBLK;
3072
3073 vlan = &nix_hw->txvlan;
3074 if (req->tx.free_vtag0 && req->tx.free_vtag1)
3075 if (vlan->entry2pfvf_map[idx0] != pcifunc ||
3076 vlan->entry2pfvf_map[idx1] != pcifunc)
3077 return NIX_AF_ERR_PARAM;
3078
3079 mutex_lock(&vlan->rsrc_lock);
3080
3081 if (req->tx.free_vtag0) {
3082 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
3083 if (err)
3084 goto exit;
3085 }
3086
3087 if (req->tx.free_vtag1)
3088 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
3089
3090 exit:
3091 mutex_unlock(&vlan->rsrc_lock);
3092 return err;
3093 }
3094
3095 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
3096 struct nix_vtag_config *req,
3097 struct nix_vtag_config_rsp *rsp)
3098 {
3099 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
3100 struct nix_txvlan *vlan;
3101 u16 pcifunc = req->hdr.pcifunc;
3102
3103 if (!nix_hw)
3104 return NIX_AF_ERR_INVALID_NIXBLK;
3105
3106 vlan = &nix_hw->txvlan;
3107 if (req->tx.cfg_vtag0) {
3108 rsp->vtag0_idx =
3109 nix_tx_vtag_alloc(rvu, blkaddr,
3110 req->tx.vtag0, req->vtag_size);
3111
3112 if (rsp->vtag0_idx < 0)
3113 return NIX_AF_ERR_TX_VTAG_NOSPC;
3114
3115 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
3116 }
3117
3118 if (req->tx.cfg_vtag1) {
3119 rsp->vtag1_idx =
3120 nix_tx_vtag_alloc(rvu, blkaddr,
3121 req->tx.vtag1, req->vtag_size);
3122
3123 if (rsp->vtag1_idx < 0)
3124 goto err_free;
3125
3126 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
3127 }
3128
3129 return 0;
3130
3131 err_free:
3132 if (req->tx.cfg_vtag0)
3133 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
3134
3135 return NIX_AF_ERR_TX_VTAG_NOSPC;
3136 }
3137
3138 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
3139 struct nix_vtag_config *req,
3140 struct nix_vtag_config_rsp *rsp)
3141 {
3142 u16 pcifunc = req->hdr.pcifunc;
3143 int blkaddr, nixlf, err;
3144
3145 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3146 if (err)
3147 return err;
3148
3149 if (req->cfg_type) {
3150 /* rx vtag configuration */
3151 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
3152 if (err)
3153 return NIX_AF_ERR_PARAM;
3154 } else {
3155 /* tx vtag configuration */
3156 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
3157 (req->tx.free_vtag0 || req->tx.free_vtag1))
3158 return NIX_AF_ERR_PARAM;
3159
3160 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
3161 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
3162
3163 if (req->tx.free_vtag0 || req->tx.free_vtag1)
3164 return nix_tx_vtag_decfg(rvu, blkaddr, req);
3165 }
3166
3167 return 0;
3168 }
3169
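/* Write one multicast/mirror (MCE) entry through the NIX admin queue:
 * the entry carries the destination PF_FUNC, RQ/RSS index, destination
 * type, the index of the next entry in the chain and the end-of-list flag.
 */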
3170 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
3171 int mce, u8 op, u16 pcifunc, int next,
3172 int index, u8 mce_op, bool eol)
3173 {
3174 struct nix_aq_enq_req aq_req;
3175 int err;
3176
3177 aq_req.hdr.pcifunc = 0;
3178 aq_req.ctype = NIX_AQ_CTYPE_MCE;
3179 aq_req.op = op;
3180 aq_req.qidx = mce;
3181
3182 /* Use RSS with RSS index 0 */
3183 aq_req.mce.op = mce_op;
3184 aq_req.mce.index = index;
3185 aq_req.mce.eol = eol;
3186 aq_req.mce.pf_func = pcifunc;
3187 aq_req.mce.next = next;
3188
3189 /* All fields valid */
3190 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
3191
3192 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
3193 if (err) {
3194 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
3195 rvu_get_pf(rvu->pdev, pcifunc),
3196 pcifunc & RVU_PFVF_FUNC_MASK);
3197 return err;
3198 }
3199 return 0;
3200 }
3201
3202 static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list)
3203 {
3204 struct hlist_node *tmp;
3205 struct mce *mce;
3206
3207 /* Scan through the current list */
3208 hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
3209 hlist_del(&mce->node);
3210 kfree(mce);
3211 }
3212
3213 mce_list->count = 0;
3214 mce_list->max = 0;
3215 }
3216
3217 static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem)
3218 {
3219 return elem->mce_start_index + elem->mcast_mce_list.count - 1;
3220 }
3221
3222 static int nix_update_ingress_mce_list_hw(struct rvu *rvu,
3223 struct nix_hw *nix_hw,
3224 struct nix_mcast_grp_elem *elem)
3225 {
3226 int idx, last_idx, next_idx, err;
3227 struct nix_mce_list *mce_list;
3228 struct mce *mce, *prev_mce;
3229
3230 mce_list = &elem->mcast_mce_list;
3231 idx = elem->mce_start_index;
3232 last_idx = nix_get_last_mce_list_index(elem);
3233 hlist_for_each_entry(mce, &mce_list->head, node) {
3234 if (idx > last_idx)
3235 break;
3236
3237 if (!mce->is_active) {
3238 if (idx == elem->mce_start_index) {
3239 idx++;
3240 prev_mce = mce;
3241 elem->mce_start_index = idx;
3242 continue;
3243 } else if (idx == last_idx) {
3244 err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE,
3245 prev_mce->pcifunc, next_idx,
3246 prev_mce->rq_rss_index,
3247 prev_mce->dest_type,
3248 false);
3249 if (err)
3250 return err;
3251
3252 break;
3253 }
3254 }
3255
3256 next_idx = idx + 1;
3257 /* EOL should be set in last MCE */
3258 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
3259 mce->pcifunc, next_idx,
3260 mce->rq_rss_index, mce->dest_type,
3261 (next_idx > last_idx) ? true : false);
3262 if (err)
3263 return err;
3264
3265 idx++;
3266 prev_mce = mce;
3267 }
3268
3269 return 0;
3270 }
3271
3272 static void nix_update_egress_mce_list_hw(struct rvu *rvu,
3273 struct nix_hw *nix_hw,
3274 struct nix_mcast_grp_elem *elem)
3275 {
3276 struct nix_mce_list *mce_list;
3277 int idx, last_idx, next_idx;
3278 struct mce *mce, *prev_mce;
3279 u64 regval;
3280 u8 eol;
3281
3282 mce_list = &elem->mcast_mce_list;
3283 idx = elem->mce_start_index;
3284 last_idx = nix_get_last_mce_list_index(elem);
3285 hlist_for_each_entry(mce, &mce_list->head, node) {
3286 if (idx > last_idx)
3287 break;
3288
3289 if (!mce->is_active) {
3290 if (idx == elem->mce_start_index) {
3291 idx++;
3292 prev_mce = mce;
3293 elem->mce_start_index = idx;
3294 continue;
3295 } else if (idx == last_idx) {
3296 regval = (next_idx << 16) | (1 << 12) | prev_mce->channel;
3297 rvu_write64(rvu, nix_hw->blkaddr,
3298 NIX_AF_TX_MCASTX(idx - 1),
3299 regval);
3300 break;
3301 }
3302 }
3303
3304 eol = 0;
3305 next_idx = idx + 1;
3306 /* EOL should be set in last MCE */
3307 if (next_idx > last_idx)
3308 eol = 1;
3309
3310 regval = (next_idx << 16) | (eol << 12) | mce->channel;
3311 rvu_write64(rvu, nix_hw->blkaddr,
3312 NIX_AF_TX_MCASTX(idx),
3313 regval);
3314 idx++;
3315 prev_mce = mce;
3316 }
3317 }
3318
3319 static int nix_del_mce_list_entry(struct rvu *rvu,
3320 struct nix_hw *nix_hw,
3321 struct nix_mcast_grp_elem *elem,
3322 struct nix_mcast_grp_update_req *req)
3323 {
3324 u32 num_entry = req->num_mce_entry;
3325 struct nix_mce_list *mce_list;
3326 struct mce *mce;
3327 bool is_found;
3328 int i;
3329
3330 mce_list = &elem->mcast_mce_list;
3331 for (i = 0; i < num_entry; i++) {
3332 is_found = false;
3333 hlist_for_each_entry(mce, &mce_list->head, node) {
3334 /* Delete the entry if it matches this pcifunc */
3335 if (mce->pcifunc == req->pcifunc[i]) {
3336 hlist_del(&mce->node);
3337 kfree(mce);
3338 mce_list->count--;
3339 is_found = true;
3340 break;
3341 }
3342 }
3343
3344 if (!is_found)
3345 return NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
3346 }
3347
3348 mce_list->max = mce_list->count;
3349 /* Dump the updated list to HW */
3350 if (elem->dir == NIX_MCAST_INGRESS)
3351 return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
3352
3353 nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
3354 return 0;
3355 }
3356
3357 static int nix_add_mce_list_entry(struct rvu *rvu,
3358 struct nix_hw *nix_hw,
3359 struct nix_mcast_grp_elem *elem,
3360 struct nix_mcast_grp_update_req *req)
3361 {
3362 u32 num_entry = req->num_mce_entry;
3363 struct nix_mce_list *mce_list;
3364 struct hlist_node *tmp;
3365 struct mce *mce;
3366 int i;
3367
3368 mce_list = &elem->mcast_mce_list;
3369 for (i = 0; i < num_entry; i++) {
3370 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
3371 if (!mce)
3372 goto free_mce;
3373
3374 mce->pcifunc = req->pcifunc[i];
3375 mce->channel = req->channel[i];
3376 mce->rq_rss_index = req->rq_rss_index[i];
3377 mce->dest_type = req->dest_type[i];
3378 mce->is_active = 1;
3379 hlist_add_head(&mce->node, &mce_list->head);
3380 mce_list->count++;
3381 }
3382
3383 mce_list->max += num_entry;
3384
3385 /* Dump the updated list to HW */
3386 if (elem->dir == NIX_MCAST_INGRESS)
3387 return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
3388
3389 nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
3390 return 0;
3391
3392 free_mce:
3393 hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
3394 hlist_del(&mce->node);
3395 kfree(mce);
3396 mce_list->count--;
3397 }
3398
3399 return -ENOMEM;
3400 }
3401
3402 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
3403 u16 pcifunc, bool add)
3404 {
3405 struct mce *mce, *tail = NULL;
3406 bool delete = false;
3407
3408 /* Scan through the current list */
3409 hlist_for_each_entry(mce, &mce_list->head, node) {
3410 /* If already exists, then delete */
3411 if (mce->pcifunc == pcifunc && !add) {
3412 delete = true;
3413 break;
3414 } else if (mce->pcifunc == pcifunc && add) {
3415 /* entry already exists */
3416 return 0;
3417 }
3418 tail = mce;
3419 }
3420
3421 if (delete) {
3422 hlist_del(&mce->node);
3423 kfree(mce);
3424 mce_list->count--;
3425 return 0;
3426 }
3427
3428 if (!add)
3429 return 0;
3430
3431 /* Add a new one to the list, at the tail */
3432 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
3433 if (!mce)
3434 return -ENOMEM;
3435 mce->pcifunc = pcifunc;
3436 if (!tail)
3437 hlist_add_head(&mce->node, &mce_list->head);
3438 else
3439 hlist_add_behind(&mce->node, &tail->node);
3440 mce_list->count++;
3441 return 0;
3442 }
3443
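/* Add or remove 'pcifunc' in the given MCE list and replay the list to HW:
 * each entry is rewritten via the AQ with its 'next' pointer, and EOL is
 * set on the last entry. If the list becomes empty the associated NPC
 * MCAM entry is disabled instead.
 */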
3444 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
3445 struct nix_mce_list *mce_list,
3446 int mce_idx, int mcam_index, bool add)
3447 {
3448 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
3449 struct npc_mcam *mcam = &rvu->hw->mcam;
3450 struct nix_mcast *mcast;
3451 struct nix_hw *nix_hw;
3452 struct mce *mce;
3453
3454 if (!mce_list)
3455 return -EINVAL;
3456
3457 /* Get this PF/VF func's MCE index */
3458 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
3459
3460 if (idx > (mce_idx + mce_list->max)) {
3461 dev_err(rvu->dev,
3462 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
3463 __func__, idx, mce_list->max,
3464 rvu_get_pf(rvu->pdev, pcifunc));
3465 return -EINVAL;
3466 }
3467
3468 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
3469 if (err)
3470 return err;
3471
3472 mcast = &nix_hw->mcast;
3473 mutex_lock(&mcast->mce_lock);
3474
3475 err = nix_update_mce_list_entry(mce_list, pcifunc, add);
3476 if (err)
3477 goto end;
3478
3479 /* Disable MCAM entry in NPC */
3480 if (!mce_list->count) {
3481 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3482 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
3483 goto end;
3484 }
3485
3486 /* Dump the updated list to HW */
3487 idx = mce_idx;
3488 last_idx = idx + mce_list->count - 1;
3489 hlist_for_each_entry(mce, &mce_list->head, node) {
3490 if (idx > last_idx)
3491 break;
3492
3493 next_idx = idx + 1;
3494 /* EOL should be set in last MCE */
3495 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
3496 mce->pcifunc, next_idx,
3497 0, 1,
3498 (next_idx > last_idx) ? true : false);
3499 if (err)
3500 goto end;
3501 idx++;
3502 }
3503
3504 end:
3505 mutex_unlock(&mcast->mce_lock);
3506 return err;
3507 }
3508
3509 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
3510 struct nix_mce_list **mce_list, int *mce_idx)
3511 {
3512 struct rvu_hwinfo *hw = rvu->hw;
3513 struct rvu_pfvf *pfvf;
3514
3515 if (!hw->cap.nix_rx_multicast ||
3516 !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev,
3517 pcifunc & ~RVU_PFVF_FUNC_MASK))) {
3518 *mce_list = NULL;
3519 *mce_idx = 0;
3520 return;
3521 }
3522
3523 /* Get this PF/VF func's MCE index */
3524 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
3525
3526 if (type == NIXLF_BCAST_ENTRY) {
3527 *mce_list = &pfvf->bcast_mce_list;
3528 *mce_idx = pfvf->bcast_mce_idx;
3529 } else if (type == NIXLF_ALLMULTI_ENTRY) {
3530 *mce_list = &pfvf->mcast_mce_list;
3531 *mce_idx = pfvf->mcast_mce_idx;
3532 } else if (type == NIXLF_PROMISC_ENTRY) {
3533 *mce_list = &pfvf->promisc_mce_list;
3534 *mce_idx = pfvf->promisc_mce_idx;
3535 } else {
3536 *mce_list = NULL;
3537 *mce_idx = 0;
3538 }
3539 }
3540
3541 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
3542 int type, bool add)
3543 {
3544 int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
3545 struct npc_mcam *mcam = &rvu->hw->mcam;
3546 struct rvu_hwinfo *hw = rvu->hw;
3547 struct nix_mce_list *mce_list;
3548 int pf;
3549
3550 /* skip multicast pkt replication for AF's VFs & SDP links */
3551 if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(rvu, pcifunc))
3552 return 0;
3553
3554 if (!hw->cap.nix_rx_multicast)
3555 return 0;
3556
3557 pf = rvu_get_pf(rvu->pdev, pcifunc);
3558 if (!is_pf_cgxmapped(rvu, pf))
3559 return 0;
3560
3561 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3562 if (blkaddr < 0)
3563 return -EINVAL;
3564
3565 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
3566 if (nixlf < 0)
3567 return -EINVAL;
3568
3569 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
3570
3571 mcam_index = npc_get_nixlf_mcam_index(mcam,
3572 pcifunc & ~RVU_PFVF_FUNC_MASK,
3573 nixlf, type);
3574 err = nix_update_mce_list(rvu, pcifunc, mce_list,
3575 mce_idx, mcam_index, add);
3576 return err;
3577 }
3578
3579 static void nix_setup_mcast_grp(struct nix_hw *nix_hw)
3580 {
3581 struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp;
3582
3583 INIT_LIST_HEAD(&mcast_grp->mcast_grp_head);
3584 mutex_init(&mcast_grp->mcast_grp_lock);
3585 mcast_grp->next_grp_index = 1;
3586 mcast_grp->count = 0;
3587 }
3588
3589 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
3590 {
3591 struct nix_mcast *mcast = &nix_hw->mcast;
3592 int err, pf, numvfs, idx;
3593 struct rvu_pfvf *pfvf;
3594 u16 pcifunc;
3595 u64 cfg;
3596
3597 /* Skip PF0 (i.e AF) */
3598 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
3599 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3600 /* If PF is not enabled, nothing to do */
3601 if (!((cfg >> 20) & 0x01))
3602 continue;
3603 /* Get numVFs attached to this PF */
3604 numvfs = (cfg >> 12) & 0xFF;
3605
3606 pfvf = &rvu->pf[pf];
3607
3608 /* Is this NIX0/1 block mapped to this PF? */
3609 if (pfvf->nix_blkaddr != nix_hw->blkaddr)
3610 continue;
3611
3612 /* save start idx of broadcast mce list */
3613 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3614 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
3615
3616 /* save start idx of multicast mce list */
3617 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3618 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
3619
3620 /* save the start idx of promisc mce list */
3621 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3622 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
3623
3624 for (idx = 0; idx < (numvfs + 1); idx++) {
3625 /* idx-0 is for PF, followed by VFs */
3626 pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
3627 pcifunc |= idx;
3628 /* Add dummy entries now, so that we don't have to check
3629 * whether AQ_OP should be INIT or WRITE later on.
3630 * Will be updated when a NIXLF is attached/detached to
3631 * these PF/VFs.
3632 */
3633 err = nix_blk_setup_mce(rvu, nix_hw,
3634 pfvf->bcast_mce_idx + idx,
3635 NIX_AQ_INSTOP_INIT,
3636 pcifunc, 0, 0, 1, true);
3637 if (err)
3638 return err;
3639
3640 /* add dummy entries to multicast mce list */
3641 err = nix_blk_setup_mce(rvu, nix_hw,
3642 pfvf->mcast_mce_idx + idx,
3643 NIX_AQ_INSTOP_INIT,
3644 pcifunc, 0, 0, 1, true);
3645 if (err)
3646 return err;
3647
3648 /* add dummy entries to promisc mce list */
3649 err = nix_blk_setup_mce(rvu, nix_hw,
3650 pfvf->promisc_mce_idx + idx,
3651 NIX_AQ_INSTOP_INIT,
3652 pcifunc, 0, 0, 1, true);
3653 if (err)
3654 return err;
3655 }
3656 }
3657 return 0;
3658 }
3659
3660 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3661 {
3662 struct nix_mcast *mcast = &nix_hw->mcast;
3663 struct rvu_hwinfo *hw = rvu->hw;
3664 int err, size;
3665
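/* NIX_AF_CONST3[19:16] advertises the multicast/mirror entry context
 * size as a power of two; convert it to bytes for the qmem allocation.
 */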
3666 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
3667 size = BIT_ULL(size);
3668
3669 /* Allocate bitmap for rx mce entries */
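/* 256 * 2^MC_TBL_SIZE entries; 2048 with the default MC_TBL_SZ_2K */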
3670 mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE;
3671 err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3672 if (err)
3673 return -ENOMEM;
3674
3675 /* Allocate bitmap for tx mce entries */
3676 mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX;
3677 err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3678 if (err) {
3679 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3680 return -ENOMEM;
3681 }
3682
3683 /* Alloc memory for multicast/mirror replication entries */
3684 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
3685 mcast->mce_counter[NIX_MCAST_INGRESS].max, size);
3686 if (err) {
3687 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3688 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3689 return -ENOMEM;
3690 }
3691
3692 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
3693 (u64)mcast->mce_ctx->iova);
3694
3695 /* Set max list length equal to max no of VFs per PF + PF itself */
3696 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
3697 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
3698
3699 /* Alloc memory for multicast replication buffers */
3700 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
3701 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
3702 (8UL << MC_BUF_CNT), size);
3703 if (err) {
3704 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3705 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3706 return -ENOMEM;
3707 }
3708
3709 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
3710 (u64)mcast->mcast_buf->iova);
3711
3712 /* Alloc pkind for NIX internal RX multicast/mirror replay */
3713 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
3714
3715 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
3716 BIT_ULL(63) | (mcast->replay_pkind << 24) |
3717 BIT_ULL(20) | MC_BUF_CNT);
3718
3719 mutex_init(&mcast->mce_lock);
3720
3721 nix_setup_mcast_grp(nix_hw);
3722
3723 return nix_setup_mce_tables(rvu, nix_hw);
3724 }
3725
3726 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
3727 {
3728 struct nix_txvlan *vlan = &nix_hw->txvlan;
3729 int err;
3730
3731 /* Allocate resource bitmap for tx vtag def registers */
3732 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
3733 err = rvu_alloc_bitmap(&vlan->rsrc);
3734 if (err)
3735 return -ENOMEM;
3736
3737 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
3738 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
3739 sizeof(u16), GFP_KERNEL);
3740 if (!vlan->entry2pfvf_map)
3741 goto free_mem;
3742
3743 mutex_init(&vlan->rsrc_lock);
3744 return 0;
3745
3746 free_mem:
3747 kfree(vlan->rsrc.bmap);
3748 return -ENOMEM;
3749 }
3750
3751 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3752 {
3753 struct nix_txsch *txsch;
3754 int err, lvl, schq;
3755 u64 cfg, reg;
3756
3757 /* Get scheduler queue count of each type and alloc
3758 * bitmap for each for alloc/free/attach operations.
3759 */
3760 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3761 txsch = &nix_hw->txsch[lvl];
3762 txsch->lvl = lvl;
3763 switch (lvl) {
3764 case NIX_TXSCH_LVL_SMQ:
3765 reg = NIX_AF_MDQ_CONST;
3766 break;
3767 case NIX_TXSCH_LVL_TL4:
3768 reg = NIX_AF_TL4_CONST;
3769 break;
3770 case NIX_TXSCH_LVL_TL3:
3771 reg = NIX_AF_TL3_CONST;
3772 break;
3773 case NIX_TXSCH_LVL_TL2:
3774 reg = NIX_AF_TL2_CONST;
3775 break;
3776 case NIX_TXSCH_LVL_TL1:
3777 reg = NIX_AF_TL1_CONST;
3778 break;
3779 }
3780 cfg = rvu_read64(rvu, blkaddr, reg);
3781 txsch->schq.max = cfg & 0xFFFF;
3782 err = rvu_alloc_bitmap(&txsch->schq);
3783 if (err)
3784 return err;
3785
3786 /* Allocate memory for scheduler queues to
3787 * PF/VF pcifunc mapping info.
3788 */
3789 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
3790 sizeof(u32), GFP_KERNEL);
3791 if (!txsch->pfvf_map)
3792 return -ENOMEM;
3793 for (schq = 0; schq < txsch->schq.max; schq++)
3794 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
3795 }
3796
3797 /* Setup a default value of 8192 as DWRR MTU */
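/* On silicons with DWRR MTU support each SMQ link type (RPM, LBK, SDP)
 * has its own MTU register; nix_get_dwrr_mtu_reg() picks the register
 * appropriate for this silicon.
 */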
3798 if (rvu->hw->cap.nix_common_dwrr_mtu ||
3799 rvu->hw->cap.nix_multiple_dwrr_mtu) {
3800 rvu_write64(rvu, blkaddr,
3801 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
3802 convert_bytes_to_dwrr_mtu(8192));
3803 rvu_write64(rvu, blkaddr,
3804 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
3805 convert_bytes_to_dwrr_mtu(8192));
3806 rvu_write64(rvu, blkaddr,
3807 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
3808 convert_bytes_to_dwrr_mtu(8192));
3809 }
3810
3811 return 0;
3812 }
3813
3814 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
3815 int blkaddr, u32 cfg)
3816 {
3817 int fmt_idx;
3818
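/* Reuse an already programmed mark format if the config matches */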
3819 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
3820 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
3821 return fmt_idx;
3822 }
3823 if (fmt_idx >= nix_hw->mark_format.total)
3824 return -ERANGE;
3825
3826 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
3827 nix_hw->mark_format.cfg[fmt_idx] = cfg;
3828 nix_hw->mark_format.in_use++;
3829 return fmt_idx;
3830 }
3831
3832 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
3833 int blkaddr)
3834 {
3835 u64 cfgs[] = {
3836 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
3837 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
3838 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
3839 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
3840 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
3841 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
3842 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
3843 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
3844 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
3845 };
3846 int i, rc;
3847 u64 total;
3848
3849 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
3850 nix_hw->mark_format.total = (u8)total;
3851 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
3852 GFP_KERNEL);
3853 if (!nix_hw->mark_format.cfg)
3854 return -ENOMEM;
3855 for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
3856 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
3857 if (rc < 0)
3858 dev_err(rvu->dev, "Err %d in setup mark format %d\n",
3859 rc, i);
3860 }
3861
3862 return 0;
3863 }
3864
3865 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3866 {
3867 /* CN10K supports LBK FIFO size 72 KB */
3868 if (rvu->hw->lbk_bufsize == 0x12000)
3869 *max_mtu = CN10K_LBK_LINK_MAX_FRS;
3870 else
3871 *max_mtu = NIC_HW_MAX_FRS;
3872 }
3873
3874 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3875 {
3876 int fifo_size = rvu_cgx_get_fifolen(rvu);
3877
3878 /* RPM supports FIFO len 128 KB and RPM2 supports double the
3879 * FIFO len to accommodate 8 LMACS
3880 */
3881 if (fifo_size == 0x20000 || fifo_size == 0x40000)
3882 *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
3883 else
3884 *max_mtu = NIC_HW_MAX_FRS;
3885 }
3886
3887 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
3888 struct nix_hw_info *rsp)
3889 {
3890 u16 pcifunc = req->hdr.pcifunc;
3891 u64 dwrr_mtu;
3892 int blkaddr;
3893
3894 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3895 if (blkaddr < 0)
3896 return NIX_AF_ERR_AF_LF_INVALID;
3897
3898 if (is_lbk_vf(rvu, pcifunc))
3899 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
3900 else
3901 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
3902
3903 rsp->min_mtu = NIC_HW_MIN_FRS;
3904
3905 if (!rvu->hw->cap.nix_common_dwrr_mtu &&
3906 !rvu->hw->cap.nix_multiple_dwrr_mtu) {
3907 /* Return '1' on OTx2 */
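/* OTx2 has no DWRR MTU registers; the DWRR quantum is programmed
 * directly into the scheduler queues, so report a neutral value.
 */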
3908 rsp->rpm_dwrr_mtu = 1;
3909 rsp->sdp_dwrr_mtu = 1;
3910 rsp->lbk_dwrr_mtu = 1;
3911 return 0;
3912 }
3913
3914 /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
3915 dwrr_mtu = rvu_read64(rvu, blkaddr,
3916 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
3917 rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3918
3919 dwrr_mtu = rvu_read64(rvu, blkaddr,
3920 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
3921 rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3922
3923 dwrr_mtu = rvu_read64(rvu, blkaddr,
3924 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
3925 rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3926
3927 return 0;
3928 }
3929
3930 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
3931 struct msg_rsp *rsp)
3932 {
3933 u16 pcifunc = req->hdr.pcifunc;
3934 int i, nixlf, blkaddr, err;
3935 u64 stats;
3936
3937 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3938 if (err)
3939 return err;
3940
3941 /* Get stats count supported by HW */
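/* NIX_AF_CONST1[31:24] is the per-LF TX stats count and
 * NIX_AF_CONST1[39:32] is the per-LF RX stats count.
 */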
3942 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
3943
3944 /* Reset tx stats */
3945 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
3946 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
3947
3948 /* Reset rx stats */
3949 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
3950 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
3951
3952 return 0;
3953 }
3954
3955 /* Returns the ALG index to be set into NPC_RX_ACTION */
3956 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
3957 {
3958 int i;
3959
3960 /* Scan over existing algo entries to find a match */
3961 for (i = 0; i < nix_hw->flowkey.in_use; i++)
3962 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
3963 return i;
3964
3965 return -ERANGE;
3966 }
3967
3968 /* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */
3969 #define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf)
3970 /* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */
3971 #define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf)
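/* XOR exposes the bits in which the two ltypes differ; inverting it
 * yields a mask under which both ltype values compare equal, so a
 * single field matches the plain and extended/option header variants.
 */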
3972
3973 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
3974 {
3975 int idx, nr_field, key_off, field_marker, keyoff_marker;
3976 int max_key_off, max_bit_pos, group_member;
3977 struct nix_rx_flowkey_alg *field;
3978 struct nix_rx_flowkey_alg tmp;
3979 u32 key_type, valid_key;
3980 u32 l3_l4_src_dst;
3981 int l4_key_offset = 0;
3982
3983 if (!alg)
3984 return -EINVAL;
3985
3986 #define FIELDS_PER_ALG 5
3987 #define MAX_KEY_OFF 40
3988 /* Clear all fields */
3989 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
3990
3991 /* Each of the 32 possible flow key algorithm definitions should
3992 * fall into above incremental config (except ALG0). Otherwise a
3993 * single NPC MCAM entry is not sufficient for supporting RSS.
3994 *
3995 * If a different definition or combination needed then NPC MCAM
3996 * has to be programmed to filter such pkts and it's action should
3997 * point to this definition to calculate flowtag or hash.
3998 *
3999 * The `for loop` goes over _all_ protocol fields and the following
4000 * variables depict the state machine forward progress logic.
4001 *
4002 * keyoff_marker - Enabled when hash byte length needs to be accounted
4003 * in field->key_offset update.
4004 * field_marker - Enabled when a new field needs to be selected.
4005 * group_member - Enabled when protocol is part of a group.
4006 */
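/* Example: flow_cfg = (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_TCP)
 * produces two fields in the 40 byte hash key:
 *   field[0] - IPv4 SIP+DIP, 8 bytes at key offset 0
 *   field[1] - TCP sport+dport, 4 bytes at key offset 8
 */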
4007
4008 /* Last 4 bits (31:28) are reserved to specify SRC, DST
4009 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST,
4010 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
4011 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
4012 */
4013 l3_l4_src_dst = flow_cfg;
4014 /* Reset these 4 bits, so that these won't be part of key */
4015 flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
4016
4017 keyoff_marker = 0; max_key_off = 0; group_member = 0;
4018 nr_field = 0; key_off = 0; field_marker = 1;
4019 field = &tmp; max_bit_pos = fls(flow_cfg);
4020 for (idx = 0;
4021 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
4022 key_off < MAX_KEY_OFF; idx++) {
4023 key_type = BIT(idx);
4024 valid_key = flow_cfg & key_type;
4025 /* Found a field marker, reset the field values */
4026 if (field_marker)
4027 memset(&tmp, 0, sizeof(tmp));
4028
4029 field_marker = true;
4030 keyoff_marker = true;
4031 switch (key_type) {
4032 case NIX_FLOW_KEY_TYPE_PORT:
4033 field->sel_chan = true;
4034 /* This should be set to 1, when SEL_CHAN is set */
4035 field->bytesm1 = 1;
4036 break;
4037 case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
4038 field->lid = NPC_LID_LC;
4039 field->hdr_offset = 9; /* offset */
4040 field->bytesm1 = 0; /* 1 byte */
4041 field->ltype_match = NPC_LT_LC_IP;
4042 field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
4043 break;
4044 case NIX_FLOW_KEY_TYPE_IPV4:
4045 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
4046 field->lid = NPC_LID_LC;
4047 field->ltype_match = NPC_LT_LC_IP;
4048 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
4049 field->lid = NPC_LID_LG;
4050 field->ltype_match = NPC_LT_LG_TU_IP;
4051 }
4052 field->hdr_offset = 12; /* SIP offset */
4053 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
4054
4055 /* Only SIP */
4056 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
4057 field->bytesm1 = 3; /* SIP, 4 bytes */
4058
4059 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
4060 /* Both SIP + DIP */
4061 if (field->bytesm1 == 3) {
4062 field->bytesm1 = 7; /* SIP + DIP, 8B */
4063 } else {
4064 /* Only DIP */
4065 field->hdr_offset = 16; /* DIP off */
4066 field->bytesm1 = 3; /* DIP, 4 bytes */
4067 }
4068 }
4069 field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
4070 keyoff_marker = false;
4071 break;
4072 case NIX_FLOW_KEY_TYPE_IPV6:
4073 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
4074 field->lid = NPC_LID_LC;
4075 field->ltype_match = NPC_LT_LC_IP6;
4076 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
4077 field->lid = NPC_LID_LG;
4078 field->ltype_match = NPC_LT_LG_TU_IP6;
4079 }
4080 field->hdr_offset = 8; /* SIP offset */
4081 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
4082
4083 /* Only SIP */
4084 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
4085 field->bytesm1 = 15; /* SIP, 16 bytes */
4086
4087 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
4088 /* Both SIP + DIP */
4089 if (field->bytesm1 == 15) {
4090 /* SIP + DIP, 32 bytes */
4091 field->bytesm1 = 31;
4092 } else {
4093 /* Only DIP */
4094 field->hdr_offset = 24; /* DIP off */
4095 field->bytesm1 = 15; /* DIP,16 bytes */
4096 }
4097 }
4098 field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK;
4099 break;
4100 case NIX_FLOW_KEY_TYPE_TCP:
4101 case NIX_FLOW_KEY_TYPE_UDP:
4102 case NIX_FLOW_KEY_TYPE_SCTP:
4103 case NIX_FLOW_KEY_TYPE_INNR_TCP:
4104 case NIX_FLOW_KEY_TYPE_INNR_UDP:
4105 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
4106 field->lid = NPC_LID_LD;
4107 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
4108 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
4109 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
4110 field->lid = NPC_LID_LH;
4111 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
4112
4113 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
4114 field->bytesm1 = 1; /* SRC, 2 bytes */
4115
4116 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
4117 /* Both SRC + DST */
4118 if (field->bytesm1 == 1) {
4119 /* SRC + DST, 4 bytes */
4120 field->bytesm1 = 3;
4121 } else {
4122 /* Only DIP */
4123 field->hdr_offset = 2; /* DST off */
4124 field->bytesm1 = 1; /* DST, 2 bytes */
4125 }
4126 }
4127
4128 /* Enum values for NPC_LID_LD and NPC_LID_LG are same,
4129 * so no need to change the ltype_match, just change
4130 * the lid for inner protocols
4131 */
4132 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
4133 (int)NPC_LT_LH_TU_TCP);
4134 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
4135 (int)NPC_LT_LH_TU_UDP);
4136 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
4137 (int)NPC_LT_LH_TU_SCTP);
4138
4139 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
4140 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
4141 valid_key) {
4142 field->ltype_match |= NPC_LT_LD_TCP;
4143 group_member = true;
4144 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
4145 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
4146 valid_key) {
4147 field->ltype_match |= NPC_LT_LD_UDP;
4148 group_member = true;
4149 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
4150 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
4151 valid_key) {
4152 field->ltype_match |= NPC_LT_LD_SCTP;
4153 group_member = true;
4154 }
4155 field->ltype_mask = ~field->ltype_match;
4156 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
4157 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
4158 /* Handle the case where any of the group item
4159 * is enabled in the group but not the final one
4160 */
4161 if (group_member) {
4162 valid_key = true;
4163 group_member = false;
4164 }
4165 } else {
4166 field_marker = false;
4167 keyoff_marker = false;
4168 }
4169
4170 /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
4171 * remember the TCP key offset within the 40 byte hash key.
4172 */
4173 if (key_type == NIX_FLOW_KEY_TYPE_TCP)
4174 l4_key_offset = key_off;
4175 break;
4176 case NIX_FLOW_KEY_TYPE_NVGRE:
4177 field->lid = NPC_LID_LD;
4178 field->hdr_offset = 4; /* VSID offset */
4179 field->bytesm1 = 2;
4180 field->ltype_match = NPC_LT_LD_NVGRE;
4181 field->ltype_mask = 0xF;
4182 break;
4183 case NIX_FLOW_KEY_TYPE_VXLAN:
4184 case NIX_FLOW_KEY_TYPE_GENEVE:
4185 field->lid = NPC_LID_LE;
4186 field->bytesm1 = 2;
4187 field->hdr_offset = 4;
4188 field->ltype_mask = 0xF;
4189 field_marker = false;
4190 keyoff_marker = false;
4191
4192 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
4193 field->ltype_match |= NPC_LT_LE_VXLAN;
4194 group_member = true;
4195 }
4196
4197 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
4198 field->ltype_match |= NPC_LT_LE_GENEVE;
4199 group_member = true;
4200 }
4201
4202 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
4203 if (group_member) {
4204 field->ltype_mask = ~field->ltype_match;
4205 field_marker = true;
4206 keyoff_marker = true;
4207 valid_key = true;
4208 group_member = false;
4209 }
4210 }
4211 break;
4212 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
4213 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
4214 field->lid = NPC_LID_LA;
4215 field->ltype_match = NPC_LT_LA_ETHER;
4216 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
4217 field->lid = NPC_LID_LF;
4218 field->ltype_match = NPC_LT_LF_TU_ETHER;
4219 }
4220 field->hdr_offset = 0;
4221 field->bytesm1 = 5; /* DMAC 6 Byte */
4222 field->ltype_mask = 0xF;
4223 break;
4224 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
4225 field->lid = NPC_LID_LC;
4226 field->hdr_offset = 40; /* IPV6 hdr */
4227 field->bytesm1 = 0; /* 1 Byte ext hdr */
4228 field->ltype_match = NPC_LT_LC_IP6_EXT;
4229 field->ltype_mask = 0xF;
4230 break;
4231 case NIX_FLOW_KEY_TYPE_GTPU:
4232 field->lid = NPC_LID_LE;
4233 field->hdr_offset = 4;
4234 field->bytesm1 = 3; /* 4 bytes TID */
4235 field->ltype_match = NPC_LT_LE_GTPU;
4236 field->ltype_mask = 0xF;
4237 break;
4238 case NIX_FLOW_KEY_TYPE_CUSTOM0:
4239 field->lid = NPC_LID_LC;
4240 field->hdr_offset = 6;
4241 field->bytesm1 = 1; /* 2 Bytes */
4242 field->ltype_match = NPC_LT_LC_CUSTOM0;
4243 field->ltype_mask = 0xF;
4244 break;
4245 case NIX_FLOW_KEY_TYPE_VLAN:
4246 field->lid = NPC_LID_LB;
4247 field->hdr_offset = 2; /* Skip TPID (2-bytes) */
4248 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
4249 field->ltype_match = NPC_LT_LB_CTAG;
4250 field->ltype_mask = 0xF;
4251 field->fn_mask = 1; /* Mask out the first nibble */
4252 break;
4253 case NIX_FLOW_KEY_TYPE_AH:
4254 case NIX_FLOW_KEY_TYPE_ESP:
4255 field->hdr_offset = 0;
4256 field->bytesm1 = 7; /* SPI + sequence number */
4257 field->ltype_mask = 0xF;
4258 field->lid = NPC_LID_LE;
4259 field->ltype_match = NPC_LT_LE_ESP;
4260 if (key_type == NIX_FLOW_KEY_TYPE_AH) {
4261 field->lid = NPC_LID_LD;
4262 field->ltype_match = NPC_LT_LD_AH;
4263 field->hdr_offset = 4;
4264 keyoff_marker = false;
4265 }
4266 break;
4267 }
4268 field->ena = 1;
4269
4270 /* Found a valid flow key type */
4271 if (valid_key) {
4272 /* Use the key offset of TCP/UDP/SCTP fields
4273 * for ESP/AH fields.
4274 */
4275 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
4276 key_type == NIX_FLOW_KEY_TYPE_AH)
4277 key_off = l4_key_offset;
4278 field->key_offset = key_off;
4279 memcpy(&alg[nr_field], field, sizeof(*field));
4280 max_key_off = max(max_key_off, field->bytesm1 + 1);
4281
4282 /* Found a field marker, get the next field */
4283 if (field_marker)
4284 nr_field++;
4285 }
4286
4287 /* Found a keyoff marker, update the new key_off */
4288 if (keyoff_marker) {
4289 key_off += max_key_off;
4290 max_key_off = 0;
4291 }
4292 }
4293 /* Processed all the flow key types */
4294 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
4295 return 0;
4296 else
4297 return NIX_AF_ERR_RSS_NOSPC_FIELD;
4298 }
4299
4300 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
4301 {
4302 u64 field[FIELDS_PER_ALG];
4303 struct nix_hw *hw;
4304 int fid, rc;
4305
4306 hw = get_nix_hw(rvu->hw, blkaddr);
4307 if (!hw)
4308 return NIX_AF_ERR_INVALID_NIXBLK;
4309
4310 /* No room to add a new flow hash algorithm */
4311 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
4312 return NIX_AF_ERR_RSS_NOSPC_ALGO;
4313
4314 /* Generate algo fields for the given flow_cfg */
4315 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
4316 if (rc)
4317 return rc;
4318
4319 /* Update ALGX_FIELDX register with generated fields */
4320 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
4321 rvu_write64(rvu, blkaddr,
4322 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
4323 fid), field[fid]);
4324
4325 /* Store the flow_cfg for further lookup */
4326 rc = hw->flowkey.in_use;
4327 hw->flowkey.flowkey[rc] = flow_cfg;
4328 hw->flowkey.in_use++;
4329
4330 return rc;
4331 }
4332
4333 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
4334 struct nix_rss_flowkey_cfg *req,
4335 struct nix_rss_flowkey_cfg_rsp *rsp)
4336 {
4337 u16 pcifunc = req->hdr.pcifunc;
4338 int alg_idx, nixlf, blkaddr;
4339 struct nix_hw *nix_hw;
4340 int err;
4341
4342 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
4343 if (err)
4344 return err;
4345
4346 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4347 if (!nix_hw)
4348 return NIX_AF_ERR_INVALID_NIXBLK;
4349
4350 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
4351 /* Failed to get algo index from the existing list, reserve a new one */
4352 if (alg_idx < 0) {
4353 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
4354 req->flowkey_cfg);
4355 if (alg_idx < 0)
4356 return alg_idx;
4357 }
4358 rsp->alg_idx = alg_idx;
4359 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
4360 alg_idx, req->mcam_index);
4361 return 0;
4362 }
4363
4364 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
4365 {
4366 u32 flowkey_cfg, minkey_cfg;
4367 int alg, fid, rc;
4368
4369 /* Disable all flow key algx fieldx */
4370 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
4371 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
4372 rvu_write64(rvu, blkaddr,
4373 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
4374 0);
4375 }
4376
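/* Pre-reserve commonly used RSS flow key combinations so that later
 * nix_rss_flowkey_cfg requests with the same config reuse these ALG
 * indices instead of consuming new ones.
 */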
4377 /* IPv4/IPv6 SIP/DIPs */
4378 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
4379 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4380 if (rc < 0)
4381 return rc;
4382
4383 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4384 minkey_cfg = flowkey_cfg;
4385 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
4386 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4387 if (rc < 0)
4388 return rc;
4389
4390 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4391 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
4392 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4393 if (rc < 0)
4394 return rc;
4395
4396 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4397 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
4398 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4399 if (rc < 0)
4400 return rc;
4401
4402 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
4403 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4404 NIX_FLOW_KEY_TYPE_UDP;
4405 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4406 if (rc < 0)
4407 return rc;
4408
4409 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4410 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4411 NIX_FLOW_KEY_TYPE_SCTP;
4412 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4413 if (rc < 0)
4414 return rc;
4415
4416 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4417 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
4418 NIX_FLOW_KEY_TYPE_SCTP;
4419 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4420 if (rc < 0)
4421 return rc;
4422
4423 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4424 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4425 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
4426 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4427 if (rc < 0)
4428 return rc;
4429
4430 return 0;
4431 }
4432
4433 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
4434 struct nix_set_mac_addr *req,
4435 struct msg_rsp *rsp)
4436 {
4437 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
4438 u16 pcifunc = req->hdr.pcifunc;
4439 int blkaddr, nixlf, err;
4440 struct rvu_pfvf *pfvf;
4441
4442 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
4443 if (err)
4444 return err;
4445
4446 pfvf = rvu_get_pfvf(rvu, pcifunc);
4447
4448 /* untrusted VF can't overwrite admin(PF) changes */
4449 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4450 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
4451 dev_warn(rvu->dev,
4452 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
4453 return -EPERM;
4454 }
4455
4456 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
4457
4458 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
4459 pfvf->rx_chan_base, req->mac_addr);
4460
4461 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
4462 ether_addr_copy(pfvf->default_mac, req->mac_addr);
4463
4464 return 0;
4465 }
4466
4467 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
4468 struct msg_req *req,
4469 struct nix_get_mac_addr_rsp *rsp)
4470 {
4471 u16 pcifunc = req->hdr.pcifunc;
4472 struct rvu_pfvf *pfvf;
4473
4474 if (!is_nixlf_attached(rvu, pcifunc))
4475 return NIX_AF_ERR_AF_LF_INVALID;
4476
4477 pfvf = rvu_get_pfvf(rvu, pcifunc);
4478
4479 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
4480
4481 return 0;
4482 }
4483
4484 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
4485 struct msg_rsp *rsp)
4486 {
4487 bool allmulti, promisc, nix_rx_multicast;
4488 u16 pcifunc = req->hdr.pcifunc;
4489 struct rvu_pfvf *pfvf;
4490 int nixlf, err;
4491
4492 pfvf = rvu_get_pfvf(rvu, pcifunc);
4493 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
4494 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
4495 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
4496
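/* HW based packet replication is used only when the silicon supports
 * it and the PF/VF opted in via NIX_RX_MODE_USE_MCE.
 */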
4497 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
4498
4499 if (is_vf(pcifunc) && !nix_rx_multicast &&
4500 (promisc || allmulti)) {
4501 dev_warn_ratelimited(rvu->dev,
4502 "VF promisc/multicast not supported\n");
4503 return 0;
4504 }
4505
4506 /* untrusted VF can't configure promisc/allmulti */
4507 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4508 (promisc || allmulti))
4509 return 0;
4510
4511 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4512 if (err)
4513 return err;
4514
4515 if (nix_rx_multicast) {
4516 /* add/del this PF_FUNC to/from mcast pkt replication list */
4517 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
4518 allmulti);
4519 if (err) {
4520 dev_err(rvu->dev,
4521 "Failed to update pcifunc 0x%x to multicast list\n",
4522 pcifunc);
4523 return err;
4524 }
4525
4526 /* add/del this PF_FUNC to/from promisc pkt replication list */
4527 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
4528 promisc);
4529 if (err) {
4530 dev_err(rvu->dev,
4531 "Failed to update pcifunc 0x%x to promisc list\n",
4532 pcifunc);
4533 return err;
4534 }
4535 }
4536
4537 /* install/uninstall allmulti entry */
4538 if (allmulti) {
4539 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
4540 pfvf->rx_chan_base);
4541 } else {
4542 if (!nix_rx_multicast)
4543 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
4544 }
4545
4546 /* install/uninstall promisc entry */
4547 if (promisc)
4548 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
4549 pfvf->rx_chan_base,
4550 pfvf->rx_chan_cnt);
4551 else
4552 if (!nix_rx_multicast)
4553 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
4554
4555 return 0;
4556 }
4557
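/* A NIX RX link is shared by a PF and all its VFs, so the link level
 * max/min frame sizes must cover the largest maxlen and smallest
 * minlen requested across the PF and its VFs.
 */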
4558 static void nix_find_link_frs(struct rvu *rvu,
4559 struct nix_frs_cfg *req, u16 pcifunc)
4560 {
4561 int pf = rvu_get_pf(rvu->pdev, pcifunc);
4562 struct rvu_pfvf *pfvf;
4563 int maxlen, minlen;
4564 int numvfs, hwvf;
4565 int vf;
4566
4567 /* Update with requester's min/max lengths */
4568 pfvf = rvu_get_pfvf(rvu, pcifunc);
4569 pfvf->maxlen = req->maxlen;
4570 if (req->update_minlen)
4571 pfvf->minlen = req->minlen;
4572
4573 maxlen = req->maxlen;
4574 minlen = req->update_minlen ? req->minlen : 0;
4575
4576 /* Get this PF's numVFs and starting hwvf */
4577 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
4578
4579 /* For each VF, compare requested max/minlen */
4580 for (vf = 0; vf < numvfs; vf++) {
4581 pfvf = &rvu->hwvf[hwvf + vf];
4582 if (pfvf->maxlen > maxlen)
4583 maxlen = pfvf->maxlen;
4584 if (req->update_minlen &&
4585 pfvf->minlen && pfvf->minlen < minlen)
4586 minlen = pfvf->minlen;
4587 }
4588
4589 /* Compare requested max/minlen with PF's max/minlen */
4590 pfvf = &rvu->pf[pf];
4591 if (pfvf->maxlen > maxlen)
4592 maxlen = pfvf->maxlen;
4593 if (req->update_minlen &&
4594 pfvf->minlen && pfvf->minlen < minlen)
4595 minlen = pfvf->minlen;
4596
4597 /* Update the request with the max/min across the PF and its VFs */
4598 req->maxlen = maxlen;
4599 if (req->update_minlen)
4600 req->minlen = minlen;
4601 }
4602
4603 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
4604 struct msg_rsp *rsp)
4605 {
4606 struct rvu_hwinfo *hw = rvu->hw;
4607 u16 pcifunc = req->hdr.pcifunc;
4608 int pf = rvu_get_pf(rvu->pdev, pcifunc);
4609 int blkaddr, link = -1;
4610 struct nix_hw *nix_hw;
4611 struct rvu_pfvf *pfvf;
4612 u8 cgx = 0, lmac = 0;
4613 u16 max_mtu;
4614 u64 cfg;
4615
4616 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4617 if (blkaddr < 0)
4618 return NIX_AF_ERR_AF_LF_INVALID;
4619
4620 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4621 if (!nix_hw)
4622 return NIX_AF_ERR_INVALID_NIXBLK;
4623
4624 if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc))
4625 rvu_get_lbk_link_max_frs(rvu, &max_mtu);
4626 else
4627 rvu_get_lmac_link_max_frs(rvu, &max_mtu);
4628
4629 if (!req->sdp_link && req->maxlen > max_mtu)
4630 return NIX_AF_ERR_FRS_INVALID;
4631
4632 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
4633 return NIX_AF_ERR_FRS_INVALID;
4634
4635 /* Check if config is for SDP link */
4636 if (req->sdp_link) {
4637 if (!hw->sdp_links)
4638 return NIX_AF_ERR_RX_LINK_INVALID;
4639 link = hw->cgx_links + hw->lbk_links;
4640 goto linkcfg;
4641 }
4642
4643 /* Check if the request is from CGX mapped RVU PF */
4644 if (is_pf_cgxmapped(rvu, pf)) {
4645 /* Get CGX and LMAC to which this PF is mapped and find link */
4646 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
4647 link = (cgx * hw->lmac_per_cgx) + lmac;
4648 } else if (pf == 0) {
4649 /* For VFs of PF0 ingress is LBK port, so config LBK link */
4650 pfvf = rvu_get_pfvf(rvu, pcifunc);
4651 link = hw->cgx_links + pfvf->lbkid;
4652 } else if (is_rep_dev(rvu, pcifunc)) {
4653 link = hw->cgx_links + 0;
4654 }
4655
4656 if (link < 0)
4657 return NIX_AF_ERR_RX_LINK_INVALID;
4658
4659 linkcfg:
4660 nix_find_link_frs(rvu, req, pcifunc);
4661
4662 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
4663 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
4664 if (req->update_minlen)
4665 cfg = (cfg & ~0xFFFFULL) | req->minlen;
4666 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
4667
4668 return 0;
4669 }
4670
4671 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
4672 struct msg_rsp *rsp)
4673 {
4674 int nixlf, blkaddr, err;
4675 u64 cfg;
4676
4677 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
4678 if (err)
4679 return err;
4680
4681 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
4682 /* Set the interface configuration */
4683 if (req->len_verify & BIT(0))
4684 cfg |= BIT_ULL(41);
4685 else
4686 cfg &= ~BIT_ULL(41);
4687
4688 if (req->len_verify & BIT(1))
4689 cfg |= BIT_ULL(40);
4690 else
4691 cfg &= ~BIT_ULL(40);
4692
4693 if (req->len_verify & NIX_RX_DROP_RE)
4694 cfg |= BIT_ULL(32);
4695 else
4696 cfg &= ~BIT_ULL(32);
4697
4698 if (req->csum_verify & BIT(0))
4699 cfg |= BIT_ULL(37);
4700 else
4701 cfg &= ~BIT_ULL(37);
4702
4703 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
4704
4705 return 0;
4706 }
4707
4708 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
4709 {
4710 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
4711 }
4712
4713 static void nix_link_config(struct rvu *rvu, int blkaddr,
4714 struct nix_hw *nix_hw)
4715 {
4716 struct rvu_hwinfo *hw = rvu->hw;
4717 int cgx, lmac_cnt, slink, link;
4718 u16 lbk_max_frs, lmac_max_frs;
4719 unsigned long lmac_bmap;
4720 u64 tx_credits, cfg;
4721 u64 lmac_fifo_len;
4722 int iter;
4723
4724 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
4725 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
4726
4727 /* Set SDP link credit */
4728 rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT);
4729
4730 /* Set default min/max packet lengths allowed on NIX Rx links.
4731 *
4732 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
4733 * as undersize and report them to SW as error pkts, hence
4734 * setting it to 40 bytes.
4735 */
4736 for (link = 0; link < hw->cgx_links; link++) {
4737 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4738 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
4739 }
4740
4741 for (link = hw->cgx_links; link < hw->lbk_links; link++) {
4742 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4743 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
4744 }
4745 if (hw->sdp_links) {
4746 link = hw->cgx_links + hw->lbk_links;
4747 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4748 SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS);
4749 }
4750
4751 /* Get MCS external bypass status for CN10K-B */
4752 if (mcs_get_blkcnt() == 1) {
4753 /* Adjust for 2 credits when external bypass is disabled */
4754 nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
4755 }
4756
4757 /* Set credits for Tx links assuming max packet length allowed.
4758 * This will be reconfigured based on MTU set for PF/VF.
4759 */
4760 for (cgx = 0; cgx < hw->cgx; cgx++) {
4761 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
4762 /* Skip when cgx is not available or lmac cnt is zero */
4763 if (lmac_cnt <= 0)
4764 continue;
4765 slink = cgx * hw->lmac_per_cgx;
4766
4767 /* Get LMAC id's from bitmap */
4768 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
4769 for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
4770 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
4771 if (!lmac_fifo_len) {
4772 dev_err(rvu->dev,
4773 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4774 __func__, cgx, iter);
4775 continue;
4776 }
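/* Credits are in units of 16 bytes; keep room for one max-sized
 * frame in the FIFO.
 */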
4777 tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
4778 /* Enable credits and set credit pkt count to max allowed */
4779 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4780 cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
4781
4782 link = iter + slink;
4783 nix_hw->tx_credits[link] = tx_credits;
4784 rvu_write64(rvu, blkaddr,
4785 NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4786 }
4787 }
4788
4789 /* Set Tx credits for LBK link */
4790 slink = hw->cgx_links;
4791 for (link = slink; link < (slink + hw->lbk_links); link++) {
4792 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
4793 nix_hw->tx_credits[link] = tx_credits;
4794 /* Enable credits and set credit pkt count to max allowed */
4795 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4796 rvu_write64(rvu, blkaddr,
4797 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
4798 }
4799 }
4800
4801 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
4802 {
4803 int idx, err;
4804 u64 status;
4805
4806 /* Start X2P bus calibration */
4807 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4808 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
4809 /* Wait for calibration to complete */
4810 err = rvu_poll_reg(rvu, blkaddr,
4811 NIX_AF_STATUS, BIT_ULL(10), false);
4812 if (err) {
4813 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
4814 return err;
4815 }
4816
4817 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
4818 /* Check if CGX devices are ready */
4819 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
4820 /* Skip when cgx port is not available */
4821 if (!rvu_cgx_pdata(idx, rvu) ||
4822 (status & (BIT_ULL(16 + idx))))
4823 continue;
4824 dev_err(rvu->dev,
4825 "CGX%d didn't respond to NIX X2P calibration\n", idx);
4826 err = -EBUSY;
4827 }
4828
4829 /* Check if LBK is ready */
4830 if (!(status & BIT_ULL(19))) {
4831 dev_err(rvu->dev,
4832 "LBK didn't respond to NIX X2P calibration\n");
4833 err = -EBUSY;
4834 }
4835
4836 /* Clear 'calibrate_x2p' bit */
4837 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4838 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
4839 if (err || (status & 0x3FFULL))
4840 dev_err(rvu->dev,
4841 "NIX X2P calibration failed, status 0x%llx\n", status);
4842 if (err)
4843 return err;
4844 return 0;
4845 }
4846
4847 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
4848 {
4849 u64 cfg;
4850 int err;
4851
4852 /* Set admin queue endianness */
4853 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
4854 #ifdef __BIG_ENDIAN
4855 cfg |= BIT_ULL(8);
4856 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4857 #else
4858 cfg &= ~BIT_ULL(8);
4859 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4860 #endif
4861
4862 /* Do not bypass NDC cache */
4863 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
4864 cfg &= ~0x3FFEULL;
4865 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
4866 /* Disable caching of SQB aka SQEs */
4867 cfg |= 0x04ULL;
4868 #endif
4869 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
4870
4871 /* Result structure can be followed by RQ/SQ/CQ context at
4872 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
4873 * operation type. Alloc sufficient result memory for all operations.
4874 */
4875 err = rvu_aq_alloc(rvu, &block->aq,
4876 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
4877 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
4878 if (err)
4879 return err;
4880
4881 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
4882 rvu_write64(rvu, block->addr,
4883 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
4884 return 0;
4885 }
4886
4887 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
4888 {
4889 struct rvu_hwinfo *hw = rvu->hw;
4890 u64 hw_const;
4891
4892 hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
4893
4894 /* On OcteonTx2 DWRR quantum is directly configured into each of
4895 * the transmit scheduler queues. And PF/VF drivers were free to
4896 * config any value up to 2^24.
4897 * On CN10K, HW is modified, the quantum configuration at scheduler
4898 * queues is in terms of weight. And SW needs to setup a base DWRR MTU
4899 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
4900 * 'DWRR MTU * weight' to get the quantum.
4901 *
4902 * Check if HW uses a common MTU for all DWRR quantum configs.
4903 * On OcteonTx2 this register field is '0'.
4904 */
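/* NIX_AF_CONST1 bit 60 indicates a single common DWRR MTU register,
 * bit 61 indicates separate per link-type DWRR MTU registers.
 */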
4905 if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
4906 hw->cap.nix_common_dwrr_mtu = true;
4907
4908 if (hw_const & BIT_ULL(61))
4909 hw->cap.nix_multiple_dwrr_mtu = true;
4910 }
4911
4912 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
4913 {
4914 const struct npc_lt_def_cfg *ltdefs;
4915 struct rvu_hwinfo *hw = rvu->hw;
4916 int blkaddr = nix_hw->blkaddr;
4917 struct rvu_block *block;
4918 int err;
4919 u64 cfg;
4920
4921 block = &hw->block[blkaddr];
4922
4923 if (is_rvu_96xx_B0(rvu)) {
4924 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
4925 * internal state when conditional clocks are turned off.
4926 * Hence enable them.
4927 */
4928 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4929 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
4930 }
4931
4932 /* Set chan/link to backpressure TL3 instead of TL2 */
4933 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
4934
4935 /* Disable SQ manager's sticky mode operation (set TM6 = 0)
4936 * This sticky mode is known to cause SQ stalls when multiple
4937 * SQs are mapped to same SMQ and transmitting pkts at a time.
4938 */
4939 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
4940 cfg &= ~BIT_ULL(15);
4941 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
4942
4943 ltdefs = rvu->kpu.lt_def;
4944 /* Calibrate X2P bus to check if CGX/LBK links are fine */
4945 err = nix_calibrate_x2p(rvu, blkaddr);
4946 if (err)
4947 return err;
4948
4949 /* Setup capabilities of the NIX block */
4950 rvu_nix_setup_capabilities(rvu, blkaddr);
4951
4952 /* Initialize admin queue */
4953 err = nix_aq_init(rvu, block);
4954 if (err)
4955 return err;
4956
4957 /* Restore CINT timer delay to HW reset values */
4958 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
4959
4960 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
4961
4962 /* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
4963 cfg |= 1ULL;
4964 if (!is_rvu_otx2(rvu))
4965 cfg |= NIX_PTP_1STEP_EN;
4966
4967 rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
4968
4969 if (!is_rvu_otx2(rvu))
4970 rvu_nix_block_cn10k_init(rvu, nix_hw);
4971
4972 if (is_block_implemented(hw, blkaddr)) {
4973 err = nix_setup_txschq(rvu, nix_hw, blkaddr);
4974 if (err)
4975 return err;
4976
4977 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
4978 if (err)
4979 return err;
4980
4981 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
4982 if (err)
4983 return err;
4984
4985 err = nix_setup_mcast(rvu, nix_hw, blkaddr);
4986 if (err)
4987 return err;
4988
4989 err = nix_setup_txvlan(rvu, nix_hw);
4990 if (err)
4991 return err;
4992
4993 err = nix_setup_bpids(rvu, nix_hw, blkaddr);
4994 if (err)
4995 return err;
4996
4997 /* Configure segmentation offload formats */
4998 nix_setup_lso(rvu, nix_hw, blkaddr);
4999
5000 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
5001 * This helps HW protocol checker to identify headers
5002 * and validate length and checksums.
5003 */
5004 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
5005 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
5006 ltdefs->rx_ol2.ltype_mask);
5007 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
5008 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
5009 ltdefs->rx_oip4.ltype_mask);
5010 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
5011 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
5012 ltdefs->rx_iip4.ltype_mask);
5013 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
5014 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
5015 ltdefs->rx_oip6.ltype_mask);
5016 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
5017 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
5018 ltdefs->rx_iip6.ltype_mask);
5019 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
5020 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
5021 ltdefs->rx_otcp.ltype_mask);
5022 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
5023 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
5024 ltdefs->rx_itcp.ltype_mask);
5025 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
5026 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
5027 ltdefs->rx_oudp.ltype_mask);
5028 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
5029 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
5030 ltdefs->rx_iudp.ltype_mask);
5031 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
5032 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
5033 ltdefs->rx_osctp.ltype_mask);
5034 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
5035 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
5036 ltdefs->rx_isctp.ltype_mask);
5037
5038 if (!is_rvu_otx2(rvu)) {
5039 /* Enable APAD calculation for other protocols
5040 * matching APAD0 and APAD1 lt def registers.
5041 */
5042 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
5043 (ltdefs->rx_apad0.valid << 11) |
5044 (ltdefs->rx_apad0.lid << 8) |
5045 (ltdefs->rx_apad0.ltype_match << 4) |
5046 ltdefs->rx_apad0.ltype_mask);
5047 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
5048 (ltdefs->rx_apad1.valid << 11) |
5049 (ltdefs->rx_apad1.lid << 8) |
5050 (ltdefs->rx_apad1.ltype_match << 4) |
5051 ltdefs->rx_apad1.ltype_mask);
5052
5053 /* Receive ethertype definition register defines layer
5054 * information in NPC_RESULT_S to identify the Ethertype
5055 * location in L2 header. Used for Ethertype overwriting
5056 * in inline IPsec flow.
5057 */
5058 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
5059 (ltdefs->rx_et[0].offset << 12) |
5060 (ltdefs->rx_et[0].valid << 11) |
5061 (ltdefs->rx_et[0].lid << 8) |
5062 (ltdefs->rx_et[0].ltype_match << 4) |
5063 ltdefs->rx_et[0].ltype_mask);
5064 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
5065 (ltdefs->rx_et[1].offset << 12) |
5066 (ltdefs->rx_et[1].valid << 11) |
5067 (ltdefs->rx_et[1].lid << 8) |
5068 (ltdefs->rx_et[1].ltype_match << 4) |
5069 ltdefs->rx_et[1].ltype_mask);
5070 }
5071
5072 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
5073 if (err)
5074 return err;
5075
5076 nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
5077 sizeof(u64), GFP_KERNEL);
5078 if (!nix_hw->tx_credits)
5079 return -ENOMEM;
5080
5081 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
5082 nix_link_config(rvu, blkaddr, nix_hw);
5083
5084 /* Enable Channel backpressure */
5085 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
5086 }
5087 return 0;
5088 }
5089
5090 int rvu_nix_init(struct rvu *rvu)
5091 {
5092 struct rvu_hwinfo *hw = rvu->hw;
5093 struct nix_hw *nix_hw;
5094 int blkaddr = 0, err;
5095 int i = 0;
5096
5097 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
5098 GFP_KERNEL);
5099 if (!hw->nix)
5100 return -ENOMEM;
5101
5102 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5103 while (blkaddr) {
5104 nix_hw = &hw->nix[i];
5105 nix_hw->rvu = rvu;
5106 nix_hw->blkaddr = blkaddr;
5107 err = rvu_nix_block_init(rvu, nix_hw);
5108 if (err)
5109 return err;
5110 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5111 i++;
5112 }
5113
5114 return 0;
5115 }
5116
5117 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
5118 struct rvu_block *block)
5119 {
5120 struct nix_txsch *txsch;
5121 struct nix_mcast *mcast;
5122 struct nix_txvlan *vlan;
5123 struct nix_hw *nix_hw;
5124 int lvl;
5125
5126 rvu_aq_free(rvu, block->aq);
5127
5128 if (is_block_implemented(rvu->hw, blkaddr)) {
5129 nix_hw = get_nix_hw(rvu->hw, blkaddr);
5130 if (!nix_hw)
5131 return;
5132
5133 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
5134 txsch = &nix_hw->txsch[lvl];
5135 kfree(txsch->schq.bmap);
5136 }
5137
5138 kfree(nix_hw->tx_credits);
5139
5140 nix_ipolicer_freemem(rvu, nix_hw);
5141
5142 vlan = &nix_hw->txvlan;
5143 kfree(vlan->rsrc.bmap);
5144 mutex_destroy(&vlan->rsrc_lock);
5145
5146 mcast = &nix_hw->mcast;
5147 qmem_free(rvu->dev, mcast->mce_ctx);
5148 qmem_free(rvu->dev, mcast->mcast_buf);
5149 mutex_destroy(&mcast->mce_lock);
5150 }
5151 }
5152
5153 void rvu_nix_freemem(struct rvu *rvu)
5154 {
5155 struct rvu_hwinfo *hw = rvu->hw;
5156 struct rvu_block *block;
5157 int blkaddr = 0;
5158
5159 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5160 while (blkaddr) {
5161 block = &hw->block[blkaddr];
5162 rvu_nix_block_freemem(rvu, blkaddr, block);
5163 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5164 }
5165 }
5166
5167 static void nix_mcast_update_action(struct rvu *rvu,
5168 struct nix_mcast_grp_elem *elem)
5169 {
5170 struct npc_mcam *mcam = &rvu->hw->mcam;
5171 struct nix_rx_action rx_action = { 0 };
5172 struct nix_tx_action tx_action = { 0 };
5173 int npc_blkaddr;
5174
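/* The MCAM action is a single 64-bit word; read it back, patch only
 * the MCE list start index and write it back so the remaining action
 * fields are preserved.
 */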
5175 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
5176 if (elem->dir == NIX_MCAST_INGRESS) {
5177 *(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam,
5178 npc_blkaddr,
5179 elem->mcam_index);
5180 rx_action.index = elem->mce_start_index;
5181 npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
5182 *(u64 *)&rx_action);
5183 } else {
5184 *(u64 *)&tx_action = npc_get_mcam_action(rvu, mcam,
5185 npc_blkaddr,
5186 elem->mcam_index);
5187 tx_action.index = elem->mce_start_index;
5188 npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
5189 *(u64 *)&tx_action);
5190 }
5191 }
5192
5193 static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active)
5194 {
5195 struct nix_mcast_grp_elem *elem;
5196 struct nix_mcast_grp *mcast_grp;
5197 struct nix_hw *nix_hw;
5198 int blkaddr;
5199
5200 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5201 nix_hw = get_nix_hw(rvu->hw, blkaddr);
5202 if (!nix_hw)
5203 return;
5204
5205 mcast_grp = &nix_hw->mcast_grp;
5206
5207 mutex_lock(&mcast_grp->mcast_grp_lock);
5208 list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) {
5209 struct nix_mce_list *mce_list;
5210 struct mce *mce;
5211
5212 /* Iterate the group elements and update the active state of
5213 * the element for which the enable/disable request was received.
5214 */
5215 mce_list = &elem->mcast_mce_list;
5216 hlist_for_each_entry(mce, &mce_list->head, node) {
5217 if (mce->pcifunc == pcifunc) {
5218 mce->is_active = is_active;
5219 break;
5220 }
5221 }
5222
5223 /* Dump the updated list to HW */
5224 if (elem->dir == NIX_MCAST_INGRESS)
5225 nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
5226 else
5227 nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
5228
5229 /* Update the multicast index in NPC rule */
5230 nix_mcast_update_action(rvu, elem);
5231 }
5232 mutex_unlock(&mcast_grp->mcast_grp_lock);
5233 }
5234
5235 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
5236 struct msg_rsp *rsp)
5237 {
5238 u16 pcifunc = req->hdr.pcifunc;
5239 struct rvu_pfvf *pfvf;
5240 int nixlf, err, pf;
5241
5242 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
5243 if (err)
5244 return err;
5245
5246 /* Enable the interface if it is in any multicast list */
5247 nix_mcast_update_mce_entry(rvu, pcifunc, 1);
5248
5249 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
5250
5251 npc_mcam_enable_flows(rvu, pcifunc);
5252
5253 pfvf = rvu_get_pfvf(rvu, pcifunc);
5254 set_bit(NIXLF_INITIALIZED, &pfvf->flags);
5255
5256 rvu_switch_update_rules(rvu, pcifunc, true);
5257
5258 pf = rvu_get_pf(rvu->pdev, pcifunc);
5259 if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
5260 rvu_rep_notify_pfvf_state(rvu, pcifunc, true);
5261
5262 return rvu_cgx_start_stop_io(rvu, pcifunc, true);
5263 }
5264
5265 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
5266 struct msg_rsp *rsp)
5267 {
5268 u16 pcifunc = req->hdr.pcifunc;
5269 struct rvu_pfvf *pfvf;
5270 int nixlf, err, pf;
5271
5272 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
5273 if (err)
5274 return err;
5275
5276 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
5277 /* Disable the interface if it is in any multicast list */
5278 nix_mcast_update_mce_entry(rvu, pcifunc, 0);
5279
5281 pfvf = rvu_get_pfvf(rvu, pcifunc);
5282 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
5283
5284 err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
5285 if (err)
5286 return err;
5287
5288 rvu_switch_update_rules(rvu, pcifunc, false);
5289 rvu_cgx_tx_enable(rvu, pcifunc, true);
5290
5291 pf = rvu_get_pf(rvu->pdev, pcifunc);
5292 if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
5293 rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
5294 return 0;
5295 }
5296
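/* SA context base IOVA field of NIX_AF_LFX_RX_IPSEC_SA_BASE; non-zero
 * means inline IPsec was configured and CPT contexts need flushing.
 */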
5297 #define RX_SA_BASE GENMASK_ULL(52, 7)
5298
5299 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
5300 {
5301 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
5302 struct hwctx_disable_req ctx_req;
5303 int pf = rvu_get_pf(rvu->pdev, pcifunc);
5304 struct mac_ops *mac_ops;
5305 u8 cgx_id, lmac_id;
5306 u64 sa_base;
5307 void *cgxd;
5308 int err;
5309
5310 ctx_req.hdr.pcifunc = pcifunc;
5311
5312 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
5313 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
5314 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
5315 nix_interface_deinit(rvu, pcifunc, nixlf);
5316 nix_rx_sync(rvu, blkaddr);
5317 nix_txschq_free(rvu, pcifunc);
5318
5319 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
5320
5321 if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
5322 rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
5323
5324 rvu_cgx_start_stop_io(rvu, pcifunc, false);
5325
5326 if (pfvf->sq_ctx) {
5327 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
5328 err = nix_lf_hwctx_disable(rvu, &ctx_req);
5329 if (err)
5330 dev_err(rvu->dev, "SQ ctx disable failed\n");
5331 }
5332
5333 if (pfvf->rq_ctx) {
5334 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
5335 err = nix_lf_hwctx_disable(rvu, &ctx_req);
5336 if (err)
5337 dev_err(rvu->dev, "RQ ctx disable failed\n");
5338 }
5339
5340 if (pfvf->cq_ctx) {
5341 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
5342 err = nix_lf_hwctx_disable(rvu, &ctx_req);
5343 if (err)
5344 dev_err(rvu->dev, "CQ ctx disable failed\n");
5345 }
5346
5347 /* reset HW config done for Switch headers */
5348 rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
5349 (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
5350
5351 /* Undo CGX and NPC config done for PTP */
5352 if (pfvf->hw_rx_tstamp_en) {
5353 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
5354 cgxd = rvu_cgx_pdata(cgx_id, rvu);
5355 mac_ops = get_mac_ops(cgxd);
5356 mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
5357 /* Undo NPC config done for PTP */
5358 if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
5359 dev_err(rvu->dev, "NPC config for PTP failed\n");
5360 pfvf->hw_rx_tstamp_en = false;
5361 }
5362
5363 /* reset priority flow control config */
5364 rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
5365
5366 /* reset 802.3x flow control config */
5367 rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
5368
5369 nix_ctx_free(rvu, pfvf);
5370
5371 nix_free_all_bandprof(rvu, pcifunc);
5372
5373 sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
5374 if (FIELD_GET(RX_SA_BASE, sa_base)) {
5375 err = rvu_cpt_ctx_flush(rvu, pcifunc);
5376 if (err)
5377 dev_err(rvu->dev,
5378 "CPT ctx flush failed with error: %d\n", err);
5379 }
5380 }
5381
5382 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
5383
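/* Set or clear the PTP timestamping enable bit in NIX_AF_LFX_TX_CFG for the
 * requester's NIX LF. This is a no-op if the mapped MAC does not support PTP.
 */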
5384 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
5385 {
5386 struct rvu_hwinfo *hw = rvu->hw;
5387 struct rvu_block *block;
5388 int blkaddr, pf;
5389 int nixlf;
5390 u64 cfg;
5391
5392 pf = rvu_get_pf(rvu->pdev, pcifunc);
5393 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
5394 return 0;
5395
5396 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5397 if (blkaddr < 0)
5398 return NIX_AF_ERR_AF_LF_INVALID;
5399
5400 block = &hw->block[blkaddr];
5401 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
5402 if (nixlf < 0)
5403 return NIX_AF_ERR_AF_LF_INVALID;
5404
5405 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
5406
5407 if (enable)
5408 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
5409 else
5410 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
5411
5412 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
5413
5414 return 0;
5415 }
5416
5417 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
5418 struct msg_rsp *rsp)
5419 {
5420 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
5421 }
5422
5423 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
5424 struct msg_rsp *rsp)
5425 {
5426 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
5427 }
5428
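/* Configure a hardware LSO format from the fields supplied in the mbox
 * request. An existing format whose fields match (under req->field_mask)
 * is reused; otherwise a new format index is programmed and returned.
 * PF drivers typically request such formats (e.g. for TCPv4/TCPv6
 * segmentation) once at init time.
 */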
5429 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
5430 struct nix_lso_format_cfg *req,
5431 struct nix_lso_format_cfg_rsp *rsp)
5432 {
5433 u16 pcifunc = req->hdr.pcifunc;
5434 struct nix_hw *nix_hw;
5435 struct rvu_pfvf *pfvf;
5436 int blkaddr, idx, f;
5437 u64 reg;
5438
5439 pfvf = rvu_get_pfvf(rvu, pcifunc);
5440 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5441 if (!pfvf->nixlf || blkaddr < 0)
5442 return NIX_AF_ERR_AF_LF_INVALID;
5443
5444 nix_hw = get_nix_hw(rvu->hw, blkaddr);
5445 if (!nix_hw)
5446 return NIX_AF_ERR_INVALID_NIXBLK;
5447
5448 /* Find existing matching LSO format, if any */
5449 for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
5450 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
5451 reg = rvu_read64(rvu, blkaddr,
5452 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
5453 if (req->fields[f] != (reg & req->field_mask))
5454 break;
5455 }
5456
5457 if (f == NIX_LSO_FIELD_MAX)
5458 break;
5459 }
5460
5461 if (idx < nix_hw->lso.in_use) {
5462 /* Match found */
5463 rsp->lso_format_idx = idx;
5464 return 0;
5465 }
5466
5467 if (nix_hw->lso.in_use == nix_hw->lso.total)
5468 return NIX_AF_ERR_LSO_CFG_FAIL;
5469
5470 rsp->lso_format_idx = nix_hw->lso.in_use++;
5471
5472 for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
5473 rvu_write64(rvu, blkaddr,
5474 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
5475 req->fields[f]);
5476
5477 return 0;
5478 }
5479
5480 #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
5481 #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
5482 #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
5483 #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
5484
5485 #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
5486 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
5487 #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
5488
5489 #define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32)
5490 #define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22)
5491 #define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0)
5492
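/* Program or clear the global RX inline IPsec configuration for a NIX block:
 * the CPT opcode/engine-group and params in NIX_AF_RX_IPSEC_GEN_CFG, the CPT
 * queue used for inline processing in NIX_AF_RX_CPTX_INST_QSEL, and the
 * instruction credit count/threshold/BPID in NIX_AF_RX_CPTX_CREDIT.
 */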
5493 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
5494 int blkaddr)
5495 {
5496 u8 cpt_idx, cpt_blkaddr;
5497 u64 val;
5498
5499 cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
5500 if (req->enable) {
5501 val = 0;
5502 /* Enable context prefetching */
5503 if (!is_rvu_otx2(rvu))
5504 val |= BIT_ULL(51);
5505
5506 /* Set OPCODE and EGRP */
5507 val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
5508 val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
5509 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
5510 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
5511
5512 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
5513
5514 /* Set CPT queue for inline IPSec */
5515 val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
5516 val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
5517 req->inst_qsel.cpt_pf_func);
5518
5519 if (!is_rvu_otx2(rvu)) {
5520 cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
5521 BLKADDR_CPT1;
5522 val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
5523 }
5524
5525 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5526 val);
5527
5528 /* Set CPT credit */
5529 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5530 if ((val & 0x3FFFFF) != 0x3FFFFF)
5531 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5532 0x3FFFFF - val);
5533
5534 val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
5535 val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
5536 val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
5537 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
5538 } else {
5539 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
5540 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5541 0x0);
5542 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5543 if ((val & 0x3FFFFF) != 0x3FFFFF)
5544 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5545 0x3FFFFF - val);
5546 }
5547 }
5548
5549 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
5550 struct nix_inline_ipsec_cfg *req,
5551 struct msg_rsp *rsp)
5552 {
5553 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5554 return 0;
5555
5556 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
5557 if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
5558 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
5559
5560 return 0;
5561 }
5562
5563 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
5564 struct msg_req *req,
5565 struct nix_inline_ipsec_cfg *rsp)
5566
5567 {
5568 u64 val;
5569
5570 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5571 return 0;
5572
5573 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
5574 rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
5575 rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
5576 rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
5577 rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
5578
5579 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
5580 rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
5581 rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
5582 rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
5583
5584 return 0;
5585 }
5586
5587 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
5588 struct nix_inline_ipsec_lf_cfg *req,
5589 struct msg_rsp *rsp)
5590 {
5591 int lf, blkaddr, err;
5592 u64 val;
5593
5594 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5595 return 0;
5596
5597 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
5598 if (err)
5599 return err;
5600
5601 if (req->enable) {
5602 /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
5603 val = (u64)req->ipsec_cfg0.tt << 44 |
5604 (u64)req->ipsec_cfg0.tag_const << 20 |
5605 (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
5606 req->ipsec_cfg0.lenm1_max;
5607
5608 if (blkaddr == BLKADDR_NIX1)
5609 val |= BIT_ULL(46);
5610
5611 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
5612
5613 /* Set SA_IDX_W and SA_IDX_MAX */
5614 val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
5615 req->ipsec_cfg1.sa_idx_max;
5616 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
5617
5618 /* Set SA base address */
5619 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5620 req->sa_base_addr);
5621 } else {
5622 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
5623 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
5624 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5625 0x0);
5626 }
5627
5628 return 0;
5629 }
5630
5631 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
5632 {
5633 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
5634
5635 /* overwrite vf mac address with default_mac */
5636 if (from_vf)
5637 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
5638 }
5639
5640 /* NIX ingress policers or bandwidth profiles APIs */
5641 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
5642 {
5643 struct npc_lt_def_cfg defs, *ltdefs;
5644
5645 ltdefs = &defs;
5646 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
5647
5648 /* Extract PCP and DEI fields from the outer VLAN at byte offset
5649 * 2 from the start of LB_PTR (i.e. the TAG).
5650 * VLAN0 is the outer VLAN and VLAN1 is the inner VLAN. Inner VLAN
5651 * fields are considered only when 'Tunnel enable' is set in the profile.
5652 */
5653 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
5654 (2UL << 12) | (ltdefs->ovlan.lid << 8) |
5655 (ltdefs->ovlan.ltype_match << 4) |
5656 ltdefs->ovlan.ltype_mask);
5657 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
5658 (2UL << 12) | (ltdefs->ivlan.lid << 8) |
5659 (ltdefs->ivlan.ltype_match << 4) |
5660 ltdefs->ivlan.ltype_mask);
5661
5662 /* DSCP field in outer and tunneled IPv4 packets */
5663 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
5664 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
5665 (ltdefs->rx_oip4.ltype_match << 4) |
5666 ltdefs->rx_oip4.ltype_mask);
5667 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
5668 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
5669 (ltdefs->rx_iip4.ltype_match << 4) |
5670 ltdefs->rx_iip4.ltype_mask);
5671
5672 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
5673 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
5674 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
5675 (ltdefs->rx_oip6.ltype_match << 4) |
5676 ltdefs->rx_oip6.ltype_mask);
5677 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
5678 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
5679 (ltdefs->rx_iip6.ltype_match << 4) |
5680 ltdefs->rx_iip6.ltype_mask);
5681 }
5682
5683 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
5684 int layer, int prof_idx)
5685 {
5686 struct nix_cn10k_aq_enq_req aq_req;
5687 int rc;
5688
5689 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5690
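/* qidx encodes the target profile: bits [13:0] hold the profile index and
 * bits [15:14] select the layer, e.g. (prof_idx & 0x3FFF) | (layer << 14).
 */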
5691 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
5692 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5693 aq_req.op = NIX_AQ_INSTOP_INIT;
5694
5695 /* Context is all zeros, submit to AQ */
5696 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5697 (struct nix_aq_enq_req *)&aq_req, NULL);
5698 if (rc)
5699 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
5700 layer, prof_idx);
5701 return rc;
5702 }
5703
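/* Discover and initialize the NIX ingress policer (bandwidth profile) layers.
 * Capability is taken from NIX_AF_CONST[61], per-layer profile counts from
 * NIX_AF_PL_CONST; every profile is INIT'ed with AF as owner and the policer
 * timeunit is set to 2us.
 */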
5704 static int nix_setup_ipolicers(struct rvu *rvu,
5705 struct nix_hw *nix_hw, int blkaddr)
5706 {
5707 struct rvu_hwinfo *hw = rvu->hw;
5708 struct nix_ipolicer *ipolicer;
5709 int err, layer, prof_idx;
5710 u64 cfg;
5711
5712 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
5713 if (!(cfg & BIT_ULL(61))) {
5714 hw->cap.ipolicer = false;
5715 return 0;
5716 }
5717
5718 hw->cap.ipolicer = true;
5719 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
5720 sizeof(*ipolicer), GFP_KERNEL);
5721 if (!nix_hw->ipolicer)
5722 return -ENOMEM;
5723
5724 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
5725
5726 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5727 ipolicer = &nix_hw->ipolicer[layer];
5728 switch (layer) {
5729 case BAND_PROF_LEAF_LAYER:
5730 ipolicer->band_prof.max = cfg & 0XFFFF;
5731 break;
5732 case BAND_PROF_MID_LAYER:
5733 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
5734 break;
5735 case BAND_PROF_TOP_LAYER:
5736 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
5737 break;
5738 }
5739
5740 if (!ipolicer->band_prof.max)
5741 continue;
5742
5743 err = rvu_alloc_bitmap(&ipolicer->band_prof);
5744 if (err)
5745 return err;
5746
5747 ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
5748 ipolicer->band_prof.max,
5749 sizeof(u16), GFP_KERNEL);
5750 if (!ipolicer->pfvf_map)
5751 return -ENOMEM;
5752
5753 ipolicer->match_id = devm_kcalloc(rvu->dev,
5754 ipolicer->band_prof.max,
5755 sizeof(u16), GFP_KERNEL);
5756 if (!ipolicer->match_id)
5757 return -ENOMEM;
5758
5759 for (prof_idx = 0;
5760 prof_idx < ipolicer->band_prof.max; prof_idx++) {
5761 /* Set AF as current owner for INIT ops to succeed */
5762 ipolicer->pfvf_map[prof_idx] = 0x00;
5763
5764 /* There is no enable bit in the profile context, hence
5765 * no context disable either. INIT all profiles here so
5766 * that a PF/VF later only needs a WRITE op to set up
5767 * policer rates and config.
5768 */
5769 err = nix_init_policer_context(rvu, nix_hw,
5770 layer, prof_idx);
5771 if (err)
5772 return err;
5773 }
5774
5775 /* Allocate memory for maintaining ref_counts for MID level
5776 * profiles, this will be needed for leaf layer profiles'
5777 * aggregation.
5778 */
5779 if (layer != BAND_PROF_MID_LAYER)
5780 continue;
5781
5782 ipolicer->ref_count = devm_kcalloc(rvu->dev,
5783 ipolicer->band_prof.max,
5784 sizeof(u16), GFP_KERNEL);
5785 if (!ipolicer->ref_count)
5786 return -ENOMEM;
5787 }
5788
5789 /* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
5790 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
5791
5792 nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
5793
5794 return 0;
5795 }
5796
5797 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
5798 {
5799 struct nix_ipolicer *ipolicer;
5800 int layer;
5801
5802 if (!rvu->hw->cap.ipolicer)
5803 return;
5804
5805 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5806 ipolicer = &nix_hw->ipolicer[layer];
5807
5808 if (!ipolicer->band_prof.max)
5809 continue;
5810
5811 kfree(ipolicer->band_prof.bmap);
5812 }
5813 }
5814
5815 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
5816 struct nix_hw *nix_hw, u16 pcifunc)
5817 {
5818 struct nix_ipolicer *ipolicer;
5819 int layer, hi_layer, prof_idx;
5820
5821 /* Bits [15:14] in profile index represent layer */
5822 layer = (req->qidx >> 14) & 0x03;
5823 prof_idx = req->qidx & 0x3FFF;
5824
5825 ipolicer = &nix_hw->ipolicer[layer];
5826 if (prof_idx >= ipolicer->band_prof.max)
5827 return -EINVAL;
5828
5829 /* Check if the profile is allocated to the requesting PCIFUNC or not
5830 * with the exception of AF. AF is allowed to read and update contexts.
5831 */
5832 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
5833 return -EINVAL;
5834
5835 /* If this profile is linked to higher layer profile then check
5836 * if that profile is also allocated to the requesting PCIFUNC
5837 * or not.
5838 */
5839 if (!req->prof.hl_en)
5840 return 0;
5841
5842 /* Leaf layer profile can link only to mid layer and
5843 * mid layer to top layer.
5844 */
5845 if (layer == BAND_PROF_LEAF_LAYER)
5846 hi_layer = BAND_PROF_MID_LAYER;
5847 else if (layer == BAND_PROF_MID_LAYER)
5848 hi_layer = BAND_PROF_TOP_LAYER;
5849 else
5850 return -EINVAL;
5851
5852 ipolicer = &nix_hw->ipolicer[hi_layer];
5853 prof_idx = req->prof.band_prof_id;
5854 if (prof_idx >= ipolicer->band_prof.max ||
5855 ipolicer->pfvf_map[prof_idx] != pcifunc)
5856 return -EINVAL;
5857
5858 return 0;
5859 }
5860
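/* Allocate bandwidth profiles for the requesting PCIFUNC. At most
 * MAX_BANDPROF_PER_PFFUNC profiles are handed out per layer per request;
 * allocated indices are returned in rsp->prof_idx[] and ownership is
 * recorded in the per-layer pfvf_map.
 */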
5861 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
5862 struct nix_bandprof_alloc_req *req,
5863 struct nix_bandprof_alloc_rsp *rsp)
5864 {
5865 int blkaddr, layer, prof, idx, err;
5866 u16 pcifunc = req->hdr.pcifunc;
5867 struct nix_ipolicer *ipolicer;
5868 struct nix_hw *nix_hw;
5869
5870 if (!rvu->hw->cap.ipolicer)
5871 return NIX_AF_ERR_IPOLICER_NOTSUPP;
5872
5873 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5874 if (err)
5875 return err;
5876
5877 mutex_lock(&rvu->rsrc_lock);
5878 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5879 if (layer == BAND_PROF_INVAL_LAYER)
5880 continue;
5881 if (!req->prof_count[layer])
5882 continue;
5883
5884 ipolicer = &nix_hw->ipolicer[layer];
5885 for (idx = 0; idx < req->prof_count[layer]; idx++) {
5886 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
5887 if (idx == MAX_BANDPROF_PER_PFFUNC)
5888 break;
5889
5890 prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5891 if (prof < 0)
5892 break;
5893 rsp->prof_count[layer]++;
5894 rsp->prof_idx[layer][idx] = prof;
5895 ipolicer->pfvf_map[prof] = pcifunc;
5896 }
5897 }
5898 mutex_unlock(&rvu->rsrc_lock);
5899 return 0;
5900 }
5901
5902 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
5903 {
5904 int blkaddr, layer, prof_idx, err;
5905 struct nix_ipolicer *ipolicer;
5906 struct nix_hw *nix_hw;
5907
5908 if (!rvu->hw->cap.ipolicer)
5909 return NIX_AF_ERR_IPOLICER_NOTSUPP;
5910
5911 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5912 if (err)
5913 return err;
5914
5915 mutex_lock(&rvu->rsrc_lock);
5916 /* Free all the profiles allocated to the PCIFUNC */
5917 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5918 if (layer == BAND_PROF_INVAL_LAYER)
5919 continue;
5920 ipolicer = &nix_hw->ipolicer[layer];
5921
5922 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
5923 if (ipolicer->pfvf_map[prof_idx] != pcifunc)
5924 continue;
5925
5926 /* Clear ratelimit aggregation, if any */
5927 if (layer == BAND_PROF_LEAF_LAYER &&
5928 ipolicer->match_id[prof_idx])
5929 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5930
5931 ipolicer->pfvf_map[prof_idx] = 0x00;
5932 ipolicer->match_id[prof_idx] = 0;
5933 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5934 }
5935 }
5936 mutex_unlock(&rvu->rsrc_lock);
5937 return 0;
5938 }
5939
5940 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
5941 struct nix_bandprof_free_req *req,
5942 struct msg_rsp *rsp)
5943 {
5944 int blkaddr, layer, prof_idx, idx, err;
5945 u16 pcifunc = req->hdr.pcifunc;
5946 struct nix_ipolicer *ipolicer;
5947 struct nix_hw *nix_hw;
5948
5949 if (req->free_all)
5950 return nix_free_all_bandprof(rvu, pcifunc);
5951
5952 if (!rvu->hw->cap.ipolicer)
5953 return NIX_AF_ERR_IPOLICER_NOTSUPP;
5954
5955 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5956 if (err)
5957 return err;
5958
5959 mutex_lock(&rvu->rsrc_lock);
5960 /* Free the requested profile indices */
5961 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5962 if (layer == BAND_PROF_INVAL_LAYER)
5963 continue;
5964 if (!req->prof_count[layer])
5965 continue;
5966
5967 ipolicer = &nix_hw->ipolicer[layer];
5968 for (idx = 0; idx < req->prof_count[layer]; idx++) {
5969 if (idx == MAX_BANDPROF_PER_PFFUNC)
5970 break;
5971 prof_idx = req->prof_idx[layer][idx];
5972 if (prof_idx >= ipolicer->band_prof.max ||
5973 ipolicer->pfvf_map[prof_idx] != pcifunc)
5974 continue;
5975
5976 /* Clear ratelimit aggregation, if any */
5977 if (layer == BAND_PROF_LEAF_LAYER &&
5978 ipolicer->match_id[prof_idx])
5979 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5980
5981 ipolicer->pfvf_map[prof_idx] = 0x00;
5982 ipolicer->match_id[prof_idx] = 0;
5983 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5984 }
5985 }
5986 mutex_unlock(&rvu->rsrc_lock);
5987 return 0;
5988 }
5989
5990 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
5991 struct nix_cn10k_aq_enq_req *aq_req,
5992 struct nix_cn10k_aq_enq_rsp *aq_rsp,
5993 u16 pcifunc, u8 ctype, u32 qidx)
5994 {
5995 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5996 aq_req->hdr.pcifunc = pcifunc;
5997 aq_req->ctype = ctype;
5998 aq_req->op = NIX_AQ_INSTOP_READ;
5999 aq_req->qidx = qidx;
6000
6001 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
6002 (struct nix_aq_enq_req *)aq_req,
6003 (struct nix_aq_enq_rsp *)aq_rsp);
6004 }
6005
6006 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
6007 struct nix_hw *nix_hw,
6008 struct nix_cn10k_aq_enq_req *aq_req,
6009 struct nix_cn10k_aq_enq_rsp *aq_rsp,
6010 u32 leaf_prof, u16 mid_prof)
6011 {
6012 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
6013 aq_req->hdr.pcifunc = 0x00;
6014 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
6015 aq_req->op = NIX_AQ_INSTOP_WRITE;
6016 aq_req->qidx = leaf_prof;
6017
6018 aq_req->prof.band_prof_id = mid_prof;
6019 aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
6020 aq_req->prof.hl_en = 1;
6021 aq_req->prof_mask.hl_en = 1;
6022
6023 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
6024 (struct nix_aq_enq_req *)aq_req,
6025 (struct nix_aq_enq_rsp *)aq_rsp);
6026 }
6027
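/* Aggregate rate limiting: if another leaf profile already carries the same
 * match_id, link both leaf profiles to a common mid layer profile so that
 * flows steered to different RQs are policed as one aggregate.
 */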
6028 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
6029 u16 rq_idx, u16 match_id)
6030 {
6031 int leaf_prof, mid_prof, leaf_match;
6032 struct nix_cn10k_aq_enq_req aq_req;
6033 struct nix_cn10k_aq_enq_rsp aq_rsp;
6034 struct nix_ipolicer *ipolicer;
6035 struct nix_hw *nix_hw;
6036 int blkaddr, idx, rc;
6037
6038 if (!rvu->hw->cap.ipolicer)
6039 return 0;
6040
6041 rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
6042 if (rc)
6043 return rc;
6044
6045 /* Fetch the RQ's context to see if policing is enabled */
6046 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
6047 NIX_AQ_CTYPE_RQ, rq_idx);
6048 if (rc) {
6049 dev_err(rvu->dev,
6050 "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
6051 __func__, rq_idx, pcifunc);
6052 return rc;
6053 }
6054
6055 if (!aq_rsp.rq.policer_ena)
6056 return 0;
6057
6058 /* Get the bandwidth profile ID mapped to this RQ */
6059 leaf_prof = aq_rsp.rq.band_prof_id;
6060
6061 ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
6062 ipolicer->match_id[leaf_prof] = match_id;
6063
6064 /* Check if any other leaf profile is marked with same match_id */
6065 for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
6066 if (idx == leaf_prof)
6067 continue;
6068 if (ipolicer->match_id[idx] != match_id)
6069 continue;
6070
6071 leaf_match = idx;
6072 break;
6073 }
6074
6075 if (idx == ipolicer->band_prof.max)
6076 return 0;
6077
6078 /* Fetch the matching profile's context to check if it's already
6079 * mapped to a mid level profile.
6080 */
6081 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
6082 NIX_AQ_CTYPE_BANDPROF, leaf_match);
6083 if (rc) {
6084 dev_err(rvu->dev,
6085 "%s: Failed to fetch context of leaf profile %d\n",
6086 __func__, leaf_match);
6087 return rc;
6088 }
6089
6090 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
6091 if (aq_rsp.prof.hl_en) {
6092 /* Get the mid layer profile index and map leaf_prof to it
6093 * as well, so that flows steered to different RQs but
6094 * marked with the same match_id are rate limited in an
6095 * aggregate fashion.
6096 */
6097 mid_prof = aq_rsp.prof.band_prof_id;
6098 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
6099 &aq_req, &aq_rsp,
6100 leaf_prof, mid_prof);
6101 if (rc) {
6102 dev_err(rvu->dev,
6103 "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
6104 __func__, leaf_prof, mid_prof);
6105 goto exit;
6106 }
6107
6108 mutex_lock(&rvu->rsrc_lock);
6109 ipolicer->ref_count[mid_prof]++;
6110 mutex_unlock(&rvu->rsrc_lock);
6111 goto exit;
6112 }
6113
6114 /* Allocate a mid layer profile and
6115 * map both 'leaf_prof' and 'leaf_match' profiles to it.
6116 */
6117 mutex_lock(&rvu->rsrc_lock);
6118 mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
6119 if (mid_prof < 0) {
6120 dev_err(rvu->dev,
6121 "%s: Unable to allocate mid layer profile\n", __func__);
6122 mutex_unlock(&rvu->rsrc_lock);
6123 goto exit;
6124 }
6125 mutex_unlock(&rvu->rsrc_lock);
6126 ipolicer->pfvf_map[mid_prof] = 0x00;
6127 ipolicer->ref_count[mid_prof] = 0;
6128
6129 /* Initialize mid layer profile same as 'leaf_prof' */
6130 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
6131 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
6132 if (rc) {
6133 dev_err(rvu->dev,
6134 "%s: Failed to fetch context of leaf profile %d\n",
6135 __func__, leaf_prof);
6136 goto exit;
6137 }
6138
6139 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
6140 aq_req.hdr.pcifunc = 0x00;
6141 aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
6142 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
6143 aq_req.op = NIX_AQ_INSTOP_WRITE;
6144 memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
6145 memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
6146 /* Clear higher layer enable bit in the mid profile, just in case */
6147 aq_req.prof.hl_en = 0;
6148 aq_req.prof_mask.hl_en = 1;
6149
6150 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
6151 (struct nix_aq_enq_req *)&aq_req, NULL);
6152 if (rc) {
6153 dev_err(rvu->dev,
6154 "%s: Failed to INIT context of mid layer profile %d\n",
6155 __func__, mid_prof);
6156 goto exit;
6157 }
6158
6159 /* Map both leaf profiles to this mid layer profile */
6160 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
6161 &aq_req, &aq_rsp,
6162 leaf_prof, mid_prof);
6163 if (rc) {
6164 dev_err(rvu->dev,
6165 "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
6166 __func__, leaf_prof, mid_prof);
6167 goto exit;
6168 }
6169
6170 mutex_lock(&rvu->rsrc_lock);
6171 ipolicer->ref_count[mid_prof]++;
6172 mutex_unlock(&rvu->rsrc_lock);
6173
6174 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
6175 &aq_req, &aq_rsp,
6176 leaf_match, mid_prof);
6177 if (rc) {
6178 dev_err(rvu->dev,
6179 "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
6180 __func__, leaf_match, mid_prof);
6181 ipolicer->ref_count[mid_prof]--;
6182 goto exit;
6183 }
6184
6185 mutex_lock(&rvu->rsrc_lock);
6186 ipolicer->ref_count[mid_prof]++;
6187 mutex_unlock(&rvu->rsrc_lock);
6188
6189 exit:
6190 return rc;
6191 }
6192
6193 /* Called with mutex rsrc_lock */
6194 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
6195 u32 leaf_prof)
6196 {
6197 struct nix_cn10k_aq_enq_req aq_req;
6198 struct nix_cn10k_aq_enq_rsp aq_rsp;
6199 struct nix_ipolicer *ipolicer;
6200 u16 mid_prof;
6201 int rc;
6202
6203 mutex_unlock(&rvu->rsrc_lock);
6204
6205 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
6206 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
6207
6208 mutex_lock(&rvu->rsrc_lock);
6209 if (rc) {
6210 dev_err(rvu->dev,
6211 "%s: Failed to fetch context of leaf profile %d\n",
6212 __func__, leaf_prof);
6213 return;
6214 }
6215
6216 if (!aq_rsp.prof.hl_en)
6217 return;
6218
6219 mid_prof = aq_rsp.prof.band_prof_id;
6220 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
6221 ipolicer->ref_count[mid_prof]--;
6222 /* If ref_count is zero, free mid layer profile */
6223 if (!ipolicer->ref_count[mid_prof]) {
6224 ipolicer->pfvf_map[mid_prof] = 0x00;
6225 rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
6226 }
6227 }
6228
6229 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
6230 struct nix_bandprof_get_hwinfo_rsp *rsp)
6231 {
6232 struct nix_ipolicer *ipolicer;
6233 int blkaddr, layer, err;
6234 struct nix_hw *nix_hw;
6235 u64 tu;
6236
6237 if (!rvu->hw->cap.ipolicer)
6238 return NIX_AF_ERR_IPOLICER_NOTSUPP;
6239
6240 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6241 if (err)
6242 return err;
6243
6244 /* Return number of bandwidth profiles free at each layer */
6245 mutex_lock(&rvu->rsrc_lock);
6246 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
6247 if (layer == BAND_PROF_INVAL_LAYER)
6248 continue;
6249
6250 ipolicer = &nix_hw->ipolicer[layer];
6251 rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
6252 }
6253 mutex_unlock(&rvu->rsrc_lock);
6254
6255 /* Set the policer timeunit in nanosec */
6256 tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
6257 rsp->policer_timeunit = (tu + 1) * 100;
6258
6259 return 0;
6260 }
6261
6262 static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp,
6263 u32 mcast_grp_idx)
6264 {
6265 struct nix_mcast_grp_elem *iter;
6266 bool is_found = false;
6267
6268 list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
6269 if (iter->mcast_grp_idx == mcast_grp_idx) {
6270 is_found = true;
6271 break;
6272 }
6273 }
6274
6275 if (is_found)
6276 return iter;
6277
6278 return NULL;
6279 }
6280
6281 int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
6282 {
6283 struct nix_mcast_grp_elem *elem;
6284 struct nix_mcast_grp *mcast_grp;
6285 struct nix_hw *nix_hw;
6286 int blkaddr, ret;
6287
6288 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6289 nix_hw = get_nix_hw(rvu->hw, blkaddr);
6290 if (!nix_hw)
6291 return NIX_AF_ERR_INVALID_NIXBLK;
6292
6293 mcast_grp = &nix_hw->mcast_grp;
6294 mutex_lock(&mcast_grp->mcast_grp_lock);
6295 elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
6296 if (!elem)
6297 ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6298 else
6299 ret = elem->mce_start_index;
6300
6301 mutex_unlock(&mcast_grp->mcast_grp_lock);
6302 return ret;
6303 }
6304
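/* FLR cleanup for multicast/mirror groups: groups created by the faulting
 * PCIFUNC are destroyed entirely, while in other groups only the MCE entry
 * belonging to that PCIFUNC is removed. Both paths go through the mbox
 * handlers with is_af set so the group lock is not taken again.
 */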
6305 void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
6306 {
6307 struct nix_mcast_grp_destroy_req dreq = { 0 };
6308 struct nix_mcast_grp_update_req ureq = { 0 };
6309 struct nix_mcast_grp_update_rsp ursp = { 0 };
6310 struct nix_mcast_grp_elem *elem, *tmp;
6311 struct nix_mcast_grp *mcast_grp;
6312 struct nix_hw *nix_hw;
6313 int blkaddr;
6314
6315 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6316 nix_hw = get_nix_hw(rvu->hw, blkaddr);
6317 if (!nix_hw)
6318 return;
6319
6320 mcast_grp = &nix_hw->mcast_grp;
6321
6322 mutex_lock(&mcast_grp->mcast_grp_lock);
6323 list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) {
6324 struct nix_mce_list *mce_list;
6325 struct hlist_node *tmp;
6326 struct mce *mce;
6327
6328 /* If the pcifunc which created the multicast/mirror
6329 * group received an FLR, then delete the entire group.
6330 */
6331 if (elem->pcifunc == pcifunc) {
6332 /* Delete group */
6333 dreq.hdr.pcifunc = elem->pcifunc;
6334 dreq.mcast_grp_idx = elem->mcast_grp_idx;
6335 dreq.is_af = 1;
6336 rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
6337 continue;
6338 }
6339
6340 /* Iterate the group elements and delete the element which
6341 * received the FLR.
6342 */
6343 mce_list = &elem->mcast_mce_list;
6344 hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
6345 if (mce->pcifunc == pcifunc) {
6346 ureq.hdr.pcifunc = pcifunc;
6347 ureq.num_mce_entry = 1;
6348 ureq.mcast_grp_idx = elem->mcast_grp_idx;
6349 ureq.op = NIX_MCAST_OP_DEL_ENTRY;
6350 ureq.pcifunc[0] = pcifunc;
6351 ureq.is_af = 1;
6352 rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp);
6353 break;
6354 }
6355 }
6356 }
6357 mutex_unlock(&mcast_grp->mcast_grp_lock);
6358 }
6359
6360 int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
6361 u32 mcast_grp_idx, u16 mcam_index)
6362 {
6363 struct nix_mcast_grp_elem *elem;
6364 struct nix_mcast_grp *mcast_grp;
6365 struct nix_hw *nix_hw;
6366 int blkaddr, ret = 0;
6367
6368 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6369 nix_hw = get_nix_hw(rvu->hw, blkaddr);
6370 if (!nix_hw)
6371 return NIX_AF_ERR_INVALID_NIXBLK;
6372
6373 mcast_grp = &nix_hw->mcast_grp;
6374 mutex_lock(&mcast_grp->mcast_grp_lock);
6375 elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
6376 if (!elem)
6377 ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6378 else
6379 elem->mcam_index = mcam_index;
6380
6381 mutex_unlock(&mcast_grp->mcast_grp_lock);
6382 return ret;
6383 }
6384
6385 int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
6386 struct nix_mcast_grp_create_req *req,
6387 struct nix_mcast_grp_create_rsp *rsp)
6388 {
6389 struct nix_mcast_grp_elem *elem;
6390 struct nix_mcast_grp *mcast_grp;
6391 struct nix_hw *nix_hw;
6392 int blkaddr, err;
6393
6394 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6395 if (err)
6396 return err;
6397
6398 mcast_grp = &nix_hw->mcast_grp;
6399 elem = kzalloc(sizeof(*elem), GFP_KERNEL);
6400 if (!elem)
6401 return -ENOMEM;
6402
6403 INIT_HLIST_HEAD(&elem->mcast_mce_list.head);
6404 elem->mcam_index = -1;
6405 elem->mce_start_index = -1;
6406 elem->pcifunc = req->hdr.pcifunc;
6407 elem->dir = req->dir;
6408 elem->mcast_grp_idx = mcast_grp->next_grp_index++;
6409
6410 mutex_lock(&mcast_grp->mcast_grp_lock);
6411 list_add_tail(&elem->list, &mcast_grp->mcast_grp_head);
6412 mcast_grp->count++;
6413 mutex_unlock(&mcast_grp->mcast_grp_lock);
6414
6415 rsp->mcast_grp_idx = elem->mcast_grp_idx;
6416 return 0;
6417 }
6418
6419 int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
6420 struct nix_mcast_grp_destroy_req *req,
6421 struct msg_rsp *rsp)
6422 {
6423 struct npc_delete_flow_req uninstall_req = { 0 };
6424 struct npc_delete_flow_rsp uninstall_rsp = { 0 };
6425 struct nix_mcast_grp_elem *elem;
6426 struct nix_mcast_grp *mcast_grp;
6427 int blkaddr, err, ret = 0;
6428 struct nix_mcast *mcast;
6429 struct nix_hw *nix_hw;
6430
6431 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6432 if (err)
6433 return err;
6434
6435 mcast_grp = &nix_hw->mcast_grp;
6436
6437 /* If AF itself is requesting the deletion,
6438 * it already holds the lock.
6439 */
6440 if (!req->is_af)
6441 mutex_lock(&mcast_grp->mcast_grp_lock);
6442
6443 elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
6444 if (!elem) {
6445 ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6446 goto unlock_grp;
6447 }
6448
6449 /* If no mce entries are associated with the group
6450 * then just remove it from the global list.
6451 */
6452 if (!elem->mcast_mce_list.count)
6453 goto delete_grp;
6454
6455 /* Delete the associated mcam entry and
6456 * remove all mce entries from the group
6457 */
6458 mcast = &nix_hw->mcast;
6459 mutex_lock(&mcast->mce_lock);
6460 if (elem->mcam_index != -1) {
6461 uninstall_req.hdr.pcifunc = req->hdr.pcifunc;
6462 uninstall_req.entry = elem->mcam_index;
6463 rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
6464 }
6465
6466 nix_free_mce_list(mcast, elem->mcast_mce_list.count,
6467 elem->mce_start_index, elem->dir);
6468 nix_delete_mcast_mce_list(&elem->mcast_mce_list);
6469 mutex_unlock(&mcast->mce_lock);
6470
6471 delete_grp:
6472 list_del(&elem->list);
6473 kfree(elem);
6474 mcast_grp->count--;
6475
6476 unlock_grp:
6477 if (!req->is_af)
6478 mutex_unlock(&mcast_grp->mcast_grp_lock);
6479
6480 return ret;
6481 }
6482
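/* Add or delete MCE entries in a multicast/mirror group. The group's MCAM
 * entry (if any) is disabled while the MCE list is reallocated to its new
 * size; on failure the previous list size is restored and the MCAM entry is
 * re-enabled before returning.
 */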
6483 int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
6484 struct nix_mcast_grp_update_req *req,
6485 struct nix_mcast_grp_update_rsp *rsp)
6486 {
6487 struct nix_mcast_grp_destroy_req dreq = { 0 };
6488 struct npc_mcam *mcam = &rvu->hw->mcam;
6489 struct nix_mcast_grp_elem *elem;
6490 struct nix_mcast_grp *mcast_grp;
6491 int blkaddr, err, npc_blkaddr;
6492 u16 prev_count, new_count;
6493 struct nix_mcast *mcast;
6494 struct nix_hw *nix_hw;
6495 int i, ret;
6496
6497 if (!req->num_mce_entry)
6498 return 0;
6499
6500 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6501 if (err)
6502 return err;
6503
6504 mcast_grp = &nix_hw->mcast_grp;
6505
6506 /* If AF itself is requesting the update,
6507 * it already holds the lock.
6508 */
6509 if (!req->is_af)
6510 mutex_lock(&mcast_grp->mcast_grp_lock);
6511
6512 elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
6513 if (!elem) {
6514 ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6515 goto unlock_grp;
6516 }
6517
6518 /* If any pcifunc matches the group's pcifunc, then we can
6519 * delete the entire group.
6520 */
6521 if (req->op == NIX_MCAST_OP_DEL_ENTRY) {
6522 for (i = 0; i < req->num_mce_entry; i++) {
6523 if (elem->pcifunc == req->pcifunc[i]) {
6524 /* Delete group */
6525 dreq.hdr.pcifunc = elem->pcifunc;
6526 dreq.mcast_grp_idx = elem->mcast_grp_idx;
6527 dreq.is_af = 1;
6528 rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
6529 ret = 0;
6530 goto unlock_grp;
6531 }
6532 }
6533 }
6534
6535 mcast = &nix_hw->mcast;
6536 mutex_lock(&mcast->mce_lock);
6537 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
6538 if (elem->mcam_index != -1)
6539 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);
6540
6541 prev_count = elem->mcast_mce_list.count;
6542 if (req->op == NIX_MCAST_OP_ADD_ENTRY) {
6543 new_count = prev_count + req->num_mce_entry;
6544 if (prev_count)
6545 nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
6546
6547 elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
6548
6549 /* It is possible not to get contiguous memory */
6550 if (elem->mce_start_index < 0) {
6551 if (elem->mcam_index != -1) {
6552 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6553 elem->mcam_index, true);
6554 ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
6555 goto unlock_mce;
6556 }
6557 }
6558
6559 ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req);
6560 if (ret) {
6561 nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
6562 if (prev_count)
6563 elem->mce_start_index = nix_alloc_mce_list(mcast,
6564 prev_count,
6565 elem->dir);
6566
6567 if (elem->mcam_index != -1)
6568 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6569 elem->mcam_index, true);
6570
6571 goto unlock_mce;
6572 }
6573 } else {
6574 if (!prev_count || prev_count < req->num_mce_entry) {
6575 if (elem->mcam_index != -1)
6576 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6577 elem->mcam_index, true);
6578 ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
6579 goto unlock_mce;
6580 }
6581
6582 nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
6583 new_count = prev_count - req->num_mce_entry;
6584 elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
6585 ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
6586 if (ret) {
6587 nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
6588 elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
6589 if (elem->mcam_index != -1)
6590 npc_enable_mcam_entry(rvu, mcam,
6591 npc_blkaddr,
6592 elem->mcam_index,
6593 true);
6594
6595 goto unlock_mce;
6596 }
6597 }
6598
6599 if (elem->mcam_index == -1) {
6600 rsp->mce_start_index = elem->mce_start_index;
6601 ret = 0;
6602 goto unlock_mce;
6603 }
6604
6605 nix_mcast_update_action(rvu, elem);
6606 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
6607 rsp->mce_start_index = elem->mce_start_index;
6608 ret = 0;
6609
6610 unlock_mce:
6611 mutex_unlock(&mcast->mce_lock);
6612
6613 unlock_grp:
6614 if (!req->is_af)
6615 mutex_unlock(&mcast_grp->mcast_grp_lock);
6616
6617 return ret;
6618 }
6619