1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3 *
4 * Copyright (C) 2018 Marvell.
5 *
6 */
7
8 #include <linux/module.h>
9 #include <linux/pci.h>
10
11 #include "rvu_struct.h"
12 #include "rvu_reg.h"
13 #include "rvu.h"
14 #include "npc.h"
15 #include "mcs.h"
16 #include "cgx.h"
17 #include "lmac_common.h"
18 #include "rvu_npc_hash.h"
19
20 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
21 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
22 int type, int chan_id);
23 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
24 int type, bool add);
25 static int nix_setup_ipolicers(struct rvu *rvu,
26 struct nix_hw *nix_hw, int blkaddr);
27 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
28 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
29 struct nix_hw *nix_hw, u16 pcifunc);
30 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
31 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
32 u32 leaf_prof);
33 static const char *nix_get_ctx_name(int ctype);
34
35 enum mc_tbl_sz {
36 MC_TBL_SZ_256,
37 MC_TBL_SZ_512,
38 MC_TBL_SZ_1K,
39 MC_TBL_SZ_2K,
40 MC_TBL_SZ_4K,
41 MC_TBL_SZ_8K,
42 MC_TBL_SZ_16K,
43 MC_TBL_SZ_32K,
44 MC_TBL_SZ_64K,
45 };
46
47 enum mc_buf_cnt {
48 MC_BUF_CNT_8,
49 MC_BUF_CNT_16,
50 MC_BUF_CNT_32,
51 MC_BUF_CNT_64,
52 MC_BUF_CNT_128,
53 MC_BUF_CNT_256,
54 MC_BUF_CNT_512,
55 MC_BUF_CNT_1024,
56 MC_BUF_CNT_2048,
57 };
58
59 enum nix_makr_fmt_indexes {
60 NIX_MARK_CFG_IP_DSCP_RED,
61 NIX_MARK_CFG_IP_DSCP_YELLOW,
62 NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
63 NIX_MARK_CFG_IP_ECN_RED,
64 NIX_MARK_CFG_IP_ECN_YELLOW,
65 NIX_MARK_CFG_IP_ECN_YELLOW_RED,
66 NIX_MARK_CFG_VLAN_DEI_RED,
67 NIX_MARK_CFG_VLAN_DEI_YELLOW,
68 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
69 NIX_MARK_CFG_MAX,
70 };
71
72 /* For now considering MC resources needed for broadcast
73 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
74 */
75 #define MC_TBL_SIZE MC_TBL_SZ_2K
76 #define MC_BUF_CNT MC_BUF_CNT_1024
77
78 #define MC_TX_MAX 2048
79
80 struct mce {
81 struct hlist_node node;
82 u32 rq_rss_index;
83 u16 pcifunc;
84 u16 channel;
85 u8 dest_type;
86 u8 is_active;
87 u8 reserved[2];
88 };
89
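/* Walk rvu->nix_blkaddr[] and return the NIX block address that follows
 * @blkaddr; pass blkaddr == 0 to get the first NIX block. Returns 0 once
 * there are no more NIX blocks.
 */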
90 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
91 {
92 int i = 0;
93
94 /* If blkaddr is 0, return the first NIX block address */
95 if (blkaddr == 0)
96 return rvu->nix_blkaddr[blkaddr];
97
98 while (i + 1 < MAX_NIX_BLKS) {
99 if (rvu->nix_blkaddr[i] == blkaddr)
100 return rvu->nix_blkaddr[i + 1];
101 i++;
102 }
103
104 return 0;
105 }
106
107 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
108 {
109 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
110 int blkaddr;
111
112 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
113 if (!pfvf->nixlf || blkaddr < 0)
114 return false;
115 return true;
116 }
117
118 int rvu_get_nixlf_count(struct rvu *rvu)
119 {
120 int blkaddr = 0, max = 0;
121 struct rvu_block *block;
122
123 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
124 while (blkaddr) {
125 block = &rvu->hw->block[blkaddr];
126 max += block->lf.max;
127 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
128 }
129 return max;
130 }
131
132 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
133 {
134 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
135 struct rvu_hwinfo *hw = rvu->hw;
136 int blkaddr;
137
138 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
139 if (!pfvf->nixlf || blkaddr < 0)
140 return NIX_AF_ERR_AF_LF_INVALID;
141
142 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
143 if (*nixlf < 0)
144 return NIX_AF_ERR_AF_LF_INVALID;
145
146 if (nix_blkaddr)
147 *nix_blkaddr = blkaddr;
148
149 return 0;
150 }
151
152 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
153 struct nix_hw **nix_hw, int *blkaddr)
154 {
155 struct rvu_pfvf *pfvf;
156
157 pfvf = rvu_get_pfvf(rvu, pcifunc);
158 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
159 if (!pfvf->nixlf || *blkaddr < 0)
160 return NIX_AF_ERR_AF_LF_INVALID;
161
162 *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
163 if (!*nix_hw)
164 return NIX_AF_ERR_INVALID_NIXBLK;
165 return 0;
166 }
167
168 static void nix_mce_list_init(struct nix_mce_list *list, int max)
169 {
170 INIT_HLIST_HEAD(&list->head);
171 list->count = 0;
172 list->max = max;
173 }
174
175 static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir)
176 {
177 struct rsrc_bmap *mce_counter;
178 int idx;
179
180 if (!mcast)
181 return -EINVAL;
182
183 mce_counter = &mcast->mce_counter[dir];
184 if (!rvu_rsrc_check_contig(mce_counter, count))
185 return -ENOSPC;
186
187 idx = rvu_alloc_rsrc_contig(mce_counter, count);
188 return idx;
189 }
190
191 static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir)
192 {
193 struct rsrc_bmap *mce_counter;
194
195 if (!mcast)
196 return;
197
198 mce_counter = &mcast->mce_counter[dir];
199 rvu_free_rsrc_contig(mce_counter, count, start);
200 }
201
202 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
203 {
204 int nix_blkaddr = 0, i = 0;
205 struct rvu *rvu = hw->rvu;
206
207 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
208 while (nix_blkaddr) {
209 if (blkaddr == nix_blkaddr && hw->nix)
210 return &hw->nix[i];
211 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
212 i++;
213 }
214 return NULL;
215 }
216
217 int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
218 {
219 if (hw->cap.nix_multiple_dwrr_mtu)
220 return NIX_AF_DWRR_MTUX(smq_link_type);
221
222 if (smq_link_type == SMQ_LINK_TYPE_SDP)
223 return NIX_AF_DWRR_SDP_MTU;
224
225 /* Here it's the same reg for RPM and LBK */
226 return NIX_AF_DWRR_RPM_MTU;
227 }
228
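/* Decode the 5-bit DWRR MTU encoding used by the NIX_AF_DWRR_*_MTU registers.
 * For example, an encoded value of 10 yields 2^10 = 1024 bytes, while the
 * reserved encodings 4 and 5 yield 9728 and 10240 bytes respectively.
 */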
229 u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
230 {
231 dwrr_mtu &= 0x1FULL;
232
233 /* MTU used for DWRR calculation is a power of 2 up until 64K bytes.
234 * Value of 4 is reserved for MTU value of 9728 bytes.
235 * Value of 5 is reserved for MTU value of 10240 bytes.
236 */
237 switch (dwrr_mtu) {
238 case 4:
239 return 9728;
240 case 5:
241 return 10240;
242 default:
243 return BIT_ULL(dwrr_mtu);
244 }
245
246 return 0;
247 }
248
249 u32 convert_bytes_to_dwrr_mtu(u32 bytes)
250 {
251 /* MTU used for DWRR calculation is a power of 2 up until 64K bytes.
252 * Value of 4 is reserved for MTU value of 9728 bytes.
253 * Value of 5 is reserved for MTU value of 10240 bytes.
254 */
255 if (bytes > BIT_ULL(16))
256 return 0;
257
258 switch (bytes) {
259 case 9728:
260 return 4;
261 case 10240:
262 return 5;
263 default:
264 return ilog2(bytes);
265 }
266
267 return 0;
268 }
269
270 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
271 {
272 int err;
273
274 /* Sync all in flight RX packets to LLC/DRAM */
275 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
276 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
277 if (err)
278 dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
279
280 /* SW_SYNC ensures all existing transactions are finished and pkts
281 * are written to LLC/DRAM, queues should be torn down after
282 * a successful SW_SYNC. Due to a HW erratum, in some rare scenarios
283 * an existing transaction might end after SW_SYNC operation. To
284 * ensure operation is fully done, do the SW_SYNC twice.
285 */
286 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
287 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
288 if (err)
289 dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
290 }
291
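/* Check that a transmit scheduler queue index is within the level's range
 * and is owned by the requesting PF_FUNC (or by the same PF for levels at
 * or above the traffic aggregation level, where TLs are shared).
 */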
292 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
293 int lvl, u16 pcifunc, u16 schq)
294 {
295 struct rvu_hwinfo *hw = rvu->hw;
296 struct nix_txsch *txsch;
297 struct nix_hw *nix_hw;
298 u16 map_func;
299
300 nix_hw = get_nix_hw(rvu->hw, blkaddr);
301 if (!nix_hw)
302 return false;
303
304 txsch = &nix_hw->txsch[lvl];
305 /* Check out of bounds */
306 if (schq >= txsch->schq.max)
307 return false;
308
309 mutex_lock(&rvu->rsrc_lock);
310 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
311 mutex_unlock(&rvu->rsrc_lock);
312
313 /* TLs aggregating traffic are shared across PF and VFs */
314 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
315 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
316 return false;
317 else
318 return true;
319 }
320
321 if (map_func != pcifunc)
322 return false;
323
324 return true;
325 }
326
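/* Per interface type (CGX/LBK/SDP) setup for a NIX LF: derive the RX/TX
 * channel base and count, then install the default unicast/broadcast (and
 * promisc for LBK/SDP) MCAM entries and the bcast replication list entry.
 */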
327 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
328 struct nix_lf_alloc_rsp *rsp, bool loop)
329 {
330 struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
331 u16 req_chan_base, req_chan_end, req_chan_cnt;
332 struct rvu_hwinfo *hw = rvu->hw;
333 struct sdp_node_info *sdp_info;
334 int pkind, pf, vf, lbkid, vfid;
335 u8 cgx_id, lmac_id;
336 bool from_vf;
337 int err;
338
339 pf = rvu_get_pf(pcifunc);
340 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
341 type != NIX_INTF_TYPE_SDP)
342 return 0;
343
344 switch (type) {
345 case NIX_INTF_TYPE_CGX:
346 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
347 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
348
349 pkind = rvu_npc_get_pkind(rvu, pf);
350 if (pkind < 0) {
351 dev_err(rvu->dev,
352 "PF_Func 0x%x: Invalid pkind\n", pcifunc);
353 return -EINVAL;
354 }
355 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
356 pfvf->tx_chan_base = pfvf->rx_chan_base;
357 pfvf->rx_chan_cnt = 1;
358 pfvf->tx_chan_cnt = 1;
359 rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
360
361 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
362 rvu_npc_set_pkind(rvu, pkind, pfvf);
363
364 break;
365 case NIX_INTF_TYPE_LBK:
366 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
367
368 /* If NIX1 block is present on the silicon then NIXes are
369 * assigned alternately for lbk interfaces. NIX0 should
370 * send packets on lbk link 1 channels and NIX1 should send
371 * on lbk link 0 channels for the communication between
372 * NIX0 and NIX1.
373 */
374 lbkid = 0;
375 if (rvu->hw->lbk_links > 1)
376 lbkid = vf & 0x1 ? 0 : 1;
377
378 /* By default NIX0 is configured to send packet on lbk link 1
379 * (which corresponds to LBK1); the same packet will be received on
380 * NIX1 over lbk link 0. If NIX1 sends a packet on lbk link 0
381 * (which corresponds to LBK2) the packet will be received on NIX0 lbk
382 * link 1.
383 * But if lbk links for NIX0 and NIX1 are negated, i.e. NIX0
384 * transmits and receives on lbk link 0, which corresponds
385 * to LBK1 block, back to back connectivity between NIX and
386 * LBK can be achieved (which is similar to 96xx).
387 *
388 * RX TX
389 * NIX0 lbk link 1 (LBK2) 1 (LBK1)
390 * NIX0 lbk link 0 (LBK0) 0 (LBK0)
391 * NIX1 lbk link 0 (LBK1) 0 (LBK2)
392 * NIX1 lbk link 1 (LBK3) 1 (LBK3)
393 */
394 if (loop)
395 lbkid = !lbkid;
396
397 /* Note that AF's VFs work in pairs and talk over consecutive
398 * loopback channels. Therefore if an odd number of AF VFs are
399 * enabled then the last VF remains with no pair.
400 */
401 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
402 pfvf->tx_chan_base = vf & 0x1 ?
403 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
404 rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
405 pfvf->rx_chan_cnt = 1;
406 pfvf->tx_chan_cnt = 1;
407 rsp->tx_link = hw->cgx_links + lbkid;
408 pfvf->lbkid = lbkid;
409 rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
410 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
411 pfvf->rx_chan_base,
412 pfvf->rx_chan_cnt);
413
414 break;
415 case NIX_INTF_TYPE_SDP:
416 from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
417 parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
418 sdp_info = parent_pf->sdp_info;
419 if (!sdp_info) {
420 dev_err(rvu->dev, "Invalid sdp_info pointer\n");
421 return -EINVAL;
422 }
423 if (from_vf) {
424 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
425 sdp_info->num_pf_rings;
426 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
427 for (vfid = 0; vfid < vf; vfid++)
428 req_chan_base += sdp_info->vf_rings[vfid];
429 req_chan_cnt = sdp_info->vf_rings[vf];
430 req_chan_end = req_chan_base + req_chan_cnt - 1;
431 if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
432 req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
433 dev_err(rvu->dev,
434 "PF_Func 0x%x: Invalid channel base and count\n",
435 pcifunc);
436 return -EINVAL;
437 }
438 } else {
439 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
440 req_chan_cnt = sdp_info->num_pf_rings;
441 }
442
443 pfvf->rx_chan_base = req_chan_base;
444 pfvf->rx_chan_cnt = req_chan_cnt;
445 pfvf->tx_chan_base = pfvf->rx_chan_base;
446 pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
447
448 rsp->tx_link = hw->cgx_links + hw->lbk_links;
449 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
450 pfvf->rx_chan_base,
451 pfvf->rx_chan_cnt);
452 break;
453 }
454
455 /* Add a UCAST forwarding rule in MCAM with this NIXLF attached
456 * RVU PF/VF's MAC address.
457 */
458 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
459 pfvf->rx_chan_base, pfvf->mac_addr);
460
461 /* Add this PF_FUNC to bcast pkt replication list */
462 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
463 if (err) {
464 dev_err(rvu->dev,
465 "Bcast list, failed to enable PF_FUNC 0x%x\n",
466 pcifunc);
467 return err;
468 }
469 /* Install MCAM rule matching Ethernet broadcast mac address */
470 rvu_npc_install_bcast_match_entry(rvu, pcifunc,
471 nixlf, pfvf->rx_chan_base);
472
473 pfvf->maxlen = NIC_HW_MIN_FRS;
474 pfvf->minlen = NIC_HW_MIN_FRS;
475
476 return 0;
477 }
478
479 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
480 {
481 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
482 int err;
483
484 pfvf->maxlen = 0;
485 pfvf->minlen = 0;
486
487 /* Remove this PF_FUNC from bcast pkt replication list */
488 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
489 if (err) {
490 dev_err(rvu->dev,
491 "Bcast list, failed to disable PF_FUNC 0x%x\n",
492 pcifunc);
493 }
494
495 /* Free and disable any MCAM entries used by this NIX LF */
496 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
497
498 /* Disable DMAC filters used */
499 rvu_cgx_disable_dmac_entries(rvu, pcifunc);
500 }
501
502 #define NIX_BPIDS_PER_LMAC 8
503 #define NIX_BPIDS_PER_CPT 1
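/* Carve up the backpressure ID space: the first cgx_bpid_cnt + sdp_bpid_cnt
 * + NIX_BPIDS_PER_CPT IDs are statically reserved, and everything above
 * free_pool_base is handed out dynamically from the bpids bitmap.
 */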
504 static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
505 {
506 struct nix_bp *bp = &hw->bp;
507 int err, max_bpids;
508 u64 cfg;
509
510 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
511 max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg);
512
513 /* Reserve the BPIds for CGX and SDP */
514 bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
515 bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg);
516 bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
517 NIX_BPIDS_PER_CPT;
518 bp->bpids.max = max_bpids - bp->free_pool_base;
519
520 err = rvu_alloc_bitmap(&bp->bpids);
521 if (err)
522 return err;
523
524 bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
525 sizeof(u16), GFP_KERNEL);
526 if (!bp->fn_map)
527 return -ENOMEM;
528
529 bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
530 sizeof(u8), GFP_KERNEL);
531 if (!bp->intf_map)
532 return -ENOMEM;
533
534 bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
535 sizeof(u8), GFP_KERNEL);
536 if (!bp->ref_cnt)
537 return -ENOMEM;
538
539 return 0;
540 }
541
542 void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
543 {
544 int blkaddr, bpid, err;
545 struct nix_hw *nix_hw;
546 struct nix_bp *bp;
547
548 if (!is_lbk_vf(rvu, pcifunc))
549 return;
550
551 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
552 if (err)
553 return;
554
555 bp = &nix_hw->bp;
556
557 mutex_lock(&rvu->rsrc_lock);
558 for (bpid = 0; bpid < bp->bpids.max; bpid++) {
559 if (bp->fn_map[bpid] == pcifunc) {
560 bp->ref_cnt[bpid]--;
561 if (bp->ref_cnt[bpid])
562 continue;
563 rvu_free_rsrc(&bp->bpids, bpid);
564 bp->fn_map[bpid] = 0;
565 }
566 }
567 mutex_unlock(&rvu->rsrc_lock);
568 }
569
570 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
571 struct nix_bp_cfg_req *req,
572 struct msg_rsp *rsp)
573 {
574 u16 pcifunc = req->hdr.pcifunc;
575 int blkaddr, pf, type, err;
576 u16 chan_base, chan, bpid;
577 struct rvu_pfvf *pfvf;
578 struct nix_hw *nix_hw;
579 struct nix_bp *bp;
580 u64 cfg;
581
582 pf = rvu_get_pf(pcifunc);
583 type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
584 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
585 return 0;
586
587 pfvf = rvu_get_pfvf(rvu, pcifunc);
588 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
589 if (err)
590 return err;
591
592 bp = &nix_hw->bp;
593 chan_base = pfvf->rx_chan_base + req->chan_base;
594 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
595 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
596 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
597 cfg & ~BIT_ULL(16));
598
599 if (type == NIX_INTF_TYPE_LBK) {
600 bpid = cfg & GENMASK(8, 0);
601 mutex_lock(&rvu->rsrc_lock);
602 rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
603 for (bpid = 0; bpid < bp->bpids.max; bpid++) {
604 if (bp->fn_map[bpid] == pcifunc) {
605 bp->fn_map[bpid] = 0;
606 bp->ref_cnt[bpid] = 0;
607 }
608 }
609 mutex_unlock(&rvu->rsrc_lock);
610 }
611 }
612 return 0;
613 }
614
615 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
616 int type, int chan_id)
617 {
618 int bpid, blkaddr, sdp_chan_base, err;
619 struct rvu_hwinfo *hw = rvu->hw;
620 struct rvu_pfvf *pfvf;
621 struct nix_hw *nix_hw;
622 u8 cgx_id, lmac_id;
623 struct nix_bp *bp;
624
625 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
626
627 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
628 if (err)
629 return err;
630
631 bp = &nix_hw->bp;
632
633 /* Backpressure IDs range division
634 * CGX channels are mapped to (0 - 191) BPIDs
635 * LBK channels are mapped to (192 - 255) BPIDs
636 * SDP channels are mapped to (256 - 511) BPIDs
637 *
638 * LMAC channels and bpids are mapped as follows
639 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
640 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
641 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
642 */
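/* Worked example (assuming hw->lmac_per_cgx == 4 for illustration):
 * cgx_id 1, lmac_id 2, chan_base 0 gives
 * bpid = 1 * 4 * 8 + 2 * 8 + 0 = 48.
 */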
643 switch (type) {
644 case NIX_INTF_TYPE_CGX:
645 if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
646 return NIX_AF_ERR_INVALID_BPID_REQ;
647 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
648 /* Assign bpid based on cgx, lmac and chan id */
649 bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
650 (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;
651
652 if (req->bpid_per_chan)
653 bpid += chan_id;
654 if (bpid > bp->cgx_bpid_cnt)
655 return NIX_AF_ERR_INVALID_BPID;
656 break;
657
658 case NIX_INTF_TYPE_LBK:
659 /* Alloc bpid from the free pool */
660 mutex_lock(&rvu->rsrc_lock);
661 bpid = rvu_alloc_rsrc(&bp->bpids);
662 if (bpid < 0) {
663 mutex_unlock(&rvu->rsrc_lock);
664 return NIX_AF_ERR_INVALID_BPID;
665 }
666 bp->fn_map[bpid] = req->hdr.pcifunc;
667 bp->ref_cnt[bpid]++;
668 bpid += bp->free_pool_base;
669 mutex_unlock(&rvu->rsrc_lock);
670 break;
671 case NIX_INTF_TYPE_SDP:
672 if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
673 return NIX_AF_ERR_INVALID_BPID_REQ;
674
675 /* Handle the use case of 2 SDP blocks */
676 if (!hw->cap.programmable_chans)
677 sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
678 else
679 sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;
680
681 bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
682 if (req->bpid_per_chan)
683 bpid += chan_id;
684
685 if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
686 return NIX_AF_ERR_INVALID_BPID;
687 break;
688 default:
689 return -EINVAL;
690 }
691 return bpid;
692 }
693
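/* Enable backpressure on the requested channels: a BPID is derived (CGX/SDP)
 * or allocated (LBK) per channel and programmed into NIX_AF_RX_CHANX_CFG.
 * Each rsp->chan_bpid entry packs the channel offset in bits [16:10] and the
 * assigned BPID in bits [9:0].
 */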
694 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
695 struct nix_bp_cfg_req *req,
696 struct nix_bp_cfg_rsp *rsp)
697 {
698 int blkaddr, pf, type, chan_id = 0;
699 u16 pcifunc = req->hdr.pcifunc;
700 struct rvu_pfvf *pfvf;
701 u16 chan_base, chan;
702 s16 bpid, bpid_base;
703 u64 cfg;
704
705 pf = rvu_get_pf(pcifunc);
706 type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
707 if (is_sdp_pfvf(pcifunc))
708 type = NIX_INTF_TYPE_SDP;
709
710 /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
711 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
712 type != NIX_INTF_TYPE_SDP)
713 return 0;
714
715 pfvf = rvu_get_pfvf(rvu, pcifunc);
716 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
717
718 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
719 chan_base = pfvf->rx_chan_base + req->chan_base;
720 bpid = bpid_base;
721
722 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
723 if (bpid < 0) {
724 dev_warn(rvu->dev, "Fail to enable backpressure\n");
725 return -EINVAL;
726 }
727
728 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
729 cfg &= ~GENMASK_ULL(8, 0);
730 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
731 cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
732 chan_id++;
733 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
734 }
735
736 for (chan = 0; chan < req->chan_cnt; chan++) {
737 /* Map channel and bpid assign to it */
738 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
739 (bpid_base & 0x3FF);
740 if (req->bpid_per_chan)
741 bpid_base++;
742 }
743 rsp->chan_cnt = req->chan_cnt;
744
745 return 0;
746 }
747
748 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
749 u64 format, bool v4, u64 *fidx)
750 {
751 struct nix_lso_format field = {0};
752
753 /* IP's Length field */
754 field.layer = NIX_TXLAYER_OL3;
755 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
756 field.offset = v4 ? 2 : 4;
757 field.sizem1 = 1; /* i.e 2 bytes */
758 field.alg = NIX_LSOALG_ADD_PAYLEN;
759 rvu_write64(rvu, blkaddr,
760 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
761 *(u64 *)&field);
762
763 /* No ID field in IPv6 header */
764 if (!v4)
765 return;
766
767 /* IP's ID field */
768 field.layer = NIX_TXLAYER_OL3;
769 field.offset = 4;
770 field.sizem1 = 1; /* i.e 2 bytes */
771 field.alg = NIX_LSOALG_ADD_SEGNUM;
772 rvu_write64(rvu, blkaddr,
773 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
774 *(u64 *)&field);
775 }
776
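/* Program the outer-L4 LSO fields for TCP: the sequence number is advanced
 * by the running payload offset and the flags word is adjusted per segment
 * via the TCP_FLAGS algorithm.
 */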
777 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
778 u64 format, u64 *fidx)
779 {
780 struct nix_lso_format field = {0};
781
782 /* TCP's sequence number field */
783 field.layer = NIX_TXLAYER_OL4;
784 field.offset = 4;
785 field.sizem1 = 3; /* i.e 4 bytes */
786 field.alg = NIX_LSOALG_ADD_OFFSET;
787 rvu_write64(rvu, blkaddr,
788 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
789 *(u64 *)&field);
790
791 /* TCP's flags field */
792 field.layer = NIX_TXLAYER_OL4;
793 field.offset = 12;
794 field.sizem1 = 1; /* 2 bytes */
795 field.alg = NIX_LSOALG_TCP_FLAGS;
796 rvu_write64(rvu, blkaddr,
797 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
798 *(u64 *)&field);
799 }
800
801 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
802 {
803 u64 cfg, idx, fidx = 0;
804
805 /* Get max HW supported format indices */
806 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
807 nix_hw->lso.total = cfg;
808
809 /* Enable LSO */
810 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
811 /* For TSO, set first and middle segment flags to
812 * mask out PSH, RST & FIN flags in TCP packet
813 */
814 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
815 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
816 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
817
818 /* Setup default static LSO formats
819 *
820 * Configure format fields for TCPv4 segmentation offload
821 */
822 idx = NIX_LSO_FORMAT_IDX_TSOV4;
823 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
824 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
825
826 /* Set rest of the fields to NOP */
827 for (; fidx < 8; fidx++) {
828 rvu_write64(rvu, blkaddr,
829 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
830 }
831 nix_hw->lso.in_use++;
832
833 /* Configure format fields for TCPv6 segmentation offload */
834 idx = NIX_LSO_FORMAT_IDX_TSOV6;
835 fidx = 0;
836 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
837 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
838
839 /* Set rest of the fields to NOP */
840 for (; fidx < 8; fidx++) {
841 rvu_write64(rvu, blkaddr,
842 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
843 }
844 nix_hw->lso.in_use++;
845 }
846
847 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
848 {
849 kfree(pfvf->rq_bmap);
850 kfree(pfvf->sq_bmap);
851 kfree(pfvf->cq_bmap);
852 if (pfvf->rq_ctx)
853 qmem_free(rvu->dev, pfvf->rq_ctx);
854 if (pfvf->sq_ctx)
855 qmem_free(rvu->dev, pfvf->sq_ctx);
856 if (pfvf->cq_ctx)
857 qmem_free(rvu->dev, pfvf->cq_ctx);
858 if (pfvf->rss_ctx)
859 qmem_free(rvu->dev, pfvf->rss_ctx);
860 if (pfvf->nix_qints_ctx)
861 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
862 if (pfvf->cq_ints_ctx)
863 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
864
865 pfvf->rq_bmap = NULL;
866 pfvf->cq_bmap = NULL;
867 pfvf->sq_bmap = NULL;
868 pfvf->rq_ctx = NULL;
869 pfvf->sq_ctx = NULL;
870 pfvf->cq_ctx = NULL;
871 pfvf->rss_ctx = NULL;
872 pfvf->nix_qints_ctx = NULL;
873 pfvf->cq_ints_ctx = NULL;
874 }
875
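/* Set up the RSS indirection table for a NIX LF. The table holds
 * rss_sz * rss_grps entries; e.g. rss_sz = 256 with rss_grps = 2 programs
 * two groups at offsets 0 and 256, each of size 256.
 */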
876 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
877 struct rvu_pfvf *pfvf, int nixlf,
878 int rss_sz, int rss_grps, int hwctx_size,
879 u64 way_mask, bool tag_lsb_as_adder)
880 {
881 int err, grp, num_indices;
882 u64 val;
883
884 /* RSS is not requested for this NIXLF */
885 if (!rss_sz)
886 return 0;
887 num_indices = rss_sz * rss_grps;
888
889 /* Alloc NIX RSS HW context memory and config the base */
890 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
891 if (err)
892 return err;
893
894 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
895 (u64)pfvf->rss_ctx->iova);
896
897 /* Config full RSS table size, enable RSS and caching */
898 val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
899 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
900
901 if (tag_lsb_as_adder)
902 val |= BIT_ULL(5);
903
904 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
905 /* Config RSS group offset and sizes */
906 for (grp = 0; grp < rss_grps; grp++)
907 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
908 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
909 return 0;
910 }
911
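/* Copy one instruction to the current AQ head, ring the doorbell and busy
 * poll the result memory (roughly 1000 x 1us) for a completion code.
 */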
912 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
913 struct nix_aq_inst_s *inst)
914 {
915 struct admin_queue *aq = block->aq;
916 struct nix_aq_res_s *result;
917 int timeout = 1000;
918 u64 reg, head;
919 int ret;
920
921 result = (struct nix_aq_res_s *)aq->res->base;
922
923 /* Get current head pointer where to append this instruction */
924 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
925 head = (reg >> 4) & AQ_PTR_MASK;
926
927 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
928 (void *)inst, aq->inst->entry_sz);
929 memset(result, 0, sizeof(*result));
930 /* sync into memory */
931 wmb();
932
933 /* Ring the doorbell and wait for result */
934 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
935 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
936 cpu_relax();
937 udelay(1);
938 timeout--;
939 if (!timeout)
940 return -EBUSY;
941 }
942
943 if (result->compcode != NIX_AQ_COMP_GOOD) {
944 /* TODO: Replace this with some error code */
945 if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
946 result->compcode == NIX_AQ_COMP_LOCKERR ||
947 result->compcode == NIX_AQ_COMP_CTX_POISON) {
948 ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
949 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
950 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
951 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
952 if (ret)
953 dev_err(rvu->dev,
954 "%s: Not able to unlock cachelines\n", __func__);
955 }
956
957 return -EBUSY;
958 }
959
960 return 0;
961 }
962
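/* The SMQ field location differs between the OTx2 and CN10K AQ request
 * layouts, so pick it from the matching structure.
 */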
963 static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
964 u16 *smq, u16 *smq_mask)
965 {
966 struct nix_cn10k_aq_enq_req *aq_req;
967
968 if (!is_rvu_otx2(rvu)) {
969 aq_req = (struct nix_cn10k_aq_enq_req *)req;
970 *smq = aq_req->sq.smq;
971 *smq_mask = aq_req->sq_mask.smq;
972 } else {
973 *smq = req->sq.smq;
974 *smq_mask = req->sq_mask.smq;
975 }
976 }
977
978 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
979 struct nix_aq_enq_req *req,
980 struct nix_aq_enq_rsp *rsp)
981 {
982 struct rvu_hwinfo *hw = rvu->hw;
983 u16 pcifunc = req->hdr.pcifunc;
984 int nixlf, blkaddr, rc = 0;
985 struct nix_aq_inst_s inst;
986 struct rvu_block *block;
987 struct admin_queue *aq;
988 struct rvu_pfvf *pfvf;
989 u16 smq, smq_mask;
990 void *ctx, *mask;
991 bool ena;
992 u64 cfg;
993
994 blkaddr = nix_hw->blkaddr;
995 block = &hw->block[blkaddr];
996 aq = block->aq;
997 if (!aq) {
998 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
999 return NIX_AF_ERR_AQ_ENQUEUE;
1000 }
1001
1002 pfvf = rvu_get_pfvf(rvu, pcifunc);
1003 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1004
1005 /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
1006 * operations done by AF itself.
1007 */
1008 if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
1009 (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
1010 if (!pfvf->nixlf || nixlf < 0)
1011 return NIX_AF_ERR_AF_LF_INVALID;
1012 }
1013
1014 switch (req->ctype) {
1015 case NIX_AQ_CTYPE_RQ:
1016 /* Check if index exceeds max no of queues */
1017 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
1018 rc = NIX_AF_ERR_AQ_ENQUEUE;
1019 break;
1020 case NIX_AQ_CTYPE_SQ:
1021 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
1022 rc = NIX_AF_ERR_AQ_ENQUEUE;
1023 break;
1024 case NIX_AQ_CTYPE_CQ:
1025 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
1026 rc = NIX_AF_ERR_AQ_ENQUEUE;
1027 break;
1028 case NIX_AQ_CTYPE_RSS:
1029 /* Check if RSS is enabled and qidx is within range */
1030 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
1031 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
1032 (req->qidx >= (256UL << (cfg & 0xF))))
1033 rc = NIX_AF_ERR_AQ_ENQUEUE;
1034 break;
1035 case NIX_AQ_CTYPE_MCE:
1036 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
1037
1038 /* Check if index exceeds MCE list length */
1039 if (!nix_hw->mcast.mce_ctx ||
1040 (req->qidx >= (256UL << (cfg & 0xF))))
1041 rc = NIX_AF_ERR_AQ_ENQUEUE;
1042
1043 /* Adding multicast lists for requests from PF/VFs is not
1044 * yet supported, so ignore this.
1045 */
1046 if (rsp)
1047 rc = NIX_AF_ERR_AQ_ENQUEUE;
1048 break;
1049 case NIX_AQ_CTYPE_BANDPROF:
1050 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
1051 nix_hw, pcifunc))
1052 rc = NIX_AF_ERR_INVALID_BANDPROF;
1053 break;
1054 default:
1055 rc = NIX_AF_ERR_AQ_ENQUEUE;
1056 }
1057
1058 if (rc)
1059 return rc;
1060
1061 nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
1062 /* Check if SQ pointed SMQ belongs to this PF/VF or not */
1063 if (req->ctype == NIX_AQ_CTYPE_SQ &&
1064 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
1065 (req->op == NIX_AQ_INSTOP_WRITE &&
1066 req->sq_mask.ena && req->sq.ena && smq_mask))) {
1067 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
1068 pcifunc, smq))
1069 return NIX_AF_ERR_AQ_ENQUEUE;
1070 }
1071
1072 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
1073 inst.lf = nixlf;
1074 inst.cindex = req->qidx;
1075 inst.ctype = req->ctype;
1076 inst.op = req->op;
1077 /* Currently we are not supporting enqueuing multiple instructions,
1078 * so always choose first entry in result memory.
1079 */
1080 inst.res_addr = (u64)aq->res->iova;
1081
1082 /* Hardware uses the same aq->res->base for updating the result of
1083 * the previous instruction, hence wait here till it is done.
1084 */
1085 spin_lock(&aq->lock);
1086
1087 /* Clean result + context memory */
1088 memset(aq->res->base, 0, aq->res->entry_sz);
1089 /* Context needs to be written at RES_ADDR + 128 */
1090 ctx = aq->res->base + 128;
1091 /* Mask needs to be written at RES_ADDR + 256 */
1092 mask = aq->res->base + 256;
1093
1094 switch (req->op) {
1095 case NIX_AQ_INSTOP_WRITE:
1096 if (req->ctype == NIX_AQ_CTYPE_RQ)
1097 memcpy(mask, &req->rq_mask,
1098 sizeof(struct nix_rq_ctx_s));
1099 else if (req->ctype == NIX_AQ_CTYPE_SQ)
1100 memcpy(mask, &req->sq_mask,
1101 sizeof(struct nix_sq_ctx_s));
1102 else if (req->ctype == NIX_AQ_CTYPE_CQ)
1103 memcpy(mask, &req->cq_mask,
1104 sizeof(struct nix_cq_ctx_s));
1105 else if (req->ctype == NIX_AQ_CTYPE_RSS)
1106 memcpy(mask, &req->rss_mask,
1107 sizeof(struct nix_rsse_s));
1108 else if (req->ctype == NIX_AQ_CTYPE_MCE)
1109 memcpy(mask, &req->mce_mask,
1110 sizeof(struct nix_rx_mce_s));
1111 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1112 memcpy(mask, &req->prof_mask,
1113 sizeof(struct nix_bandprof_s));
1114 fallthrough;
1115 case NIX_AQ_INSTOP_INIT:
1116 if (req->ctype == NIX_AQ_CTYPE_RQ)
1117 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
1118 else if (req->ctype == NIX_AQ_CTYPE_SQ)
1119 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
1120 else if (req->ctype == NIX_AQ_CTYPE_CQ)
1121 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
1122 else if (req->ctype == NIX_AQ_CTYPE_RSS)
1123 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
1124 else if (req->ctype == NIX_AQ_CTYPE_MCE)
1125 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
1126 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1127 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
1128 break;
1129 case NIX_AQ_INSTOP_NOP:
1130 case NIX_AQ_INSTOP_READ:
1131 case NIX_AQ_INSTOP_LOCK:
1132 case NIX_AQ_INSTOP_UNLOCK:
1133 break;
1134 default:
1135 rc = NIX_AF_ERR_AQ_ENQUEUE;
1136 spin_unlock(&aq->lock);
1137 return rc;
1138 }
1139
1140 /* Submit the instruction to AQ */
1141 rc = nix_aq_enqueue_wait(rvu, block, &inst);
1142 if (rc) {
1143 spin_unlock(&aq->lock);
1144 return rc;
1145 }
1146
1147 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
1148 if (req->op == NIX_AQ_INSTOP_INIT) {
1149 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
1150 __set_bit(req->qidx, pfvf->rq_bmap);
1151 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
1152 __set_bit(req->qidx, pfvf->sq_bmap);
1153 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
1154 __set_bit(req->qidx, pfvf->cq_bmap);
1155 }
1156
1157 if (req->op == NIX_AQ_INSTOP_WRITE) {
1158 if (req->ctype == NIX_AQ_CTYPE_RQ) {
1159 ena = (req->rq.ena & req->rq_mask.ena) |
1160 (test_bit(req->qidx, pfvf->rq_bmap) &
1161 ~req->rq_mask.ena);
1162 if (ena)
1163 __set_bit(req->qidx, pfvf->rq_bmap);
1164 else
1165 __clear_bit(req->qidx, pfvf->rq_bmap);
1166 }
1167 if (req->ctype == NIX_AQ_CTYPE_SQ) {
1168 ena = (req->rq.ena & req->sq_mask.ena) |
1169 (test_bit(req->qidx, pfvf->sq_bmap) &
1170 ~req->sq_mask.ena);
1171 if (ena)
1172 __set_bit(req->qidx, pfvf->sq_bmap);
1173 else
1174 __clear_bit(req->qidx, pfvf->sq_bmap);
1175 }
1176 if (req->ctype == NIX_AQ_CTYPE_CQ) {
1177 ena = (req->rq.ena & req->cq_mask.ena) |
1178 (test_bit(req->qidx, pfvf->cq_bmap) &
1179 ~req->cq_mask.ena);
1180 if (ena)
1181 __set_bit(req->qidx, pfvf->cq_bmap);
1182 else
1183 __clear_bit(req->qidx, pfvf->cq_bmap);
1184 }
1185 }
1186
1187 if (rsp) {
1188 /* Copy read context into mailbox */
1189 if (req->op == NIX_AQ_INSTOP_READ) {
1190 if (req->ctype == NIX_AQ_CTYPE_RQ)
1191 memcpy(&rsp->rq, ctx,
1192 sizeof(struct nix_rq_ctx_s));
1193 else if (req->ctype == NIX_AQ_CTYPE_SQ)
1194 memcpy(&rsp->sq, ctx,
1195 sizeof(struct nix_sq_ctx_s));
1196 else if (req->ctype == NIX_AQ_CTYPE_CQ)
1197 memcpy(&rsp->cq, ctx,
1198 sizeof(struct nix_cq_ctx_s));
1199 else if (req->ctype == NIX_AQ_CTYPE_RSS)
1200 memcpy(&rsp->rss, ctx,
1201 sizeof(struct nix_rsse_s));
1202 else if (req->ctype == NIX_AQ_CTYPE_MCE)
1203 memcpy(&rsp->mce, ctx,
1204 sizeof(struct nix_rx_mce_s));
1205 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1206 memcpy(&rsp->prof, ctx,
1207 sizeof(struct nix_bandprof_s));
1208 }
1209 }
1210
1211 spin_unlock(&aq->lock);
1212 return 0;
1213 }
1214
1215 static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
1216 struct nix_aq_enq_req *req, u8 ctype)
1217 {
1218 struct nix_cn10k_aq_enq_req aq_req;
1219 struct nix_cn10k_aq_enq_rsp aq_rsp;
1220 int rc, word;
1221
1222 if (req->ctype != NIX_AQ_CTYPE_CQ)
1223 return 0;
1224
1225 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
1226 req->hdr.pcifunc, ctype, req->qidx);
1227 if (rc) {
1228 dev_err(rvu->dev,
1229 "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
1230 __func__, nix_get_ctx_name(ctype), req->qidx,
1231 req->hdr.pcifunc);
1232 return rc;
1233 }
1234
1235 /* Make copy of original context & mask which are required
1236 * for resubmission
1237 */
1238 memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
1239 memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
1240
1241 /* exclude fields which HW can update */
1242 aq_req.cq_mask.cq_err = 0;
1243 aq_req.cq_mask.wrptr = 0;
1244 aq_req.cq_mask.tail = 0;
1245 aq_req.cq_mask.head = 0;
1246 aq_req.cq_mask.avg_level = 0;
1247 aq_req.cq_mask.update_time = 0;
1248 aq_req.cq_mask.substream = 0;
1249
1250 /* Context mask (cq_mask) holds mask value of fields which
1251 * are changed in AQ WRITE operation.
1252 * for example cq.drop = 0xa;
1253 * cq_mask.drop = 0xff;
1254 * Below logic performs '&' between cq and cq_mask so that non
1255 * updated fields are masked out for request and response
1256 * comparison
1257 */
1258 for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
1259 word++) {
1260 *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
1261 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1262 *(u64 *)((u8 *)&aq_req.cq + word * 8) &=
1263 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1264 }
1265
1266 if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
1267 return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
1268
1269 return 0;
1270 }
1271
1272 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
1273 struct nix_aq_enq_rsp *rsp)
1274 {
1275 struct nix_hw *nix_hw;
1276 int err, retries = 5;
1277 int blkaddr;
1278
1279 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
1280 if (blkaddr < 0)
1281 return NIX_AF_ERR_AF_LF_INVALID;
1282
1283 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1284 if (!nix_hw)
1285 return NIX_AF_ERR_INVALID_NIXBLK;
1286
1287 retry:
1288 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
1289
1290 /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
1291 * As a workaround, perform a CQ context read after each AQ write. If the
1292 * read shows the AQ write is not updated, perform the AQ write again.
1293 */
1294 if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
1295 err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
1296 if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
1297 if (retries--)
1298 goto retry;
1299 else
1300 return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
1301 }
1302 }
1303
1304 return err;
1305 }
1306
1307 static const char *nix_get_ctx_name(int ctype)
1308 {
1309 switch (ctype) {
1310 case NIX_AQ_CTYPE_CQ:
1311 return "CQ";
1312 case NIX_AQ_CTYPE_SQ:
1313 return "SQ";
1314 case NIX_AQ_CTYPE_RQ:
1315 return "RQ";
1316 case NIX_AQ_CTYPE_RSS:
1317 return "RSS";
1318 }
1319 return "";
1320 }
1321
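/* Walk this LF's RQ/SQ/CQ bitmap and issue an AQ WRITE with ena = 0 for
 * every queue context that is currently enabled.
 */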
1322 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
1323 {
1324 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1325 struct nix_aq_enq_req aq_req;
1326 unsigned long *bmap;
1327 int qidx, q_cnt = 0;
1328 int err = 0, rc;
1329
1330 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
1331 return NIX_AF_ERR_AQ_ENQUEUE;
1332
1333 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1334 aq_req.hdr.pcifunc = req->hdr.pcifunc;
1335
1336 if (req->ctype == NIX_AQ_CTYPE_CQ) {
1337 aq_req.cq.ena = 0;
1338 aq_req.cq_mask.ena = 1;
1339 aq_req.cq.bp_ena = 0;
1340 aq_req.cq_mask.bp_ena = 1;
1341 q_cnt = pfvf->cq_ctx->qsize;
1342 bmap = pfvf->cq_bmap;
1343 }
1344 if (req->ctype == NIX_AQ_CTYPE_SQ) {
1345 aq_req.sq.ena = 0;
1346 aq_req.sq_mask.ena = 1;
1347 q_cnt = pfvf->sq_ctx->qsize;
1348 bmap = pfvf->sq_bmap;
1349 }
1350 if (req->ctype == NIX_AQ_CTYPE_RQ) {
1351 aq_req.rq.ena = 0;
1352 aq_req.rq_mask.ena = 1;
1353 q_cnt = pfvf->rq_ctx->qsize;
1354 bmap = pfvf->rq_bmap;
1355 }
1356
1357 aq_req.ctype = req->ctype;
1358 aq_req.op = NIX_AQ_INSTOP_WRITE;
1359
1360 for (qidx = 0; qidx < q_cnt; qidx++) {
1361 if (!test_bit(qidx, bmap))
1362 continue;
1363 aq_req.qidx = qidx;
1364 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1365 if (rc) {
1366 err = rc;
1367 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1368 nix_get_ctx_name(req->ctype), qidx);
1369 }
1370 }
1371
1372 return err;
1373 }
1374
1375 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1376 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1377 {
1378 struct nix_aq_enq_req lock_ctx_req;
1379 int err;
1380
1381 if (req->op != NIX_AQ_INSTOP_INIT)
1382 return 0;
1383
1384 if (req->ctype == NIX_AQ_CTYPE_MCE ||
1385 req->ctype == NIX_AQ_CTYPE_DYNO)
1386 return 0;
1387
1388 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1389 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1390 lock_ctx_req.ctype = req->ctype;
1391 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1392 lock_ctx_req.qidx = req->qidx;
1393 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1394 if (err)
1395 dev_err(rvu->dev,
1396 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1397 req->hdr.pcifunc,
1398 nix_get_ctx_name(req->ctype), req->qidx);
1399 return err;
1400 }
1401
1402 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1403 struct nix_aq_enq_req *req,
1404 struct nix_aq_enq_rsp *rsp)
1405 {
1406 int err;
1407
1408 err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1409 if (!err)
1410 err = nix_lf_hwctx_lockdown(rvu, req);
1411 return err;
1412 }
1413 #else
1414
1415 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1416 struct nix_aq_enq_req *req,
1417 struct nix_aq_enq_rsp *rsp)
1418 {
1419 return rvu_nix_aq_enq_inst(rvu, req, rsp);
1420 }
1421 #endif
1422 /* CN10K mbox handler */
1423 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1424 struct nix_cn10k_aq_enq_req *req,
1425 struct nix_cn10k_aq_enq_rsp *rsp)
1426 {
1427 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1428 (struct nix_aq_enq_rsp *)rsp);
1429 }
1430
1431 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1432 struct hwctx_disable_req *req,
1433 struct msg_rsp *rsp)
1434 {
1435 return nix_lf_hwctx_disable(rvu, req);
1436 }
1437
1438 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1439 struct nix_lf_alloc_req *req,
1440 struct nix_lf_alloc_rsp *rsp)
1441 {
1442 int nixlf, qints, hwctx_size, intf, err, rc = 0;
1443 struct rvu_hwinfo *hw = rvu->hw;
1444 u16 pcifunc = req->hdr.pcifunc;
1445 struct rvu_block *block;
1446 struct rvu_pfvf *pfvf;
1447 u64 cfg, ctx_cfg;
1448 int blkaddr;
1449
1450 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1451 return NIX_AF_ERR_PARAM;
1452
1453 if (req->way_mask)
1454 req->way_mask &= 0xFFFF;
1455
1456 pfvf = rvu_get_pfvf(rvu, pcifunc);
1457 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1458 if (!pfvf->nixlf || blkaddr < 0)
1459 return NIX_AF_ERR_AF_LF_INVALID;
1460
1461 block = &hw->block[blkaddr];
1462 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1463 if (nixlf < 0)
1464 return NIX_AF_ERR_AF_LF_INVALID;
1465
1466 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1467 if (req->npa_func) {
1468 /* If default, use 'this' NIXLF's PFFUNC */
1469 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1470 req->npa_func = pcifunc;
1471 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1472 return NIX_AF_INVAL_NPA_PF_FUNC;
1473 }
1474
1475 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1476 if (req->sso_func) {
1477 /* If default, use 'this' NIXLF's PFFUNC */
1478 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1479 req->sso_func = pcifunc;
1480 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1481 return NIX_AF_INVAL_SSO_PF_FUNC;
1482 }
1483
1484 /* If RSS is being enabled, check if requested config is valid.
1485 * RSS table size should be a power of two, otherwise
1486 * RSS_GRP::OFFSET + adder might go beyond that group or
1487 * the entire table won't be usable.
1488 */
1489 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1490 !is_power_of_2(req->rss_sz)))
1491 return NIX_AF_ERR_RSS_SIZE_INVALID;
1492
1493 if (req->rss_sz &&
1494 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1495 return NIX_AF_ERR_RSS_GRPS_INVALID;
1496
1497 /* Reset this NIX LF */
1498 err = rvu_lf_reset(rvu, block, nixlf);
1499 if (err) {
1500 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1501 block->addr - BLKADDR_NIX0, nixlf);
1502 return NIX_AF_ERR_LF_RESET;
1503 }
1504
1505 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1506
1507 /* Alloc NIX RQ HW context memory and config the base */
1508 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1509 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1510 if (err)
1511 goto free_mem;
1512
1513 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1514 if (!pfvf->rq_bmap)
1515 goto free_mem;
1516
1517 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1518 (u64)pfvf->rq_ctx->iova);
1519
1520 /* Set caching and queue count in HW */
1521 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1522 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1523
1524 /* Alloc NIX SQ HW context memory and config the base */
1525 hwctx_size = 1UL << (ctx_cfg & 0xF);
1526 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1527 if (err)
1528 goto free_mem;
1529
1530 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1531 if (!pfvf->sq_bmap)
1532 goto free_mem;
1533
1534 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1535 (u64)pfvf->sq_ctx->iova);
1536
1537 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1538 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1539
1540 /* Alloc NIX CQ HW context memory and config the base */
1541 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1542 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1543 if (err)
1544 goto free_mem;
1545
1546 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1547 if (!pfvf->cq_bmap)
1548 goto free_mem;
1549
1550 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1551 (u64)pfvf->cq_ctx->iova);
1552
1553 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1554 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1555
1556 /* Initialize receive side scaling (RSS) */
1557 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1558 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1559 req->rss_grps, hwctx_size, req->way_mask,
1560 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
1561 if (err)
1562 goto free_mem;
1563
1564 /* Alloc memory for CQINT's HW contexts */
1565 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1566 qints = (cfg >> 24) & 0xFFF;
1567 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1568 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1569 if (err)
1570 goto free_mem;
1571
1572 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1573 (u64)pfvf->cq_ints_ctx->iova);
1574
1575 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1576 BIT_ULL(36) | req->way_mask << 20);
1577
1578 /* Alloc memory for QINT's HW contexts */
1579 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1580 qints = (cfg >> 12) & 0xFFF;
1581 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1582 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1583 if (err)
1584 goto free_mem;
1585
1586 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1587 (u64)pfvf->nix_qints_ctx->iova);
1588 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1589 BIT_ULL(36) | req->way_mask << 20);
1590
1591 /* Setup VLANX TPID's.
1592 * Use VLAN1 for 802.1Q
1593 * and VLAN0 for 802.1AD.
1594 */
1595 cfg = (0x8100ULL << 16) | 0x88A8ULL;
1596 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1597
1598 /* Enable LMTST for this NIX LF */
1599 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1600
1601 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1602 if (req->npa_func)
1603 cfg = req->npa_func;
1604 if (req->sso_func)
1605 cfg |= (u64)req->sso_func << 16;
1606
1607 cfg |= (u64)req->xqe_sz << 33;
1608 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1609
1610 /* Config Rx pkt length, csum checks and apad enable / disable */
1611 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1612
1613 /* Configure pkind for TX parse config */
1614 cfg = NPC_TX_DEF_PKIND;
1615 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1616
1617 intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1618 if (is_sdp_pfvf(pcifunc))
1619 intf = NIX_INTF_TYPE_SDP;
1620
1621 err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
1622 !!(req->flags & NIX_LF_LBK_BLK_SEL));
1623 if (err)
1624 goto free_mem;
1625
1626 /* Disable NPC entries as NIXLF's contexts are not initialized yet */
1627 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1628
1629 /* Configure RX VTAG Type 7 (strip) for vf vlan */
1630 rvu_write64(rvu, blkaddr,
1631 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1632 VTAGSIZE_T4 | VTAG_STRIP);
1633
1634 goto exit;
1635
1636 free_mem:
1637 nix_ctx_free(rvu, pfvf);
1638 rc = -ENOMEM;
1639
1640 exit:
1641 /* Set macaddr of this PF/VF */
1642 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1643
1644 /* set SQB size info */
1645 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1646 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1647 rsp->rx_chan_base = pfvf->rx_chan_base;
1648 rsp->tx_chan_base = pfvf->tx_chan_base;
1649 rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1650 rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1651 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1652 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1653 /* Get HW supported stat count */
1654 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1655 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1656 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1657 /* Get count of CQ IRQs and error IRQs supported per LF */
1658 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1659 rsp->qints = ((cfg >> 12) & 0xFFF);
1660 rsp->cints = ((cfg >> 24) & 0xFFF);
1661 rsp->cgx_links = hw->cgx_links;
1662 rsp->lbk_links = hw->lbk_links;
1663 rsp->sdp_links = hw->sdp_links;
1664
1665 return rc;
1666 }
1667
1668 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1669 struct msg_rsp *rsp)
1670 {
1671 struct rvu_hwinfo *hw = rvu->hw;
1672 u16 pcifunc = req->hdr.pcifunc;
1673 struct rvu_block *block;
1674 int blkaddr, nixlf, err;
1675 struct rvu_pfvf *pfvf;
1676
1677 pfvf = rvu_get_pfvf(rvu, pcifunc);
1678 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1679 if (!pfvf->nixlf || blkaddr < 0)
1680 return NIX_AF_ERR_AF_LF_INVALID;
1681
1682 block = &hw->block[blkaddr];
1683 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1684 if (nixlf < 0)
1685 return NIX_AF_ERR_AF_LF_INVALID;
1686
1687 if (req->flags & NIX_LF_DISABLE_FLOWS)
1688 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1689 else
1690 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1691
1692 /* Free any tx vtag def entries used by this NIX LF */
1693 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1694 nix_free_tx_vtag_entries(rvu, pcifunc);
1695
1696 nix_interface_deinit(rvu, pcifunc, nixlf);
1697
1698 /* Reset this NIX LF */
1699 err = rvu_lf_reset(rvu, block, nixlf);
1700 if (err) {
1701 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1702 block->addr - BLKADDR_NIX0, nixlf);
1703 return NIX_AF_ERR_LF_RESET;
1704 }
1705
1706 nix_ctx_free(rvu, pfvf);
1707
1708 return 0;
1709 }
1710
1711 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1712 struct nix_mark_format_cfg *req,
1713 struct nix_mark_format_cfg_rsp *rsp)
1714 {
1715 u16 pcifunc = req->hdr.pcifunc;
1716 struct nix_hw *nix_hw;
1717 struct rvu_pfvf *pfvf;
1718 int blkaddr, rc;
1719 u32 cfg;
1720
1721 pfvf = rvu_get_pfvf(rvu, pcifunc);
1722 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1723 if (!pfvf->nixlf || blkaddr < 0)
1724 return NIX_AF_ERR_AF_LF_INVALID;
1725
1726 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1727 if (!nix_hw)
1728 return NIX_AF_ERR_INVALID_NIXBLK;
1729
1730 cfg = (((u32)req->offset & 0x7) << 16) |
1731 (((u32)req->y_mask & 0xF) << 12) |
1732 (((u32)req->y_val & 0xF) << 8) |
1733 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1734
1735 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1736 if (rc < 0) {
1737 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1738 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1739 return NIX_AF_ERR_MARK_CFG_FAIL;
1740 }
1741
1742 rsp->mark_format_idx = rc;
1743 return 0;
1744 }
1745
1746 /* Handle shaper update specially for a few revisions */
1747 static bool
1748 handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
1749 int lvl, u64 reg, u64 regval)
1750 {
1751 u64 regbase, oldval, sw_xoff = 0;
1752 u64 dbgval, md_debug0 = 0;
1753 unsigned long poll_tmo;
1754 bool rate_reg = 0;
1755 u32 schq;
1756
1757 regbase = reg & 0xFFFF;
1758 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1759
1760 /* Check for rate register */
1761 switch (lvl) {
1762 case NIX_TXSCH_LVL_TL1:
1763 md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
1764 sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
1765
1766 rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
1767 break;
1768 case NIX_TXSCH_LVL_TL2:
1769 md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
1770 sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
1771
1772 rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
1773 regbase == NIX_AF_TL2X_PIR(0));
1774 break;
1775 case NIX_TXSCH_LVL_TL3:
1776 md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
1777 sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
1778
1779 rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
1780 regbase == NIX_AF_TL3X_PIR(0));
1781 break;
1782 case NIX_TXSCH_LVL_TL4:
1783 md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
1784 sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
1785
1786 rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
1787 regbase == NIX_AF_TL4X_PIR(0));
1788 break;
1789 case NIX_TXSCH_LVL_MDQ:
1790 sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
1791 rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
1792 regbase == NIX_AF_MDQX_PIR(0));
1793 break;
1794 }
1795
1796 if (!rate_reg)
1797 return false;
1798
1799 /* Nothing special to do when state is not toggled */
1800 oldval = rvu_read64(rvu, blkaddr, reg);
1801 if ((oldval & 0x1) == (regval & 0x1)) {
1802 rvu_write64(rvu, blkaddr, reg, regval);
1803 return true;
1804 }
1805
1806 /* PIR/CIR disable */
1807 if (!(regval & 0x1)) {
1808 rvu_write64(rvu, blkaddr, sw_xoff, 1);
1809 rvu_write64(rvu, blkaddr, reg, 0);
1810 udelay(4);
1811 rvu_write64(rvu, blkaddr, sw_xoff, 0);
1812 return true;
1813 }
1814
1815 /* PIR/CIR enable */
1816 rvu_write64(rvu, blkaddr, sw_xoff, 1);
1817 if (md_debug0) {
1818 poll_tmo = jiffies + usecs_to_jiffies(10000);
1819 /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
1820 do {
1821 if (time_after(jiffies, poll_tmo)) {
1822 dev_err(rvu->dev,
1823 "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
1824 nixlf, schq, lvl);
1825 goto exit;
1826 }
1827 usleep_range(1, 5);
1828 dbgval = rvu_read64(rvu, blkaddr, md_debug0);
1829 } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
1830 }
1831 rvu_write64(rvu, blkaddr, reg, regval);
1832 exit:
1833 rvu_write64(rvu, blkaddr, sw_xoff, 0);
1834 return true;
1835 }
1836
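/* Reset a scheduler queue's PARENT and SCHEDULE CSRs so the queue comes
 * back in a clean state when it is handed out again.
 */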
1837 static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
1838 int lvl, int schq)
1839 {
1840 u64 tlx_parent = 0, tlx_schedule = 0;
1841
1842 switch (lvl) {
1843 case NIX_TXSCH_LVL_TL2:
1844 tlx_parent = NIX_AF_TL2X_PARENT(schq);
1845 tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
1846 break;
1847 case NIX_TXSCH_LVL_TL3:
1848 tlx_parent = NIX_AF_TL3X_PARENT(schq);
1849 tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
1850 break;
1851 case NIX_TXSCH_LVL_TL4:
1852 tlx_parent = NIX_AF_TL4X_PARENT(schq);
1853 tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
1854 break;
1855 case NIX_TXSCH_LVL_MDQ:
1856 /* no need to reset SMQ_CFG as HW clears this CSR
1857 * on SMQ flush
1858 */
1859 tlx_parent = NIX_AF_MDQX_PARENT(schq);
1860 tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
1861 break;
1862 default:
1863 return;
1864 }
1865
1866 if (tlx_parent)
1867 rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
1868
1869 if (tlx_schedule)
1870 rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
1871 }
1872
1873 /* Disable shaping of pkts by a scheduler queue
1874 * at a given scheduler level.
1875 */
1876 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1877 int nixlf, int lvl, int schq)
1878 {
1879 struct rvu_hwinfo *hw = rvu->hw;
1880 u64 cir_reg = 0, pir_reg = 0;
1881 u64 cfg;
1882
1883 switch (lvl) {
1884 case NIX_TXSCH_LVL_TL1:
1885 cir_reg = NIX_AF_TL1X_CIR(schq);
1886 pir_reg = 0; /* PIR not available at TL1 */
1887 break;
1888 case NIX_TXSCH_LVL_TL2:
1889 cir_reg = NIX_AF_TL2X_CIR(schq);
1890 pir_reg = NIX_AF_TL2X_PIR(schq);
1891 break;
1892 case NIX_TXSCH_LVL_TL3:
1893 cir_reg = NIX_AF_TL3X_CIR(schq);
1894 pir_reg = NIX_AF_TL3X_PIR(schq);
1895 break;
1896 case NIX_TXSCH_LVL_TL4:
1897 cir_reg = NIX_AF_TL4X_CIR(schq);
1898 pir_reg = NIX_AF_TL4X_PIR(schq);
1899 break;
1900 case NIX_TXSCH_LVL_MDQ:
1901 cir_reg = NIX_AF_MDQX_CIR(schq);
1902 pir_reg = NIX_AF_MDQX_PIR(schq);
1903 break;
1904 }
1905
1906 /* Shaper state toggle needs wait/poll */
1907 if (hw->cap.nix_shaper_toggle_wait) {
1908 if (cir_reg)
1909 handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1910 lvl, cir_reg, 0);
1911 if (pir_reg)
1912 handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1913 lvl, pir_reg, 0);
1914 return;
1915 }
1916
1917 if (!cir_reg)
1918 return;
1919 cfg = rvu_read64(rvu, blkaddr, cir_reg);
1920 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1921
1922 if (!pir_reg)
1923 return;
1924 cfg = rvu_read64(rvu, blkaddr, pir_reg);
1925 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1926 }
1927
1928 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1929 int lvl, int schq)
1930 {
1931 struct rvu_hwinfo *hw = rvu->hw;
1932 int link_level;
1933 int link;
1934
1935 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1936 return;
1937
1938 /* Reset TL4's SDP link config */
1939 if (lvl == NIX_TXSCH_LVL_TL4)
1940 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1941
1942 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1943 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1944 if (lvl != link_level)
1945 return;
1946
1947 /* Reset TL2's CGX or LBK link config */
1948 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1949 rvu_write64(rvu, blkaddr,
1950 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1951 }
1952
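/* Clear the SW_XOFF (software pause) bit of a scheduler queue at the
 * given level; a no-op on silicon without shaping support.
 */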
1953 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
1954 int lvl, int schq)
1955 {
1956 struct rvu_hwinfo *hw = rvu->hw;
1957 u64 reg;
1958
1959 /* Skip this if shaping is not supported */
1960 if (!hw->cap.nix_shaping)
1961 return;
1962
1963 /* Clear level specific SW_XOFF */
1964 switch (lvl) {
1965 case NIX_TXSCH_LVL_TL1:
1966 reg = NIX_AF_TL1X_SW_XOFF(schq);
1967 break;
1968 case NIX_TXSCH_LVL_TL2:
1969 reg = NIX_AF_TL2X_SW_XOFF(schq);
1970 break;
1971 case NIX_TXSCH_LVL_TL3:
1972 reg = NIX_AF_TL3X_SW_XOFF(schq);
1973 break;
1974 case NIX_TXSCH_LVL_TL4:
1975 reg = NIX_AF_TL4X_SW_XOFF(schq);
1976 break;
1977 case NIX_TXSCH_LVL_MDQ:
1978 reg = NIX_AF_MDQX_SW_XOFF(schq);
1979 break;
1980 default:
1981 return;
1982 }
1983
1984 rvu_write64(rvu, blkaddr, reg, 0x0);
1985 }
1986
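/* Return the transmit link index used by this PF_FUNC: CGX links are
 * indexed by (cgx, lmac), LBK links follow them and the SDP link comes
 * last.
 */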
1987 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1988 {
1989 struct rvu_hwinfo *hw = rvu->hw;
1990 int pf = rvu_get_pf(pcifunc);
1991 u8 cgx_id = 0, lmac_id = 0;
1992
1993 if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */
1994 return hw->cgx_links;
1995 } else if (is_pf_cgxmapped(rvu, pf)) {
1996 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1997 return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1998 }
1999
2000 /* SDP link */
2001 return hw->cgx_links + hw->lbk_links;
2002 }
2003
2004 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
2005 int link, int *start, int *end)
2006 {
2007 struct rvu_hwinfo *hw = rvu->hw;
2008 int pf = rvu_get_pf(pcifunc);
2009
2010 if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */
2011 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
2012 *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
2013 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
2014 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
2015 *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
2016 } else { /* SDP link */
2017 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
2018 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
2019 *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
2020 }
2021 }
2022
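/* Validate a TX scheduler queue allocation request at one level against
 * free-queue counts, per-function limits and contiguity constraints.
 */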
2023 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
2024 struct nix_hw *nix_hw,
2025 struct nix_txsch_alloc_req *req)
2026 {
2027 struct rvu_hwinfo *hw = rvu->hw;
2028 int schq, req_schq, free_cnt;
2029 struct nix_txsch *txsch;
2030 int link, start, end;
2031
2032 txsch = &nix_hw->txsch[lvl];
2033 req_schq = req->schq_contig[lvl] + req->schq[lvl];
2034
2035 if (!req_schq)
2036 return 0;
2037
2038 link = nix_get_tx_link(rvu, pcifunc);
2039
2040 /* For traffic aggregating scheduler level, one queue is enough */
2041 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2042 if (req_schq != 1)
2043 return NIX_AF_ERR_TLX_ALLOC_FAIL;
2044 return 0;
2045 }
2046
2047 /* Get free SCHQ count and check if request can be accommodated */
2048 if (hw->cap.nix_fixed_txschq_mapping) {
2049 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
2050 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
2051 if (end <= txsch->schq.max && schq < end &&
2052 !test_bit(schq, txsch->schq.bmap))
2053 free_cnt = 1;
2054 else
2055 free_cnt = 0;
2056 } else {
2057 free_cnt = rvu_rsrc_free_count(&txsch->schq);
2058 }
2059
2060 if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
2061 req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
2062 return NIX_AF_ERR_TLX_ALLOC_FAIL;
2063
2064 /* If contiguous queues are needed, check for availability */
2065 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
2066 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
2067 return NIX_AF_ERR_TLX_ALLOC_FAIL;
2068
2069 return 0;
2070 }
2071
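/* Reserve the requested scheduler queues within [start, end) from the
 * level's bitmap and fill the allocated indices into the mailbox response.
 */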
2072 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
2073 struct nix_txsch_alloc_rsp *rsp,
2074 int lvl, int start, int end)
2075 {
2076 struct rvu_hwinfo *hw = rvu->hw;
2077 u16 pcifunc = rsp->hdr.pcifunc;
2078 int idx, schq;
2079
2080 /* For traffic aggregating levels, queue alloc is based
2081 * on the transmit link to which the PF_FUNC is mapped.
2082 */
2083 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2084 /* A single TL queue is allocated */
2085 if (rsp->schq_contig[lvl]) {
2086 rsp->schq_contig[lvl] = 1;
2087 rsp->schq_contig_list[lvl][0] = start;
2088 }
2089
2090 /* Requesting both contig and non-contig queues makes no sense here */
2091 if (rsp->schq_contig[lvl])
2092 rsp->schq[lvl] = 0;
2093
2094 if (rsp->schq[lvl]) {
2095 rsp->schq[lvl] = 1;
2096 rsp->schq_list[lvl][0] = start;
2097 }
2098 return;
2099 }
2100
2101 /* Adjust the queue request count if HW supports
2102 * only a single queue per level for each PF_FUNC.
2103 */
2104 if (hw->cap.nix_fixed_txschq_mapping) {
2105 idx = pcifunc & RVU_PFVF_FUNC_MASK;
2106 schq = start + idx;
2107 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
2108 rsp->schq_contig[lvl] = 0;
2109 rsp->schq[lvl] = 0;
2110 return;
2111 }
2112
2113 if (rsp->schq_contig[lvl]) {
2114 rsp->schq_contig[lvl] = 1;
2115 set_bit(schq, txsch->schq.bmap);
2116 rsp->schq_contig_list[lvl][0] = schq;
2117 rsp->schq[lvl] = 0;
2118 } else if (rsp->schq[lvl]) {
2119 rsp->schq[lvl] = 1;
2120 set_bit(schq, txsch->schq.bmap);
2121 rsp->schq_list[lvl][0] = schq;
2122 }
2123 return;
2124 }
2125
2126 /* Allocate the requested contiguous queue indices first */
2127 if (rsp->schq_contig[lvl]) {
2128 schq = bitmap_find_next_zero_area(txsch->schq.bmap,
2129 txsch->schq.max, start,
2130 rsp->schq_contig[lvl], 0);
2131 if (schq >= end)
2132 rsp->schq_contig[lvl] = 0;
2133 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
2134 set_bit(schq, txsch->schq.bmap);
2135 rsp->schq_contig_list[lvl][idx] = schq;
2136 schq++;
2137 }
2138 }
2139
2140 /* Allocate non-contiguous queue indices */
2141 if (rsp->schq[lvl]) {
2142 idx = 0;
2143 for (schq = start; schq < end; schq++) {
2144 if (!test_bit(schq, txsch->schq.bmap)) {
2145 set_bit(schq, txsch->schq.bmap);
2146 rsp->schq_list[lvl][idx++] = schq;
2147 }
2148 if (idx == rsp->schq[lvl])
2149 break;
2150 }
2151 /* Update how many were allocated */
2152 rsp->schq[lvl] = idx;
2153 }
2154 }
2155
2156 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
2157 struct nix_txsch_alloc_req *req,
2158 struct nix_txsch_alloc_rsp *rsp)
2159 {
2160 struct rvu_hwinfo *hw = rvu->hw;
2161 u16 pcifunc = req->hdr.pcifunc;
2162 int link, blkaddr, rc = 0;
2163 int lvl, idx, start, end;
2164 struct nix_txsch *txsch;
2165 struct nix_hw *nix_hw;
2166 u32 *pfvf_map;
2167 int nixlf;
2168 u16 schq;
2169
2170 rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2171 if (rc)
2172 return rc;
2173
2174 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2175 if (!nix_hw)
2176 return NIX_AF_ERR_INVALID_NIXBLK;
2177
2178 mutex_lock(&rvu->rsrc_lock);
2179
2180 /* Check if request is valid as per HW capabilities
2181 * and can be accommodated.
2182 */
2183 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2184 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
2185 if (rc)
2186 goto err;
2187 }
2188
2189 /* Allocate requested Tx scheduler queues */
2190 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2191 txsch = &nix_hw->txsch[lvl];
2192 pfvf_map = txsch->pfvf_map;
2193
2194 if (!req->schq[lvl] && !req->schq_contig[lvl])
2195 continue;
2196
2197 rsp->schq[lvl] = req->schq[lvl];
2198 rsp->schq_contig[lvl] = req->schq_contig[lvl];
2199
2200 link = nix_get_tx_link(rvu, pcifunc);
2201
2202 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2203 start = link;
2204 end = link;
2205 } else if (hw->cap.nix_fixed_txschq_mapping) {
2206 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
2207 } else {
2208 start = 0;
2209 end = txsch->schq.max;
2210 }
2211
2212 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
2213
2214 /* Reset queue config */
2215 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
2216 schq = rsp->schq_contig_list[lvl][idx];
2217 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2218 NIX_TXSCHQ_CFG_DONE))
2219 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2220 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2221 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2222 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2223 }
2224
2225 for (idx = 0; idx < req->schq[lvl]; idx++) {
2226 schq = rsp->schq_list[lvl][idx];
2227 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2228 NIX_TXSCHQ_CFG_DONE))
2229 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2230 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2231 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2232 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2233 }
2234 }
2235
2236 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
2237 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
2238 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
2239 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2240 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2241 goto exit;
2242 err:
2243 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
2244 exit:
2245 mutex_unlock(&rvu->rsrc_lock);
2246 return rc;
2247 }
2248
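/* Walk up the scheduler tree from the SMQ being flushed and snapshot the
 * CIR/PIR settings and queue index at every level for later restore.
 */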
2249 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
2250 struct nix_smq_flush_ctx *smq_flush_ctx)
2251 {
2252 struct nix_smq_tree_ctx *smq_tree_ctx;
2253 u64 parent_off, regval;
2254 u16 schq;
2255 int lvl;
2256
2257 smq_flush_ctx->smq = smq;
2258
2259 schq = smq;
2260 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2261 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2262 smq_tree_ctx->schq = schq;
2263 if (lvl == NIX_TXSCH_LVL_TL1) {
2264 smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
2265 smq_tree_ctx->pir_off = 0;
2266 smq_tree_ctx->pir_val = 0;
2267 parent_off = 0;
2268 } else if (lvl == NIX_TXSCH_LVL_TL2) {
2269 smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
2270 smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
2271 parent_off = NIX_AF_TL2X_PARENT(schq);
2272 } else if (lvl == NIX_TXSCH_LVL_TL3) {
2273 smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
2274 smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
2275 parent_off = NIX_AF_TL3X_PARENT(schq);
2276 } else if (lvl == NIX_TXSCH_LVL_TL4) {
2277 smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
2278 smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
2279 parent_off = NIX_AF_TL4X_PARENT(schq);
2280 } else if (lvl == NIX_TXSCH_LVL_MDQ) {
2281 smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
2282 smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
2283 parent_off = NIX_AF_MDQX_PARENT(schq);
2284 }
2285 /* save cir/pir register values */
2286 smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
2287 if (smq_tree_ctx->pir_off)
2288 smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
2289
2290 /* get parent txsch node */
2291 if (parent_off) {
2292 regval = rvu_read64(rvu, blkaddr, parent_off);
2293 schq = (regval >> 16) & 0x1FF;
2294 }
2295 }
2296 }
2297
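/* Set or clear SW_XOFF on every other in-use TL2 queue belonging to the
 * same PF as the SMQ being flushed, so their traffic does not interfere
 * with the flush.
 */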
2298 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
2299 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2300 {
2301 struct nix_txsch *txsch;
2302 struct nix_hw *nix_hw;
2303 int tl2, tl2_schq;
2304 u64 regoff;
2305
2306 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2307 if (!nix_hw)
2308 return;
2309
2310 /* loop through all TL2s with matching PF_FUNC */
2311 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
2312 tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
2313 for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
2314 /* skip the smq(flush) TL2 */
2315 if (tl2 == tl2_schq)
2316 continue;
2317 /* skip unused TL2s */
2318 if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
2319 continue;
2320 /* skip if PF_FUNC doesn't match */
2321 if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
2322 (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq]) &
2323 ~RVU_PFVF_FUNC_MASK))
2324 continue;
2325 /* enable/disable XOFF */
2326 regoff = NIX_AF_TL2X_SW_XOFF(tl2);
2327 if (enable)
2328 rvu_write64(rvu, blkaddr, regoff, 0x1);
2329 else
2330 rvu_write64(rvu, blkaddr, regoff, 0x0);
2331 }
2332 }
2333
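/* Restore (enable) or zero out (disable) the saved CIR/PIR shaping at
 * every level of the SMQ's scheduler tree around an SMQ flush.
 */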
2334 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
2335 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2336 {
2337 u64 cir_off, pir_off, cir_val, pir_val;
2338 struct nix_smq_tree_ctx *smq_tree_ctx;
2339 int lvl;
2340
2341 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2342 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2343 cir_off = smq_tree_ctx->cir_off;
2344 cir_val = smq_tree_ctx->cir_val;
2345 pir_off = smq_tree_ctx->pir_off;
2346 pir_val = smq_tree_ctx->pir_val;
2347
2348 if (enable) {
2349 rvu_write64(rvu, blkaddr, cir_off, cir_val);
2350 if (lvl != NIX_TXSCH_LVL_TL1)
2351 rvu_write64(rvu, blkaddr, pir_off, pir_val);
2352 } else {
2353 rvu_write64(rvu, blkaddr, cir_off, 0x0);
2354 if (lvl != NIX_TXSCH_LVL_TL1)
2355 rvu_write64(rvu, blkaddr, pir_off, 0x0);
2356 }
2357 }
2358 }
2359
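/* Flush an SMQ: temporarily XOFF sibling TL2s, zero out shaping, drop
 * link backpressure and TL2/TL3 link enables, trigger the flush via
 * NIX_AF_SMQX_CFG and poll for completion, then restore the prior state.
 */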
2360 static int nix_smq_flush(struct rvu *rvu, int blkaddr,
2361 int smq, u16 pcifunc, int nixlf)
2362 {
2363 struct nix_smq_flush_ctx *smq_flush_ctx;
2364 int err, restore_tx_en = 0, i;
2365 int pf = rvu_get_pf(pcifunc);
2366 u8 cgx_id = 0, lmac_id = 0;
2367 u16 tl2_tl3_link_schq;
2368 u8 link, link_level;
2369 u64 cfg, bmap = 0;
2370
2371 if (!is_rvu_otx2(rvu)) {
2372 /* Skip SMQ flush if pkt count is zero */
2373 cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
2374 if (!cfg)
2375 return 0;
2376 }
2377
2378 /* enable cgx tx if disabled */
2379 if (is_pf_cgxmapped(rvu, pf)) {
2380 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2381 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
2382 lmac_id, true);
2383 }
2384
2385 /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
2386 smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
2387 if (!smq_flush_ctx)
2388 return -ENOMEM;
2389 nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
2390 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
2391 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
2392
2393 /* Disable backpressure from physical link,
2394 * otherwise SMQ flush may stall.
2395 */
2396 rvu_cgx_enadis_rx_bp(rvu, pf, false);
2397
2398 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2399 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2400 tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
2401 link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
2402
2403 /* SMQ set enqueue xoff */
2404 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2405 cfg |= BIT_ULL(50);
2406 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2407
2408 /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
2409 for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
2410 cfg = rvu_read64(rvu, blkaddr,
2411 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
2412 if (!(cfg & BIT_ULL(12)))
2413 continue;
2414 bmap |= BIT_ULL(i);
2415 cfg &= ~BIT_ULL(12);
2416 rvu_write64(rvu, blkaddr,
2417 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
2418 }
2419
2420 /* Do SMQ flush and set enqueue xoff */
2421 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2422 cfg |= BIT_ULL(50) | BIT_ULL(49);
2423 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2424
2425 /* Wait for flush to complete */
2426 err = rvu_poll_reg(rvu, blkaddr,
2427 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
2428 if (err)
2429 dev_info(rvu->dev,
2430 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
2431 nixlf, smq);
2432
2433 /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
2434 for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
2435 if (!(bmap & BIT_ULL(i)))
2436 continue;
2437 cfg = rvu_read64(rvu, blkaddr,
2438 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
2439 cfg |= BIT_ULL(12);
2440 rvu_write64(rvu, blkaddr,
2441 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
2442 }
2443
2444 /* clear XOFF on TL2s */
2445 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
2446 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
2447 kfree(smq_flush_ctx);
2448
2449 rvu_cgx_enadis_rx_bp(rvu, pf, true);
2450 /* restore cgx tx state */
2451 if (restore_tx_en)
2452 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
2453 return err;
2454 }
2455
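/* Free all TX scheduler queues owned by a PF_FUNC: quiesce link config,
 * XOFFs and shaping, flush its SMQs and return the queues to the free
 * pool.
 */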
2456 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
2457 {
2458 int blkaddr, nixlf, lvl, schq, err;
2459 struct rvu_hwinfo *hw = rvu->hw;
2460 struct nix_txsch *txsch;
2461 struct nix_hw *nix_hw;
2462 u16 map_func;
2463
2464 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2465 if (blkaddr < 0)
2466 return NIX_AF_ERR_AF_LF_INVALID;
2467
2468 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2469 if (!nix_hw)
2470 return NIX_AF_ERR_INVALID_NIXBLK;
2471
2472 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2473 if (nixlf < 0)
2474 return NIX_AF_ERR_AF_LF_INVALID;
2475
2476 /* Disable TL2/3 queue links and all XOFFs before SMQ flush */
2477 mutex_lock(&rvu->rsrc_lock);
2478 for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2479 txsch = &nix_hw->txsch[lvl];
2480
2481 if (lvl >= hw->cap.nix_tx_aggr_lvl)
2482 continue;
2483
2484 for (schq = 0; schq < txsch->schq.max; schq++) {
2485 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2486 continue;
2487 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2488 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2489 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2490 }
2491 }
2492 nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
2493 nix_get_tx_link(rvu, pcifunc));
2494
2495 /* On PF cleanup, clear cfg done flag as
2496 * PF would have changed default config.
2497 */
2498 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
2499 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
2500 schq = nix_get_tx_link(rvu, pcifunc);
2501 /* Do not clear pcifunc in txsch->pfvf_map[schq] because
2502 * VF might be using this TL1 queue
2503 */
2504 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
2505 txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
2506 }
2507
2508 /* Flush SMQs */
2509 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2510 for (schq = 0; schq < txsch->schq.max; schq++) {
2511 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2512 continue;
2513 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2514 }
2515
2516 /* Now free scheduler queues to free pool */
2517 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2518 /* TLs above the aggregation level are shared across a PF
2519 * and its VFs, hence skip freeing them.
2520 */
2521 if (lvl >= hw->cap.nix_tx_aggr_lvl)
2522 continue;
2523
2524 txsch = &nix_hw->txsch[lvl];
2525 for (schq = 0; schq < txsch->schq.max; schq++) {
2526 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2527 continue;
2528 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2529 rvu_free_rsrc(&txsch->schq, schq);
2530 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2531 }
2532 }
2533 mutex_unlock(&rvu->rsrc_lock);
2534
2535 err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC);
2536 if (err)
2537 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
2538
2539 return 0;
2540 }
2541
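/* Free a single TX scheduler queue on request, flushing it first if it
 * is an SMQ. Queues at or above the aggregation level are shared and are
 * left untouched.
 */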
2542 static int nix_txschq_free_one(struct rvu *rvu,
2543 struct nix_txsch_free_req *req)
2544 {
2545 struct rvu_hwinfo *hw = rvu->hw;
2546 u16 pcifunc = req->hdr.pcifunc;
2547 int lvl, schq, nixlf, blkaddr;
2548 struct nix_txsch *txsch;
2549 struct nix_hw *nix_hw;
2550 u32 *pfvf_map;
2551 int rc;
2552
2553 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2554 if (blkaddr < 0)
2555 return NIX_AF_ERR_AF_LF_INVALID;
2556
2557 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2558 if (!nix_hw)
2559 return NIX_AF_ERR_INVALID_NIXBLK;
2560
2561 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2562 if (nixlf < 0)
2563 return NIX_AF_ERR_AF_LF_INVALID;
2564
2565 lvl = req->schq_lvl;
2566 schq = req->schq;
2567 txsch = &nix_hw->txsch[lvl];
2568
2569 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
2570 return 0;
2571
2572 pfvf_map = txsch->pfvf_map;
2573 mutex_lock(&rvu->rsrc_lock);
2574
2575 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
2576 rc = NIX_AF_ERR_TLX_INVALID;
2577 goto err;
2578 }
2579
2580 /* Clear SW_XOFF of this resource only.
2581 * For SMQ level, all XOFFs along the path
2582 * must be cleared by the user.
2583 */
2584 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2585
2586 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2587 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2588
2589 /* Flush if it is an SMQ. The onus of disabling
2590 * TL2/3 queue links before the SMQ flush is on the user.
2591 */
2592 if (lvl == NIX_TXSCH_LVL_SMQ &&
2593 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
2594 rc = NIX_AF_SMQ_FLUSH_FAILED;
2595 goto err;
2596 }
2597
2598 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2599
2600 /* Free the resource */
2601 rvu_free_rsrc(&txsch->schq, schq);
2602 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2603 mutex_unlock(&rvu->rsrc_lock);
2604 return 0;
2605 err:
2606 mutex_unlock(&rvu->rsrc_lock);
2607 return rc;
2608 }
2609
2610 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
2611 struct nix_txsch_free_req *req,
2612 struct msg_rsp *rsp)
2613 {
2614 if (req->flags & TXSCHQ_FREE_ALL)
2615 return nix_txschq_free(rvu, req->hdr.pcifunc);
2616 else
2617 return nix_txschq_free_one(rvu, req);
2618 }
2619
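/* Validate a TXSCHQ register write: the register must be valid for the
 * level, the queue must belong to this PF_FUNC, and any parent queue
 * being programmed must belong to it as well.
 */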
2620 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
2621 int lvl, u64 reg, u64 regval)
2622 {
2623 u64 regbase = reg & 0xFFFF;
2624 u16 schq, parent;
2625
2626 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
2627 return false;
2628
2629 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2630 /* Check if this schq belongs to this PF/VF or not */
2631 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
2632 return false;
2633
2634 parent = (regval >> 16) & 0x1FF;
2635 /* Validate MDQ's TL4 parent */
2636 if (regbase == NIX_AF_MDQX_PARENT(0) &&
2637 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
2638 return false;
2639
2640 /* Validate TL4's TL3 parent */
2641 if (regbase == NIX_AF_TL4X_PARENT(0) &&
2642 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
2643 return false;
2644
2645 /* Validate TL3's TL2 parent */
2646 if (regbase == NIX_AF_TL3X_PARENT(0) &&
2647 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
2648 return false;
2649
2650 /* Validate TL2's TL1 parent */
2651 if (regbase == NIX_AF_TL2X_PARENT(0) &&
2652 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
2653 return false;
2654
2655 return true;
2656 }
2657
2658 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
2659 {
2660 u64 regbase;
2661
2662 if (hw->cap.nix_shaping)
2663 return true;
2664
2665 /* If shaping and coloring are not supported, then
2666 * *_CIR and *_PIR registers should not be configured.
2667 */
2668 regbase = reg & 0xFFFF;
2669
2670 switch (lvl) {
2671 case NIX_TXSCH_LVL_TL1:
2672 if (regbase == NIX_AF_TL1X_CIR(0))
2673 return false;
2674 break;
2675 case NIX_TXSCH_LVL_TL2:
2676 if (regbase == NIX_AF_TL2X_CIR(0) ||
2677 regbase == NIX_AF_TL2X_PIR(0))
2678 return false;
2679 break;
2680 case NIX_TXSCH_LVL_TL3:
2681 if (regbase == NIX_AF_TL3X_CIR(0) ||
2682 regbase == NIX_AF_TL3X_PIR(0))
2683 return false;
2684 break;
2685 case NIX_TXSCH_LVL_TL4:
2686 if (regbase == NIX_AF_TL4X_CIR(0) ||
2687 regbase == NIX_AF_TL4X_PIR(0))
2688 return false;
2689 break;
2690 case NIX_TXSCH_LVL_MDQ:
2691 if (regbase == NIX_AF_MDQX_CIR(0) ||
2692 regbase == NIX_AF_MDQX_PIR(0))
2693 return false;
2694 break;
2695 }
2696 return true;
2697 }
2698
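/* Apply default TL1 topology/schedule settings for the PF's transmit
 * link, unless the PF has already pushed its own TL1 config.
 */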
2699 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
2700 u16 pcifunc, int blkaddr)
2701 {
2702 u32 *pfvf_map;
2703 int schq;
2704
2705 schq = nix_get_tx_link(rvu, pcifunc);
2706 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
2707 /* Skip if PF has already done the config */
2708 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
2709 return;
2710 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
2711 (TXSCH_TL1_DFLT_RR_PRIO << 1));
2712
2713 /* On OcteonTx2 the config was in bytes, on newer silicons
2714 * it's changed to a weight.
2715 */
2716 if (!rvu->hw->cap.nix_common_dwrr_mtu)
2717 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2718 TXSCH_TL1_DFLT_RR_QTM);
2719 else
2720 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2721 CN10K_MAX_DWRR_WEIGHT);
2722
2723 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
2724 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
2725 }
2726
2727 /* Register offset - [15:0]
2728 * Scheduler Queue number - [25:16]
2729 */
2730 #define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0)
2731
2732 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
2733 int blkaddr, struct nix_txschq_config *req,
2734 struct nix_txschq_config *rsp)
2735 {
2736 u16 pcifunc = req->hdr.pcifunc;
2737 int idx, schq;
2738 u64 reg;
2739
2740 for (idx = 0; idx < req->num_regs; idx++) {
2741 reg = req->reg[idx];
2742 reg &= NIX_TX_SCHQ_MASK;
2743 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2744 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
2745 !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
2746 return NIX_AF_INVAL_TXSCHQ_CFG;
2747 rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
2748 }
2749 rsp->lvl = req->lvl;
2750 rsp->num_regs = req->num_regs;
2751 return 0;
2752 }
2753
2754 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
2755 struct nix_txsch *txsch, bool enable)
2756 {
2757 struct rvu_hwinfo *hw = rvu->hw;
2758 int lbk_link_start, lbk_links;
2759 u8 pf = rvu_get_pf(pcifunc);
2760 int schq;
2761 u64 cfg;
2762
2763 if (!is_pf_cgxmapped(rvu, pf))
2764 return;
2765
2766 cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
2767 lbk_link_start = hw->cgx_links;
2768
2769 for (schq = 0; schq < txsch->schq.max; schq++) {
2770 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2771 continue;
2772 /* Enable all LBK links with channel 63 by default so that
2773 * packets can be sent to LBK with an NPC TX MCAM rule
2774 */
2775 lbk_links = hw->lbk_links;
2776 while (lbk_links--)
2777 rvu_write64(rvu, blkaddr,
2778 NIX_AF_TL3_TL2X_LINKX_CFG(schq,
2779 lbk_link_start +
2780 lbk_links), cfg);
2781 }
2782 }
2783
2784 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
2785 struct nix_txschq_config *req,
2786 struct nix_txschq_config *rsp)
2787 {
2788 u64 reg, val, regval, schq_regbase, val_mask;
2789 struct rvu_hwinfo *hw = rvu->hw;
2790 u16 pcifunc = req->hdr.pcifunc;
2791 struct nix_txsch *txsch;
2792 struct nix_hw *nix_hw;
2793 int blkaddr, idx, err;
2794 int nixlf, schq;
2795 u32 *pfvf_map;
2796
2797 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
2798 req->num_regs > MAX_REGS_PER_MBOX_MSG)
2799 return NIX_AF_INVAL_TXSCHQ_CFG;
2800
2801 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2802 if (err)
2803 return err;
2804
2805 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2806 if (!nix_hw)
2807 return NIX_AF_ERR_INVALID_NIXBLK;
2808
2809 if (req->read)
2810 return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
2811
2812 txsch = &nix_hw->txsch[req->lvl];
2813 pfvf_map = txsch->pfvf_map;
2814
2815 if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2816 pcifunc & RVU_PFVF_FUNC_MASK) {
2817 mutex_lock(&rvu->rsrc_lock);
2818 if (req->lvl == NIX_TXSCH_LVL_TL1)
2819 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2820 mutex_unlock(&rvu->rsrc_lock);
2821 return 0;
2822 }
2823
2824 for (idx = 0; idx < req->num_regs; idx++) {
2825 reg = req->reg[idx];
2826 reg &= NIX_TX_SCHQ_MASK;
2827 regval = req->regval[idx];
2828 schq_regbase = reg & 0xFFFF;
2829 val_mask = req->regval_mask[idx];
2830
2831 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2832 txsch->lvl, reg, regval))
2833 return NIX_AF_INVAL_TXSCHQ_CFG;
2834
2835 /* Check if shaping and coloring is supported */
2836 if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2837 continue;
2838
2839 val = rvu_read64(rvu, blkaddr, reg);
2840 regval = (val & val_mask) | (regval & ~val_mask);
2841
2842 /* Handle shaping state toggle specially */
2843 if (hw->cap.nix_shaper_toggle_wait &&
2844 handle_txschq_shaper_update(rvu, blkaddr, nixlf,
2845 req->lvl, reg, regval))
2846 continue;
2847
2848 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2849 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2850 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2851 pcifunc, 0);
2852 regval &= ~(0x7FULL << 24);
2853 regval |= ((u64)nixlf << 24);
2854 }
2855
2856 /* Clear 'BP_ENA' config, if it's not allowed */
2857 if (!hw->cap.nix_tx_link_bp) {
2858 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2859 (schq_regbase & 0xFF00) ==
2860 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2861 regval &= ~BIT_ULL(13);
2862 }
2863
2864 /* Mark config as done for TL1 by PF */
2865 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2866 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2867 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2868 mutex_lock(&rvu->rsrc_lock);
2869 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2870 NIX_TXSCHQ_CFG_DONE);
2871 mutex_unlock(&rvu->rsrc_lock);
2872 }
2873
2874 /* SMQ flush is special, hence split the register write such
2875 * that the flush happens first and the remaining bits are written later.
2876 */
2877 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2878 (regval & BIT_ULL(49))) {
2879 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2880 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2881 regval &= ~BIT_ULL(49);
2882 }
2883 rvu_write64(rvu, blkaddr, reg, regval);
2884 }
2885
2886 return 0;
2887 }
2888
2889 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2890 struct nix_vtag_config *req)
2891 {
2892 u64 regval = req->vtag_size;
2893
2894 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2895 req->vtag_size > VTAGSIZE_T8)
2896 return -EINVAL;
2897
2898 /* RX VTAG Type 7 reserved for vf vlan */
2899 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2900 return NIX_AF_ERR_RX_VTAG_INUSE;
2901
2902 if (req->rx.capture_vtag)
2903 regval |= BIT_ULL(5);
2904 if (req->rx.strip_vtag)
2905 regval |= BIT_ULL(4);
2906
2907 rvu_write64(rvu, blkaddr,
2908 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2909 return 0;
2910 }
2911
2912 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2913 u16 pcifunc, int index)
2914 {
2915 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2916 struct nix_txvlan *vlan;
2917
2918 if (!nix_hw)
2919 return NIX_AF_ERR_INVALID_NIXBLK;
2920
2921 vlan = &nix_hw->txvlan;
2922 if (vlan->entry2pfvf_map[index] != pcifunc)
2923 return NIX_AF_ERR_PARAM;
2924
2925 rvu_write64(rvu, blkaddr,
2926 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2927 rvu_write64(rvu, blkaddr,
2928 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2929
2930 vlan->entry2pfvf_map[index] = 0;
2931 rvu_free_rsrc(&vlan->rsrc, index);
2932
2933 return 0;
2934 }
2935
2936 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2937 {
2938 struct nix_txvlan *vlan;
2939 struct nix_hw *nix_hw;
2940 int index, blkaddr;
2941
2942 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2943 if (blkaddr < 0)
2944 return;
2945
2946 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2947 if (!nix_hw)
2948 return;
2949
2950 vlan = &nix_hw->txvlan;
2951
2952 mutex_lock(&vlan->rsrc_lock);
2953 /* Scan all the entries and free the ones mapped to 'pcifunc' */
2954 for (index = 0; index < vlan->rsrc.max; index++) {
2955 if (vlan->entry2pfvf_map[index] == pcifunc)
2956 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2957 }
2958 mutex_unlock(&vlan->rsrc_lock);
2959 }
2960
2961 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2962 u64 vtag, u8 size)
2963 {
2964 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2965 struct nix_txvlan *vlan;
2966 u64 regval;
2967 int index;
2968
2969 if (!nix_hw)
2970 return NIX_AF_ERR_INVALID_NIXBLK;
2971
2972 vlan = &nix_hw->txvlan;
2973
2974 mutex_lock(&vlan->rsrc_lock);
2975
2976 index = rvu_alloc_rsrc(&vlan->rsrc);
2977 if (index < 0) {
2978 mutex_unlock(&vlan->rsrc_lock);
2979 return index;
2980 }
2981
2982 mutex_unlock(&vlan->rsrc_lock);
2983
2984 regval = size ? vtag : vtag << 32;
2985
2986 rvu_write64(rvu, blkaddr,
2987 NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2988 rvu_write64(rvu, blkaddr,
2989 NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2990
2991 return index;
2992 }
2993
2994 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2995 struct nix_vtag_config *req)
2996 {
2997 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2998 u16 pcifunc = req->hdr.pcifunc;
2999 int idx0 = req->tx.vtag0_idx;
3000 int idx1 = req->tx.vtag1_idx;
3001 struct nix_txvlan *vlan;
3002 int err = 0;
3003
3004 if (!nix_hw)
3005 return NIX_AF_ERR_INVALID_NIXBLK;
3006
3007 vlan = &nix_hw->txvlan;
3008 if (req->tx.free_vtag0 && req->tx.free_vtag1)
3009 if (vlan->entry2pfvf_map[idx0] != pcifunc ||
3010 vlan->entry2pfvf_map[idx1] != pcifunc)
3011 return NIX_AF_ERR_PARAM;
3012
3013 mutex_lock(&vlan->rsrc_lock);
3014
3015 if (req->tx.free_vtag0) {
3016 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
3017 if (err)
3018 goto exit;
3019 }
3020
3021 if (req->tx.free_vtag1)
3022 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
3023
3024 exit:
3025 mutex_unlock(&vlan->rsrc_lock);
3026 return err;
3027 }
3028
3029 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
3030 struct nix_vtag_config *req,
3031 struct nix_vtag_config_rsp *rsp)
3032 {
3033 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
3034 struct nix_txvlan *vlan;
3035 u16 pcifunc = req->hdr.pcifunc;
3036
3037 if (!nix_hw)
3038 return NIX_AF_ERR_INVALID_NIXBLK;
3039
3040 vlan = &nix_hw->txvlan;
3041 if (req->tx.cfg_vtag0) {
3042 rsp->vtag0_idx =
3043 nix_tx_vtag_alloc(rvu, blkaddr,
3044 req->tx.vtag0, req->vtag_size);
3045
3046 if (rsp->vtag0_idx < 0)
3047 return NIX_AF_ERR_TX_VTAG_NOSPC;
3048
3049 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
3050 }
3051
3052 if (req->tx.cfg_vtag1) {
3053 rsp->vtag1_idx =
3054 nix_tx_vtag_alloc(rvu, blkaddr,
3055 req->tx.vtag1, req->vtag_size);
3056
3057 if (rsp->vtag1_idx < 0)
3058 goto err_free;
3059
3060 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
3061 }
3062
3063 return 0;
3064
3065 err_free:
3066 if (req->tx.cfg_vtag0)
3067 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
3068
3069 return NIX_AF_ERR_TX_VTAG_NOSPC;
3070 }
3071
3072 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
3073 struct nix_vtag_config *req,
3074 struct nix_vtag_config_rsp *rsp)
3075 {
3076 u16 pcifunc = req->hdr.pcifunc;
3077 int blkaddr, nixlf, err;
3078
3079 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3080 if (err)
3081 return err;
3082
3083 if (req->cfg_type) {
3084 /* rx vtag configuration */
3085 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
3086 if (err)
3087 return NIX_AF_ERR_PARAM;
3088 } else {
3089 /* tx vtag configuration */
3090 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
3091 (req->tx.free_vtag0 || req->tx.free_vtag1))
3092 return NIX_AF_ERR_PARAM;
3093
3094 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
3095 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
3096
3097 if (req->tx.free_vtag0 || req->tx.free_vtag1)
3098 return nix_tx_vtag_decfg(rvu, blkaddr, req);
3099 }
3100
3101 return 0;
3102 }
3103
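/* Program one multicast/mirror entry (MCE) via the NIX AQ, pointing it
 * at the given PF_FUNC and linking it to the next entry in the list.
 */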
3104 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
3105 int mce, u8 op, u16 pcifunc, int next,
3106 int index, u8 mce_op, bool eol)
3107 {
3108 struct nix_aq_enq_req aq_req;
3109 int err;
3110
3111 aq_req.hdr.pcifunc = 0;
3112 aq_req.ctype = NIX_AQ_CTYPE_MCE;
3113 aq_req.op = op;
3114 aq_req.qidx = mce;
3115
3116 /* Fill in destination op, RQ/RSS index, next MCE index and EOL */
3117 aq_req.mce.op = mce_op;
3118 aq_req.mce.index = index;
3119 aq_req.mce.eol = eol;
3120 aq_req.mce.pf_func = pcifunc;
3121 aq_req.mce.next = next;
3122
3123 /* All fields valid */
3124 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
3125
3126 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
3127 if (err) {
3128 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
3129 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
3130 return err;
3131 }
3132 return 0;
3133 }
3134
3135 static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list)
3136 {
3137 struct hlist_node *tmp;
3138 struct mce *mce;
3139
3140 /* Scan through the current list */
3141 hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
3142 hlist_del(&mce->node);
3143 kfree(mce);
3144 }
3145
3146 mce_list->count = 0;
3147 mce_list->max = 0;
3148 }
3149
3150 static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem)
3151 {
3152 return elem->mce_start_index + elem->mcast_mce_list.count - 1;
3153 }
3154
3155 static int nix_update_ingress_mce_list_hw(struct rvu *rvu,
3156 struct nix_hw *nix_hw,
3157 struct nix_mcast_grp_elem *elem)
3158 {
3159 int idx, last_idx, next_idx, err;
3160 struct nix_mce_list *mce_list;
3161 struct mce *mce, *prev_mce;
3162
3163 mce_list = &elem->mcast_mce_list;
3164 idx = elem->mce_start_index;
3165 last_idx = nix_get_last_mce_list_index(elem);
3166 hlist_for_each_entry(mce, &mce_list->head, node) {
3167 if (idx > last_idx)
3168 break;
3169
3170 if (!mce->is_active) {
3171 if (idx == elem->mce_start_index) {
3172 idx++;
3173 prev_mce = mce;
3174 elem->mce_start_index = idx;
3175 continue;
3176 } else if (idx == last_idx) {
3177 err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE,
3178 prev_mce->pcifunc, next_idx,
3179 prev_mce->rq_rss_index,
3180 prev_mce->dest_type,
3181 false);
3182 if (err)
3183 return err;
3184
3185 break;
3186 }
3187 }
3188
3189 next_idx = idx + 1;
3190 /* EOL should be set in last MCE */
3191 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
3192 mce->pcifunc, next_idx,
3193 mce->rq_rss_index, mce->dest_type,
3194 (next_idx > last_idx) ? true : false);
3195 if (err)
3196 return err;
3197
3198 idx++;
3199 prev_mce = mce;
3200 }
3201
3202 return 0;
3203 }
3204
3205 static void nix_update_egress_mce_list_hw(struct rvu *rvu,
3206 struct nix_hw *nix_hw,
3207 struct nix_mcast_grp_elem *elem)
3208 {
3209 struct nix_mce_list *mce_list;
3210 int idx, last_idx, next_idx;
3211 struct mce *mce, *prev_mce;
3212 u64 regval;
3213 u8 eol;
3214
3215 mce_list = &elem->mcast_mce_list;
3216 idx = elem->mce_start_index;
3217 last_idx = nix_get_last_mce_list_index(elem);
3218 hlist_for_each_entry(mce, &mce_list->head, node) {
3219 if (idx > last_idx)
3220 break;
3221
3222 if (!mce->is_active) {
3223 if (idx == elem->mce_start_index) {
3224 idx++;
3225 prev_mce = mce;
3226 elem->mce_start_index = idx;
3227 continue;
3228 } else if (idx == last_idx) {
3229 regval = (next_idx << 16) | (1 << 12) | prev_mce->channel;
3230 rvu_write64(rvu, nix_hw->blkaddr,
3231 NIX_AF_TX_MCASTX(idx - 1),
3232 regval);
3233 break;
3234 }
3235 }
3236
3237 eol = 0;
3238 next_idx = idx + 1;
3239 /* EOL should be set in last MCE */
3240 if (next_idx > last_idx)
3241 eol = 1;
3242
3243 regval = (next_idx << 16) | (eol << 12) | mce->channel;
3244 rvu_write64(rvu, nix_hw->blkaddr,
3245 NIX_AF_TX_MCASTX(idx),
3246 regval);
3247 idx++;
3248 prev_mce = mce;
3249 }
3250 }
3251
3252 static int nix_del_mce_list_entry(struct rvu *rvu,
3253 struct nix_hw *nix_hw,
3254 struct nix_mcast_grp_elem *elem,
3255 struct nix_mcast_grp_update_req *req)
3256 {
3257 u32 num_entry = req->num_mce_entry;
3258 struct nix_mce_list *mce_list;
3259 struct mce *mce;
3260 bool is_found;
3261 int i;
3262
3263 mce_list = &elem->mcast_mce_list;
3264 for (i = 0; i < num_entry; i++) {
3265 is_found = false;
3266 hlist_for_each_entry(mce, &mce_list->head, node) {
3267 /* If already exists, then delete */
3268 if (mce->pcifunc == req->pcifunc[i]) {
3269 hlist_del(&mce->node);
3270 kfree(mce);
3271 mce_list->count--;
3272 is_found = true;
3273 break;
3274 }
3275 }
3276
3277 if (!is_found)
3278 return NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
3279 }
3280
3281 mce_list->max = mce_list->count;
3282 /* Dump the updated list to HW */
3283 if (elem->dir == NIX_MCAST_INGRESS)
3284 return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
3285
3286 nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
3287 return 0;
3288 }
3289
3290 static int nix_add_mce_list_entry(struct rvu *rvu,
3291 struct nix_hw *nix_hw,
3292 struct nix_mcast_grp_elem *elem,
3293 struct nix_mcast_grp_update_req *req)
3294 {
3295 u32 num_entry = req->num_mce_entry;
3296 struct nix_mce_list *mce_list;
3297 struct hlist_node *tmp;
3298 struct mce *mce;
3299 int i;
3300
3301 mce_list = &elem->mcast_mce_list;
3302 for (i = 0; i < num_entry; i++) {
3303 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
3304 if (!mce)
3305 goto free_mce;
3306
3307 mce->pcifunc = req->pcifunc[i];
3308 mce->channel = req->channel[i];
3309 mce->rq_rss_index = req->rq_rss_index[i];
3310 mce->dest_type = req->dest_type[i];
3311 mce->is_active = 1;
3312 hlist_add_head(&mce->node, &mce_list->head);
3313 mce_list->count++;
3314 }
3315
3316 mce_list->max += num_entry;
3317
3318 /* Dump the updated list to HW */
3319 if (elem->dir == NIX_MCAST_INGRESS)
3320 return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
3321
3322 nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
3323 return 0;
3324
3325 free_mce:
3326 hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
3327 hlist_del(&mce->node);
3328 kfree(mce);
3329 mce_list->count--;
3330 }
3331
3332 return -ENOMEM;
3333 }
3334
3335 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
3336 u16 pcifunc, bool add)
3337 {
3338 struct mce *mce, *tail = NULL;
3339 bool delete = false;
3340
3341 /* Scan through the current list */
3342 hlist_for_each_entry(mce, &mce_list->head, node) {
3343 /* If already exists, then delete */
3344 if (mce->pcifunc == pcifunc && !add) {
3345 delete = true;
3346 break;
3347 } else if (mce->pcifunc == pcifunc && add) {
3348 /* entry already exists */
3349 return 0;
3350 }
3351 tail = mce;
3352 }
3353
3354 if (delete) {
3355 hlist_del(&mce->node);
3356 kfree(mce);
3357 mce_list->count--;
3358 return 0;
3359 }
3360
3361 if (!add)
3362 return 0;
3363
3364 /* Add a new one to the list, at the tail */
3365 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
3366 if (!mce)
3367 return -ENOMEM;
3368 mce->pcifunc = pcifunc;
3369 if (!tail)
3370 hlist_add_head(&mce->node, &mce_list->head);
3371 else
3372 hlist_add_behind(&mce->node, &tail->node);
3373 mce_list->count++;
3374 return 0;
3375 }
3376
3377 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
3378 struct nix_mce_list *mce_list,
3379 int mce_idx, int mcam_index, bool add)
3380 {
3381 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
3382 struct npc_mcam *mcam = &rvu->hw->mcam;
3383 struct nix_mcast *mcast;
3384 struct nix_hw *nix_hw;
3385 struct mce *mce;
3386
3387 if (!mce_list)
3388 return -EINVAL;
3389
3390 /* Get this PF/VF func's MCE index */
3391 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
3392
3393 if (idx > (mce_idx + mce_list->max)) {
3394 dev_err(rvu->dev,
3395 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
3396 __func__, idx, mce_list->max,
3397 pcifunc >> RVU_PFVF_PF_SHIFT);
3398 return -EINVAL;
3399 }
3400
3401 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
3402 if (err)
3403 return err;
3404
3405 mcast = &nix_hw->mcast;
3406 mutex_lock(&mcast->mce_lock);
3407
3408 err = nix_update_mce_list_entry(mce_list, pcifunc, add);
3409 if (err)
3410 goto end;
3411
3412 /* Disable MCAM entry in NPC */
3413 if (!mce_list->count) {
3414 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3415 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
3416 goto end;
3417 }
3418
3419 /* Dump the updated list to HW */
3420 idx = mce_idx;
3421 last_idx = idx + mce_list->count - 1;
3422 hlist_for_each_entry(mce, &mce_list->head, node) {
3423 if (idx > last_idx)
3424 break;
3425
3426 next_idx = idx + 1;
3427 /* EOL should be set in last MCE */
3428 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
3429 mce->pcifunc, next_idx,
3430 0, 1,
3431 (next_idx > last_idx) ? true : false);
3432 if (err)
3433 goto end;
3434 idx++;
3435 }
3436
3437 end:
3438 mutex_unlock(&mcast->mce_lock);
3439 return err;
3440 }
3441
3442 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
3443 struct nix_mce_list **mce_list, int *mce_idx)
3444 {
3445 struct rvu_hwinfo *hw = rvu->hw;
3446 struct rvu_pfvf *pfvf;
3447
3448 if (!hw->cap.nix_rx_multicast ||
3449 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
3450 *mce_list = NULL;
3451 *mce_idx = 0;
3452 return;
3453 }
3454
3455 /* Get this PF/VF func's MCE index */
3456 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
3457
3458 if (type == NIXLF_BCAST_ENTRY) {
3459 *mce_list = &pfvf->bcast_mce_list;
3460 *mce_idx = pfvf->bcast_mce_idx;
3461 } else if (type == NIXLF_ALLMULTI_ENTRY) {
3462 *mce_list = &pfvf->mcast_mce_list;
3463 *mce_idx = pfvf->mcast_mce_idx;
3464 } else if (type == NIXLF_PROMISC_ENTRY) {
3465 *mce_list = &pfvf->promisc_mce_list;
3466 *mce_idx = pfvf->promisc_mce_idx;
3467 } else {
3468 *mce_list = NULL;
3469 *mce_idx = 0;
3470 }
3471 }
3472
3473 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
3474 int type, bool add)
3475 {
3476 int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
3477 struct npc_mcam *mcam = &rvu->hw->mcam;
3478 struct rvu_hwinfo *hw = rvu->hw;
3479 struct nix_mce_list *mce_list;
3480 int pf;
3481
3482 /* skip multicast pkt replication for AF's VFs & SDP links */
3483 if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc))
3484 return 0;
3485
3486 if (!hw->cap.nix_rx_multicast)
3487 return 0;
3488
3489 pf = rvu_get_pf(pcifunc);
3490 if (!is_pf_cgxmapped(rvu, pf))
3491 return 0;
3492
3493 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3494 if (blkaddr < 0)
3495 return -EINVAL;
3496
3497 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
3498 if (nixlf < 0)
3499 return -EINVAL;
3500
3501 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
3502
3503 mcam_index = npc_get_nixlf_mcam_index(mcam,
3504 pcifunc & ~RVU_PFVF_FUNC_MASK,
3505 nixlf, type);
3506 err = nix_update_mce_list(rvu, pcifunc, mce_list,
3507 mce_idx, mcam_index, add);
3508 return err;
3509 }
3510
3511 static void nix_setup_mcast_grp(struct nix_hw *nix_hw)
3512 {
3513 struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp;
3514
3515 INIT_LIST_HEAD(&mcast_grp->mcast_grp_head);
3516 mutex_init(&mcast_grp->mcast_grp_lock);
3517 mcast_grp->next_grp_index = 1;
3518 mcast_grp->count = 0;
3519 }
3520
3521 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
3522 {
3523 struct nix_mcast *mcast = &nix_hw->mcast;
3524 int err, pf, numvfs, idx;
3525 struct rvu_pfvf *pfvf;
3526 u16 pcifunc;
3527 u64 cfg;
3528
3529 /* Skip PF0 (i.e AF) */
3530 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
3531 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3532 /* If PF is not enabled, nothing to do */
3533 if (!((cfg >> 20) & 0x01))
3534 continue;
3535 /* Get numVFs attached to this PF */
3536 numvfs = (cfg >> 12) & 0xFF;
3537
3538 pfvf = &rvu->pf[pf];
3539
3540 /* Is this NIX0/1 block mapped to this PF? */
3541 if (pfvf->nix_blkaddr != nix_hw->blkaddr)
3542 continue;
3543
3544 /* save start idx of broadcast mce list */
3545 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3546 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
3547
3548 /* save start idx of multicast mce list */
3549 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3550 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
3551
3552 /* save the start idx of promisc mce list */
3553 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
3554 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
3555
3556 for (idx = 0; idx < (numvfs + 1); idx++) {
3557 /* idx-0 is for PF, followed by VFs */
3558 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
3559 pcifunc |= idx;
3560 /* Add dummy entries now, so that we don't have to check
3561 * whether AQ_OP should be INIT/WRITE later on.
3562 * Will be updated when a NIXLF is attached/detached to
3563 * these PF/VFs.
3564 */
3565 err = nix_blk_setup_mce(rvu, nix_hw,
3566 pfvf->bcast_mce_idx + idx,
3567 NIX_AQ_INSTOP_INIT,
3568 pcifunc, 0, 0, 1, true);
3569 if (err)
3570 return err;
3571
3572 /* add dummy entries to multicast mce list */
3573 err = nix_blk_setup_mce(rvu, nix_hw,
3574 pfvf->mcast_mce_idx + idx,
3575 NIX_AQ_INSTOP_INIT,
3576 pcifunc, 0, 0, 1, true);
3577 if (err)
3578 return err;
3579
3580 /* add dummy entries to promisc mce list */
3581 err = nix_blk_setup_mce(rvu, nix_hw,
3582 pfvf->promisc_mce_idx + idx,
3583 NIX_AQ_INSTOP_INIT,
3584 pcifunc, 0, 0, 1, true);
3585 if (err)
3586 return err;
3587 }
3588 }
3589 return 0;
3590 }
3591
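/* One time multicast setup for a NIX block: allocate ingress/egress MCE
 * bitmaps, MCE context and replication buffer memory, program the
 * corresponding AF base/config registers and then build the per PF MCE
 * tables.
 */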
3592 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3593 {
3594 struct nix_mcast *mcast = &nix_hw->mcast;
3595 struct rvu_hwinfo *hw = rvu->hw;
3596 int err, size;
3597
3598 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
3599 size = BIT_ULL(size);
3600
3601 /* Allocate bitmap for rx mce entries */
3602 mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE;
3603 err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3604 if (err)
3605 return -ENOMEM;
3606
3607 /* Allocate bitmap for tx mce entries */
3608 mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX;
3609 err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3610 if (err) {
3611 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3612 return -ENOMEM;
3613 }
3614
3615 /* Alloc memory for multicast/mirror replication entries */
3616 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
3617 mcast->mce_counter[NIX_MCAST_INGRESS].max, size);
3618 if (err) {
3619 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3620 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3621 return -ENOMEM;
3622 }
3623
3624 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
3625 (u64)mcast->mce_ctx->iova);
3626
3627 /* Set max list length equal to max no of VFs per PF + PF itself */
3628 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
3629 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
3630
3631 /* Alloc memory for multicast replication buffers */
3632 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
3633 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
3634 (8UL << MC_BUF_CNT), size);
3635 if (err) {
3636 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
3637 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
3638 return -ENOMEM;
3639 }
3640
3641 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
3642 (u64)mcast->mcast_buf->iova);
3643
3644 /* Alloc pkind for NIX internal RX multicast/mirror replay */
3645 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
3646
3647 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
3648 BIT_ULL(63) | (mcast->replay_pkind << 24) |
3649 BIT_ULL(20) | MC_BUF_CNT);
3650
3651 mutex_init(&mcast->mce_lock);
3652
3653 nix_setup_mcast_grp(nix_hw);
3654
3655 return nix_setup_mce_tables(rvu, nix_hw);
3656 }
3657
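/* Setup resources for Tx VTAG insertion: a bitmap of NIX_TX_VTAG_DEF_MAX
 * entries and an entry-to-PFFUNC map, protected by rsrc_lock.
 */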
3658 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
3659 {
3660 struct nix_txvlan *vlan = &nix_hw->txvlan;
3661 int err;
3662
3663 	/* Allocate resource bitmap for tx vtag def registers */
3664 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
3665 err = rvu_alloc_bitmap(&vlan->rsrc);
3666 if (err)
3667 return -ENOMEM;
3668
3669 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
3670 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
3671 sizeof(u16), GFP_KERNEL);
3672 if (!vlan->entry2pfvf_map)
3673 goto free_mem;
3674
3675 mutex_init(&vlan->rsrc_lock);
3676 return 0;
3677
3678 free_mem:
3679 kfree(vlan->rsrc.bmap);
3680 return -ENOMEM;
3681 }
3682
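/* Read the scheduler queue count of each level (SMQ/MDQ, TL4..TL1) from
 * the *_CONST registers, allocate per level bitmaps and pcifunc maps,
 * and program a default DWRR MTU where the HW supports it.
 */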
3683 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3684 {
3685 struct nix_txsch *txsch;
3686 int err, lvl, schq;
3687 u64 cfg, reg;
3688
3689 /* Get scheduler queue count of each type and alloc
3690 * bitmap for each for alloc/free/attach operations.
3691 */
3692 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3693 txsch = &nix_hw->txsch[lvl];
3694 txsch->lvl = lvl;
3695 switch (lvl) {
3696 case NIX_TXSCH_LVL_SMQ:
3697 reg = NIX_AF_MDQ_CONST;
3698 break;
3699 case NIX_TXSCH_LVL_TL4:
3700 reg = NIX_AF_TL4_CONST;
3701 break;
3702 case NIX_TXSCH_LVL_TL3:
3703 reg = NIX_AF_TL3_CONST;
3704 break;
3705 case NIX_TXSCH_LVL_TL2:
3706 reg = NIX_AF_TL2_CONST;
3707 break;
3708 case NIX_TXSCH_LVL_TL1:
3709 reg = NIX_AF_TL1_CONST;
3710 break;
3711 }
3712 cfg = rvu_read64(rvu, blkaddr, reg);
3713 txsch->schq.max = cfg & 0xFFFF;
3714 err = rvu_alloc_bitmap(&txsch->schq);
3715 if (err)
3716 return err;
3717
3718 /* Allocate memory for scheduler queues to
3719 * PF/VF pcifunc mapping info.
3720 */
3721 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
3722 sizeof(u32), GFP_KERNEL);
3723 if (!txsch->pfvf_map)
3724 return -ENOMEM;
3725 for (schq = 0; schq < txsch->schq.max; schq++)
3726 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
3727 }
3728
3729 /* Setup a default value of 8192 as DWRR MTU */
3730 if (rvu->hw->cap.nix_common_dwrr_mtu ||
3731 rvu->hw->cap.nix_multiple_dwrr_mtu) {
3732 rvu_write64(rvu, blkaddr,
3733 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
3734 convert_bytes_to_dwrr_mtu(8192));
3735 rvu_write64(rvu, blkaddr,
3736 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
3737 convert_bytes_to_dwrr_mtu(8192));
3738 rvu_write64(rvu, blkaddr,
3739 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
3740 convert_bytes_to_dwrr_mtu(8192));
3741 }
3742
3743 return 0;
3744 }
3745
3746 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
3747 int blkaddr, u32 cfg)
3748 {
3749 int fmt_idx;
3750
3751 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
3752 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
3753 return fmt_idx;
3754 }
3755 if (fmt_idx >= nix_hw->mark_format.total)
3756 return -ERANGE;
3757
3758 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
3759 nix_hw->mark_format.cfg[fmt_idx] = cfg;
3760 nix_hw->mark_format.in_use++;
3761 return fmt_idx;
3762 }
3763
3764 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
3765 int blkaddr)
3766 {
3767 u64 cfgs[] = {
3768 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
3769 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
3770 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
3771 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
3772 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
3773 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
3774 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
3775 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
3776 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
3777 };
3778 int i, rc;
3779 u64 total;
3780
3781 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
3782 nix_hw->mark_format.total = (u8)total;
3783 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
3784 GFP_KERNEL);
3785 if (!nix_hw->mark_format.cfg)
3786 return -ENOMEM;
3787 for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
3788 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
3789 if (rc < 0)
3790 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
3791 				rc, i);
3792 }
3793
3794 return 0;
3795 }
3796
3797 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3798 {
3799 /* CN10K supports LBK FIFO size 72 KB */
3800 if (rvu->hw->lbk_bufsize == 0x12000)
3801 *max_mtu = CN10K_LBK_LINK_MAX_FRS;
3802 else
3803 *max_mtu = NIC_HW_MAX_FRS;
3804 }
3805
3806 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3807 {
3808 int fifo_size = rvu_cgx_get_fifolen(rvu);
3809
3810 /* RPM supports FIFO len 128 KB and RPM2 supports double the
3811 * FIFO len to accommodate 8 LMACS
3812 */
3813 if (fifo_size == 0x20000 || fifo_size == 0x40000)
3814 *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
3815 else
3816 *max_mtu = NIC_HW_MAX_FRS;
3817 }
3818
3819 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
3820 struct nix_hw_info *rsp)
3821 {
3822 u16 pcifunc = req->hdr.pcifunc;
3823 u64 dwrr_mtu;
3824 int blkaddr;
3825
3826 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3827 if (blkaddr < 0)
3828 return NIX_AF_ERR_AF_LF_INVALID;
3829
3830 if (is_lbk_vf(rvu, pcifunc))
3831 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
3832 else
3833 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
3834
3835 rsp->min_mtu = NIC_HW_MIN_FRS;
3836
3837 if (!rvu->hw->cap.nix_common_dwrr_mtu &&
3838 !rvu->hw->cap.nix_multiple_dwrr_mtu) {
3839 /* Return '1' on OTx2 */
3840 rsp->rpm_dwrr_mtu = 1;
3841 rsp->sdp_dwrr_mtu = 1;
3842 rsp->lbk_dwrr_mtu = 1;
3843 return 0;
3844 }
3845
3846 /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
3847 dwrr_mtu = rvu_read64(rvu, blkaddr,
3848 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
3849 rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3850
3851 dwrr_mtu = rvu_read64(rvu, blkaddr,
3852 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
3853 rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3854
3855 dwrr_mtu = rvu_read64(rvu, blkaddr,
3856 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
3857 rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3858
3859 return 0;
3860 }
3861
3862 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
3863 struct msg_rsp *rsp)
3864 {
3865 u16 pcifunc = req->hdr.pcifunc;
3866 int i, nixlf, blkaddr, err;
3867 u64 stats;
3868
3869 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3870 if (err)
3871 return err;
3872
3873 /* Get stats count supported by HW */
3874 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
3875
3876 /* Reset tx stats */
3877 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
3878 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
3879
3880 /* Reset rx stats */
3881 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
3882 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
3883
3884 return 0;
3885 }
3886
3887 /* Returns the ALG index to be set into NPC_RX_ACTION */
3888 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
3889 {
3890 int i;
3891
3892 	/* Scan over existing algo entries to find a match */
3893 for (i = 0; i < nix_hw->flowkey.in_use; i++)
3894 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
3895 return i;
3896
3897 return -ERANGE;
3898 }
3899
3900 /* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */
3901 #define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf)
3902 /* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */
3903 #define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf)
3904
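/* Translate a flow_cfg bitmask into up to FIELDS_PER_ALG field
 * extraction entries (lid, header offset, length, ltype match/mask and
 * key offset) which together define one RSS flow key algorithm.
 */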
3905 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
3906 {
3907 int idx, nr_field, key_off, field_marker, keyoff_marker;
3908 int max_key_off, max_bit_pos, group_member;
3909 struct nix_rx_flowkey_alg *field;
3910 struct nix_rx_flowkey_alg tmp;
3911 u32 key_type, valid_key;
3912 u32 l3_l4_src_dst;
3913 int l4_key_offset = 0;
3914
3915 if (!alg)
3916 return -EINVAL;
3917
3918 #define FIELDS_PER_ALG 5
3919 #define MAX_KEY_OFF 40
3920 /* Clear all fields */
3921 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
3922
3923 /* Each of the 32 possible flow key algorithm definitions should
3924 * fall into above incremental config (except ALG0). Otherwise a
3925 * single NPC MCAM entry is not sufficient for supporting RSS.
3926 *
3927 	 * If a different definition or combination is needed then NPC MCAM
3928 	 * has to be programmed to filter such pkts and its action should
3929 	 * point to this definition to calculate flowtag or hash.
3930 	 *
3931 	 * The `for loop` goes over _all_ protocol fields and the following
3932 	 * variables depict the state machine forward progress logic.
3933 *
3934 * keyoff_marker - Enabled when hash byte length needs to be accounted
3935 * in field->key_offset update.
3936 * field_marker - Enabled when a new field needs to be selected.
3937 * group_member - Enabled when protocol is part of a group.
3938 */
3939
3940 /* Last 4 bits (31:28) are reserved to specify SRC, DST
3941 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST,
3942 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
3943 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
3944 */
3945 l3_l4_src_dst = flow_cfg;
3946 /* Reset these 4 bits, so that these won't be part of key */
3947 flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
3948
3949 keyoff_marker = 0; max_key_off = 0; group_member = 0;
3950 nr_field = 0; key_off = 0; field_marker = 1;
3951 field = &tmp; max_bit_pos = fls(flow_cfg);
3952 for (idx = 0;
3953 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
3954 key_off < MAX_KEY_OFF; idx++) {
3955 key_type = BIT(idx);
3956 valid_key = flow_cfg & key_type;
3957 /* Found a field marker, reset the field values */
3958 if (field_marker)
3959 memset(&tmp, 0, sizeof(tmp));
3960
3961 field_marker = true;
3962 keyoff_marker = true;
3963 switch (key_type) {
3964 case NIX_FLOW_KEY_TYPE_PORT:
3965 field->sel_chan = true;
3966 /* This should be set to 1, when SEL_CHAN is set */
3967 field->bytesm1 = 1;
3968 break;
3969 case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
3970 field->lid = NPC_LID_LC;
3971 field->hdr_offset = 9; /* offset */
3972 field->bytesm1 = 0; /* 1 byte */
3973 field->ltype_match = NPC_LT_LC_IP;
3974 field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
3975 break;
3976 case NIX_FLOW_KEY_TYPE_IPV4:
3977 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
3978 field->lid = NPC_LID_LC;
3979 field->ltype_match = NPC_LT_LC_IP;
3980 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
3981 field->lid = NPC_LID_LG;
3982 field->ltype_match = NPC_LT_LG_TU_IP;
3983 }
3984 field->hdr_offset = 12; /* SIP offset */
3985 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
3986
3987 /* Only SIP */
3988 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
3989 field->bytesm1 = 3; /* SIP, 4 bytes */
3990
3991 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
3992 /* Both SIP + DIP */
3993 if (field->bytesm1 == 3) {
3994 field->bytesm1 = 7; /* SIP + DIP, 8B */
3995 } else {
3996 /* Only DIP */
3997 field->hdr_offset = 16; /* DIP off */
3998 field->bytesm1 = 3; /* DIP, 4 bytes */
3999 }
4000 }
4001 field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
4002 keyoff_marker = false;
4003 break;
4004 case NIX_FLOW_KEY_TYPE_IPV6:
4005 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
4006 field->lid = NPC_LID_LC;
4007 field->ltype_match = NPC_LT_LC_IP6;
4008 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
4009 field->lid = NPC_LID_LG;
4010 field->ltype_match = NPC_LT_LG_TU_IP6;
4011 }
4012 field->hdr_offset = 8; /* SIP offset */
4013 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
4014
4015 /* Only SIP */
4016 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
4017 field->bytesm1 = 15; /* SIP, 16 bytes */
4018
4019 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
4020 /* Both SIP + DIP */
4021 if (field->bytesm1 == 15) {
4022 /* SIP + DIP, 32 bytes */
4023 field->bytesm1 = 31;
4024 } else {
4025 /* Only DIP */
4026 field->hdr_offset = 24; /* DIP off */
4027 field->bytesm1 = 15; /* DIP,16 bytes */
4028 }
4029 }
4030 field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK;
4031 break;
4032 case NIX_FLOW_KEY_TYPE_TCP:
4033 case NIX_FLOW_KEY_TYPE_UDP:
4034 case NIX_FLOW_KEY_TYPE_SCTP:
4035 case NIX_FLOW_KEY_TYPE_INNR_TCP:
4036 case NIX_FLOW_KEY_TYPE_INNR_UDP:
4037 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
4038 field->lid = NPC_LID_LD;
4039 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
4040 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
4041 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
4042 field->lid = NPC_LID_LH;
4043 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
4044
4045 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
4046 field->bytesm1 = 1; /* SRC, 2 bytes */
4047
4048 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
4049 /* Both SRC + DST */
4050 if (field->bytesm1 == 1) {
4051 /* SRC + DST, 4 bytes */
4052 field->bytesm1 = 3;
4053 } else {
4054 				/* Only DST port */
4055 field->hdr_offset = 2; /* DST off */
4056 field->bytesm1 = 1; /* DST, 2 bytes */
4057 }
4058 }
4059
4060 			/* Ltype enum values for NPC_LID_LD and NPC_LID_LH are the
4061 			 * same, so no need to change the ltype_match, just change
4062 			 * the lid for inner protocols
4063 			 */
4064 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
4065 (int)NPC_LT_LH_TU_TCP);
4066 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
4067 (int)NPC_LT_LH_TU_UDP);
4068 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
4069 (int)NPC_LT_LH_TU_SCTP);
4070
4071 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
4072 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
4073 valid_key) {
4074 field->ltype_match |= NPC_LT_LD_TCP;
4075 group_member = true;
4076 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
4077 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
4078 valid_key) {
4079 field->ltype_match |= NPC_LT_LD_UDP;
4080 group_member = true;
4081 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
4082 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
4083 valid_key) {
4084 field->ltype_match |= NPC_LT_LD_SCTP;
4085 group_member = true;
4086 }
4087 field->ltype_mask = ~field->ltype_match;
4088 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
4089 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
4090 /* Handle the case where any of the group item
4091 * is enabled in the group but not the final one
4092 */
4093 if (group_member) {
4094 valid_key = true;
4095 group_member = false;
4096 }
4097 } else {
4098 field_marker = false;
4099 keyoff_marker = false;
4100 }
4101
4102 			/* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
4103 			 * remember the TCP key offset within the 40 byte hash key.
4104 			 */
4105 if (key_type == NIX_FLOW_KEY_TYPE_TCP)
4106 l4_key_offset = key_off;
4107 break;
4108 case NIX_FLOW_KEY_TYPE_NVGRE:
4109 field->lid = NPC_LID_LD;
4110 field->hdr_offset = 4; /* VSID offset */
4111 field->bytesm1 = 2;
4112 field->ltype_match = NPC_LT_LD_NVGRE;
4113 field->ltype_mask = 0xF;
4114 break;
4115 case NIX_FLOW_KEY_TYPE_VXLAN:
4116 case NIX_FLOW_KEY_TYPE_GENEVE:
4117 field->lid = NPC_LID_LE;
4118 field->bytesm1 = 2;
4119 field->hdr_offset = 4;
4120 field->ltype_mask = 0xF;
4121 field_marker = false;
4122 keyoff_marker = false;
4123
4124 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
4125 field->ltype_match |= NPC_LT_LE_VXLAN;
4126 group_member = true;
4127 }
4128
4129 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
4130 field->ltype_match |= NPC_LT_LE_GENEVE;
4131 group_member = true;
4132 }
4133
4134 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
4135 if (group_member) {
4136 field->ltype_mask = ~field->ltype_match;
4137 field_marker = true;
4138 keyoff_marker = true;
4139 valid_key = true;
4140 group_member = false;
4141 }
4142 }
4143 break;
4144 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
4145 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
4146 field->lid = NPC_LID_LA;
4147 field->ltype_match = NPC_LT_LA_ETHER;
4148 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
4149 field->lid = NPC_LID_LF;
4150 field->ltype_match = NPC_LT_LF_TU_ETHER;
4151 }
4152 field->hdr_offset = 0;
4153 field->bytesm1 = 5; /* DMAC 6 Byte */
4154 field->ltype_mask = 0xF;
4155 break;
4156 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
4157 field->lid = NPC_LID_LC;
4158 field->hdr_offset = 40; /* IPV6 hdr */
4159 field->bytesm1 = 0; /* 1 Byte ext hdr*/
4160 field->ltype_match = NPC_LT_LC_IP6_EXT;
4161 field->ltype_mask = 0xF;
4162 break;
4163 case NIX_FLOW_KEY_TYPE_GTPU:
4164 field->lid = NPC_LID_LE;
4165 field->hdr_offset = 4;
4166 field->bytesm1 = 3; /* 4 bytes TID*/
4167 field->ltype_match = NPC_LT_LE_GTPU;
4168 field->ltype_mask = 0xF;
4169 break;
4170 case NIX_FLOW_KEY_TYPE_CUSTOM0:
4171 field->lid = NPC_LID_LC;
4172 field->hdr_offset = 6;
4173 field->bytesm1 = 1; /* 2 Bytes*/
4174 field->ltype_match = NPC_LT_LC_CUSTOM0;
4175 field->ltype_mask = 0xF;
4176 break;
4177 case NIX_FLOW_KEY_TYPE_VLAN:
4178 field->lid = NPC_LID_LB;
4179 field->hdr_offset = 2; /* Skip TPID (2-bytes) */
4180 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
4181 field->ltype_match = NPC_LT_LB_CTAG;
4182 field->ltype_mask = 0xF;
4183 field->fn_mask = 1; /* Mask out the first nibble */
4184 break;
4185 case NIX_FLOW_KEY_TYPE_AH:
4186 case NIX_FLOW_KEY_TYPE_ESP:
4187 field->hdr_offset = 0;
4188 field->bytesm1 = 7; /* SPI + sequence number */
4189 field->ltype_mask = 0xF;
4190 field->lid = NPC_LID_LE;
4191 field->ltype_match = NPC_LT_LE_ESP;
4192 if (key_type == NIX_FLOW_KEY_TYPE_AH) {
4193 field->lid = NPC_LID_LD;
4194 field->ltype_match = NPC_LT_LD_AH;
4195 field->hdr_offset = 4;
4196 keyoff_marker = false;
4197 }
4198 break;
4199 }
4200 field->ena = 1;
4201
4202 /* Found a valid flow key type */
4203 if (valid_key) {
4204 /* Use the key offset of TCP/UDP/SCTP fields
4205 * for ESP/AH fields.
4206 */
4207 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
4208 key_type == NIX_FLOW_KEY_TYPE_AH)
4209 key_off = l4_key_offset;
4210 field->key_offset = key_off;
4211 memcpy(&alg[nr_field], field, sizeof(*field));
4212 max_key_off = max(max_key_off, field->bytesm1 + 1);
4213
4214 /* Found a field marker, get the next field */
4215 if (field_marker)
4216 nr_field++;
4217 }
4218
4219 /* Found a keyoff marker, update the new key_off */
4220 if (keyoff_marker) {
4221 key_off += max_key_off;
4222 max_key_off = 0;
4223 }
4224 }
4225 /* Processed all the flow key types */
4226 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
4227 return 0;
4228 else
4229 return NIX_AF_ERR_RSS_NOSPC_FIELD;
4230 }
4231
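/* Reserve the next free flow key algorithm index, program its
 * ALGX_FIELDX registers for the given flow_cfg and remember the config
 * for later lookups. Returns the reserved index or an error code.
 */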
4232 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
4233 {
4234 u64 field[FIELDS_PER_ALG];
4235 struct nix_hw *hw;
4236 int fid, rc;
4237
4238 hw = get_nix_hw(rvu->hw, blkaddr);
4239 if (!hw)
4240 return NIX_AF_ERR_INVALID_NIXBLK;
4241
4242 	/* No room to add a new flow hash algorithm */
4243 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
4244 return NIX_AF_ERR_RSS_NOSPC_ALGO;
4245
4246 /* Generate algo fields for the given flow_cfg */
4247 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
4248 if (rc)
4249 return rc;
4250
4251 /* Update ALGX_FIELDX register with generated fields */
4252 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
4253 rvu_write64(rvu, blkaddr,
4254 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
4255 fid), field[fid]);
4256
4257 	/* Store the flow_cfg for further lookup */
4258 rc = hw->flowkey.in_use;
4259 hw->flowkey.flowkey[rc] = flow_cfg;
4260 hw->flowkey.in_use++;
4261
4262 return rc;
4263 }
4264
4265 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
4266 struct nix_rss_flowkey_cfg *req,
4267 struct nix_rss_flowkey_cfg_rsp *rsp)
4268 {
4269 u16 pcifunc = req->hdr.pcifunc;
4270 int alg_idx, nixlf, blkaddr;
4271 struct nix_hw *nix_hw;
4272 int err;
4273
4274 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
4275 if (err)
4276 return err;
4277
4278 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4279 if (!nix_hw)
4280 return NIX_AF_ERR_INVALID_NIXBLK;
4281
4282 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
4283 	/* Failed to get algo index from the existing list, reserve a new one */
4284 if (alg_idx < 0) {
4285 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
4286 req->flowkey_cfg);
4287 if (alg_idx < 0)
4288 return alg_idx;
4289 }
4290 rsp->alg_idx = alg_idx;
4291 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
4292 alg_idx, req->mcam_index);
4293 return 0;
4294 }
4295
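/* Reserve a default set of RSS flow key algorithms (IP 2-tuple plus the
 * TCP/UDP/SCTP 4-tuple combinations) so that common configurations map
 * to fixed algorithm indices.
 */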
4296 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
4297 {
4298 u32 flowkey_cfg, minkey_cfg;
4299 int alg, fid, rc;
4300
4301 /* Disable all flow key algx fieldx */
4302 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
4303 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
4304 rvu_write64(rvu, blkaddr,
4305 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
4306 0);
4307 }
4308
4309 /* IPv4/IPv6 SIP/DIPs */
4310 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
4311 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4312 if (rc < 0)
4313 return rc;
4314
4315 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4316 minkey_cfg = flowkey_cfg;
4317 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
4318 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4319 if (rc < 0)
4320 return rc;
4321
4322 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4323 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
4324 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4325 if (rc < 0)
4326 return rc;
4327
4328 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4329 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
4330 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4331 if (rc < 0)
4332 return rc;
4333
4334 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
4335 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4336 NIX_FLOW_KEY_TYPE_UDP;
4337 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4338 if (rc < 0)
4339 return rc;
4340
4341 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4342 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4343 NIX_FLOW_KEY_TYPE_SCTP;
4344 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4345 if (rc < 0)
4346 return rc;
4347
4348 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4349 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
4350 NIX_FLOW_KEY_TYPE_SCTP;
4351 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4352 if (rc < 0)
4353 return rc;
4354
4355 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
4356 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
4357 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
4358 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4359 if (rc < 0)
4360 return rc;
4361
4362 return 0;
4363 }
4364
4365 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
4366 struct nix_set_mac_addr *req,
4367 struct msg_rsp *rsp)
4368 {
4369 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
4370 u16 pcifunc = req->hdr.pcifunc;
4371 int blkaddr, nixlf, err;
4372 struct rvu_pfvf *pfvf;
4373
4374 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
4375 if (err)
4376 return err;
4377
4378 pfvf = rvu_get_pfvf(rvu, pcifunc);
4379
4380 /* untrusted VF can't overwrite admin(PF) changes */
4381 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4382 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
4383 dev_warn(rvu->dev,
4384 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
4385 return -EPERM;
4386 }
4387
4388 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
4389
4390 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
4391 pfvf->rx_chan_base, req->mac_addr);
4392
4393 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
4394 ether_addr_copy(pfvf->default_mac, req->mac_addr);
4395
4396 rvu_switch_update_rules(rvu, pcifunc);
4397
4398 return 0;
4399 }
4400
4401 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
4402 struct msg_req *req,
4403 struct nix_get_mac_addr_rsp *rsp)
4404 {
4405 u16 pcifunc = req->hdr.pcifunc;
4406 struct rvu_pfvf *pfvf;
4407
4408 if (!is_nixlf_attached(rvu, pcifunc))
4409 return NIX_AF_ERR_AF_LF_INVALID;
4410
4411 pfvf = rvu_get_pfvf(rvu, pcifunc);
4412
4413 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
4414
4415 return 0;
4416 }
4417
4418 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
4419 struct msg_rsp *rsp)
4420 {
4421 bool allmulti, promisc, nix_rx_multicast;
4422 u16 pcifunc = req->hdr.pcifunc;
4423 struct rvu_pfvf *pfvf;
4424 int nixlf, err;
4425
4426 pfvf = rvu_get_pfvf(rvu, pcifunc);
4427 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
4428 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
4429 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
4430
4431 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
4432
4433 if (is_vf(pcifunc) && !nix_rx_multicast &&
4434 (promisc || allmulti)) {
4435 dev_warn_ratelimited(rvu->dev,
4436 "VF promisc/multicast not supported\n");
4437 return 0;
4438 }
4439
4440 /* untrusted VF can't configure promisc/allmulti */
4441 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4442 (promisc || allmulti))
4443 return 0;
4444
4445 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4446 if (err)
4447 return err;
4448
4449 if (nix_rx_multicast) {
4450 /* add/del this PF_FUNC to/from mcast pkt replication list */
4451 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
4452 allmulti);
4453 if (err) {
4454 dev_err(rvu->dev,
4455 "Failed to update pcifunc 0x%x to multicast list\n",
4456 pcifunc);
4457 return err;
4458 }
4459
4460 /* add/del this PF_FUNC to/from promisc pkt replication list */
4461 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
4462 promisc);
4463 if (err) {
4464 dev_err(rvu->dev,
4465 "Failed to update pcifunc 0x%x to promisc list\n",
4466 pcifunc);
4467 return err;
4468 }
4469 }
4470
4471 /* install/uninstall allmulti entry */
4472 if (allmulti) {
4473 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
4474 pfvf->rx_chan_base);
4475 } else {
4476 if (!nix_rx_multicast)
4477 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
4478 }
4479
4480 /* install/uninstall promisc entry */
4481 if (promisc)
4482 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
4483 pfvf->rx_chan_base,
4484 pfvf->rx_chan_cnt);
4485 else
4486 if (!nix_rx_multicast)
4487 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
4488
4489 return 0;
4490 }
4491
4492 static void nix_find_link_frs(struct rvu *rvu,
4493 struct nix_frs_cfg *req, u16 pcifunc)
4494 {
4495 int pf = rvu_get_pf(pcifunc);
4496 struct rvu_pfvf *pfvf;
4497 int maxlen, minlen;
4498 int numvfs, hwvf;
4499 int vf;
4500
4501 /* Update with requester's min/max lengths */
4502 pfvf = rvu_get_pfvf(rvu, pcifunc);
4503 pfvf->maxlen = req->maxlen;
4504 if (req->update_minlen)
4505 pfvf->minlen = req->minlen;
4506
4507 maxlen = req->maxlen;
4508 minlen = req->update_minlen ? req->minlen : 0;
4509
4510 /* Get this PF's numVFs and starting hwvf */
4511 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
4512
4513 /* For each VF, compare requested max/minlen */
4514 for (vf = 0; vf < numvfs; vf++) {
4515 pfvf = &rvu->hwvf[hwvf + vf];
4516 if (pfvf->maxlen > maxlen)
4517 maxlen = pfvf->maxlen;
4518 if (req->update_minlen &&
4519 pfvf->minlen && pfvf->minlen < minlen)
4520 minlen = pfvf->minlen;
4521 }
4522
4523 /* Compare requested max/minlen with PF's max/minlen */
4524 pfvf = &rvu->pf[pf];
4525 if (pfvf->maxlen > maxlen)
4526 maxlen = pfvf->maxlen;
4527 if (req->update_minlen &&
4528 pfvf->minlen && pfvf->minlen < minlen)
4529 minlen = pfvf->minlen;
4530
4531 	/* Update the request with the max/min of this PF and its VFs */
4532 req->maxlen = maxlen;
4533 if (req->update_minlen)
4534 req->minlen = minlen;
4535 }
4536
4537 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
4538 struct msg_rsp *rsp)
4539 {
4540 struct rvu_hwinfo *hw = rvu->hw;
4541 u16 pcifunc = req->hdr.pcifunc;
4542 int pf = rvu_get_pf(pcifunc);
4543 int blkaddr, link = -1;
4544 struct nix_hw *nix_hw;
4545 struct rvu_pfvf *pfvf;
4546 u8 cgx = 0, lmac = 0;
4547 u16 max_mtu;
4548 u64 cfg;
4549
4550 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4551 if (blkaddr < 0)
4552 return NIX_AF_ERR_AF_LF_INVALID;
4553
4554 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4555 if (!nix_hw)
4556 return NIX_AF_ERR_INVALID_NIXBLK;
4557
4558 if (is_lbk_vf(rvu, pcifunc))
4559 rvu_get_lbk_link_max_frs(rvu, &max_mtu);
4560 else
4561 rvu_get_lmac_link_max_frs(rvu, &max_mtu);
4562
4563 if (!req->sdp_link && req->maxlen > max_mtu)
4564 return NIX_AF_ERR_FRS_INVALID;
4565
4566 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
4567 return NIX_AF_ERR_FRS_INVALID;
4568
4569 /* Check if config is for SDP link */
4570 if (req->sdp_link) {
4571 if (!hw->sdp_links)
4572 return NIX_AF_ERR_RX_LINK_INVALID;
4573 link = hw->cgx_links + hw->lbk_links;
4574 goto linkcfg;
4575 }
4576
4577 /* Check if the request is from CGX mapped RVU PF */
4578 if (is_pf_cgxmapped(rvu, pf)) {
4579 /* Get CGX and LMAC to which this PF is mapped and find link */
4580 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
4581 link = (cgx * hw->lmac_per_cgx) + lmac;
4582 } else if (pf == 0) {
4583 /* For VFs of PF0 ingress is LBK port, so config LBK link */
4584 pfvf = rvu_get_pfvf(rvu, pcifunc);
4585 link = hw->cgx_links + pfvf->lbkid;
4586 }
4587
4588 if (link < 0)
4589 return NIX_AF_ERR_RX_LINK_INVALID;
4590
4591 linkcfg:
4592 nix_find_link_frs(rvu, req, pcifunc);
4593
4594 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
4595 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
4596 if (req->update_minlen)
4597 cfg = (cfg & ~0xFFFFULL) | req->minlen;
4598 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
4599
4600 return 0;
4601 }
4602
4603 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
4604 struct msg_rsp *rsp)
4605 {
4606 int nixlf, blkaddr, err;
4607 u64 cfg;
4608
4609 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
4610 if (err)
4611 return err;
4612
4613 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
4614 /* Set the interface configuration */
4615 if (req->len_verify & BIT(0))
4616 cfg |= BIT_ULL(41);
4617 else
4618 cfg &= ~BIT_ULL(41);
4619
4620 if (req->len_verify & BIT(1))
4621 cfg |= BIT_ULL(40);
4622 else
4623 cfg &= ~BIT_ULL(40);
4624
4625 if (req->len_verify & NIX_RX_DROP_RE)
4626 cfg |= BIT_ULL(32);
4627 else
4628 cfg &= ~BIT_ULL(32);
4629
4630 if (req->csum_verify & BIT(0))
4631 cfg |= BIT_ULL(37);
4632 else
4633 cfg &= ~BIT_ULL(37);
4634
4635 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
4636
4637 return 0;
4638 }
4639
4640 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
4641 {
4642 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
4643 }
4644
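/* Per NIX block link init: program default min/max frame sizes on the
 * CGX, LBK and SDP Rx links and set Tx credits on CGX/LBK links based
 * on the LMAC FIFO sizes.
 */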
4645 static void nix_link_config(struct rvu *rvu, int blkaddr,
4646 struct nix_hw *nix_hw)
4647 {
4648 struct rvu_hwinfo *hw = rvu->hw;
4649 int cgx, lmac_cnt, slink, link;
4650 u16 lbk_max_frs, lmac_max_frs;
4651 unsigned long lmac_bmap;
4652 u64 tx_credits, cfg;
4653 u64 lmac_fifo_len;
4654 int iter;
4655
4656 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
4657 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
4658
4659 /* Set default min/max packet lengths allowed on NIX Rx links.
4660 *
4661 	 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
4662 	 * as undersize and report them to SW as error pkts, hence
4663 	 * setting it to 40 bytes.
4664 */
4665 for (link = 0; link < hw->cgx_links; link++) {
4666 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4667 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
4668 }
4669
4670 	for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
4671 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4672 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
4673 }
4674 if (hw->sdp_links) {
4675 link = hw->cgx_links + hw->lbk_links;
4676 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4677 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
4678 }
4679
4680 /* Get MCS external bypass status for CN10K-B */
4681 if (mcs_get_blkcnt() == 1) {
4682 /* Adjust for 2 credits when external bypass is disabled */
4683 nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
4684 }
4685
4686 /* Set credits for Tx links assuming max packet length allowed.
4687 * This will be reconfigured based on MTU set for PF/VF.
4688 */
4689 for (cgx = 0; cgx < hw->cgx; cgx++) {
4690 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
4691 /* Skip when cgx is not available or lmac cnt is zero */
4692 if (lmac_cnt <= 0)
4693 continue;
4694 slink = cgx * hw->lmac_per_cgx;
4695
4696 /* Get LMAC id's from bitmap */
4697 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
4698 for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
4699 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
4700 if (!lmac_fifo_len) {
4701 dev_err(rvu->dev,
4702 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4703 __func__, cgx, iter);
4704 continue;
4705 }
4706 tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
4707 /* Enable credits and set credit pkt count to max allowed */
4708 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4709 cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
4710
4711 link = iter + slink;
4712 nix_hw->tx_credits[link] = tx_credits;
4713 rvu_write64(rvu, blkaddr,
4714 NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4715 }
4716 }
4717
4718 /* Set Tx credits for LBK link */
4719 slink = hw->cgx_links;
4720 for (link = slink; link < (slink + hw->lbk_links); link++) {
4721 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
4722 nix_hw->tx_credits[link] = tx_credits;
4723 /* Enable credits and set credit pkt count to max allowed */
4724 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4725 rvu_write64(rvu, blkaddr,
4726 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
4727 }
4728 }
4729
4730 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
4731 {
4732 int idx, err;
4733 u64 status;
4734
4735 /* Start X2P bus calibration */
4736 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4737 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
4738 /* Wait for calibration to complete */
4739 err = rvu_poll_reg(rvu, blkaddr,
4740 NIX_AF_STATUS, BIT_ULL(10), false);
4741 if (err) {
4742 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
4743 return err;
4744 }
4745
4746 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
4747 /* Check if CGX devices are ready */
4748 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
4749 /* Skip when cgx port is not available */
4750 if (!rvu_cgx_pdata(idx, rvu) ||
4751 (status & (BIT_ULL(16 + idx))))
4752 continue;
4753 dev_err(rvu->dev,
4754 "CGX%d didn't respond to NIX X2P calibration\n", idx);
4755 err = -EBUSY;
4756 }
4757
4758 /* Check if LBK is ready */
4759 if (!(status & BIT_ULL(19))) {
4760 dev_err(rvu->dev,
4761 "LBK didn't respond to NIX X2P calibration\n");
4762 err = -EBUSY;
4763 }
4764
4765 /* Clear 'calibrate_x2p' bit */
4766 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4767 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
4768 if (err || (status & 0x3FFULL))
4769 dev_err(rvu->dev,
4770 "NIX X2P calibration failed, status 0x%llx\n", status);
4771 if (err)
4772 return err;
4773 return 0;
4774 }
4775
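/* Initialize the NIX admin queue: select AQ endianness and NDC caching
 * policy, allocate instruction/result memory and program the AQ base
 * and size registers.
 */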
4776 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
4777 {
4778 u64 cfg;
4779 int err;
4780
4781 /* Set admin queue endianness */
4782 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
4783 #ifdef __BIG_ENDIAN
4784 cfg |= BIT_ULL(8);
4785 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4786 #else
4787 cfg &= ~BIT_ULL(8);
4788 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4789 #endif
4790
4791 /* Do not bypass NDC cache */
4792 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
4793 cfg &= ~0x3FFEULL;
4794 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
4795 /* Disable caching of SQB aka SQEs */
4796 cfg |= 0x04ULL;
4797 #endif
4798 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
4799
4800 /* Result structure can be followed by RQ/SQ/CQ context at
4801 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
4802 * operation type. Alloc sufficient result memory for all operations.
4803 */
4804 err = rvu_aq_alloc(rvu, &block->aq,
4805 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
4806 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
4807 if (err)
4808 return err;
4809
4810 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
4811 rvu_write64(rvu, block->addr,
4812 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
4813 return 0;
4814 }
4815
4816 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
4817 {
4818 struct rvu_hwinfo *hw = rvu->hw;
4819 u64 hw_const;
4820
4821 hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
4822
4823 /* On OcteonTx2 DWRR quantum is directly configured into each of
4824 * the transmit scheduler queues. And PF/VF drivers were free to
4825 	 * config any value up to 2^24.
4826 * On CN10K, HW is modified, the quantum configuration at scheduler
4827 * queues is in terms of weight. And SW needs to setup a base DWRR MTU
4828 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
4829 * 'DWRR MTU * weight' to get the quantum.
4830 *
4831 * Check if HW uses a common MTU for all DWRR quantum configs.
4832 * On OcteonTx2 this register field is '0'.
4833 */
4834 if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
4835 hw->cap.nix_common_dwrr_mtu = true;
4836
4837 if (hw_const & BIT_ULL(61))
4838 hw->cap.nix_multiple_dwrr_mtu = true;
4839 }
4840
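/* Per NIX block bring-up: apply errata workarounds, calibrate the X2P
 * bus, init the admin queue, and set up scheduler queues, policers,
 * mark formats, multicast, Tx vtag, BPIDs, LSO and the default protocol
 * layer definitions used by the HW checker.
 */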
4841 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
4842 {
4843 const struct npc_lt_def_cfg *ltdefs;
4844 struct rvu_hwinfo *hw = rvu->hw;
4845 int blkaddr = nix_hw->blkaddr;
4846 struct rvu_block *block;
4847 int err;
4848 u64 cfg;
4849
4850 block = &hw->block[blkaddr];
4851
4852 if (is_rvu_96xx_B0(rvu)) {
4853 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
4854 * internal state when conditional clocks are turned off.
4855 * Hence enable them.
4856 */
4857 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4858 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
4859 }
4860
4861 /* Set chan/link to backpressure TL3 instead of TL2 */
4862 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
4863
4864 	/* Disable SQ manager's sticky mode operation (set TM6 = 0).
4865 	 * This sticky mode is known to cause SQ stalls when multiple
4866 	 * SQs are mapped to the same SMQ and are transmitting pkts at the same time.
4867 	 */
4868 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
4869 cfg &= ~BIT_ULL(15);
4870 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
4871
4872 ltdefs = rvu->kpu.lt_def;
4873 /* Calibrate X2P bus to check if CGX/LBK links are fine */
4874 err = nix_calibrate_x2p(rvu, blkaddr);
4875 if (err)
4876 return err;
4877
4878 /* Setup capabilities of the NIX block */
4879 rvu_nix_setup_capabilities(rvu, blkaddr);
4880
4881 /* Initialize admin queue */
4882 err = nix_aq_init(rvu, block);
4883 if (err)
4884 return err;
4885
4886 /* Restore CINT timer delay to HW reset values */
4887 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
4888
4889 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
4890
4891 	/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
4892 cfg |= 1ULL;
4893 if (!is_rvu_otx2(rvu))
4894 cfg |= NIX_PTP_1STEP_EN;
4895
4896 rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
4897
4898 if (!is_rvu_otx2(rvu))
4899 rvu_nix_block_cn10k_init(rvu, nix_hw);
4900
4901 if (is_block_implemented(hw, blkaddr)) {
4902 err = nix_setup_txschq(rvu, nix_hw, blkaddr);
4903 if (err)
4904 return err;
4905
4906 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
4907 if (err)
4908 return err;
4909
4910 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
4911 if (err)
4912 return err;
4913
4914 err = nix_setup_mcast(rvu, nix_hw, blkaddr);
4915 if (err)
4916 return err;
4917
4918 err = nix_setup_txvlan(rvu, nix_hw);
4919 if (err)
4920 return err;
4921
4922 err = nix_setup_bpids(rvu, nix_hw, blkaddr);
4923 if (err)
4924 return err;
4925
4926 /* Configure segmentation offload formats */
4927 nix_setup_lso(rvu, nix_hw, blkaddr);
4928
4929 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
4930 * This helps HW protocol checker to identify headers
4931 * and validate length and checksums.
4932 */
4933 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
4934 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
4935 ltdefs->rx_ol2.ltype_mask);
4936 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
4937 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
4938 ltdefs->rx_oip4.ltype_mask);
4939 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
4940 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
4941 ltdefs->rx_iip4.ltype_mask);
4942 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
4943 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
4944 ltdefs->rx_oip6.ltype_mask);
4945 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
4946 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
4947 ltdefs->rx_iip6.ltype_mask);
4948 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
4949 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
4950 ltdefs->rx_otcp.ltype_mask);
4951 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
4952 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
4953 ltdefs->rx_itcp.ltype_mask);
4954 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
4955 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
4956 ltdefs->rx_oudp.ltype_mask);
4957 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
4958 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
4959 ltdefs->rx_iudp.ltype_mask);
4960 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
4961 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
4962 ltdefs->rx_osctp.ltype_mask);
4963 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
4964 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
4965 ltdefs->rx_isctp.ltype_mask);
4966
4967 if (!is_rvu_otx2(rvu)) {
4968 /* Enable APAD calculation for other protocols
4969 * matching APAD0 and APAD1 lt def registers.
4970 */
4971 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
4972 (ltdefs->rx_apad0.valid << 11) |
4973 (ltdefs->rx_apad0.lid << 8) |
4974 (ltdefs->rx_apad0.ltype_match << 4) |
4975 ltdefs->rx_apad0.ltype_mask);
4976 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
4977 (ltdefs->rx_apad1.valid << 11) |
4978 (ltdefs->rx_apad1.lid << 8) |
4979 (ltdefs->rx_apad1.ltype_match << 4) |
4980 ltdefs->rx_apad1.ltype_mask);
4981
4982 			/* Receive ethertype definition register defines layer
4983 * information in NPC_RESULT_S to identify the Ethertype
4984 * location in L2 header. Used for Ethertype overwriting
4985 * in inline IPsec flow.
4986 */
4987 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
4988 (ltdefs->rx_et[0].offset << 12) |
4989 (ltdefs->rx_et[0].valid << 11) |
4990 (ltdefs->rx_et[0].lid << 8) |
4991 (ltdefs->rx_et[0].ltype_match << 4) |
4992 ltdefs->rx_et[0].ltype_mask);
4993 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
4994 (ltdefs->rx_et[1].offset << 12) |
4995 (ltdefs->rx_et[1].valid << 11) |
4996 (ltdefs->rx_et[1].lid << 8) |
4997 (ltdefs->rx_et[1].ltype_match << 4) |
4998 ltdefs->rx_et[1].ltype_mask);
4999 }
5000
5001 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
5002 if (err)
5003 return err;
5004
5005 nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
5006 sizeof(u64), GFP_KERNEL);
5007 if (!nix_hw->tx_credits)
5008 return -ENOMEM;
5009
5010 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
5011 nix_link_config(rvu, blkaddr, nix_hw);
5012
5013 /* Enable Channel backpressure */
5014 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
5015 }
5016 return 0;
5017 }
5018
5019 int rvu_nix_init(struct rvu *rvu)
5020 {
5021 struct rvu_hwinfo *hw = rvu->hw;
5022 struct nix_hw *nix_hw;
5023 int blkaddr = 0, err;
5024 int i = 0;
5025
5026 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
5027 GFP_KERNEL);
5028 if (!hw->nix)
5029 return -ENOMEM;
5030
5031 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5032 while (blkaddr) {
5033 nix_hw = &hw->nix[i];
5034 nix_hw->rvu = rvu;
5035 nix_hw->blkaddr = blkaddr;
5036 err = rvu_nix_block_init(rvu, nix_hw);
5037 if (err)
5038 return err;
5039 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5040 i++;
5041 }
5042
5043 return 0;
5044 }
5045
5046 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
5047 struct rvu_block *block)
5048 {
5049 struct nix_txsch *txsch;
5050 struct nix_mcast *mcast;
5051 struct nix_txvlan *vlan;
5052 struct nix_hw *nix_hw;
5053 int lvl;
5054
5055 rvu_aq_free(rvu, block->aq);
5056
5057 if (is_block_implemented(rvu->hw, blkaddr)) {
5058 nix_hw = get_nix_hw(rvu->hw, blkaddr);
5059 if (!nix_hw)
5060 return;
5061
5062 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
5063 txsch = &nix_hw->txsch[lvl];
5064 kfree(txsch->schq.bmap);
5065 }
5066
5067 kfree(nix_hw->tx_credits);
5068
5069 nix_ipolicer_freemem(rvu, nix_hw);
5070
5071 vlan = &nix_hw->txvlan;
5072 kfree(vlan->rsrc.bmap);
5073 mutex_destroy(&vlan->rsrc_lock);
5074
5075 mcast = &nix_hw->mcast;
5076 qmem_free(rvu->dev, mcast->mce_ctx);
5077 qmem_free(rvu->dev, mcast->mcast_buf);
5078 mutex_destroy(&mcast->mce_lock);
5079 }
5080 }
5081
5082 void rvu_nix_freemem(struct rvu *rvu)
5083 {
5084 struct rvu_hwinfo *hw = rvu->hw;
5085 struct rvu_block *block;
5086 int blkaddr = 0;
5087
5088 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5089 while (blkaddr) {
5090 block = &hw->block[blkaddr];
5091 rvu_nix_block_freemem(rvu, blkaddr, block);
5092 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
5093 }
5094 }
5095
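/* Re-point the NPC MCAM action of a multicast group element at its
 * current MCE list start index, for either the ingress (RX) or egress
 * (TX) direction.
 */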
5096 static void nix_mcast_update_action(struct rvu *rvu,
5097 struct nix_mcast_grp_elem *elem)
5098 {
5099 struct npc_mcam *mcam = &rvu->hw->mcam;
5100 struct nix_rx_action rx_action = { 0 };
5101 struct nix_tx_action tx_action = { 0 };
5102 int npc_blkaddr;
5103
5104 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
5105 if (elem->dir == NIX_MCAST_INGRESS) {
5106 *(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam,
5107 npc_blkaddr,
5108 elem->mcam_index);
5109 rx_action.index = elem->mce_start_index;
5110 npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
5111 *(u64 *)&rx_action);
5112 } else {
5113 *(u64 *)&tx_action = npc_get_mcam_action(rvu, mcam,
5114 npc_blkaddr,
5115 elem->mcam_index);
5116 tx_action.index = elem->mce_start_index;
5117 npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
5118 *(u64 *)&tx_action);
5119 }
5120 }
5121
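/* Walk all multicast groups, update the active state of the MCE entries
 * owned by 'pcifunc', re-sync the lists to HW and refresh the multicast
 * index used by the corresponding NPC rules.
 */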
5122 static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active)
5123 {
5124 struct nix_mcast_grp_elem *elem;
5125 struct nix_mcast_grp *mcast_grp;
5126 struct nix_hw *nix_hw;
5127 int blkaddr;
5128
5129 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5130 nix_hw = get_nix_hw(rvu->hw, blkaddr);
5131 if (!nix_hw)
5132 return;
5133
5134 mcast_grp = &nix_hw->mcast_grp;
5135
5136 mutex_lock(&mcast_grp->mcast_grp_lock);
5137 list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) {
5138 struct nix_mce_list *mce_list;
5139 struct mce *mce;
5140
5141 		/* Iterate the group elements and update the active state of
5142 		 * the element belonging to the requesting pcifunc.
5143 		 */
5144 mce_list = &elem->mcast_mce_list;
5145 hlist_for_each_entry(mce, &mce_list->head, node) {
5146 if (mce->pcifunc == pcifunc) {
5147 mce->is_active = is_active;
5148 break;
5149 }
5150 }
5151
5152 /* Dump the updated list to HW */
5153 if (elem->dir == NIX_MCAST_INGRESS)
5154 nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
5155 else
5156 nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
5157
5158 /* Update the multicast index in NPC rule */
5159 nix_mcast_update_action(rvu, elem);
5160 }
5161 mutex_unlock(&mcast_grp->mcast_grp_lock);
5162 }
5163
5164 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
5165 struct msg_rsp *rsp)
5166 {
5167 u16 pcifunc = req->hdr.pcifunc;
5168 struct rvu_pfvf *pfvf;
5169 int nixlf, err;
5170
5171 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
5172 if (err)
5173 return err;
5174
5175 /* Enable the interface if it is in any multicast list */
5176 nix_mcast_update_mce_entry(rvu, pcifunc, 1);
5177
5178 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
5179
5180 npc_mcam_enable_flows(rvu, pcifunc);
5181
5182 pfvf = rvu_get_pfvf(rvu, pcifunc);
5183 set_bit(NIXLF_INITIALIZED, &pfvf->flags);
5184
5185 rvu_switch_update_rules(rvu, pcifunc);
5186
5187 return rvu_cgx_start_stop_io(rvu, pcifunc, true);
5188 }
5189
5190 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
5191 struct msg_rsp *rsp)
5192 {
5193 u16 pcifunc = req->hdr.pcifunc;
5194 struct rvu_pfvf *pfvf;
5195 int nixlf, err;
5196
5197 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
5198 if (err)
5199 return err;
5200
5201 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
5202 /* Disable the interface if it is in any multicast list */
5203 nix_mcast_update_mce_entry(rvu, pcifunc, 0);
5204
5206 pfvf = rvu_get_pfvf(rvu, pcifunc);
5207 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
5208
5209 err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
5210 if (err)
5211 return err;
5212
5213 rvu_cgx_tx_enable(rvu, pcifunc, true);
5214
5215 return 0;
5216 }
5217
5218 #define RX_SA_BASE GENMASK_ULL(52, 7)
5219
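/* Teardown of a NIX LF on detach/FLR: clean up NPC MCAM entries, free
 * Tx scheduler queues, disable RQ/SQ/CQ contexts, undo PTP and flow
 * control config, and release bandwidth profiles and (if used) CPT
 * contexts.
 */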
5220 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
5221 {
5222 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
5223 struct hwctx_disable_req ctx_req;
5224 int pf = rvu_get_pf(pcifunc);
5225 struct mac_ops *mac_ops;
5226 u8 cgx_id, lmac_id;
5227 u64 sa_base;
5228 void *cgxd;
5229 int err;
5230
5231 ctx_req.hdr.pcifunc = pcifunc;
5232
5233 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
5234 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
5235 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
5236 nix_interface_deinit(rvu, pcifunc, nixlf);
5237 nix_rx_sync(rvu, blkaddr);
5238 nix_txschq_free(rvu, pcifunc);
5239
5240 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
5241
5242 rvu_cgx_start_stop_io(rvu, pcifunc, false);
5243
5244 if (pfvf->sq_ctx) {
5245 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
5246 err = nix_lf_hwctx_disable(rvu, &ctx_req);
5247 if (err)
5248 dev_err(rvu->dev, "SQ ctx disable failed\n");
5249 }
5250
5251 if (pfvf->rq_ctx) {
5252 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
5253 err = nix_lf_hwctx_disable(rvu, &ctx_req);
5254 if (err)
5255 dev_err(rvu->dev, "RQ ctx disable failed\n");
5256 }
5257
5258 if (pfvf->cq_ctx) {
5259 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
5260 err = nix_lf_hwctx_disable(rvu, &ctx_req);
5261 if (err)
5262 dev_err(rvu->dev, "CQ ctx disable failed\n");
5263 }
5264
5265 /* reset HW config done for Switch headers */
5266 rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
5267 (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
5268
5269 /* Disabling CGX and NPC config done for PTP */
5270 if (pfvf->hw_rx_tstamp_en) {
5271 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
5272 cgxd = rvu_cgx_pdata(cgx_id, rvu);
5273 mac_ops = get_mac_ops(cgxd);
5274 mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
5275 /* Undo NPC config done for PTP */
5276 if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
5277 dev_err(rvu->dev, "NPC config for PTP failed\n");
5278 pfvf->hw_rx_tstamp_en = false;
5279 }
5280
5281 /* reset priority flow control config */
5282 rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
5283
5284 /* reset 802.3x flow control config */
5285 rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
5286
5287 nix_ctx_free(rvu, pfvf);
5288
5289 nix_free_all_bandprof(rvu, pcifunc);
5290
5291 sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
5292 if (FIELD_GET(RX_SA_BASE, sa_base)) {
5293 err = rvu_cpt_ctx_flush(rvu, pcifunc);
5294 if (err)
5295 dev_err(rvu->dev,
5296 "CPT ctx flush failed with error: %d\n", err);
5297 }
5298 }
5299
5300 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
5301
5302 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
5303 {
5304 struct rvu_hwinfo *hw = rvu->hw;
5305 struct rvu_block *block;
5306 int blkaddr, pf;
5307 int nixlf;
5308 u64 cfg;
5309
5310 pf = rvu_get_pf(pcifunc);
5311 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
5312 return 0;
5313
5314 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5315 if (blkaddr < 0)
5316 return NIX_AF_ERR_AF_LF_INVALID;
5317
5318 block = &hw->block[blkaddr];
5319 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
5320 if (nixlf < 0)
5321 return NIX_AF_ERR_AF_LF_INVALID;
5322
5323 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
5324
5325 if (enable)
5326 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
5327 else
5328 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
5329
5330 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
5331
5332 return 0;
5333 }
5334
5335 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
5336 struct msg_rsp *rsp)
5337 {
5338 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
5339 }
5340
5341 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
5342 struct msg_rsp *rsp)
5343 {
5344 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
5345 }
5346
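/* Reuse an existing HW LSO format if all requested fields match one that is
 * already programmed; otherwise program the next free format and return its
 * index to the requester.
 */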
5347 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
5348 struct nix_lso_format_cfg *req,
5349 struct nix_lso_format_cfg_rsp *rsp)
5350 {
5351 u16 pcifunc = req->hdr.pcifunc;
5352 struct nix_hw *nix_hw;
5353 struct rvu_pfvf *pfvf;
5354 int blkaddr, idx, f;
5355 u64 reg;
5356
5357 pfvf = rvu_get_pfvf(rvu, pcifunc);
5358 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
5359 if (!pfvf->nixlf || blkaddr < 0)
5360 return NIX_AF_ERR_AF_LF_INVALID;
5361
5362 nix_hw = get_nix_hw(rvu->hw, blkaddr);
5363 if (!nix_hw)
5364 return NIX_AF_ERR_INVALID_NIXBLK;
5365
5366 /* Find existing matching LSO format, if any */
5367 for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
5368 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
5369 reg = rvu_read64(rvu, blkaddr,
5370 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
5371 if (req->fields[f] != (reg & req->field_mask))
5372 break;
5373 }
5374
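/* If the inner loop ran to completion, every field matched this format */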
5375 if (f == NIX_LSO_FIELD_MAX)
5376 break;
5377 }
5378
5379 if (idx < nix_hw->lso.in_use) {
5380 /* Match found */
5381 rsp->lso_format_idx = idx;
5382 return 0;
5383 }
5384
5385 if (nix_hw->lso.in_use == nix_hw->lso.total)
5386 return NIX_AF_ERR_LSO_CFG_FAIL;
5387
5388 rsp->lso_format_idx = nix_hw->lso.in_use++;
5389
5390 for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
5391 rvu_write64(rvu, blkaddr,
5392 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
5393 req->fields[f]);
5394
5395 return 0;
5396 }
5397
5398 #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
5399 #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
5400 #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
5401 #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
5402
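/* Field layouts of NIX_AF_RX_CPTX_INST_QSEL and NIX_AF_RX_CPTX_CREDIT,
 * programmed by nix_inline_ipsec_cfg() below.
 */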
5403 #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
5404 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
5405 #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
5406
5407 #define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32)
5408 #define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22)
5409 #define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0)
5410
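/* Program the NIX RX inline IPsec general config, the CPT instruction queue
 * selection and the instruction credits for one NIX block; clear them all
 * when req->enable is false.
 */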
5411 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
5412 int blkaddr)
5413 {
5414 u8 cpt_idx, cpt_blkaddr;
5415 u64 val;
5416
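/* Each NIX block pairs with the CPT block of the same index:
 * NIX0 uses CPT0 and NIX1 uses CPT1.
 */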
5417 cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
5418 if (req->enable) {
5419 val = 0;
5420 /* Enable context prefetching */
5421 if (!is_rvu_otx2(rvu))
5422 val |= BIT_ULL(51);
5423
5424 /* Set OPCODE and EGRP */
5425 val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
5426 val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
5427 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
5428 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
5429
5430 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
5431
5432 /* Set CPT queue for inline IPSec */
5433 val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
5434 val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
5435 req->inst_qsel.cpt_pf_func);
5436
5437 if (!is_rvu_otx2(rvu)) {
5438 cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
5439 BLKADDR_CPT1;
5440 val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
5441 }
5442
5443 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5444 val);
5445
5446 /* Set CPT credit */
5447 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5448 if ((val & 0x3FFFFF) != 0x3FFFFF)
5449 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5450 0x3FFFFF - val);
5451
5452 val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
5453 val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
5454 val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
5455 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
5456 } else {
5457 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
5458 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5459 0x0);
5460 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5461 if ((val & 0x3FFFFF) != 0x3FFFFF)
5462 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5463 0x3FFFFF - val);
5464 }
5465 }
5466
5467 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
5468 struct nix_inline_ipsec_cfg *req,
5469 struct msg_rsp *rsp)
5470 {
5471 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5472 return 0;
5473
5474 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
5475 if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
5476 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
5477
5478 return 0;
5479 }
5480
5481 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
5482 struct msg_req *req,
5483 struct nix_inline_ipsec_cfg *rsp)
5485 {
5486 u64 val;
5487
5488 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5489 return 0;
5490
5491 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
5492 rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
5493 rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
5494 rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
5495 rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
5496
5497 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
5498 rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
5499 rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
5500 rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
5501
5502 return 0;
5503 }
5504
5505 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
5506 struct nix_inline_ipsec_lf_cfg *req,
5507 struct msg_rsp *rsp)
5508 {
5509 int lf, blkaddr, err;
5510 u64 val;
5511
5512 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5513 return 0;
5514
5515 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
5516 if (err)
5517 return err;
5518
5519 if (req->enable) {
5520 /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
5521 val = (u64)req->ipsec_cfg0.tt << 44 |
5522 (u64)req->ipsec_cfg0.tag_const << 20 |
5523 (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
5524 req->ipsec_cfg0.lenm1_max;
5525
5526 if (blkaddr == BLKADDR_NIX1)
5527 val |= BIT_ULL(46);
5528
5529 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
5530
5531 /* Set SA_IDX_W and SA_IDX_MAX */
5532 val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
5533 req->ipsec_cfg1.sa_idx_max;
5534 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
5535
5536 /* Set SA base address */
5537 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5538 req->sa_base_addr);
5539 } else {
5540 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
5541 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
5542 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5543 0x0);
5544 }
5545
5546 return 0;
5547 }
5548
5549 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
5550 {
5551 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
5552
5553 /* Overwrite the VF MAC address with default_mac */
5554 if (from_vf)
5555 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
5556 }
5557
5558 /* NIX ingress policers or bandwidth profiles APIs */
5559 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
5560 {
5561 struct npc_lt_def_cfg defs, *ltdefs;
5562
5563 ltdefs = &defs;
5564 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
5565
5566 /* Extract PCP and DEI fields from the outer VLAN at byte offset
5567 * 2 from the start of LB_PTR (i.e. the TAG).
5568 * VLAN0 is the outer VLAN and VLAN1 the inner VLAN. Inner VLAN
5569 * fields are considered when 'Tunnel enable' is set in the profile.
5570 */
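/* Each NIX_AF_RX_DEF_* value below packs the extraction byte offset into the
 * upper bits together with the layer id (<<8), LTYPE match (<<4) and LTYPE
 * mask taken from the KPU lt_def profile.
 */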
5571 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
5572 (2UL << 12) | (ltdefs->ovlan.lid << 8) |
5573 (ltdefs->ovlan.ltype_match << 4) |
5574 ltdefs->ovlan.ltype_mask);
5575 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
5576 (2UL << 12) | (ltdefs->ivlan.lid << 8) |
5577 (ltdefs->ivlan.ltype_match << 4) |
5578 ltdefs->ivlan.ltype_mask);
5579
5580 /* DSCP field in outer and tunneled IPv4 packets */
5581 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
5582 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
5583 (ltdefs->rx_oip4.ltype_match << 4) |
5584 ltdefs->rx_oip4.ltype_mask);
5585 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
5586 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
5587 (ltdefs->rx_iip4.ltype_match << 4) |
5588 ltdefs->rx_iip4.ltype_mask);
5589
5590 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
5591 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
5592 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
5593 (ltdefs->rx_oip6.ltype_match << 4) |
5594 ltdefs->rx_oip6.ltype_mask);
5595 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
5596 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
5597 (ltdefs->rx_iip6.ltype_match << 4) |
5598 ltdefs->rx_iip6.ltype_mask);
5599 }
5600
5601 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
5602 int layer, int prof_idx)
5603 {
5604 struct nix_cn10k_aq_enq_req aq_req;
5605 int rc;
5606
5607 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5608
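/* Bandwidth profile AQ index: bits [13:0] carry the profile index and
 * bits [15:14] the layer (leaf/mid/top).
 */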
5609 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
5610 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5611 aq_req.op = NIX_AQ_INSTOP_INIT;
5612
5613 /* Context is all zeros, submit to AQ */
5614 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5615 (struct nix_aq_enq_req *)&aq_req, NULL);
5616 if (rc)
5617 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
5618 layer, prof_idx);
5619 return rc;
5620 }
5621
5622 static int nix_setup_ipolicers(struct rvu *rvu,
5623 struct nix_hw *nix_hw, int blkaddr)
5624 {
5625 struct rvu_hwinfo *hw = rvu->hw;
5626 struct nix_ipolicer *ipolicer;
5627 int err, layer, prof_idx;
5628 u64 cfg;
5629
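/* Bit 61 of NIX_AF_CONST indicates whether ingress policing (bandwidth
 * profiles) is supported on this silicon.
 */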
5630 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
5631 if (!(cfg & BIT_ULL(61))) {
5632 hw->cap.ipolicer = false;
5633 return 0;
5634 }
5635
5636 hw->cap.ipolicer = true;
5637 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
5638 sizeof(*ipolicer), GFP_KERNEL);
5639 if (!nix_hw->ipolicer)
5640 return -ENOMEM;
5641
5642 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
5643
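/* NIX_AF_PL_CONST reports the number of profiles per layer:
 * leaf in bits [15:0], mid in bits [31:16], top in bits [47:32].
 */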
5644 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5645 ipolicer = &nix_hw->ipolicer[layer];
5646 switch (layer) {
5647 case BAND_PROF_LEAF_LAYER:
5648 ipolicer->band_prof.max = cfg & 0XFFFF;
5649 break;
5650 case BAND_PROF_MID_LAYER:
5651 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
5652 break;
5653 case BAND_PROF_TOP_LAYER:
5654 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
5655 break;
5656 }
5657
5658 if (!ipolicer->band_prof.max)
5659 continue;
5660
5661 err = rvu_alloc_bitmap(&ipolicer->band_prof);
5662 if (err)
5663 return err;
5664
5665 ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
5666 ipolicer->band_prof.max,
5667 sizeof(u16), GFP_KERNEL);
5668 if (!ipolicer->pfvf_map)
5669 return -ENOMEM;
5670
5671 ipolicer->match_id = devm_kcalloc(rvu->dev,
5672 ipolicer->band_prof.max,
5673 sizeof(u16), GFP_KERNEL);
5674 if (!ipolicer->match_id)
5675 return -ENOMEM;
5676
5677 for (prof_idx = 0;
5678 prof_idx < ipolicer->band_prof.max; prof_idx++) {
5679 /* Set AF as current owner for INIT ops to succeed */
5680 ipolicer->pfvf_map[prof_idx] = 0x00;
5681
5682 /* The profile context has no enable bit, so there is
5683 * no context disable operation. INIT all profiles here
5684 * so that a PF/VF later only needs to do a WRITE to
5685 * set up policer rates and config.
5686 */
5687 err = nix_init_policer_context(rvu, nix_hw,
5688 layer, prof_idx);
5689 if (err)
5690 return err;
5691 }
5692
5693 /* Allocate memory for maintaining ref_counts for MID level
5694 * profiles; this will be needed when aggregating leaf layer
5695 * profiles.
5696 */
5697 if (layer != BAND_PROF_MID_LAYER)
5698 continue;
5699
5700 ipolicer->ref_count = devm_kcalloc(rvu->dev,
5701 ipolicer->band_prof.max,
5702 sizeof(u16), GFP_KERNEL);
5703 if (!ipolicer->ref_count)
5704 return -ENOMEM;
5705 }
5706
5707 /* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
5708 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
5709
5710 nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
5711
5712 return 0;
5713 }
5714
5715 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
5716 {
5717 struct nix_ipolicer *ipolicer;
5718 int layer;
5719
5720 if (!rvu->hw->cap.ipolicer)
5721 return;
5722
5723 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5724 ipolicer = &nix_hw->ipolicer[layer];
5725
5726 if (!ipolicer->band_prof.max)
5727 continue;
5728
5729 kfree(ipolicer->band_prof.bmap);
5730 }
5731 }
5732
5733 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
5734 struct nix_hw *nix_hw, u16 pcifunc)
5735 {
5736 struct nix_ipolicer *ipolicer;
5737 int layer, hi_layer, prof_idx;
5738
5739 /* Bits [15:14] in profile index represent layer */
5740 layer = (req->qidx >> 14) & 0x03;
5741 prof_idx = req->qidx & 0x3FFF;
5742
5743 ipolicer = &nix_hw->ipolicer[layer];
5744 if (prof_idx >= ipolicer->band_prof.max)
5745 return -EINVAL;
5746
5747 /* Check if the profile is allocated to the requesting PCIFUNC or not
5748 * with the exception of AF. AF is allowed to read and update contexts.
5749 */
5750 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
5751 return -EINVAL;
5752
5753 /* If this profile is linked to a higher layer profile then check
5754 * whether that profile is also allocated to the requesting
5755 * PCIFUNC.
5756 */
5757 if (!req->prof.hl_en)
5758 return 0;
5759
5760 /* Leaf layer profile can link only to mid layer and
5761 * mid layer to top layer.
5762 */
5763 if (layer == BAND_PROF_LEAF_LAYER)
5764 hi_layer = BAND_PROF_MID_LAYER;
5765 else if (layer == BAND_PROF_MID_LAYER)
5766 hi_layer = BAND_PROF_TOP_LAYER;
5767 else
5768 return -EINVAL;
5769
5770 ipolicer = &nix_hw->ipolicer[hi_layer];
5771 prof_idx = req->prof.band_prof_id;
5772 if (prof_idx >= ipolicer->band_prof.max ||
5773 ipolicer->pfvf_map[prof_idx] != pcifunc)
5774 return -EINVAL;
5775
5776 return 0;
5777 }
5778
5779 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
5780 struct nix_bandprof_alloc_req *req,
5781 struct nix_bandprof_alloc_rsp *rsp)
5782 {
5783 int blkaddr, layer, prof, idx, err;
5784 u16 pcifunc = req->hdr.pcifunc;
5785 struct nix_ipolicer *ipolicer;
5786 struct nix_hw *nix_hw;
5787
5788 if (!rvu->hw->cap.ipolicer)
5789 return NIX_AF_ERR_IPOLICER_NOTSUPP;
5790
5791 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5792 if (err)
5793 return err;
5794
5795 mutex_lock(&rvu->rsrc_lock);
5796 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5797 if (layer == BAND_PROF_INVAL_LAYER)
5798 continue;
5799 if (!req->prof_count[layer])
5800 continue;
5801
5802 ipolicer = &nix_hw->ipolicer[layer];
5803 for (idx = 0; idx < req->prof_count[layer]; idx++) {
5804 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
5805 if (idx == MAX_BANDPROF_PER_PFFUNC)
5806 break;
5807
5808 prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5809 if (prof < 0)
5810 break;
5811 rsp->prof_count[layer]++;
5812 rsp->prof_idx[layer][idx] = prof;
5813 ipolicer->pfvf_map[prof] = pcifunc;
5814 }
5815 }
5816 mutex_unlock(&rvu->rsrc_lock);
5817 return 0;
5818 }
5819
5820 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
5821 {
5822 int blkaddr, layer, prof_idx, err;
5823 struct nix_ipolicer *ipolicer;
5824 struct nix_hw *nix_hw;
5825
5826 if (!rvu->hw->cap.ipolicer)
5827 return NIX_AF_ERR_IPOLICER_NOTSUPP;
5828
5829 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5830 if (err)
5831 return err;
5832
5833 mutex_lock(&rvu->rsrc_lock);
5834 /* Free all the profiles allocated to the PCIFUNC */
5835 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5836 if (layer == BAND_PROF_INVAL_LAYER)
5837 continue;
5838 ipolicer = &nix_hw->ipolicer[layer];
5839
5840 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
5841 if (ipolicer->pfvf_map[prof_idx] != pcifunc)
5842 continue;
5843
5844 /* Clear ratelimit aggregation, if any */
5845 if (layer == BAND_PROF_LEAF_LAYER &&
5846 ipolicer->match_id[prof_idx])
5847 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5848
5849 ipolicer->pfvf_map[prof_idx] = 0x00;
5850 ipolicer->match_id[prof_idx] = 0;
5851 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5852 }
5853 }
5854 mutex_unlock(&rvu->rsrc_lock);
5855 return 0;
5856 }
5857
5858 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
5859 struct nix_bandprof_free_req *req,
5860 struct msg_rsp *rsp)
5861 {
5862 int blkaddr, layer, prof_idx, idx, err;
5863 u16 pcifunc = req->hdr.pcifunc;
5864 struct nix_ipolicer *ipolicer;
5865 struct nix_hw *nix_hw;
5866
5867 if (req->free_all)
5868 return nix_free_all_bandprof(rvu, pcifunc);
5869
5870 if (!rvu->hw->cap.ipolicer)
5871 return NIX_AF_ERR_IPOLICER_NOTSUPP;
5872
5873 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5874 if (err)
5875 return err;
5876
5877 mutex_lock(&rvu->rsrc_lock);
5878 /* Free the requested profile indices */
5879 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5880 if (layer == BAND_PROF_INVAL_LAYER)
5881 continue;
5882 if (!req->prof_count[layer])
5883 continue;
5884
5885 ipolicer = &nix_hw->ipolicer[layer];
5886 for (idx = 0; idx < req->prof_count[layer]; idx++) {
5887 if (idx == MAX_BANDPROF_PER_PFFUNC)
5888 break;
5889 prof_idx = req->prof_idx[layer][idx];
5890 if (prof_idx >= ipolicer->band_prof.max ||
5891 ipolicer->pfvf_map[prof_idx] != pcifunc)
5892 continue;
5893
5894 /* Clear ratelimit aggregation, if any */
5895 if (layer == BAND_PROF_LEAF_LAYER &&
5896 ipolicer->match_id[prof_idx])
5897 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5898
5899 ipolicer->pfvf_map[prof_idx] = 0x00;
5900 ipolicer->match_id[prof_idx] = 0;
5901 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5902 }
5903 }
5904 mutex_unlock(&rvu->rsrc_lock);
5905 return 0;
5906 }
5907
5908 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
5909 struct nix_cn10k_aq_enq_req *aq_req,
5910 struct nix_cn10k_aq_enq_rsp *aq_rsp,
5911 u16 pcifunc, u8 ctype, u32 qidx)
5912 {
5913 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5914 aq_req->hdr.pcifunc = pcifunc;
5915 aq_req->ctype = ctype;
5916 aq_req->op = NIX_AQ_INSTOP_READ;
5917 aq_req->qidx = qidx;
5918
5919 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5920 (struct nix_aq_enq_req *)aq_req,
5921 (struct nix_aq_enq_rsp *)aq_rsp);
5922 }
5923
5924 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
5925 struct nix_hw *nix_hw,
5926 struct nix_cn10k_aq_enq_req *aq_req,
5927 struct nix_cn10k_aq_enq_rsp *aq_rsp,
5928 u32 leaf_prof, u16 mid_prof)
5929 {
5930 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5931 aq_req->hdr.pcifunc = 0x00;
5932 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
5933 aq_req->op = NIX_AQ_INSTOP_WRITE;
5934 aq_req->qidx = leaf_prof;
5935
5936 aq_req->prof.band_prof_id = mid_prof;
5937 aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
5938 aq_req->prof.hl_en = 1;
5939 aq_req->prof_mask.hl_en = 1;
5940
5941 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5942 (struct nix_aq_enq_req *)aq_req,
5943 (struct nix_aq_enq_rsp *)aq_rsp);
5944 }
5945
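/* When multiple RQs are marked with the same match_id, link their leaf
 * bandwidth profiles to a common mid layer profile so that the corresponding
 * flows are rate limited as one aggregate.
 */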
5946 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
5947 u16 rq_idx, u16 match_id)
5948 {
5949 int leaf_prof, mid_prof, leaf_match;
5950 struct nix_cn10k_aq_enq_req aq_req;
5951 struct nix_cn10k_aq_enq_rsp aq_rsp;
5952 struct nix_ipolicer *ipolicer;
5953 struct nix_hw *nix_hw;
5954 int blkaddr, idx, rc;
5955
5956 if (!rvu->hw->cap.ipolicer)
5957 return 0;
5958
5959 rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5960 if (rc)
5961 return rc;
5962
5963 /* Fetch the RQ's context to see if policing is enabled */
5964 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
5965 NIX_AQ_CTYPE_RQ, rq_idx);
5966 if (rc) {
5967 dev_err(rvu->dev,
5968 "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
5969 __func__, rq_idx, pcifunc);
5970 return rc;
5971 }
5972
5973 if (!aq_rsp.rq.policer_ena)
5974 return 0;
5975
5976 /* Get the bandwidth profile ID mapped to this RQ */
5977 leaf_prof = aq_rsp.rq.band_prof_id;
5978
5979 ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
5980 ipolicer->match_id[leaf_prof] = match_id;
5981
5982 /* Check if any other leaf profile is marked with same match_id */
5983 for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
5984 if (idx == leaf_prof)
5985 continue;
5986 if (ipolicer->match_id[idx] != match_id)
5987 continue;
5988
5989 leaf_match = idx;
5990 break;
5991 }
5992
5993 if (idx == ipolicer->band_prof.max)
5994 return 0;
5995
5996 /* Fetch the matching profile's context to check if it's already
5997 * mapped to a mid level profile.
5998 */
5999 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
6000 NIX_AQ_CTYPE_BANDPROF, leaf_match);
6001 if (rc) {
6002 dev_err(rvu->dev,
6003 "%s: Failed to fetch context of leaf profile %d\n",
6004 __func__, leaf_match);
6005 return rc;
6006 }
6007
6008 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
6009 if (aq_rsp.prof.hl_en) {
6010 /* Get the mid layer profile index and map the leaf_prof index
6011 * to it as well, so that flows that are being steered
6012 * to different RQs but marked with the same match_id
6013 * are rate limited in an aggregate fashion
6014 */
6015 mid_prof = aq_rsp.prof.band_prof_id;
6016 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
6017 &aq_req, &aq_rsp,
6018 leaf_prof, mid_prof);
6019 if (rc) {
6020 dev_err(rvu->dev,
6021 "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
6022 __func__, leaf_prof, mid_prof);
6023 goto exit;
6024 }
6025
6026 mutex_lock(&rvu->rsrc_lock);
6027 ipolicer->ref_count[mid_prof]++;
6028 mutex_unlock(&rvu->rsrc_lock);
6029 goto exit;
6030 }
6031
6032 /* Allocate a mid layer profile and
6033 * map both 'leaf_prof' and 'leaf_match' profiles to it.
6034 */
6035 mutex_lock(&rvu->rsrc_lock);
6036 mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
6037 if (mid_prof < 0) {
6038 dev_err(rvu->dev,
6039 "%s: Unable to allocate mid layer profile\n", __func__);
6040 mutex_unlock(&rvu->rsrc_lock);
6041 goto exit;
6042 }
6043 mutex_unlock(&rvu->rsrc_lock);
6044 ipolicer->pfvf_map[mid_prof] = 0x00;
6045 ipolicer->ref_count[mid_prof] = 0;
6046
6047 /* Initialize mid layer profile same as 'leaf_prof' */
6048 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
6049 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
6050 if (rc) {
6051 dev_err(rvu->dev,
6052 "%s: Failed to fetch context of leaf profile %d\n",
6053 __func__, leaf_prof);
6054 goto exit;
6055 }
6056
6057 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
6058 aq_req.hdr.pcifunc = 0x00;
6059 aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
6060 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
6061 aq_req.op = NIX_AQ_INSTOP_WRITE;
6062 memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
6063 memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
6064 /* Clear higher layer enable bit in the mid profile, just in case */
6065 aq_req.prof.hl_en = 0;
6066 aq_req.prof_mask.hl_en = 1;
6067
6068 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
6069 (struct nix_aq_enq_req *)&aq_req, NULL);
6070 if (rc) {
6071 dev_err(rvu->dev,
6072 "%s: Failed to INIT context of mid layer profile %d\n",
6073 __func__, mid_prof);
6074 goto exit;
6075 }
6076
6077 /* Map both leaf profiles to this mid layer profile */
6078 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
6079 &aq_req, &aq_rsp,
6080 leaf_prof, mid_prof);
6081 if (rc) {
6082 dev_err(rvu->dev,
6083 "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
6084 __func__, leaf_prof, mid_prof);
6085 goto exit;
6086 }
6087
6088 mutex_lock(&rvu->rsrc_lock);
6089 ipolicer->ref_count[mid_prof]++;
6090 mutex_unlock(&rvu->rsrc_lock);
6091
6092 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
6093 &aq_req, &aq_rsp,
6094 leaf_match, mid_prof);
6095 if (rc) {
6096 dev_err(rvu->dev,
6097 "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
6098 __func__, leaf_match, mid_prof);
6099 ipolicer->ref_count[mid_prof]--;
6100 goto exit;
6101 }
6102
6103 mutex_lock(&rvu->rsrc_lock);
6104 ipolicer->ref_count[mid_prof]++;
6105 mutex_unlock(&rvu->rsrc_lock);
6106
6107 exit:
6108 return rc;
6109 }
6110
6111 /* Called with mutex rsrc_lock */
6112 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
6113 u32 leaf_prof)
6114 {
6115 struct nix_cn10k_aq_enq_req aq_req;
6116 struct nix_cn10k_aq_enq_rsp aq_rsp;
6117 struct nix_ipolicer *ipolicer;
6118 u16 mid_prof;
6119 int rc;
6120
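/* Temporarily drop rsrc_lock (held by the caller, see the note above) around
 * the AQ context read, then re-acquire it before touching the policer
 * bookkeeping.
 */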
6121 mutex_unlock(&rvu->rsrc_lock);
6122
6123 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
6124 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
6125
6126 mutex_lock(&rvu->rsrc_lock);
6127 if (rc) {
6128 dev_err(rvu->dev,
6129 "%s: Failed to fetch context of leaf profile %d\n",
6130 __func__, leaf_prof);
6131 return;
6132 }
6133
6134 if (!aq_rsp.prof.hl_en)
6135 return;
6136
6137 mid_prof = aq_rsp.prof.band_prof_id;
6138 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
6139 ipolicer->ref_count[mid_prof]--;
6140 /* If ref_count is zero, free mid layer profile */
6141 if (!ipolicer->ref_count[mid_prof]) {
6142 ipolicer->pfvf_map[mid_prof] = 0x00;
6143 rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
6144 }
6145 }
6146
6147 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
6148 struct nix_bandprof_get_hwinfo_rsp *rsp)
6149 {
6150 struct nix_ipolicer *ipolicer;
6151 int blkaddr, layer, err;
6152 struct nix_hw *nix_hw;
6153 u64 tu;
6154
6155 if (!rvu->hw->cap.ipolicer)
6156 return NIX_AF_ERR_IPOLICER_NOTSUPP;
6157
6158 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6159 if (err)
6160 return err;
6161
6162 /* Return number of bandwidth profiles free at each layer */
6163 mutex_lock(&rvu->rsrc_lock);
6164 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
6165 if (layer == BAND_PROF_INVAL_LAYER)
6166 continue;
6167
6168 ipolicer = &nix_hw->ipolicer[layer];
6169 rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
6170 }
6171 mutex_unlock(&rvu->rsrc_lock);
6172
6173 /* Return the policer timeunit in nanoseconds */
6174 tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
6175 rsp->policer_timeunit = (tu + 1) * 100;
6176
6177 return 0;
6178 }
6179
6180 static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp,
6181 u32 mcast_grp_idx)
6182 {
6183 struct nix_mcast_grp_elem *iter;
6184 bool is_found = false;
6185
6186 list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
6187 if (iter->mcast_grp_idx == mcast_grp_idx) {
6188 is_found = true;
6189 break;
6190 }
6191 }
6192
6193 if (is_found)
6194 return iter;
6195
6196 return NULL;
6197 }
6198
6199 int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
6200 {
6201 struct nix_mcast_grp_elem *elem;
6202 struct nix_mcast_grp *mcast_grp;
6203 struct nix_hw *nix_hw;
6204 int blkaddr, ret;
6205
6206 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6207 nix_hw = get_nix_hw(rvu->hw, blkaddr);
6208 if (!nix_hw)
6209 return NIX_AF_ERR_INVALID_NIXBLK;
6210
6211 mcast_grp = &nix_hw->mcast_grp;
6212 mutex_lock(&mcast_grp->mcast_grp_lock);
6213 elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
6214 if (!elem)
6215 ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6216 else
6217 ret = elem->mce_start_index;
6218
6219 mutex_unlock(&mcast_grp->mcast_grp_lock);
6220 return ret;
6221 }
6222
6223 void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
6224 {
6225 struct nix_mcast_grp_destroy_req dreq = { 0 };
6226 struct nix_mcast_grp_update_req ureq = { 0 };
6227 struct nix_mcast_grp_update_rsp ursp = { 0 };
6228 struct nix_mcast_grp_elem *elem, *tmp;
6229 struct nix_mcast_grp *mcast_grp;
6230 struct nix_hw *nix_hw;
6231 int blkaddr;
6232
6233 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6234 nix_hw = get_nix_hw(rvu->hw, blkaddr);
6235 if (!nix_hw)
6236 return;
6237
6238 mcast_grp = &nix_hw->mcast_grp;
6239
6240 mutex_lock(&mcast_grp->mcast_grp_lock);
6241 list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) {
6242 struct nix_mce_list *mce_list;
6243 struct hlist_node *tmp;
6244 struct mce *mce;
6245
6246 /* If the pcifunc which created the multicast/mirror
6247 * group received an FLR, then delete the entire group.
6248 */
6249 if (elem->pcifunc == pcifunc) {
6250 /* Delete group */
6251 dreq.hdr.pcifunc = elem->pcifunc;
6252 dreq.mcast_grp_idx = elem->mcast_grp_idx;
6253 dreq.is_af = 1;
6254 rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
6255 continue;
6256 }
6257
6258 /* Iterate the group elements and delete the element which
6259 * received the FLR.
6260 */
6261 mce_list = &elem->mcast_mce_list;
6262 hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
6263 if (mce->pcifunc == pcifunc) {
6264 ureq.hdr.pcifunc = pcifunc;
6265 ureq.num_mce_entry = 1;
6266 ureq.mcast_grp_idx = elem->mcast_grp_idx;
6267 ureq.op = NIX_MCAST_OP_DEL_ENTRY;
6268 ureq.pcifunc[0] = pcifunc;
6269 ureq.is_af = 1;
6270 rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp);
6271 break;
6272 }
6273 }
6274 }
6275 mutex_unlock(&mcast_grp->mcast_grp_lock);
6276 }
6277
6278 int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
6279 u32 mcast_grp_idx, u16 mcam_index)
6280 {
6281 struct nix_mcast_grp_elem *elem;
6282 struct nix_mcast_grp *mcast_grp;
6283 struct nix_hw *nix_hw;
6284 int blkaddr, ret = 0;
6285
6286 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
6287 nix_hw = get_nix_hw(rvu->hw, blkaddr);
6288 if (!nix_hw)
6289 return NIX_AF_ERR_INVALID_NIXBLK;
6290
6291 mcast_grp = &nix_hw->mcast_grp;
6292 mutex_lock(&mcast_grp->mcast_grp_lock);
6293 elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
6294 if (!elem)
6295 ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6296 else
6297 elem->mcam_index = mcam_index;
6298
6299 mutex_unlock(&mcast_grp->mcast_grp_lock);
6300 return ret;
6301 }
6302
6303 int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
6304 struct nix_mcast_grp_create_req *req,
6305 struct nix_mcast_grp_create_rsp *rsp)
6306 {
6307 struct nix_mcast_grp_elem *elem;
6308 struct nix_mcast_grp *mcast_grp;
6309 struct nix_hw *nix_hw;
6310 int blkaddr, err;
6311
6312 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6313 if (err)
6314 return err;
6315
6316 mcast_grp = &nix_hw->mcast_grp;
6317 elem = kzalloc(sizeof(*elem), GFP_KERNEL);
6318 if (!elem)
6319 return -ENOMEM;
6320
6321 INIT_HLIST_HEAD(&elem->mcast_mce_list.head);
6322 elem->mcam_index = -1;
6323 elem->mce_start_index = -1;
6324 elem->pcifunc = req->hdr.pcifunc;
6325 elem->dir = req->dir;
6326 elem->mcast_grp_idx = mcast_grp->next_grp_index++;
6327
6328 mutex_lock(&mcast_grp->mcast_grp_lock);
6329 list_add_tail(&elem->list, &mcast_grp->mcast_grp_head);
6330 mcast_grp->count++;
6331 mutex_unlock(&mcast_grp->mcast_grp_lock);
6332
6333 rsp->mcast_grp_idx = elem->mcast_grp_idx;
6334 return 0;
6335 }
6336
6337 int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
6338 struct nix_mcast_grp_destroy_req *req,
6339 struct msg_rsp *rsp)
6340 {
6341 struct npc_delete_flow_req uninstall_req = { 0 };
6342 struct npc_delete_flow_rsp uninstall_rsp = { 0 };
6343 struct nix_mcast_grp_elem *elem;
6344 struct nix_mcast_grp *mcast_grp;
6345 int blkaddr, err, ret = 0;
6346 struct nix_mcast *mcast;
6347 struct nix_hw *nix_hw;
6348
6349 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6350 if (err)
6351 return err;
6352
6353 mcast_grp = &nix_hw->mcast_grp;
6354
6355 /* If AF is requesting the deletion,
6356 * then AF already holds the lock
6357 */
6358 if (!req->is_af)
6359 mutex_lock(&mcast_grp->mcast_grp_lock);
6360
6361 elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
6362 if (!elem) {
6363 ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6364 goto unlock_grp;
6365 }
6366
6367 /* If no mce entries are associated with the group
6368 * then just remove it from the global list.
6369 */
6370 if (!elem->mcast_mce_list.count)
6371 goto delete_grp;
6372
6373 /* Delete the associated mcam entry and
6374 * remove all mce entries from the group
6375 */
6376 mcast = &nix_hw->mcast;
6377 mutex_lock(&mcast->mce_lock);
6378 if (elem->mcam_index != -1) {
6379 uninstall_req.hdr.pcifunc = req->hdr.pcifunc;
6380 uninstall_req.entry = elem->mcam_index;
6381 rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
6382 }
6383
6384 nix_free_mce_list(mcast, elem->mcast_mce_list.count,
6385 elem->mce_start_index, elem->dir);
6386 nix_delete_mcast_mce_list(&elem->mcast_mce_list);
6387 mutex_unlock(&mcast->mce_lock);
6388
6389 delete_grp:
6390 list_del(&elem->list);
6391 kfree(elem);
6392 mcast_grp->count--;
6393
6394 unlock_grp:
6395 if (!req->is_af)
6396 mutex_unlock(&mcast_grp->mcast_grp_lock);
6397
6398 return ret;
6399 }
6400
6401 int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
6402 struct nix_mcast_grp_update_req *req,
6403 struct nix_mcast_grp_update_rsp *rsp)
6404 {
6405 struct nix_mcast_grp_destroy_req dreq = { 0 };
6406 struct npc_mcam *mcam = &rvu->hw->mcam;
6407 struct nix_mcast_grp_elem *elem;
6408 struct nix_mcast_grp *mcast_grp;
6409 int blkaddr, err, npc_blkaddr;
6410 u16 prev_count, new_count;
6411 struct nix_mcast *mcast;
6412 struct nix_hw *nix_hw;
6413 int i, ret;
6414
6415 if (!req->num_mce_entry)
6416 return 0;
6417
6418 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6419 if (err)
6420 return err;
6421
6422 mcast_grp = &nix_hw->mcast_grp;
6423
6424 /* If AF is requesting the update,
6425 * then AF already holds the lock
6426 */
6427 if (!req->is_af)
6428 mutex_lock(&mcast_grp->mcast_grp_lock);
6429
6430 elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
6431 if (!elem) {
6432 ret = NIX_AF_ERR_INVALID_MCAST_GRP;
6433 goto unlock_grp;
6434 }
6435
6436 /* If any pcifunc matches the group's pcifunc, then we can
6437 * delete the entire group.
6438 */
6439 if (req->op == NIX_MCAST_OP_DEL_ENTRY) {
6440 for (i = 0; i < req->num_mce_entry; i++) {
6441 if (elem->pcifunc == req->pcifunc[i]) {
6442 /* Delete group */
6443 dreq.hdr.pcifunc = elem->pcifunc;
6444 dreq.mcast_grp_idx = elem->mcast_grp_idx;
6445 dreq.is_af = 1;
6446 rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
6447 ret = 0;
6448 goto unlock_grp;
6449 }
6450 }
6451 }
6452
6453 mcast = &nix_hw->mcast;
6454 mutex_lock(&mcast->mce_lock);
6455 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
6456 if (elem->mcam_index != -1)
6457 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);
6458
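/* Resize the MCE list: free the old contiguous range, allocate a new one of
 * the required size, and on failure restore the previous allocation and
 * re-enable the MCAM entry.
 */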
6459 prev_count = elem->mcast_mce_list.count;
6460 if (req->op == NIX_MCAST_OP_ADD_ENTRY) {
6461 new_count = prev_count + req->num_mce_entry;
6462 if (prev_count)
6463 nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
6464
6465 elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
6466
6467 /* It is possible not to get contiguous memory */
6468 if (elem->mce_start_index < 0) {
6469 if (elem->mcam_index != -1) {
6470 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6471 elem->mcam_index, true);
6472 ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
6473 goto unlock_mce;
6474 }
6475 }
6476
6477 ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req);
6478 if (ret) {
6479 nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
6480 if (prev_count)
6481 elem->mce_start_index = nix_alloc_mce_list(mcast,
6482 prev_count,
6483 elem->dir);
6484
6485 if (elem->mcam_index != -1)
6486 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6487 elem->mcam_index, true);
6488
6489 goto unlock_mce;
6490 }
6491 } else {
6492 if (!prev_count || prev_count < req->num_mce_entry) {
6493 if (elem->mcam_index != -1)
6494 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
6495 elem->mcam_index, true);
6496 ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
6497 goto unlock_mce;
6498 }
6499
6500 nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
6501 new_count = prev_count - req->num_mce_entry;
6502 elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
6503 ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
6504 if (ret) {
6505 nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
6506 elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
6507 if (elem->mcam_index != -1)
6508 npc_enable_mcam_entry(rvu, mcam,
6509 npc_blkaddr,
6510 elem->mcam_index,
6511 true);
6512
6513 goto unlock_mce;
6514 }
6515 }
6516
6517 if (elem->mcam_index == -1) {
6518 rsp->mce_start_index = elem->mce_start_index;
6519 ret = 0;
6520 goto unlock_mce;
6521 }
6522
6523 nix_mcast_update_action(rvu, elem);
6524 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
6525 rsp->mce_start_index = elem->mce_start_index;
6526 ret = 0;
6527
6528 unlock_mce:
6529 mutex_unlock(&mcast->mce_lock);
6530
6531 unlock_grp:
6532 if (!req->is_af)
6533 mutex_unlock(&mcast_grp->mcast_grp_lock);
6534
6535 return ret;
6536 }
6537