/* /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9) */
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
#include "rvu_npc_hash.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M
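
/* Each entry in MBOX_UP_CGX_MESSAGES expands through the macro above into
 * one allocator for that AF-to-PF (uplink) message. As an illustrative
 * sketch (the real request/response types come from the message table in
 * the mbox header), the CGX link event entry expands to roughly:
 *
 *	static struct cgx_link_info_msg *
 *	otx2_mbox_alloc_msg_cgx_link_event(struct rvu *rvu, int devid);
 *
 * which is what cgx_notify_pfs() below uses to build the link notification
 * sent to each mapped PF.
 */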

bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	return  (cgx_features_get(cgxd) & feature);
}
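
/* Callers in this file use the helper above to gate optional MAC features
 * before touching hardware, e.g.
 *
 *	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
 *		return 0;
 *
 * as done in rvu_cgx_ptp_rx_cfg(), and with RVU_LMAC_FEAT_FC in
 * rvu_cgx_cfg_pause_frm() further below.
 */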

#define CGX_OFFSET(x)			((x) * rvu->hw->lmac_per_cgx)
/* Returns bitmap of mapped PFs */
static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one pf mapped to a cgx lmac port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap,
				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
}

static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}
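
/* The per-PF map entry packs the CGX index into the upper nibble and the
 * LMAC index into the lower nibble (e.g. cgx_id 2, lmac_id 3 encode to
 * 0x23); rvu_get_cgx_lmac_id(), used throughout this file, is assumed to
 * perform the matching decode of rvu->pf2cgxlmac_map[pf].
 */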
831463f382SLinu Cherian 
rvu_cgx_pdata(u8 cgx_id,struct rvu * rvu)8494d942c5SGeetha sowjanya void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
851463f382SLinu Cherian {
8612e4c9abSLinu Cherian 	if (cgx_id >= rvu->cgx_cnt_max)
871463f382SLinu Cherian 		return NULL;
881463f382SLinu Cherian 
891463f382SLinu Cherian 	return rvu->cgx_idmap[cgx_id];
901463f382SLinu Cherian }
911463f382SLinu Cherian 
/* Return the first enabled CGX instance; if none are enabled, return NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
	int first_enabled_cgx = 0;
	void *cgxd = NULL;

	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
		if (cgxd)
			break;
	}

	return cgxd;
}

/* Based on P2X connectivity find mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x;

	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
	if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
}

static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int pf = PF_CGXMAP_BASE;
	unsigned long lmac_bmap;
	int size, free_pkind;
	int cgx, lmac, iter;
	int numvfs, hwvfs;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map =
		devm_kzalloc(rvu->dev,
			     cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
			     GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			if (iter >= MAX_LMAC_COUNT)
				continue;
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
					      iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
			rvu->cgx_mapped_pfs++;
			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
			rvu->cgx_mapped_vfs += numvfs;
			pf++;
		}
	}
	return 0;
}
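
/* After the mapping above, two lookups serve the rest of the driver:
 * pf2cgxlmac_map[pf] holds the packed cgx/lmac id for a PF (0xFF means
 * unmapped), and cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] is a bitmap of the
 * PF(s) bound to that port, read back via cgxlmac_to_pfmap() above.
 */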

static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}
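
/* cgx_lmac_postevent() (atomic, from IRQ context) and
 * rvu_cgx_send_link_info() are the producers for the cgx_evq_head list;
 * cgx_evhandler_task() below is the single consumer that drains the list
 * under cgx_evq_lock and forwards each event via cgx_notify_pfs().
 */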
231afb8902cSLinu Cherian 
cgx_notify_pfs(struct cgx_link_event * event,struct rvu * rvu)23261071a87SLinu Cherian static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
23361071a87SLinu Cherian {
23461071a87SLinu Cherian 	struct cgx_link_user_info *linfo;
23561071a87SLinu Cherian 	struct cgx_link_info_msg *msg;
23661071a87SLinu Cherian 	unsigned long pfmap;
237a88e0f93SSubbaraya Sundeep 	int pfid;
23861071a87SLinu Cherian 
23961071a87SLinu Cherian 	linfo = &event->link_uinfo;
24061071a87SLinu Cherian 	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
24117d1368fSHariprasad Kelam 	if (!pfmap) {
24217d1368fSHariprasad Kelam 		dev_err(rvu->dev, "CGX port%d:%d not mapped with PF\n",
24317d1368fSHariprasad Kelam 			event->cgx_id, event->lmac_id);
24417d1368fSHariprasad Kelam 		return;
24517d1368fSHariprasad Kelam 	}
24661071a87SLinu Cherian 
24761071a87SLinu Cherian 	do {
248f2e664adSRakesh Babu Saladi 		pfid = find_first_bit(&pfmap,
249f2e664adSRakesh Babu Saladi 				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
25061071a87SLinu Cherian 		clear_bit(pfid, &pfmap);
25161071a87SLinu Cherian 
25261071a87SLinu Cherian 		/* check if notification is enabled */
25361071a87SLinu Cherian 		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
25461071a87SLinu Cherian 			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
25561071a87SLinu Cherian 				 event->cgx_id, event->lmac_id,
25661071a87SLinu Cherian 				 linfo->link_up ? "UP" : "DOWN");
25761071a87SLinu Cherian 			continue;
25861071a87SLinu Cherian 		}
25961071a87SLinu Cherian 
260a88e0f93SSubbaraya Sundeep 		mutex_lock(&rvu->mbox_lock);
261a88e0f93SSubbaraya Sundeep 
26261071a87SLinu Cherian 		/* Send mbox message to PF */
263eac66686SSunil Goutham 		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
264a88e0f93SSubbaraya Sundeep 		if (!msg) {
265a88e0f93SSubbaraya Sundeep 			mutex_unlock(&rvu->mbox_lock);
26661071a87SLinu Cherian 			continue;
267a88e0f93SSubbaraya Sundeep 		}
268a88e0f93SSubbaraya Sundeep 
26961071a87SLinu Cherian 		msg->link_info = *linfo;
270a88e0f93SSubbaraya Sundeep 
271a88e0f93SSubbaraya Sundeep 		otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);
272a88e0f93SSubbaraya Sundeep 
273a88e0f93SSubbaraya Sundeep 		otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
274a88e0f93SSubbaraya Sundeep 
275a88e0f93SSubbaraya Sundeep 		mutex_unlock(&rvu->mbox_lock);
27661071a87SLinu Cherian 	} while (pfmap);
27761071a87SLinu Cherian }
27861071a87SLinu Cherian 
cgx_evhandler_task(struct work_struct * work)279afb8902cSLinu Cherian static void cgx_evhandler_task(struct work_struct *work)
280afb8902cSLinu Cherian {
281afb8902cSLinu Cherian 	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
282afb8902cSLinu Cherian 	struct cgx_evq_entry *qentry;
283afb8902cSLinu Cherian 	struct cgx_link_event *event;
284afb8902cSLinu Cherian 	unsigned long flags;
285afb8902cSLinu Cherian 
286afb8902cSLinu Cherian 	do {
287afb8902cSLinu Cherian 		/* Dequeue an event */
288afb8902cSLinu Cherian 		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
289afb8902cSLinu Cherian 		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
290afb8902cSLinu Cherian 						  struct cgx_evq_entry,
291afb8902cSLinu Cherian 						  evq_node);
292afb8902cSLinu Cherian 		if (qentry)
293afb8902cSLinu Cherian 			list_del(&qentry->evq_node);
294afb8902cSLinu Cherian 		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
295afb8902cSLinu Cherian 		if (!qentry)
296afb8902cSLinu Cherian 			break; /* nothing more to process */
297afb8902cSLinu Cherian 
298afb8902cSLinu Cherian 		event = &qentry->link_event;
299afb8902cSLinu Cherian 
30061071a87SLinu Cherian 		/* process event */
30161071a87SLinu Cherian 		cgx_notify_pfs(event, rvu);
302afb8902cSLinu Cherian 		kfree(qentry);
303afb8902cSLinu Cherian 	} while (1);
304afb8902cSLinu Cherian }
305afb8902cSLinu Cherian 
cgx_lmac_event_handler_init(struct rvu * rvu)30644990aaaSLinu Cherian static int cgx_lmac_event_handler_init(struct rvu *rvu)
307afb8902cSLinu Cherian {
30891c6945eSHariprasad Kelam 	unsigned long lmac_bmap;
309afb8902cSLinu Cherian 	struct cgx_event_cb cb;
310afb8902cSLinu Cherian 	int cgx, lmac, err;
311afb8902cSLinu Cherian 	void *cgxd;
312afb8902cSLinu Cherian 
313afb8902cSLinu Cherian 	spin_lock_init(&rvu->cgx_evq_lock);
314afb8902cSLinu Cherian 	INIT_LIST_HEAD(&rvu->cgx_evq_head);
315afb8902cSLinu Cherian 	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
316afb8902cSLinu Cherian 	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
317afb8902cSLinu Cherian 	if (!rvu->cgx_evh_wq) {
318afb8902cSLinu Cherian 		dev_err(rvu->dev, "alloc workqueue failed");
31944990aaaSLinu Cherian 		return -ENOMEM;
320afb8902cSLinu Cherian 	}
321afb8902cSLinu Cherian 
322afb8902cSLinu Cherian 	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
323afb8902cSLinu Cherian 	cb.data = rvu;
324afb8902cSLinu Cherian 
32512e4c9abSLinu Cherian 	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
326afb8902cSLinu Cherian 		cgxd = rvu_cgx_pdata(cgx, rvu);
327d3b2b9abSLinu Cherian 		if (!cgxd)
328d3b2b9abSLinu Cherian 			continue;
32991c6945eSHariprasad Kelam 		lmac_bmap = cgx_get_lmac_bmap(cgxd);
330f2e664adSRakesh Babu Saladi 		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
331afb8902cSLinu Cherian 			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
332afb8902cSLinu Cherian 			if (err)
333afb8902cSLinu Cherian 				dev_err(rvu->dev,
334afb8902cSLinu Cherian 					"%d:%d handler register failed\n",
335afb8902cSLinu Cherian 					cgx, lmac);
336afb8902cSLinu Cherian 		}
337afb8902cSLinu Cherian 	}
33844990aaaSLinu Cherian 
33944990aaaSLinu Cherian 	return 0;
340afb8902cSLinu Cherian }
341afb8902cSLinu Cherian 
rvu_cgx_wq_destroy(struct rvu * rvu)34244990aaaSLinu Cherian static void rvu_cgx_wq_destroy(struct rvu *rvu)
343afb8902cSLinu Cherian {
344afb8902cSLinu Cherian 	if (rvu->cgx_evh_wq) {
345afb8902cSLinu Cherian 		destroy_workqueue(rvu->cgx_evh_wq);
346afb8902cSLinu Cherian 		rvu->cgx_evh_wq = NULL;
347afb8902cSLinu Cherian 	}
348afb8902cSLinu Cherian }
349afb8902cSLinu Cherian 
rvu_cgx_init(struct rvu * rvu)35044990aaaSLinu Cherian int rvu_cgx_init(struct rvu *rvu)
3511463f382SLinu Cherian {
35244990aaaSLinu Cherian 	int cgx, err;
353d3b2b9abSLinu Cherian 	void *cgxd;
3541463f382SLinu Cherian 
	/* CGX port IDs start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return 0;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed, before
	 * we turn on the links
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of the mapped PF and other PFs are not allowed. This fn() checks
 * whether a PFFUNC is permitted to do the config or not.
 */
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}
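
/* The check relies on the RVU pcifunc layout: a non-zero function field
 * (pcifunc & RVU_PFVF_FUNC_MASK) denotes a VF and is rejected, and the PF
 * number from rvu_get_pf() must map to a CGX LMAC. Mailbox handlers below
 * typically open with
 *
 *	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
 *		return -EPERM;
 *
 * (or return LMAC_AF_ERR_PERM_DENIED).
 */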

void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	if (enable)
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}

int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
	struct mac_ops *mac_ops;

	mac_ops = get_mac_ops(cgxd);
	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	int i = 0, lmac_count = 0;
	struct mac_ops *mac_ops;
	u8 max_dmac_filters;
	u8 cgx_id, lmac_id;
	void *cgx_dev;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return;

	if (rvu_npc_exact_has_match_table(rvu)) {
		rvu_npc_exact_reset(rvu, pcifunc);
		return;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgx_dev = cgx_get_pdata(cgx_id);
	lmac_count = cgx_get_lmac_cnt(cgx_dev);

	mac_ops = get_mac_ops(cgx_dev);
	if (!mac_ops)
		return;

	max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;

	for (i = 0; i < max_dmac_filters; i++)
		cgx_lmac_addr_del(cgx_id, lmac_id, i);

	/* cgx_lmac_addr_del() does not clear the entry at index 0,
	 * so that has to be done explicitly.
	 */
	cgx_lmac_addr_reset(cgx_id, lmac_id);
}
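
/* The per-LMAC DMAC filter budget above is derived by evenly splitting the
 * MAC's total filter count (mac_ops->dmac_filter_count) across its LMACs.
 * When the NPC exact-match table is in use the filters live there instead,
 * which is why the function defers to rvu_npc_exact_reset() in that case.
 */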

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	/* Rx stats */
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
			       struct rpm_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}
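
/* Both mailbox handlers above funnel into rvu_lmac_get_stats(), which picks
 * the response layout at run time: if the MAC reports RPM-sized counter sets
 * (RPM_RX_STATS_COUNT / RPM_TX_STATS_COUNT) the void *rsp is filled as a
 * struct rpm_stats_rsp, otherwise as a struct cgx_stats_rsp.
 */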

int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct rvu_pfvf	*parent_pf;
	struct mac_ops *mac_ops;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	parent_pf = &rvu->pf[pf];
	/* To ensure that resetting CGX stats won't affect VF stats,
	 * check whether the port is used only by the PF interface.
	 * If not, return.
	 */
	if (parent_pf->cgx_users > 1) {
		dev_info(rvu->dev, "CGX busy, could not reset statistics\n");
		return 0;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_stats_reset(cgxd, lmac);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);
	return  mac_ops->get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_set(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
				      struct cgx_mac_addr_add_req *req,
				      struct cgx_mac_addr_add_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_add(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
	if (rc >= 0) {
		rsp->index = rc;
		return 0;
	}

	return rc;
}

int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
				      struct cgx_mac_addr_del_req *req,
				      struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_del(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}

int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
					     struct msg_req *req,
					     struct cgx_max_dmac_entries_get_rsp
					     *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	/* If the msg is received from PFs (which are not mapped to CGX LMACs)
	 * or from a VF, then no entries are allocated for DMAC filters at
	 * CGX level. So return zero.
	 */
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
		rsp->max_dmac_filters = 0;
		return 0;
	}

	if (rvu_npc_exact_has_match_table(rvu)) {
		rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
		return 0;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;
	u64 cfg;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* copy 48 bit mac address to rsp->mac_addr */
	u64_to_ether_addr(cfg, rsp->mac_addr);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Disable drop on non hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Disable drop on non hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX/RPM LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes
	 * and if PTP is disabled then no shift is required
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;
	/* This flag is required to clean up CGX conf if app gets killed */
	pfvf->hw_rx_tstamp_en = enable;

	/* Inform MCS about 8B RX header */
	rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
		return -EPERM;

	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}
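
/* pf_notify_bmap is the gate used by cgx_notify_pfs(): only PFs that have
 * enabled link events via cgx_start_linkevents receive the CGX link event
 * uplink message, for the rest the link change is only logged. Enabling
 * also pushes the current link state right away through
 * rvu_cgx_send_link_info() so the PF does not miss the initial status.
 */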

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->lmac_features = cgx_features_get(cgxd);

	return 0;
}

u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	u32 fifo_len;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	fifo_len = mac_ops ? mac_ops->fifo_len : 0;

	return fifo_len;
}
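
/* Note that rvu_cgx_get_fifolen() uses the mac_ops of the first enabled
 * CGX/RPM instance (rvu_first_cgx_pdata()), which assumes all MACs on the
 * chip expose the same FIFO size; rvu_cgx_get_lmac_fifolen() below queries
 * a specific cgx/lmac when a per-LMAC value is needed.
 */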
9346e54e1c5SHariprasad Kelam 
rvu_cgx_get_lmac_fifolen(struct rvu * rvu,int cgx,int lmac)935459f326eSSunil Goutham u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
936459f326eSSunil Goutham {
937459f326eSSunil Goutham 	struct mac_ops *mac_ops;
938459f326eSSunil Goutham 	void *cgxd;
939459f326eSSunil Goutham 
940459f326eSSunil Goutham 	cgxd = rvu_cgx_pdata(cgx, rvu);
941459f326eSSunil Goutham 	if (!cgxd)
942459f326eSSunil Goutham 		return 0;
943459f326eSSunil Goutham 
944459f326eSSunil Goutham 	mac_ops = get_mac_ops(cgxd);
945459f326eSSunil Goutham 	if (!mac_ops->lmac_fifo_len)
946459f326eSSunil Goutham 		return 0;
947459f326eSSunil Goutham 
948459f326eSSunil Goutham 	return mac_ops->lmac_fifo_len(cgxd, lmac);
949459f326eSSunil Goutham }
950459f326eSSunil Goutham 
rvu_cgx_config_intlbk(struct rvu * rvu,u16 pcifunc,bool en)95123999b30SGeetha sowjanya static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
95223999b30SGeetha sowjanya {
953786621d2SGeetha sowjanya 	int pf = rvu_get_pf(pcifunc);
9543ad3f8f9SHariprasad Kelam 	struct mac_ops *mac_ops;
95523999b30SGeetha sowjanya 	u8 cgx_id, lmac_id;
95623999b30SGeetha sowjanya 
9576fd2a71bSSunil Goutham 	if (!is_cgx_config_permitted(rvu, pcifunc))
9586fd2a71bSSunil Goutham 		return -EPERM;
95923999b30SGeetha sowjanya 
960786621d2SGeetha sowjanya 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
9613ad3f8f9SHariprasad Kelam 	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
96223999b30SGeetha sowjanya 
9633ad3f8f9SHariprasad Kelam 	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
96423999b30SGeetha sowjanya 					  lmac_id, en);
96523999b30SGeetha sowjanya }
96623999b30SGeetha sowjanya 
rvu_mbox_handler_cgx_intlbk_enable(struct rvu * rvu,struct msg_req * req,struct msg_rsp * rsp)967eac66686SSunil Goutham int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
96823999b30SGeetha sowjanya 				       struct msg_rsp *rsp)
96923999b30SGeetha sowjanya {
97023999b30SGeetha sowjanya 	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
97123999b30SGeetha sowjanya 	return 0;
97223999b30SGeetha sowjanya }
97323999b30SGeetha sowjanya 
rvu_mbox_handler_cgx_intlbk_disable(struct rvu * rvu,struct msg_req * req,struct msg_rsp * rsp)974eac66686SSunil Goutham int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
97523999b30SGeetha sowjanya 					struct msg_rsp *rsp)
97623999b30SGeetha sowjanya {
97723999b30SGeetha sowjanya 	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
97823999b30SGeetha sowjanya 	return 0;
97923999b30SGeetha sowjanya }
980f967488dSLinu Cherian 
rvu_cgx_cfg_pause_frm(struct rvu * rvu,u16 pcifunc,u8 tx_pause,u8 rx_pause)981e7400038SHariprasad Kelam int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
982e7400038SHariprasad Kelam {
983e7400038SHariprasad Kelam 	int pf = rvu_get_pf(pcifunc);
984e7400038SHariprasad Kelam 	u8 rx_pfc = 0, tx_pfc = 0;
985e7400038SHariprasad Kelam 	struct mac_ops *mac_ops;
986e7400038SHariprasad Kelam 	u8 cgx_id, lmac_id;
987e7400038SHariprasad Kelam 	void *cgxd;
988e7400038SHariprasad Kelam 
989e7400038SHariprasad Kelam 	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
990e7400038SHariprasad Kelam 		return 0;
991e7400038SHariprasad Kelam 
992e7400038SHariprasad Kelam 	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
993e7400038SHariprasad Kelam 	 * if received from other PF/VF simply ACK, nothing to do.
994e7400038SHariprasad Kelam 	 */
995e7400038SHariprasad Kelam 	if (!is_pf_cgxmapped(rvu, pf))
996e7400038SHariprasad Kelam 		return LMAC_AF_ERR_PF_NOT_MAPPED;
997e7400038SHariprasad Kelam 
998e7400038SHariprasad Kelam 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
999e7400038SHariprasad Kelam 	cgxd = rvu_cgx_pdata(cgx_id, rvu);
1000e7400038SHariprasad Kelam 	mac_ops = get_mac_ops(cgxd);
1001e7400038SHariprasad Kelam 
1002e7400038SHariprasad Kelam 	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
1003e7400038SHariprasad Kelam 	if (tx_pfc || rx_pfc) {
1004e7400038SHariprasad Kelam 		dev_warn(rvu->dev,
1005e7400038SHariprasad Kelam 			 "Can not configure 802.3X flow control as PFC frames are enabled");
1006e7400038SHariprasad Kelam 		return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
1007e7400038SHariprasad Kelam 	}
1008e7400038SHariprasad Kelam 
1009e7400038SHariprasad Kelam 	mutex_lock(&rvu->rsrc_lock);
1010e7400038SHariprasad Kelam 	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
1011e7400038SHariprasad Kelam 			       pcifunc & RVU_PFVF_FUNC_MASK)) {
1012e7400038SHariprasad Kelam 		mutex_unlock(&rvu->rsrc_lock);
1013e7400038SHariprasad Kelam 		return LMAC_AF_ERR_PERM_DENIED;
1014e7400038SHariprasad Kelam 	}
1015e7400038SHariprasad Kelam 	mutex_unlock(&rvu->rsrc_lock);
1016e7400038SHariprasad Kelam 
1017e7400038SHariprasad Kelam 	return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
1018e7400038SHariprasad Kelam }
1019e7400038SHariprasad Kelam 
rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu * rvu,struct cgx_pause_frm_cfg * req,struct cgx_pause_frm_cfg * rsp)1020f7e086e7SGeetha sowjanya int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
1021f7e086e7SGeetha sowjanya 				       struct cgx_pause_frm_cfg *req,
1022f7e086e7SGeetha sowjanya 				       struct cgx_pause_frm_cfg *rsp)
1023f7e086e7SGeetha sowjanya {
1024f7e086e7SGeetha sowjanya 	int pf = rvu_get_pf(req->hdr.pcifunc);
10251845ada4SRakesh Babu 	struct mac_ops *mac_ops;
1026f7e086e7SGeetha sowjanya 	u8 cgx_id, lmac_id;
1027e7400038SHariprasad Kelam 	int err = 0;
10281845ada4SRakesh Babu 	void *cgxd;
1029f7e086e7SGeetha sowjanya 
1030f7e086e7SGeetha sowjanya 	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
1031f7e086e7SGeetha sowjanya 	 * reject it if it comes from any other PF/VF.
1032f7e086e7SGeetha sowjanya 	 */
1033f7e086e7SGeetha sowjanya 	if (!is_pf_cgxmapped(rvu, pf))
1034f7e086e7SGeetha sowjanya 		return -ENODEV;
1035f7e086e7SGeetha sowjanya 
1036f7e086e7SGeetha sowjanya 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
10371845ada4SRakesh Babu 	cgxd = rvu_cgx_pdata(cgx_id, rvu);
10381845ada4SRakesh Babu 	mac_ops = get_mac_ops(cgxd);
1039f7e086e7SGeetha sowjanya 
1040f7e086e7SGeetha sowjanya 	if (req->set)
1041e7400038SHariprasad Kelam 		err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
1042f7e086e7SGeetha sowjanya 	else
1043e7400038SHariprasad Kelam 		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
1044e7400038SHariprasad Kelam 
1045e7400038SHariprasad Kelam 	return err;
1046f7e086e7SGeetha sowjanya }
1047f7e086e7SGeetha sowjanya 
1048bd74d4eaSFelix Manlunas int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
1049bd74d4eaSFelix Manlunas 					   struct msg_rsp *rsp)
1050bd74d4eaSFelix Manlunas {
1051bd74d4eaSFelix Manlunas 	int pf = rvu_get_pf(req->hdr.pcifunc);
1052bd74d4eaSFelix Manlunas 	u8 cgx_id, lmac_id;
1053bd74d4eaSFelix Manlunas 
1054bd74d4eaSFelix Manlunas 	if (!is_pf_cgxmapped(rvu, pf))
10557278c359SNaveen Mamindlapalli 		return LMAC_AF_ERR_PF_NOT_MAPPED;
1056bd74d4eaSFelix Manlunas 
1057bd74d4eaSFelix Manlunas 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1058bd74d4eaSFelix Manlunas 	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
1059bd74d4eaSFelix Manlunas }
1060bd74d4eaSFelix Manlunas 
1061f967488dSLinu Cherian /* Finds the cumulative value of NIX rx/tx counters from the LFs of a PF and
1062f967488dSLinu Cherian  * those of its VFs as well, i.e. the NIX rx/tx counters at the CGX port level.
1063f967488dSLinu Cherian  */
1064f967488dSLinu Cherian int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
1065f967488dSLinu Cherian 			   int index, int rxtxflag, u64 *stat)
1066f967488dSLinu Cherian {
1067f967488dSLinu Cherian 	struct rvu_block *block;
1068f967488dSLinu Cherian 	int blkaddr;
1069f967488dSLinu Cherian 	u16 pcifunc;
1070f967488dSLinu Cherian 	int pf, lf;
1071f967488dSLinu Cherian 
10720617aa98SDan Carpenter 	*stat = 0;
10730617aa98SDan Carpenter 
1074f967488dSLinu Cherian 	if (!cgxd || !rvu)
1075f967488dSLinu Cherian 		return -EINVAL;
1076f967488dSLinu Cherian 
1077f967488dSLinu Cherian 	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
1078f967488dSLinu Cherian 	if (pf < 0)
1079f967488dSLinu Cherian 		return pf;
1080f967488dSLinu Cherian 
1081f967488dSLinu Cherian 	/* Assumes that the LFs of a PF and all of its VFs belong to the
1082f967488dSLinu Cherian 	 * same NIX block.
1083f967488dSLinu Cherian 	 */
1084f967488dSLinu Cherian 	pcifunc = pf << RVU_PFVF_PF_SHIFT;
1085f967488dSLinu Cherian 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1086f967488dSLinu Cherian 	if (blkaddr < 0)
1087f967488dSLinu Cherian 		return 0;
1088f967488dSLinu Cherian 	block = &rvu->hw->block[blkaddr];
1089f967488dSLinu Cherian 
1090f967488dSLinu Cherian 	for (lf = 0; lf < block->lf.max; lf++) {
1091f967488dSLinu Cherian 		/* Check if an LF is attached to this PF or one of its VFs */
1092f967488dSLinu Cherian 		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
1093f967488dSLinu Cherian 			 ~RVU_PFVF_FUNC_MASK)))
1094f967488dSLinu Cherian 			continue;
1095f967488dSLinu Cherian 		if (rxtxflag == NIX_STATS_RX)
1096f967488dSLinu Cherian 			*stat += rvu_read64(rvu, blkaddr,
1097f967488dSLinu Cherian 					    NIX_AF_LFX_RX_STATX(lf, index));
1098f967488dSLinu Cherian 		else
1099f967488dSLinu Cherian 			*stat += rvu_read64(rvu, blkaddr,
1100f967488dSLinu Cherian 					    NIX_AF_LFX_TX_STATX(lf, index));
1101f967488dSLinu Cherian 	}
1102f967488dSLinu Cherian 
1103f967488dSLinu Cherian 	return 0;
1104f967488dSLinu Cherian }
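
/* Example (illustrative sketch, not part of the driver): summing the NIX RX
 * counter at 'index' across a PF and all of its VFs behind one CGX LMAC,
 * where 'cgxd' and 'lmac_id' are assumed to have been obtained via
 * rvu_cgx_pdata()/rvu_get_cgx_lmac_id() as in the handlers above:
 *
 *	u64 cuml = 0;
 *
 *	if (!rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, index,
 *				    NIX_STATS_RX, &cuml))
 *		dev_info(rvu->dev, "port rx stat[%d] = %llu\n", index, cuml);
 */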
1105a7faa68bSSubbaraya Sundeep 
1106a7faa68bSSubbaraya Sundeep int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
1107a7faa68bSSubbaraya Sundeep {
1108a7faa68bSSubbaraya Sundeep 	struct rvu_pfvf *parent_pf, *pfvf;
1109a7faa68bSSubbaraya Sundeep 	int cgx_users, err = 0;
1110a7faa68bSSubbaraya Sundeep 
1111a7faa68bSSubbaraya Sundeep 	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
1112a7faa68bSSubbaraya Sundeep 		return 0;
1113a7faa68bSSubbaraya Sundeep 
1114a7faa68bSSubbaraya Sundeep 	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
1115a7faa68bSSubbaraya Sundeep 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1116a7faa68bSSubbaraya Sundeep 
1117a7faa68bSSubbaraya Sundeep 	mutex_lock(&rvu->cgx_cfg_lock);
1118a7faa68bSSubbaraya Sundeep 
1119a7faa68bSSubbaraya Sundeep 	if (start && pfvf->cgx_in_use)
1120a7faa68bSSubbaraya Sundeep 		goto exit;  /* CGX is already started hence nothing to do */
1121a7faa68bSSubbaraya Sundeep 	if (!start && !pfvf->cgx_in_use)
1122a7faa68bSSubbaraya Sundeep 		goto exit; /* CGX is already stopped hence nothing to do */
1123a7faa68bSSubbaraya Sundeep 
1124a7faa68bSSubbaraya Sundeep 	if (start) {
1125a7faa68bSSubbaraya Sundeep 		cgx_users = parent_pf->cgx_users;
1126a7faa68bSSubbaraya Sundeep 		parent_pf->cgx_users++;
1127a7faa68bSSubbaraya Sundeep 	} else {
1128a7faa68bSSubbaraya Sundeep 		parent_pf->cgx_users--;
1129a7faa68bSSubbaraya Sundeep 		cgx_users = parent_pf->cgx_users;
1130a7faa68bSSubbaraya Sundeep 	}
1131a7faa68bSSubbaraya Sundeep 
1132a7faa68bSSubbaraya Sundeep 	/* Start CGX when the first NIXLF is started.
1133a7faa68bSSubbaraya Sundeep 	 * Stop CGX when the last NIXLF is stopped.
1134a7faa68bSSubbaraya Sundeep 	 */
1135a7faa68bSSubbaraya Sundeep 	if (!cgx_users) {
1136a7faa68bSSubbaraya Sundeep 		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
1137a7faa68bSSubbaraya Sundeep 					  start);
1138a7faa68bSSubbaraya Sundeep 		if (err) {
1139a7faa68bSSubbaraya Sundeep 			dev_err(rvu->dev, "Unable to %s CGX\n",
1140a7faa68bSSubbaraya Sundeep 				start ? "start" : "stop");
1141a7faa68bSSubbaraya Sundeep 			/* Revert the usage count in case of error */
1142a7faa68bSSubbaraya Sundeep 			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
1143a7faa68bSSubbaraya Sundeep 					       : parent_pf->cgx_users + 1;
1144a7faa68bSSubbaraya Sundeep 			goto exit;
1145a7faa68bSSubbaraya Sundeep 		}
1146a7faa68bSSubbaraya Sundeep 	}
1147a7faa68bSSubbaraya Sundeep 	pfvf->cgx_in_use = start;
1148a7faa68bSSubbaraya Sundeep exit:
1149a7faa68bSSubbaraya Sundeep 	mutex_unlock(&rvu->cgx_cfg_lock);
1150a7faa68bSSubbaraya Sundeep 	return err;
1151a7faa68bSSubbaraya Sundeep }
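
/* Example (illustrative sketch, not part of the driver): the cgx_users
 * reference count above means LMAC RX/TX is toggled only on the first start
 * and the last stop across a PF and its VFs. With hypothetical
 * 'pf_pcifunc'/'vf_pcifunc' values:
 *
 *	rvu_cgx_start_stop_io(rvu, pf_pcifunc, true);	(cgx_users 0 -> 1, RX/TX enabled)
 *	rvu_cgx_start_stop_io(rvu, vf_pcifunc, true);	(cgx_users 1 -> 2, no CGX change)
 *	rvu_cgx_start_stop_io(rvu, vf_pcifunc, false);	(cgx_users 2 -> 1, no CGX change)
 *	rvu_cgx_start_stop_io(rvu, pf_pcifunc, false);	(cgx_users 1 -> 0, RX/TX disabled)
 */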
115284c4f9caSChristina Jacob 
115384c4f9caSChristina Jacob int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
115484c4f9caSChristina Jacob 				       struct fec_mode *req,
115584c4f9caSChristina Jacob 				       struct fec_mode *rsp)
115684c4f9caSChristina Jacob {
115784c4f9caSChristina Jacob 	int pf = rvu_get_pf(req->hdr.pcifunc);
115884c4f9caSChristina Jacob 	u8 cgx_id, lmac_id;
115984c4f9caSChristina Jacob 
116084c4f9caSChristina Jacob 	if (!is_pf_cgxmapped(rvu, pf))
116184c4f9caSChristina Jacob 		return -EPERM;
116284c4f9caSChristina Jacob 
116384c4f9caSChristina Jacob 	if (req->fec == OTX2_FEC_OFF)
116484c4f9caSChristina Jacob 		req->fec = OTX2_FEC_NONE;
116584c4f9caSChristina Jacob 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
116684c4f9caSChristina Jacob 	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
116784c4f9caSChristina Jacob 	return 0;
116884c4f9caSChristina Jacob }
1169bd74d4eaSFelix Manlunas 
1170bd74d4eaSFelix Manlunas int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
1171bd74d4eaSFelix Manlunas 					   struct cgx_fw_data *rsp)
1172bd74d4eaSFelix Manlunas {
1173bd74d4eaSFelix Manlunas 	int pf = rvu_get_pf(req->hdr.pcifunc);
1174bd74d4eaSFelix Manlunas 	u8 cgx_id, lmac_id;
1175bd74d4eaSFelix Manlunas 
1176bd74d4eaSFelix Manlunas 	if (!rvu->fwdata)
11773e35d198SHariprasad Kelam 		return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
1178bd74d4eaSFelix Manlunas 
1179bd74d4eaSFelix Manlunas 	if (!is_pf_cgxmapped(rvu, pf))
1180bd74d4eaSFelix Manlunas 		return -EPERM;
1181bd74d4eaSFelix Manlunas 
1182bd74d4eaSFelix Manlunas 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1183bd74d4eaSFelix Manlunas 
1184b9d0fedcSHariprasad Kelam 	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
1185b9d0fedcSHariprasad Kelam 		memcpy(&rsp->fwdata,
1186b9d0fedcSHariprasad Kelam 		       &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
1187bd74d4eaSFelix Manlunas 		       sizeof(struct cgx_lmac_fwdata_s));
1188b9d0fedcSHariprasad Kelam 	else
1189b9d0fedcSHariprasad Kelam 		memcpy(&rsp->fwdata,
1190b9d0fedcSHariprasad Kelam 		       &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
1191b9d0fedcSHariprasad Kelam 		       sizeof(struct cgx_lmac_fwdata_s));
1192b9d0fedcSHariprasad Kelam 
1193bd74d4eaSFelix Manlunas 	return 0;
1194bd74d4eaSFelix Manlunas }
119556b6d539SChristina Jacob 
119656b6d539SChristina Jacob int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
119756b6d539SChristina Jacob 				       struct cgx_set_link_mode_req *req,
119856b6d539SChristina Jacob 				       struct cgx_set_link_mode_rsp *rsp)
119956b6d539SChristina Jacob {
120056b6d539SChristina Jacob 	int pf = rvu_get_pf(req->hdr.pcifunc);
120156b6d539SChristina Jacob 	u8 cgx_idx, lmac;
120256b6d539SChristina Jacob 	void *cgxd;
120356b6d539SChristina Jacob 
120456b6d539SChristina Jacob 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
120556b6d539SChristina Jacob 		return -EPERM;
120656b6d539SChristina Jacob 
120756b6d539SChristina Jacob 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
120856b6d539SChristina Jacob 	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
120956b6d539SChristina Jacob 	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
121056b6d539SChristina Jacob 	return 0;
121156b6d539SChristina Jacob }
12126f14078eSSunil Kumar Kori 
1213292822e9SRatheesh Kannoth int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
12146f14078eSSunil Kumar Kori 					struct msg_rsp *rsp)
12156f14078eSSunil Kumar Kori {
12166f14078eSSunil Kumar Kori 	int pf = rvu_get_pf(req->hdr.pcifunc);
12176f14078eSSunil Kumar Kori 	u8 cgx_id, lmac_id;
12186f14078eSSunil Kumar Kori 
12196f14078eSSunil Kumar Kori 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
12207278c359SNaveen Mamindlapalli 		return LMAC_AF_ERR_PERM_DENIED;
12216f14078eSSunil Kumar Kori 
12226f14078eSSunil Kumar Kori 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1223d6c9784bSRatheesh Kannoth 
1224d6c9784bSRatheesh Kannoth 	if (rvu_npc_exact_has_match_table(rvu))
1225d6c9784bSRatheesh Kannoth 		return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);
1226d6c9784bSRatheesh Kannoth 
12276f14078eSSunil Kumar Kori 	return cgx_lmac_addr_reset(cgx_id, lmac_id);
12286f14078eSSunil Kumar Kori }
12296f14078eSSunil Kumar Kori 
12306f14078eSSunil Kumar Kori int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
12316f14078eSSunil Kumar Kori 					 struct cgx_mac_addr_update_req *req,
1232292822e9SRatheesh Kannoth 					 struct cgx_mac_addr_update_rsp *rsp)
12336f14078eSSunil Kumar Kori {
12346f14078eSSunil Kumar Kori 	int pf = rvu_get_pf(req->hdr.pcifunc);
12356f14078eSSunil Kumar Kori 	u8 cgx_id, lmac_id;
12366f14078eSSunil Kumar Kori 
12376f14078eSSunil Kumar Kori 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
12387278c359SNaveen Mamindlapalli 		return LMAC_AF_ERR_PERM_DENIED;
12396f14078eSSunil Kumar Kori 
1240d6c9784bSRatheesh Kannoth 	if (rvu_npc_exact_has_match_table(rvu))
1241d6c9784bSRatheesh Kannoth 		return rvu_npc_exact_mac_addr_update(rvu, req, rsp);
1242d6c9784bSRatheesh Kannoth 
12436f14078eSSunil Kumar Kori 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
12446f14078eSSunil Kumar Kori 	return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
12456f14078eSSunil Kumar Kori }
12461121f6b0SSunil Kumar Kori 
1247e7400038SHariprasad Kelam int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
1248e7400038SHariprasad Kelam 			       u8 rx_pause, u16 pfc_en)
12491121f6b0SSunil Kumar Kori {
1250e7400038SHariprasad Kelam 	int pf = rvu_get_pf(pcifunc);
1251e7400038SHariprasad Kelam 	u8 rx_8023 = 0, tx_8023 = 0;
12521121f6b0SSunil Kumar Kori 	struct mac_ops *mac_ops;
12531121f6b0SSunil Kumar Kori 	u8 cgx_id, lmac_id;
12541121f6b0SSunil Kumar Kori 	void *cgxd;
12551121f6b0SSunil Kumar Kori 
12561121f6b0SSunil Kumar Kori 	/* PFC config is expected only for PF/VFs that are mapped to CGX
12571121f6b0SSunil Kumar Kori 	 * LMACs; reject the request from any other PF/VF.
12581121f6b0SSunil Kumar Kori 	 */
12591121f6b0SSunil Kumar Kori 	if (!is_pf_cgxmapped(rvu, pf))
12601121f6b0SSunil Kumar Kori 		return -ENODEV;
12611121f6b0SSunil Kumar Kori 
12621121f6b0SSunil Kumar Kori 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
12631121f6b0SSunil Kumar Kori 	cgxd = rvu_cgx_pdata(cgx_id, rvu);
12641121f6b0SSunil Kumar Kori 	mac_ops = get_mac_ops(cgxd);
12651121f6b0SSunil Kumar Kori 
1266e7400038SHariprasad Kelam 	mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
1267e7400038SHariprasad Kelam 	if (tx_8023 || rx_8023) {
1268e7400038SHariprasad Kelam 		dev_warn(rvu->dev,
1269e7400038SHariprasad Kelam 			 "Cannot configure PFC as 802.3X pause frames are enabled\n");
1270e7400038SHariprasad Kelam 		return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
1271e7400038SHariprasad Kelam 	}
1272e7400038SHariprasad Kelam 
1273e7400038SHariprasad Kelam 	mutex_lock(&rvu->rsrc_lock);
1274e7400038SHariprasad Kelam 	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
1275e7400038SHariprasad Kelam 			       pcifunc & RVU_PFVF_FUNC_MASK)) {
1276e7400038SHariprasad Kelam 		mutex_unlock(&rvu->rsrc_lock);
1277e7400038SHariprasad Kelam 		return LMAC_AF_ERR_PERM_DENIED;
1278e7400038SHariprasad Kelam 	}
1279e7400038SHariprasad Kelam 	mutex_unlock(&rvu->rsrc_lock);
1280e7400038SHariprasad Kelam 
1281e7400038SHariprasad Kelam 	return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
1282e7400038SHariprasad Kelam }
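
/* Example (illustrative sketch, not part of the driver): enabling PFC on
 * priorities 0 and 3 in both directions. The call fails with
 * LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED while plain 802.3X pause is still
 * enabled on the LMAC:
 *
 *	u16 pfc_en = BIT(0) | BIT(3);
 *	int err = rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 1, 1, pfc_en);
 */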
1283e7400038SHariprasad Kelam 
1284e7400038SHariprasad Kelam int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
1285e7400038SHariprasad Kelam 					    struct cgx_pfc_cfg *req,
1286e7400038SHariprasad Kelam 					    struct cgx_pfc_rsp *rsp)
1287e7400038SHariprasad Kelam {
1288e7400038SHariprasad Kelam 	int pf = rvu_get_pf(req->hdr.pcifunc);
1289e7400038SHariprasad Kelam 	struct mac_ops *mac_ops;
1290e7400038SHariprasad Kelam 	u8 cgx_id, lmac_id;
1291e7400038SHariprasad Kelam 	void *cgxd;
1292e7400038SHariprasad Kelam 	int err;
1293e7400038SHariprasad Kelam 
1294e7400038SHariprasad Kelam 	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
1295e7400038SHariprasad Kelam 	 * reject it if it comes from any other PF/VF.
1296e7400038SHariprasad Kelam 	 */
1297e7400038SHariprasad Kelam 	if (!is_pf_cgxmapped(rvu, pf))
1298e7400038SHariprasad Kelam 		return -ENODEV;
1299e7400038SHariprasad Kelam 
1300e7400038SHariprasad Kelam 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1301e7400038SHariprasad Kelam 	cgxd = rvu_cgx_pdata(cgx_id, rvu);
1302e7400038SHariprasad Kelam 	mac_ops = get_mac_ops(cgxd);
1303e7400038SHariprasad Kelam 
1304e7400038SHariprasad Kelam 	err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
1305e7400038SHariprasad Kelam 					 req->rx_pause, req->pfc_en);
1306e7400038SHariprasad Kelam 
1307e7400038SHariprasad Kelam 	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
1308e7400038SHariprasad Kelam 	return err;
13091121f6b0SSunil Kumar Kori }
13102e3e94c2SHariprasad Kelam 
13112e3e94c2SHariprasad Kelam void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
13122e3e94c2SHariprasad Kelam {
13132e3e94c2SHariprasad Kelam 	int pf = rvu_get_pf(pcifunc);
13142e3e94c2SHariprasad Kelam 	struct mac_ops *mac_ops;
13152e3e94c2SHariprasad Kelam 	struct cgx *cgxd;
13162e3e94c2SHariprasad Kelam 	u8 cgx, lmac;
13172e3e94c2SHariprasad Kelam 
13182e3e94c2SHariprasad Kelam 	if (!is_pf_cgxmapped(rvu, pf))
13192e3e94c2SHariprasad Kelam 		return;
13202e3e94c2SHariprasad Kelam 
13212e3e94c2SHariprasad Kelam 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
13222e3e94c2SHariprasad Kelam 	cgxd = rvu_cgx_pdata(cgx, rvu);
13232e3e94c2SHariprasad Kelam 	mac_ops = get_mac_ops(cgxd);
13242e3e94c2SHariprasad Kelam 
13252e3e94c2SHariprasad Kelam 	if (mac_ops->mac_reset(cgxd, lmac, !is_vf(pcifunc)))
13262e3e94c2SHariprasad Kelam 		dev_err(rvu->dev, "Failed to reset MAC\n");
13272e3e94c2SHariprasad Kelam }
1328