xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c (revision 0526b56cbc3c489642bd6a5fe4b718dea7ef0ee8)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell.
5  *
6  */
7 
8 #include <linux/types.h>
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 
12 #include "rvu.h"
13 #include "cgx.h"
14 #include "lmac_common.h"
15 #include "rvu_reg.h"
16 #include "rvu_trace.h"
17 #include "rvu_npc_hash.h"
18 
19 struct cgx_evq_entry {
20 	struct list_head evq_node;
21 	struct cgx_link_event link_event;
22 };
23 
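/* Per-message allocation helpers for AF -> PF "up" mailbox messages.
 * For every entry in MBOX_UP_CGX_MESSAGES this expands to an
 * otx2_mbox_alloc_msg_<name>() function that allocates the request on
 * the up mailbox of the given PF and fills in the message header.
 */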
24 #define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
25 static struct _req_type __maybe_unused					\
26 *otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
27 {									\
28 	struct _req_type *req;						\
29 									\
30 	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
31 		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
32 		sizeof(struct _rsp_type));				\
33 	if (!req)							\
34 		return NULL;						\
35 	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
36 	req->hdr.id = _id;						\
37 	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
38 	return req;							\
39 }
40 
41 MBOX_UP_CGX_MESSAGES
42 #undef M
43 
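/* Returns true if the given RVU_LMAC_FEAT_* feature is supported by the
 * CGX/RPM LMAC mapped to this PF, false otherwise (including when the
 * PF has no CGX mapping).
 */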
44 bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
45 {
46 	u8 cgx_id, lmac_id;
47 	void *cgxd;
48 
49 	if (!is_pf_cgxmapped(rvu, pf))
50 		return 0;
51 
52 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
53 	cgxd = rvu_cgx_pdata(cgx_id, rvu);
54 
55 	return  (cgx_features_get(cgxd) & feature);
56 }
57 
58 #define CGX_OFFSET(x)			((x) * rvu->hw->lmac_per_cgx)
59 /* Returns bitmap of mapped PFs */
60 static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
61 {
62 	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
63 }
64 
65 int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
66 {
67 	unsigned long pfmap;
68 
69 	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);
70 
71 	/* Assumes only one PF is mapped to a CGX LMAC port */
72 	if (!pfmap)
73 		return -ENODEV;
74 	else
75 		return find_first_bit(&pfmap,
76 				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
77 }
78 
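/* Pack a CGX id and LMAC id into the one-byte pf2cgxlmac_map format:
 * CGX id in the upper nibble, LMAC id in the lower nibble.
 */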
79 static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
80 {
81 	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
82 }
83 
84 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
85 {
86 	if (cgx_id >= rvu->cgx_cnt_max)
87 		return NULL;
88 
89 	return rvu->cgx_idmap[cgx_id];
90 }
91 
92 /* Return the first enabled CGX instance; if none are enabled, return NULL */
93 void *rvu_first_cgx_pdata(struct rvu *rvu)
94 {
95 	int first_enabled_cgx = 0;
96 	void *cgxd = NULL;
97 
98 	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
99 		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
100 		if (cgxd)
101 			break;
102 	}
103 
104 	return cgxd;
105 }
106 
107 /* Based on P2X connectivity find mapped NIX block for a PF */
108 static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
109 				  int cgx_id, int lmac_id)
110 {
111 	struct rvu_pfvf *pfvf = &rvu->pf[pf];
112 	u8 p2x;
113 
114 	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
115 	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
116 	pfvf->nix_blkaddr = BLKADDR_NIX0;
117 	if (p2x == CMR_P2X_SEL_NIX1)
118 		pfvf->nix_blkaddr = BLKADDR_NIX1;
119 }
120 
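/* Walk all enabled CGX/RPM LMACs and map each one to an RVU PF:
 * build the PF <-> CGX LMAC forward and reverse maps, reserve an NPC
 * pkind per mapped PF and record which NIX block the LMAC feeds.
 */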
121 static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
122 {
123 	struct npc_pkind *pkind = &rvu->hw->pkind;
124 	int cgx_cnt_max = rvu->cgx_cnt_max;
125 	int pf = PF_CGXMAP_BASE;
126 	unsigned long lmac_bmap;
127 	int size, free_pkind;
128 	int cgx, lmac, iter;
129 	int numvfs, hwvfs;
130 
131 	if (!cgx_cnt_max)
132 		return 0;
133 
134 	if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
135 		return -EINVAL;
136 
137 	/* Alloc map table
138 	 * An additional entry is required since PF id starts from 1 and
139 	 * hence entry at offset 0 is invalid.
140 	 */
141 	size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
142 	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
143 	if (!rvu->pf2cgxlmac_map)
144 		return -ENOMEM;
145 
146 	/* Initialize all entries with an invalid cgx and lmac id */
147 	memset(rvu->pf2cgxlmac_map, 0xFF, size);
148 
149 	/* Reverse map table */
150 	rvu->cgxlmac2pf_map =
151 		devm_kzalloc(rvu->dev,
152 			     cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
153 			     GFP_KERNEL);
154 	if (!rvu->cgxlmac2pf_map)
155 		return -ENOMEM;
156 
157 	rvu->cgx_mapped_pfs = 0;
158 	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
159 		if (!rvu_cgx_pdata(cgx, rvu))
160 			continue;
161 		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
162 		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
163 			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
164 					      iter);
165 			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
166 			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
167 			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
168 			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
169 			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
170 			rvu->cgx_mapped_pfs++;
171 			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
172 			rvu->cgx_mapped_vfs += numvfs;
173 			pf++;
174 		}
175 	}
176 	return 0;
177 }
178 
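/* Read the current link state of the given CGX LMAC, queue it on the
 * CGX event queue and kick the event worker so that the mapped PF
 * gets notified of it.
 */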
179 static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
180 {
181 	struct cgx_evq_entry *qentry;
182 	unsigned long flags;
183 	int err;
184 
185 	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
186 	if (!qentry)
187 		return -ENOMEM;
188 
189 	/* Lock the event queue before we read the local link status */
190 	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
191 	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
192 				&qentry->link_event.link_uinfo);
193 	qentry->link_event.cgx_id = cgx_id;
194 	qentry->link_event.lmac_id = lmac_id;
195 	if (err) {
196 		kfree(qentry);
197 		goto skip_add;
198 	}
199 	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
200 skip_add:
201 	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
202 
203 	/* start worker to process the events */
204 	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
205 
206 	return 0;
207 }
208 
209 /* This is called from interrupt context and is expected to be atomic */
210 static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
211 {
212 	struct cgx_evq_entry *qentry;
213 	struct rvu *rvu = data;
214 
215 	/* post event to the event queue */
216 	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
217 	if (!qentry)
218 		return -ENOMEM;
219 	qentry->link_event = *event;
220 	spin_lock(&rvu->cgx_evq_lock);
221 	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
222 	spin_unlock(&rvu->cgx_evq_lock);
223 
224 	/* start worker to process the events */
225 	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
226 
227 	return 0;
228 }
229 
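/* Notify every PF mapped to this CGX LMAC about a link state change.
 * PFs that have not enabled link notifications only get the change
 * logged; the rest are informed over the AF -> PF up mailbox.
 */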
230 static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
231 {
232 	struct cgx_link_user_info *linfo;
233 	struct cgx_link_info_msg *msg;
234 	unsigned long pfmap;
235 	int err, pfid;
236 
237 	linfo = &event->link_uinfo;
238 	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
239 
240 	do {
241 		pfid = find_first_bit(&pfmap,
242 				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
243 		clear_bit(pfid, &pfmap);
244 
245 		/* check if notification is enabled */
246 		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
247 			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
248 				 event->cgx_id, event->lmac_id,
249 				 linfo->link_up ? "UP" : "DOWN");
250 			continue;
251 		}
252 
253 		/* Send mbox message to PF */
254 		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
255 		if (!msg)
256 			continue;
257 		msg->link_info = *linfo;
258 		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
259 		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
260 		if (err)
261 			dev_warn(rvu->dev, "notification to pf %d failed\n",
262 				 pfid);
263 	} while (pfmap);
264 }
265 
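/* Worker that drains the CGX event queue and forwards each queued
 * link event to the mapped PFs.
 */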
266 static void cgx_evhandler_task(struct work_struct *work)
267 {
268 	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
269 	struct cgx_evq_entry *qentry;
270 	struct cgx_link_event *event;
271 	unsigned long flags;
272 
273 	do {
274 		/* Dequeue an event */
275 		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
276 		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
277 						  struct cgx_evq_entry,
278 						  evq_node);
279 		if (qentry)
280 			list_del(&qentry->evq_node);
281 		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
282 		if (!qentry)
283 			break; /* nothing more to process */
284 
285 		event = &qentry->link_event;
286 
287 		/* process event */
288 		cgx_notify_pfs(event, rvu);
289 		kfree(qentry);
290 	} while (1);
291 }
292 
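/* Create the CGX event workqueue and register a link change callback
 * for every enabled LMAC of every CGX/RPM block.
 */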
293 static int cgx_lmac_event_handler_init(struct rvu *rvu)
294 {
295 	unsigned long lmac_bmap;
296 	struct cgx_event_cb cb;
297 	int cgx, lmac, err;
298 	void *cgxd;
299 
300 	spin_lock_init(&rvu->cgx_evq_lock);
301 	INIT_LIST_HEAD(&rvu->cgx_evq_head);
302 	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
303 	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
304 	if (!rvu->cgx_evh_wq) {
305 		dev_err(rvu->dev, "alloc workqueue failed\n");
306 		return -ENOMEM;
307 	}
308 
309 	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
310 	cb.data = rvu;
311 
312 	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
313 		cgxd = rvu_cgx_pdata(cgx, rvu);
314 		if (!cgxd)
315 			continue;
316 		lmac_bmap = cgx_get_lmac_bmap(cgxd);
317 		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
318 			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
319 			if (err)
320 				dev_err(rvu->dev,
321 					"%d:%d handler register failed\n",
322 					cgx, lmac);
323 		}
324 	}
325 
326 	return 0;
327 }
328 
329 static void rvu_cgx_wq_destroy(struct rvu *rvu)
330 {
331 	if (rvu->cgx_evh_wq) {
332 		destroy_workqueue(rvu->cgx_evh_wq);
333 		rvu->cgx_evh_wq = NULL;
334 	}
335 }
336 
337 int rvu_cgx_init(struct rvu *rvu)
338 {
339 	int cgx, err;
340 	void *cgxd;
341 
342 	/* CGX port IDs start from 0 and are not necessarily contiguous.
343 	 * Hence we allocate resources based on the maximum port id value.
344 	 */
345 	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
346 	if (!rvu->cgx_cnt_max) {
347 		dev_info(rvu->dev, "No CGX devices found!\n");
348 		return -ENODEV;
349 	}
350 
351 	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
352 				      sizeof(void *), GFP_KERNEL);
353 	if (!rvu->cgx_idmap)
354 		return -ENOMEM;
355 
356 	/* Initialize the cgxdata table */
357 	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
358 		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
359 
360 	/* Map CGX LMAC interfaces to RVU PFs */
361 	err = rvu_map_cgx_lmac_pf(rvu);
362 	if (err)
363 		return err;
364 
365 	/* Register for CGX events */
366 	err = cgx_lmac_event_handler_init(rvu);
367 	if (err)
368 		return err;
369 
370 	mutex_init(&rvu->cgx_cfg_lock);
371 
372 	/* Ensure event handler registration is completed, before
373 	 * we turn on the links
374 	 */
375 	mb();
376 
377 	/* Do link up for all CGX ports */
378 	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
379 		cgxd = rvu_cgx_pdata(cgx, rvu);
380 		if (!cgxd)
381 			continue;
382 		err = cgx_lmac_linkup_start(cgxd);
383 		if (err)
384 			dev_err(rvu->dev,
385 				"Link up process failed to start on cgx %d\n",
386 				cgx);
387 	}
388 
389 	return 0;
390 }
391 
392 int rvu_cgx_exit(struct rvu *rvu)
393 {
394 	unsigned long lmac_bmap;
395 	int cgx, lmac;
396 	void *cgxd;
397 
398 	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
399 		cgxd = rvu_cgx_pdata(cgx, rvu);
400 		if (!cgxd)
401 			continue;
402 		lmac_bmap = cgx_get_lmac_bmap(cgxd);
403 		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
404 			cgx_lmac_evh_unregister(cgxd, lmac);
405 	}
406 
407 	/* Ensure event handler unregister is completed */
408 	mb();
409 
410 	rvu_cgx_wq_destroy(rvu);
411 	return 0;
412 }
413 
414 /* Most of the CGX configuration is restricted to the mapped PF only;
415  * VFs of the mapped PF and other PFs are not allowed. This fn() checks
416  * whether a PFFUNC is permitted to do the config or not.
417  */
418 inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
419 {
420 	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
421 	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
422 		return false;
423 	return true;
424 }
425 
426 void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
427 {
428 	struct mac_ops *mac_ops;
429 	u8 cgx_id, lmac_id;
430 	void *cgxd;
431 
432 	if (!is_pf_cgxmapped(rvu, pf))
433 		return;
434 
435 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
436 	cgxd = rvu_cgx_pdata(cgx_id, rvu);
437 
438 	mac_ops = get_mac_ops(cgxd);
439 	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
440 	if (enable)
441 		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
442 	else
443 		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
444 }
445 
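/* Enable or disable packet reception and transmission on the CGX/RPM
 * LMAC mapped to this PF. Only the mapped PF itself may do this.
 */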
446 int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
447 {
448 	int pf = rvu_get_pf(pcifunc);
449 	struct mac_ops *mac_ops;
450 	u8 cgx_id, lmac_id;
451 	void *cgxd;
452 
453 	if (!is_cgx_config_permitted(rvu, pcifunc))
454 		return LMAC_AF_ERR_PERM_DENIED;
455 
456 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
457 	cgxd = rvu_cgx_pdata(cgx_id, rvu);
458 	mac_ops = get_mac_ops(cgxd);
459 
460 	return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
461 }
462 
463 int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
464 {
465 	struct mac_ops *mac_ops;
466 
467 	mac_ops = get_mac_ops(cgxd);
468 	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
469 }
470 
471 void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
472 {
473 	int pf = rvu_get_pf(pcifunc);
474 	int i = 0, lmac_count = 0;
475 	struct mac_ops *mac_ops;
476 	u8 max_dmac_filters;
477 	u8 cgx_id, lmac_id;
478 	void *cgx_dev;
479 
480 	if (!is_cgx_config_permitted(rvu, pcifunc))
481 		return;
482 
483 	if (rvu_npc_exact_has_match_table(rvu)) {
484 		rvu_npc_exact_reset(rvu, pcifunc);
485 		return;
486 	}
487 
488 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
489 	cgx_dev = cgx_get_pdata(cgx_id);
490 	lmac_count = cgx_get_lmac_cnt(cgx_dev);
491 
492 	mac_ops = get_mac_ops(cgx_dev);
493 	if (!mac_ops)
494 		return;
495 
496 	max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;
497 
498 	for (i = 0; i < max_dmac_filters; i++)
499 		cgx_lmac_addr_del(cgx_id, lmac_id, i);
500 
501 	/* cgx_lmac_addr_del() does not clear the entry at index 0,
502 	 * so it needs to be done explicitly.
503 	 */
504 	cgx_lmac_addr_reset(cgx_id, lmac_id);
505 }
506 
507 int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
508 				    struct msg_rsp *rsp)
509 {
510 	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
511 	return 0;
512 }
513 
514 int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
515 				   struct msg_rsp *rsp)
516 {
517 	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
518 	return 0;
519 }
520 
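/* Common handler for the CGX_STATS and RPM_STATS mailbox messages:
 * read all RX and TX MAC counters of the LMAC mapped to the requesting
 * PF into the (CGX or RPM specific) response structure.
 */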
521 static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
522 			      void *rsp)
523 {
524 	int pf = rvu_get_pf(req->hdr.pcifunc);
525 	struct mac_ops *mac_ops;
526 	int stat = 0, err = 0;
527 	u64 tx_stat, rx_stat;
528 	u8 cgx_idx, lmac;
529 	void *cgxd;
530 
531 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
532 		return LMAC_AF_ERR_PERM_DENIED;
533 
534 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
535 	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
536 	mac_ops = get_mac_ops(cgxd);
537 
538 	/* Rx stats */
539 	while (stat < mac_ops->rx_stats_cnt) {
540 		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
541 		if (err)
542 			return err;
543 		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
544 			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
545 		else
546 			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
547 		stat++;
548 	}
549 
550 	/* Tx stats */
551 	stat = 0;
552 	while (stat < mac_ops->tx_stats_cnt) {
553 		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
554 		if (err)
555 			return err;
556 		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
557 			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
558 		else
559 			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
560 		stat++;
561 	}
562 	return 0;
563 }
564 
565 int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
566 			       struct cgx_stats_rsp *rsp)
567 {
568 	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
569 }
570 
571 int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
572 			       struct rpm_stats_rsp *rsp)
573 {
574 	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
575 }
576 
577 int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
578 				   struct msg_req *req,
579 				   struct cgx_fec_stats_rsp *rsp)
580 {
581 	int pf = rvu_get_pf(req->hdr.pcifunc);
582 	struct mac_ops *mac_ops;
583 	u8 cgx_idx, lmac;
584 	void *cgxd;
585 
586 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
587 		return LMAC_AF_ERR_PERM_DENIED;
588 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
589 
590 	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
591 	mac_ops = get_mac_ops(cgxd);
592 	return  mac_ops->get_fec_stats(cgxd, lmac, rsp);
593 }
594 
595 int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
596 				      struct cgx_mac_addr_set_or_get *req,
597 				      struct cgx_mac_addr_set_or_get *rsp)
598 {
599 	int pf = rvu_get_pf(req->hdr.pcifunc);
600 	u8 cgx_id, lmac_id;
601 
602 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
603 		return -EPERM;
604 
605 	if (rvu_npc_exact_has_match_table(rvu))
606 		return rvu_npc_exact_mac_addr_set(rvu, req, rsp);
607 
608 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
609 
610 	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
611 
612 	return 0;
613 }
614 
615 int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
616 				      struct cgx_mac_addr_add_req *req,
617 				      struct cgx_mac_addr_add_rsp *rsp)
618 {
619 	int pf = rvu_get_pf(req->hdr.pcifunc);
620 	u8 cgx_id, lmac_id;
621 	int rc = 0;
622 
623 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
624 		return -EPERM;
625 
626 	if (rvu_npc_exact_has_match_table(rvu))
627 		return rvu_npc_exact_mac_addr_add(rvu, req, rsp);
628 
629 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
630 	rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
631 	if (rc >= 0) {
632 		rsp->index = rc;
633 		return 0;
634 	}
635 
636 	return rc;
637 }
638 
639 int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
640 				      struct cgx_mac_addr_del_req *req,
641 				      struct msg_rsp *rsp)
642 {
643 	int pf = rvu_get_pf(req->hdr.pcifunc);
644 	u8 cgx_id, lmac_id;
645 
646 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
647 		return -EPERM;
648 
649 	if (rvu_npc_exact_has_match_table(rvu))
650 		return rvu_npc_exact_mac_addr_del(rvu, req, rsp);
651 
652 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
653 	return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
654 }
655 
656 int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
657 					     struct msg_req *req,
658 					     struct cgx_max_dmac_entries_get_rsp
659 					     *rsp)
660 {
661 	int pf = rvu_get_pf(req->hdr.pcifunc);
662 	u8 cgx_id, lmac_id;
663 
664 	/* If the msg is received from a PF that is not mapped to a CGX LMAC,
665 	 * or from a VF, then no DMAC filter entries are allocated at the CGX
666 	 * level. So return zero.
667 	 */
668 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
669 		rsp->max_dmac_filters = 0;
670 		return 0;
671 	}
672 
673 	if (rvu_npc_exact_has_match_table(rvu)) {
674 		rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
675 		return 0;
676 	}
677 
678 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
679 	rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
680 	return 0;
681 }
682 
683 int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
684 				      struct cgx_mac_addr_set_or_get *req,
685 				      struct cgx_mac_addr_set_or_get *rsp)
686 {
687 	int pf = rvu_get_pf(req->hdr.pcifunc);
688 	u8 cgx_id, lmac_id;
689 	int rc = 0, i;
690 	u64 cfg;
691 
692 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
693 		return -EPERM;
694 
695 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
696 
697 	rsp->hdr.rc = rc;
698 	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
699 	/* copy 48 bit mac address to rsp->mac_addr */
700 	for (i = 0; i < ETH_ALEN; i++)
701 		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
702 	return 0;
703 }
704 
705 int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
706 					struct msg_rsp *rsp)
707 {
708 	u16 pcifunc = req->hdr.pcifunc;
709 	int pf = rvu_get_pf(pcifunc);
710 	u8 cgx_id, lmac_id;
711 
712 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
713 		return -EPERM;
714 
715 	/* Disable drop on non hit rule */
716 	if (rvu_npc_exact_has_match_table(rvu))
717 		return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);
718 
719 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
720 
721 	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
722 	return 0;
723 }
724 
725 int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
726 					 struct msg_rsp *rsp)
727 {
728 	int pf = rvu_get_pf(req->hdr.pcifunc);
729 	u8 cgx_id, lmac_id;
730 
731 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
732 		return -EPERM;
733 
734 	/* Enable drop on non hit rule */
735 	if (rvu_npc_exact_has_match_table(rvu))
736 		return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);
737 
738 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
739 
740 	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
741 	return 0;
742 }
743 
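/* Enable or disable PTP hardware timestamping on the LMAC mapped to
 * this PF and tell NPC/MCS about the 8 byte timestamp header that is
 * prepended to received packets while it is enabled.
 */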
744 static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
745 {
746 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
747 	int pf = rvu_get_pf(pcifunc);
748 	struct mac_ops *mac_ops;
749 	u8 cgx_id, lmac_id;
750 	void *cgxd;
751 
752 	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
753 		return 0;
754 
755 	/* This msg is expected only from PFs that are mapped to CGX LMACs,
756 	 * if received from other PF/VF simply ACK, nothing to do.
757 	 */
758 	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
759 	    !is_pf_cgxmapped(rvu, pf))
760 		return -ENODEV;
761 
762 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
763 	cgxd = rvu_cgx_pdata(cgx_id, rvu);
764 
765 	mac_ops = get_mac_ops(cgxd);
766 	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
767 	/* If PTP is enabled then inform NPC that packets to be
768 	 * parsed by this PF will have their data shifted by 8 bytes
769 	 * and if PTP is disabled then no shift is required
770 	 */
771 	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
772 		return -EINVAL;
773 	/* This flag is required to clean up CGX conf if app gets killed */
774 	pfvf->hw_rx_tstamp_en = enable;
775 
776 	/* Inform MCS about 8B RX header */
777 	rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
778 	return 0;
779 }
780 
781 int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
782 				       struct msg_rsp *rsp)
783 {
784 	if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
785 		return -EPERM;
786 
787 	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
788 }
789 
790 int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
791 					struct msg_rsp *rsp)
792 {
793 	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
794 }
795 
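/* Enable or disable link change notifications for this PF. On enable
 * the current link status is sent to the PF right away.
 */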
796 static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
797 {
798 	int pf = rvu_get_pf(pcifunc);
799 	u8 cgx_id, lmac_id;
800 
801 	if (!is_cgx_config_permitted(rvu, pcifunc))
802 		return -EPERM;
803 
804 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
805 
806 	if (en) {
807 		set_bit(pf, &rvu->pf_notify_bmap);
808 		/* Send the current link status to PF */
809 		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
810 	} else {
811 		clear_bit(pf, &rvu->pf_notify_bmap);
812 	}
813 
814 	return 0;
815 }
816 
817 int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
818 					  struct msg_rsp *rsp)
819 {
820 	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
821 	return 0;
822 }
823 
824 int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
825 					 struct msg_rsp *rsp)
826 {
827 	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
828 	return 0;
829 }
830 
831 int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
832 				      struct cgx_link_info_msg *rsp)
833 {
834 	u8 cgx_id, lmac_id;
835 	int pf, err;
836 
837 	pf = rvu_get_pf(req->hdr.pcifunc);
838 
839 	if (!is_pf_cgxmapped(rvu, pf))
840 		return -ENODEV;
841 
842 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
843 
844 	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
845 				&rsp->link_info);
846 	return err;
847 }
848 
849 int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
850 				      struct msg_req *req,
851 				      struct cgx_features_info_msg *rsp)
852 {
853 	int pf = rvu_get_pf(req->hdr.pcifunc);
854 	u8 cgx_idx, lmac;
855 	void *cgxd;
856 
857 	if (!is_pf_cgxmapped(rvu, pf))
858 		return 0;
859 
860 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
861 	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
862 	rsp->lmac_features = cgx_features_get(cgxd);
863 
864 	return 0;
865 }
866 
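/* Return the MAC FIFO length of the first enabled CGX/RPM block,
 * or 0 if none is enabled.
 */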
867 u32 rvu_cgx_get_fifolen(struct rvu *rvu)
868 {
869 	struct mac_ops *mac_ops;
870 	u32 fifo_len;
871 
872 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
873 	fifo_len = mac_ops ? mac_ops->fifo_len : 0;
874 
875 	return fifo_len;
876 }
877 
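/* Return the FIFO length of a specific LMAC, or 0 if the CGX is not
 * enabled or the MAC driver does not provide a per-LMAC FIFO length op.
 */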
878 u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
879 {
880 	struct mac_ops *mac_ops;
881 	void *cgxd;
882 
883 	cgxd = rvu_cgx_pdata(cgx, rvu);
884 	if (!cgxd)
885 		return 0;
886 
887 	mac_ops = get_mac_ops(cgxd);
888 	if (!mac_ops->lmac_fifo_len)
889 		return 0;
890 
891 	return mac_ops->lmac_fifo_len(cgxd, lmac);
892 }
893 
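/* Enable or disable internal loopback on the LMAC mapped to this PF. */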
894 static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
895 {
896 	int pf = rvu_get_pf(pcifunc);
897 	struct mac_ops *mac_ops;
898 	u8 cgx_id, lmac_id;
899 
900 	if (!is_cgx_config_permitted(rvu, pcifunc))
901 		return -EPERM;
902 
903 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
904 	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
905 
906 	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
907 					  lmac_id, en);
908 }
909 
910 int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
911 				       struct msg_rsp *rsp)
912 {
913 	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
914 	return 0;
915 }
916 
917 int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
918 					struct msg_rsp *rsp)
919 {
920 	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
921 	return 0;
922 }
923 
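/* Configure 802.3x pause frame RX/TX on the LMAC mapped to this PF.
 * Refused while PFC frames are enabled on the LMAC; the request is
 * also validated with verify_lmac_fc_cfg() before being applied.
 */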
924 int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
925 {
926 	int pf = rvu_get_pf(pcifunc);
927 	u8 rx_pfc = 0, tx_pfc = 0;
928 	struct mac_ops *mac_ops;
929 	u8 cgx_id, lmac_id;
930 	void *cgxd;
931 
932 	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
933 		return 0;
934 
935 	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
936 	 * if received from other PF/VF simply ACK, nothing to do.
937 	 */
938 	if (!is_pf_cgxmapped(rvu, pf))
939 		return LMAC_AF_ERR_PF_NOT_MAPPED;
940 
941 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
942 	cgxd = rvu_cgx_pdata(cgx_id, rvu);
943 	mac_ops = get_mac_ops(cgxd);
944 
945 	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
946 	if (tx_pfc || rx_pfc) {
947 		dev_warn(rvu->dev,
948 			 "Can not configure 802.3X flow control as PFC frames are enabled");
949 		return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
950 	}
951 
952 	mutex_lock(&rvu->rsrc_lock);
953 	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
954 			       pcifunc & RVU_PFVF_FUNC_MASK)) {
955 		mutex_unlock(&rvu->rsrc_lock);
956 		return LMAC_AF_ERR_PERM_DENIED;
957 	}
958 	mutex_unlock(&rvu->rsrc_lock);
959 
960 	return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
961 }
962 
963 int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
964 				       struct cgx_pause_frm_cfg *req,
965 				       struct cgx_pause_frm_cfg *rsp)
966 {
967 	int pf = rvu_get_pf(req->hdr.pcifunc);
968 	struct mac_ops *mac_ops;
969 	u8 cgx_id, lmac_id;
970 	int err = 0;
971 	void *cgxd;
972 
973 	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
974 	 * if received from other PF/VF simply ACK, nothing to do.
975 	 */
976 	if (!is_pf_cgxmapped(rvu, pf))
977 		return -ENODEV;
978 
979 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
980 	cgxd = rvu_cgx_pdata(cgx_id, rvu);
981 	mac_ops = get_mac_ops(cgxd);
982 
983 	if (req->set)
984 		err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
985 	else
986 		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
987 
988 	return err;
989 }
990 
991 int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
992 					   struct msg_rsp *rsp)
993 {
994 	int pf = rvu_get_pf(req->hdr.pcifunc);
995 	u8 cgx_id, lmac_id;
996 
997 	if (!is_pf_cgxmapped(rvu, pf))
998 		return LMAC_AF_ERR_PF_NOT_MAPPED;
999 
1000 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1001 	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
1002 }
1003 
1004 /* Finds the cumulative sum of NIX rx/tx counters from the LFs of a PF and
1005  * those of its VFs as well, i.e. the NIX rx/tx counters at the CGX port level
1006  */
1007 int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
1008 			   int index, int rxtxflag, u64 *stat)
1009 {
1010 	struct rvu_block *block;
1011 	int blkaddr;
1012 	u16 pcifunc;
1013 	int pf, lf;
1014 
1015 	*stat = 0;
1016 
1017 	if (!cgxd || !rvu)
1018 		return -EINVAL;
1019 
1020 	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
1021 	if (pf < 0)
1022 		return pf;
1023 
1024 	/* Assumes that the LFs of a PF and all of its VFs belong to the
1025 	 * same NIX block.
1026 	 */
1027 	pcifunc = pf << RVU_PFVF_PF_SHIFT;
1028 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1029 	if (blkaddr < 0)
1030 		return 0;
1031 	block = &rvu->hw->block[blkaddr];
1032 
1033 	for (lf = 0; lf < block->lf.max; lf++) {
1034 		/* Check if a lf is attached to this PF or one of its VFs */
1035 		if ((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) !=
1036 		    (pcifunc & ~RVU_PFVF_FUNC_MASK))
1037 			continue;
1038 		if (rxtxflag == NIX_STATS_RX)
1039 			*stat += rvu_read64(rvu, blkaddr,
1040 					    NIX_AF_LFX_RX_STATX(lf, index));
1041 		else
1042 			*stat += rvu_read64(rvu, blkaddr,
1043 					    NIX_AF_LFX_TX_STATX(lf, index));
1044 	}
1045 
1046 	return 0;
1047 }
1048 
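/* Reference-counted start/stop of CGX RX/TX for a PF and its VFs: the
 * LMAC is actually started when the first NIX LF of the PF/VF group
 * starts and stopped again when the last one stops.
 */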
1049 int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
1050 {
1051 	struct rvu_pfvf *parent_pf, *pfvf;
1052 	int cgx_users, err = 0;
1053 
1054 	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
1055 		return 0;
1056 
1057 	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
1058 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1059 
1060 	mutex_lock(&rvu->cgx_cfg_lock);
1061 
1062 	if (start && pfvf->cgx_in_use)
1063 		goto exit;  /* CGX is already started hence nothing to do */
1064 	if (!start && !pfvf->cgx_in_use)
1065 		goto exit; /* CGX is already stopped hence nothing to do */
1066 
1067 	if (start) {
1068 		cgx_users = parent_pf->cgx_users;
1069 		parent_pf->cgx_users++;
1070 	} else {
1071 		parent_pf->cgx_users--;
1072 		cgx_users = parent_pf->cgx_users;
1073 	}
1074 
1075 	/* Start CGX when first of all NIXLFs is started.
1076 	 * Stop CGX when last of all NIXLFs is stopped.
1077 	 */
1078 	if (!cgx_users) {
1079 		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
1080 					  start);
1081 		if (err) {
1082 			dev_err(rvu->dev, "Unable to %s CGX\n",
1083 				start ? "start" : "stop");
1084 			/* Revert the usage count in case of error */
1085 			parent_pf->cgx_users = start ? parent_pf->cgx_users  - 1
1086 					       : parent_pf->cgx_users  + 1;
1087 			goto exit;
1088 		}
1089 	}
1090 	pfvf->cgx_in_use = start;
1091 exit:
1092 	mutex_unlock(&rvu->cgx_cfg_lock);
1093 	return err;
1094 }
1095 
1096 int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
1097 				       struct fec_mode *req,
1098 				       struct fec_mode *rsp)
1099 {
1100 	int pf = rvu_get_pf(req->hdr.pcifunc);
1101 	u8 cgx_id, lmac_id;
1102 
1103 	if (!is_pf_cgxmapped(rvu, pf))
1104 		return -EPERM;
1105 
1106 	if (req->fec == OTX2_FEC_OFF)
1107 		req->fec = OTX2_FEC_NONE;
1108 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1109 	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
1110 	return 0;
1111 }
1112 
1113 int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
1114 					   struct cgx_fw_data *rsp)
1115 {
1116 	int pf = rvu_get_pf(req->hdr.pcifunc);
1117 	u8 cgx_id, lmac_id;
1118 
1119 	if (!rvu->fwdata)
1120 		return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
1121 
1122 	if (!is_pf_cgxmapped(rvu, pf))
1123 		return -EPERM;
1124 
1125 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1126 
1127 	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
1128 		memcpy(&rsp->fwdata,
1129 		       &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
1130 		       sizeof(struct cgx_lmac_fwdata_s));
1131 	else
1132 		memcpy(&rsp->fwdata,
1133 		       &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
1134 		       sizeof(struct cgx_lmac_fwdata_s));
1135 
1136 	return 0;
1137 }
1138 
1139 int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
1140 				       struct cgx_set_link_mode_req *req,
1141 				       struct cgx_set_link_mode_rsp *rsp)
1142 {
1143 	int pf = rvu_get_pf(req->hdr.pcifunc);
1144 	u8 cgx_idx, lmac;
1145 	void *cgxd;
1146 
1147 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
1148 		return -EPERM;
1149 
1150 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
1151 	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
1152 	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
1153 	return 0;
1154 }
1155 
1156 int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
1157 					struct msg_rsp *rsp)
1158 {
1159 	int pf = rvu_get_pf(req->hdr.pcifunc);
1160 	u8 cgx_id, lmac_id;
1161 
1162 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
1163 		return LMAC_AF_ERR_PERM_DENIED;
1164 
1165 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1166 
1167 	if (rvu_npc_exact_has_match_table(rvu))
1168 		return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);
1169 
1170 	return cgx_lmac_addr_reset(cgx_id, lmac_id);
1171 }
1172 
1173 int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
1174 					 struct cgx_mac_addr_update_req *req,
1175 					 struct cgx_mac_addr_update_rsp *rsp)
1176 {
1177 	int pf = rvu_get_pf(req->hdr.pcifunc);
1178 	u8 cgx_id, lmac_id;
1179 
1180 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
1181 		return LMAC_AF_ERR_PERM_DENIED;
1182 
1183 	if (rvu_npc_exact_has_match_table(rvu))
1184 		return rvu_npc_exact_mac_addr_update(rvu, req, rsp);
1185 
1186 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1187 	return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
1188 }
1189 
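/* Configure priority flow control (PFC) on the LMAC mapped to this PF.
 * Refused while 802.3x pause frames are enabled on the LMAC; the
 * request is also validated with verify_lmac_fc_cfg() before being
 * applied.
 */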
1190 int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
1191 			       u8 rx_pause, u16 pfc_en)
1192 {
1193 	int pf = rvu_get_pf(pcifunc);
1194 	u8 rx_8023 = 0, tx_8023 = 0;
1195 	struct mac_ops *mac_ops;
1196 	u8 cgx_id, lmac_id;
1197 	void *cgxd;
1198 
1199 	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
1200 	 * if received from other PF/VF simply ACK, nothing to do.
1201 	 */
1202 	if (!is_pf_cgxmapped(rvu, pf))
1203 		return -ENODEV;
1204 
1205 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1206 	cgxd = rvu_cgx_pdata(cgx_id, rvu);
1207 	mac_ops = get_mac_ops(cgxd);
1208 
1209 	mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
1210 	if (tx_8023 || rx_8023) {
1211 		dev_warn(rvu->dev,
1212 			 "Can not configure PFC as 802.3X pause frames are enabled");
1213 		return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
1214 	}
1215 
1216 	mutex_lock(&rvu->rsrc_lock);
1217 	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
1218 			       pcifunc & RVU_PFVF_FUNC_MASK)) {
1219 		mutex_unlock(&rvu->rsrc_lock);
1220 		return LMAC_AF_ERR_PERM_DENIED;
1221 	}
1222 	mutex_unlock(&rvu->rsrc_lock);
1223 
1224 	return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
1225 }
1226 
1227 int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
1228 					    struct cgx_pfc_cfg *req,
1229 					    struct cgx_pfc_rsp *rsp)
1230 {
1231 	int pf = rvu_get_pf(req->hdr.pcifunc);
1232 	struct mac_ops *mac_ops;
1233 	u8 cgx_id, lmac_id;
1234 	void *cgxd;
1235 	int err;
1236 
1237 	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
1238 	 * if received from other PF/VF simply ACK, nothing to do.
1239 	 */
1240 	if (!is_pf_cgxmapped(rvu, pf))
1241 		return -ENODEV;
1242 
1243 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1244 	cgxd = rvu_cgx_pdata(cgx_id, rvu);
1245 	mac_ops = get_mac_ops(cgxd);
1246 
1247 	err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
1248 					 req->rx_pause, req->pfc_en);
1249 
1250 	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
1251 	return err;
1252 }
1253