// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

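/* Expanded once for every entry in MBOX_UP_CGX_MESSAGES, the M() macro below
 * generates an otx2_mbox_alloc_msg_<name>() helper which allocates a request
 * of the right size on the AF -> PF "up" mailbox and stamps its header with
 * the message ID and request signature.
 */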
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M

bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return false;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	return (cgx_features_get(cgxd) & feature);
}

/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one pf mapped to a cgx lmac port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap, 16);
}

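/* Each pf2cgxlmac_map entry packs the CGX id into the upper nibble and the
 * LMAC id into the lower nibble, which is why 0xFF is used later to mark a
 * PF as unmapped.
 */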
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

/* Based on P2X connectivity find mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x;

	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
	if (p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
}

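/* Walk every LMAC of every probed CGX and assign each one a PF, starting
 * from PF_CGXMAP_BASE. Both the forward (PF -> CGX/LMAC) and reverse
 * (CGX/LMAC -> PF bitmap) maps are populated, a free pkind is reserved for
 * the port and the PF is tied to its NIX block.
 */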
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int pf = PF_CGXMAP_BASE;
	unsigned long lmac_bmap;
	int size, free_pkind;
	int cgx, lmac, iter;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
				  GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
					      iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
			rvu->cgx_mapped_pfs++;
			pf++;
		}
	}
	return 0;
}

static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

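/* Worker-side delivery of a link event: for every PF mapped to the CGX LMAC
 * the event came from, either log the new state or forward it over the up
 * mailbox, depending on whether the PF enabled notifications via
 * rvu_cgx_config_linkevents().
 */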
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap, 16);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;
		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}

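/* Work handler that drains the event queue filled by cgx_lmac_postevent()
 * and rvu_cgx_send_link_info(), notifying the mapped PFs outside of atomic
 * context.
 */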
static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

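/* Set up the event queue and its workqueue, then register
 * cgx_lmac_postevent() as the link change callback for every LMAC on every
 * probed CGX.
 */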
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		flush_workqueue(rvu->cgx_evh_wq);
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port IDs start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port ID value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return -ENODEV;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed before
	 * we turn on the links.
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of the mapped PF and other PFs are not allowed. This function
 * checks whether a given pcifunc is permitted to do the configuration.
 */
static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}

void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	if (enable)
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);

	return 0;
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

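/* Common stats collector for CGX and RPM MACs. The per-MAC stat counts
 * reported by mac_ops decide whether the response buffer is laid out as
 * struct cgx_stats_rsp or struct rpm_stats_rsp.
 */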
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	/* Rx stats */
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
			       struct rpm_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	return cgx_get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0, i;
	u64 cfg;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* Copy the 48-bit MAC address to rsp->mac_addr, MSB first */
	for (i = 0; i < ETH_ALEN; i++)
		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	cgx_lmac_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes;
	 * if PTP is disabled then no shift is required.
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;

	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

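/* Record the PF's interest in link events in pf_notify_bmap, which
 * cgx_notify_pfs() checks before sending an up mailbox message. When
 * enabling, the current link state is pushed immediately so the PF starts
 * from a known state.
 */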
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->lmac_features = cgx_features_get(cgxd);

	return 0;
}

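/* Report the MAC FIFO length. This assumes all MACs in the system are of
 * the same type, so the value read from the first CGX/RPM applies to every
 * LMAC.
 */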
u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	int rvu_def_cgx_id = 0;
	u32 fifo_len;

	mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
	fifo_len = mac_ops ? mac_ops->fifo_len : 0;

	return fifo_len;
}

static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

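/* One mailbox message serves both directions: when req->set is true the
 * requested Tx/Rx pause configuration is programmed, otherwise the current
 * pause state is read back into the response.
 */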
int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (req->set)
		mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
					      req->tx_pause, req->rx_pause);
	else
		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
						  &rsp->tx_pause,
						  &rsp->rx_pause);
	return 0;
}

int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}

/* Finds the cumulative value of the NIX Rx/Tx counters from the LFs of a PF
 * and from its VFs as well, i.e. the NIX Rx/Tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes the LFs of a PF and of all of its VFs belong to the same
	 * NIX block.
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a lf is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
			 ~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

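/* Reference count NIXLF start/stop requests coming from a PF and its VFs so
 * that the CGX LMAC Rx/Tx path is enabled when the first user starts and
 * disabled only when the last user stops.
 */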
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when the first of all the NIXLFs is started and
	 * stop it when the last of them is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}

int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
				       struct fec_mode *req,
				       struct fec_mode *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	if (req->fec == OTX2_FEC_OFF)
		req->fec = OTX2_FEC_NONE;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
					   struct cgx_fw_data *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!rvu->fwdata)
		return -ENXIO;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
	       sizeof(struct cgx_lmac_fwdata_s));
	return 0;
}

int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
				       struct cgx_set_link_mode_req *req,
				       struct cgx_set_link_mode_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
	return 0;
}