// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2024 Marvell.
 *
 */

#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "rvu_reg.h"

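/* The M() macro below is expanded once for each entry in
 * MBOX_UP_REP_MESSAGES and generates an otx2_mbox_alloc_msg_<name>()
 * helper that allocates an AF->PF "up" mailbox message and fills in
 * its header signature and id.
 */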
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	return req;							\
}

MBOX_UP_REP_MESSAGES
#undef M

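/* Forward a representee event to its PF over the AF->PF "up" mailbox
 * and wait for the response; MAC address changes are also cached in
 * the local pfvf state.
 */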
static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, event->pcifunc);
	struct rep_event *msg;
	int pf;

	pf = rvu_get_pf(rvu->pdev, event->pcifunc);

	if (event->event & RVU_EVENT_MAC_ADDR_CHANGE)
		ether_addr_copy(pfvf->mac_addr, event->evt_data.mac);

	mutex_lock(&rvu->mbox_lock);
	msg = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
	if (!msg) {
		mutex_unlock(&rvu->mbox_lock);
		return -ENOMEM;
	}

	msg->hdr.pcifunc = event->pcifunc;
	msg->event = event->event;

	memcpy(&msg->evt_data, &event->evt_data, sizeof(struct rep_evt_data));

	otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);

	otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);

	otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);

	mutex_unlock(&rvu->mbox_lock);
	return 0;
}

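/* Work handler: drain the REP event queue and deliver each queued
 * event via rvu_rep_up_notify().
 */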
static void rvu_rep_wq_handler(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, rep_evt_work);
	struct rep_evtq_ent *qentry;
	struct rep_event *event;
	unsigned long flags;

	do {
		spin_lock_irqsave(&rvu->rep_evtq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->rep_evtq_head,
						  struct rep_evtq_ent,
						  node);
		if (qentry)
			list_del(&qentry->node);

		spin_unlock_irqrestore(&rvu->rep_evtq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->event;

		rvu_rep_up_notify(rvu, event);
		kfree(qentry);
	} while (1);
}

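/* Mailbox handler for REP event notifications. The event is queued and
 * handled from a workqueue so that forwarding it (which sends and waits
 * on an "up" mailbox message) happens outside this handler's context.
 */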
int rvu_mbox_handler_rep_event_notify(struct rvu *rvu, struct rep_event *req,
				      struct msg_rsp *rsp)
{
	struct rep_evtq_ent *qentry;

	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;

	qentry->event = *req;
	spin_lock(&rvu->rep_evtq_lock);
	list_add_tail(&qentry->node, &rvu->rep_evtq_head);
	spin_unlock(&rvu->rep_evtq_lock);
	queue_work(rvu->rep_evt_wq, &rvu->rep_evt_work);
	return 0;
}

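/* Notify the representor PF, over the AF->PF "up" mailbox, that a
 * CGX-mapped representee has been enabled or disabled.
 */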
int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rep_event *req;
	int pf;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
		return 0;

	pf = rvu_get_pf(rvu->pdev, rvu->rep_pcifunc);

	mutex_lock(&rvu->mbox_lock);
	req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
	if (!req) {
		mutex_unlock(&rvu->mbox_lock);
		return -ENOMEM;
	}

	req->hdr.pcifunc = rvu->rep_pcifunc;
	req->event |= RVU_EVENT_PFVF_STATE;
	req->pcifunc = pcifunc;
	req->evt_data.vf_state = enable;

	otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
	otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);

	mutex_unlock(&rvu->mbox_lock);
	return 0;
}

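/* Read a per-NIXLF RX/TX statistics register; 'nixlf' and 'blkaddr'
 * come from the caller's scope.
 */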
#define RVU_LF_RX_STATS(reg) \
		rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, reg))

#define RVU_LF_TX_STATS(reg) \
		rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, reg))

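/* Mailbox handler: return (or, when req->reset is set, clear) the
 * NIXLF RX/TX hardware statistics of the function identified by
 * req->pcifunc.
 */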
int rvu_mbox_handler_nix_lf_stats(struct rvu *rvu,
				  struct nix_stats_req *req,
				  struct nix_stats_rsp *rsp)
{
	u16 pcifunc = req->pcifunc;
	int nixlf, blkaddr, err;
	struct msg_req rst_req;
	struct msg_rsp rst_rsp;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return 0;

	if (req->reset) {
		rst_req.hdr.pcifunc = pcifunc;
		return rvu_mbox_handler_nix_stats_rst(rvu, &rst_req, &rst_rsp);
	}
	rsp->rx.octs = RVU_LF_RX_STATS(RX_OCTS);
	rsp->rx.ucast = RVU_LF_RX_STATS(RX_UCAST);
	rsp->rx.bcast = RVU_LF_RX_STATS(RX_BCAST);
	rsp->rx.mcast = RVU_LF_RX_STATS(RX_MCAST);
	rsp->rx.drop = RVU_LF_RX_STATS(RX_DROP);
	rsp->rx.err = RVU_LF_RX_STATS(RX_ERR);
	rsp->rx.drop_octs = RVU_LF_RX_STATS(RX_DROP_OCTS);
	rsp->rx.drop_mcast = RVU_LF_RX_STATS(RX_DRP_MCAST);
	rsp->rx.drop_bcast = RVU_LF_RX_STATS(RX_DRP_BCAST);

	rsp->tx.octs = RVU_LF_TX_STATS(TX_OCTS);
	rsp->tx.ucast = RVU_LF_TX_STATS(TX_UCAST);
	rsp->tx.bcast = RVU_LF_TX_STATS(TX_BCAST);
	rsp->tx.mcast = RVU_LF_TX_STATS(TX_MCAST);
	rsp->tx.drop = RVU_LF_TX_STATS(TX_DROP);

	rsp->pcifunc = req->pcifunc;
	return 0;
}

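/* Return the representor index mapped to @pcifunc; the switching rules
 * below use this index as the VLAN id carried over the LBK channel.
 */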
static u16 rvu_rep_get_vlan_id(struct rvu *rvu, u16 pcifunc)
{
	int id;

	for (id = 0; id < rvu->rep_cnt; id++)
		if (rvu->rep2pfvf_map[id] == pcifunc)
			return id;
	return 0;
}

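/* Configure insertion of a CTAG VLAN header carrying @vlan_tci on Tx
 * and return the allocated vtag index through @vidx.
 */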
static int rvu_rep_tx_vlan_cfg(struct rvu *rvu, u16 pcifunc,
			       u16 vlan_tci, int *vidx)
{
	struct nix_vtag_config_rsp rsp = {};
	struct nix_vtag_config req = {};
	u64 etype = ETH_P_8021Q;
	int err;

	/* Insert vlan tag */
	req.hdr.pcifunc = pcifunc;
	req.vtag_size = VTAGSIZE_T4;
	req.cfg_type = 0; /* tx vlan cfg */
	req.tx.cfg_vtag0 = true;
	req.tx.vtag0 = FIELD_PREP(NIX_VLAN_ETYPE_MASK, etype) | vlan_tci;

	err = rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
	if (err) {
		dev_err(rvu->dev, "Tx vlan config failed\n");
		return err;
	}
	*vidx = rsp.vtag0_idx;
	return 0;
}

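/* Configure Rx VLAN tag stripping (without capture) for @pcifunc. */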
static int rvu_rep_rx_vlan_cfg(struct rvu *rvu, u16 pcifunc)
{
	struct nix_vtag_config req = {};
	struct nix_vtag_config_rsp rsp;

	/* config strip, capture and size */
	req.hdr.pcifunc = pcifunc;
	req.vtag_size = VTAGSIZE_T4;
	req.cfg_type = 1; /* rx vlan cfg */
	req.rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req.rx.strip_vtag = true;
	req.rx.capture_vtag = false;

	return rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
}

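/* Install an MCAM Rx rule matching the switching VLAN tag on the LBK
 * channel: with @rte set, representee traffic is steered to the
 * representor PF; otherwise traffic is forwarded to the representee
 * itself via its default Rx action.
 */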
static int rvu_rep_install_rx_rule(struct rvu *rvu, u16 pcifunc,
				   u16 entry, bool rte)
{
	struct npc_install_flow_req req = {};
	struct npc_install_flow_rsp rsp = {};
	struct rvu_pfvf *pfvf;
	u16 vlan_tci, rep_id;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* To steer the traffic from Representee to Representor */
	rep_id = rvu_rep_get_vlan_id(rvu, pcifunc);
	if (rte) {
		vlan_tci = rep_id | BIT_ULL(8);
		req.vf = rvu->rep_pcifunc;
		req.op = NIX_RX_ACTIONOP_UCAST;
		req.index = rep_id;
	} else {
		vlan_tci = rep_id;
		req.vf = pcifunc;
		req.op = NIX_RX_ACTION_DEFAULT;
	}

	rvu_rep_rx_vlan_cfg(rvu, req.vf);
	req.entry = entry;
	req.hdr.pcifunc = 0; /* AF is requester */
	req.features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_VLAN_ETYPE_CTAG);
	req.vtag0_valid = true;
	req.vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req.packet.vlan_etype = cpu_to_be16(ETH_P_8021Q);
	req.mask.vlan_etype = cpu_to_be16(ETH_P_8021Q);
	req.packet.vlan_tci = cpu_to_be16(vlan_tci);
	req.mask.vlan_tci = cpu_to_be16(0xffff);

	req.channel = RVU_SWITCH_LBK_CHAN;
	req.chan_mask = 0xffff;
	req.intf = pfvf->nix_rx_intf;

	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

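/* Install the Tx counterpart: insert the switching VLAN tag (vtag
 * index from rvu_rep_tx_vlan_cfg()) and redirect the packet to the
 * LBK channel.
 */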
static int rvu_rep_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry,
				   bool rte)
{
	struct npc_install_flow_req req = {};
	struct npc_install_flow_rsp rsp = {};
	struct rvu_pfvf *pfvf;
	int vidx, err;
	u16 vlan_tci;
	u8 lbkid;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	vlan_tci = rvu_rep_get_vlan_id(rvu, pcifunc);
	if (rte)
		vlan_tci |= BIT_ULL(8);

	err = rvu_rep_tx_vlan_cfg(rvu, pcifunc, vlan_tci, &vidx);
	if (err)
		return err;

	lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
	req.hdr.pcifunc = 0; /* AF is requester */
	if (rte) {
		req.vf = pcifunc;
	} else {
		req.vf = rvu->rep_pcifunc;
		req.packet.sq_id = vlan_tci;
		req.mask.sq_id = 0xffff;
	}

	req.entry = entry;
	req.intf = pfvf->nix_tx_intf;
	req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
	req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
	req.set_cntr = 1;
	req.vtag0_def = vidx;
	req.vtag0_op = 1;
	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

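/* Install the Rx/Tx switching rule pairs for every CGX-mapped PF and
 * each of its VFs with an attached NIXLF (one pair per direction), and
 * set up the workqueue used to forward REP events.
 */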
int rvu_rep_install_mcam_rules(struct rvu *rvu)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	u16 start = rswitch->start_entry;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc, entry = 0;
	int pf, vf, numvfs;
	int err, nixlf, i;
	u8 rep;

	for (pf = 1; pf < hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
		rvu_get_nix_blkaddr(rvu, pcifunc);
		rep = true;
		for (i = 0; i < 2; i++) {
			err = rvu_rep_install_rx_rule(rvu, pcifunc,
						      start + entry, rep);
			if (err)
				return err;
			rswitch->entry2pcifunc[entry++] = pcifunc;

			err = rvu_rep_install_tx_rule(rvu, pcifunc,
						      start + entry, rep);
			if (err)
				return err;
			rswitch->entry2pcifunc[entry++] = pcifunc;
			rep = false;
		}

		rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf + 1);
			rvu_get_nix_blkaddr(rvu, pcifunc);

			/* Skip installing rules if nixlf is not attached */
			err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
			if (err)
				continue;
			rep = true;
			for (i = 0; i < 2; i++) {
				err = rvu_rep_install_rx_rule(rvu, pcifunc,
							      start + entry,
							      rep);
				if (err)
					return err;
				rswitch->entry2pcifunc[entry++] = pcifunc;

				err = rvu_rep_install_tx_rule(rvu, pcifunc,
							      start + entry,
							      rep);
				if (err)
					return err;
				rswitch->entry2pcifunc[entry++] = pcifunc;
				rep = false;
			}
		}
	}

	/* Initialize the wq for handling REP events */
	spin_lock_init(&rvu->rep_evtq_lock);
	INIT_LIST_HEAD(&rvu->rep_evtq_head);
	INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler);
	rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", 0, 0);
	if (!rvu->rep_evt_wq) {
		dev_err(rvu->dev, "REP workqueue allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}

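/* Enable or disable the switching rules installed for @pcifunc and
 * toggle its LBK link state accordingly.
 */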
void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u32 max = rswitch->used_entries;
	int blkaddr;
	u16 entry;

	if (!rswitch->used_entries)
		return;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	if (blkaddr < 0)
		return;

	rvu_switch_enable_lbk_link(rvu, pcifunc, ena);
	mutex_lock(&mcam->lock);
	for (entry = 0; entry < max; entry++) {
		if (rswitch->entry2pcifunc[entry] == pcifunc)
			npc_enable_mcam_entry(rvu, mcam, blkaddr, entry, ena);
	}
	mutex_unlock(&mcam->lock);
}

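/* One-time setup for the representor PF: mark its NIXLF initialized,
 * enable its LBK link and configure Rx VLAN stripping.
 */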
int rvu_rep_pf_init(struct rvu *rvu)
{
	u16 pcifunc = rvu->rep_pcifunc;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
	rvu_switch_enable_lbk_link(rvu, pcifunc, true);
	rvu_rep_rx_vlan_cfg(rvu, pcifunc);
	return 0;
}

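/* Mailbox handler: enable or disable eswitch (representor) mode.
 * Requests from any function other than the representor PF are
 * ignored; when the mode is turned off the representor's MCAM entries
 * are freed.
 */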
int rvu_mbox_handler_esw_cfg(struct rvu *rvu, struct esw_cfg_req *req,
			     struct msg_rsp *rsp)
{
	if (req->hdr.pcifunc != rvu->rep_pcifunc)
		return 0;

	rvu->rep_mode = req->ena;

	if (!rvu->rep_mode)
		rvu_npc_free_mcam_entries(rvu, req->hdr.pcifunc, -1);

	return 0;
}

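/* Mailbox handler: record the caller as the representor PF, report how
 * many representors are needed (one per CGX-mapped PF and VF) and build
 * the representor-index to pcifunc map.
 */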
int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req,
				 struct get_rep_cnt_rsp *rsp)
{
	int pf, vf, numvfs, hwvf, rep = 0;
	u16 pcifunc;

	rvu->rep_pcifunc = req->hdr.pcifunc;
	rsp->rep_cnt = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
	rvu->rep_cnt = rsp->rep_cnt;

	rvu->rep2pfvf_map = devm_kzalloc(rvu->dev, rvu->rep_cnt *
					 sizeof(u16), GFP_KERNEL);
	if (!rvu->rep2pfvf_map)
		return -ENOMEM;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;
		pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
		rvu->rep2pfvf_map[rep] = pcifunc;
		rsp->rep_pf_map[rep] = pcifunc;
		rep++;
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++) {
			rvu->rep2pfvf_map[rep] = pcifunc |
				((vf + 1) & RVU_PFVF_FUNC_MASK);
			rsp->rep_pf_map[rep] = rvu->rep2pfvf_map[rep];
			rep++;
		}
	}
	return 0;
}