// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2024 Marvell.
 *
 */

#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "rvu_reg.h"

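/* Generate otx2_mbox_alloc_msg_<name>() helpers for every AF-to-PF "up"
 * message listed in MBOX_UP_REP_MESSAGES. Each helper reserves a slot in
 * the AF->PF up mailbox and fills in the message signature and ID.
 */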
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	return req;							\
}

MBOX_UP_REP_MESSAGES
#undef M

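/* Deliver a queued rep event to the PF that owns event->pcifunc over the
 * AF->PF up mailbox. A MAC address change is also cached in the AF's
 * per-PF/VF state.
 */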
static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, event->pcifunc);
	struct rep_event *msg;
	int pf;

	pf = rvu_get_pf(event->pcifunc);

	if (event->event & RVU_EVENT_MAC_ADDR_CHANGE)
		ether_addr_copy(pfvf->mac_addr, event->evt_data.mac);

	mutex_lock(&rvu->mbox_lock);
	msg = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
	if (!msg) {
		mutex_unlock(&rvu->mbox_lock);
		return -ENOMEM;
	}

	msg->hdr.pcifunc = event->pcifunc;
	msg->event = event->event;

	memcpy(&msg->evt_data, &event->evt_data, sizeof(struct rep_evt_data));

	otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);

	otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);

	mutex_unlock(&rvu->mbox_lock);
	return 0;
}

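/* Workqueue handler: drain rep_evtq_head and push each queued event up
 * via rvu_rep_up_notify(), freeing the queue entry once it is sent.
 */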
static void rvu_rep_wq_handler(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, rep_evt_work);
	struct rep_evtq_ent *qentry;
	struct rep_event *event;
	unsigned long flags;

	do {
		spin_lock_irqsave(&rvu->rep_evtq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->rep_evtq_head,
						  struct rep_evtq_ent,
						  node);
		if (qentry)
			list_del(&qentry->node);

		spin_unlock_irqrestore(&rvu->rep_evtq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->event;

		rvu_rep_up_notify(rvu, event);
		kfree(qentry);
	} while (1);
}

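/* Mbox handler for REP_EVENT_NOTIFY: queue the event and kick the
 * workqueue so the up-mailbox message is sent from process context.
 */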
int rvu_mbox_handler_rep_event_notify(struct rvu *rvu, struct rep_event *req,
				      struct msg_rsp *rsp)
{
	struct rep_evtq_ent *qentry;

	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;

	qentry->event = *req;
	spin_lock(&rvu->rep_evtq_lock);
	list_add_tail(&qentry->node, &rvu->rep_evtq_head);
	spin_unlock(&rvu->rep_evtq_lock);
	queue_work(rvu->rep_evt_wq, &rvu->rep_evt_work);
	return 0;
}

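/* Notify the representor PF over the AF->PF up mailbox that a CGX-mapped
 * PF/VF has been enabled or disabled.
 */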
int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rep_event *req;
	int pf;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	pf = rvu_get_pf(rvu->rep_pcifunc);

	mutex_lock(&rvu->mbox_lock);
	req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
	if (!req) {
		mutex_unlock(&rvu->mbox_lock);
		return -ENOMEM;
	}

	req->hdr.pcifunc = rvu->rep_pcifunc;
	req->event |= RVU_EVENT_PFVF_STATE;
	req->pcifunc = pcifunc;
	req->evt_data.vf_state = enable;

	otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
	otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);

	mutex_unlock(&rvu->mbox_lock);
	return 0;
}

#define RVU_LF_RX_STATS(reg) \
		rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, reg))

#define RVU_LF_TX_STATS(reg) \
		rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, reg))

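/* Mbox handler for NIX_LF_STATS: return the RX/TX hardware counters of
 * the NIX LF attached to req->pcifunc, or reset them if req->reset is set.
 */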
int rvu_mbox_handler_nix_lf_stats(struct rvu *rvu,
				  struct nix_stats_req *req,
				  struct nix_stats_rsp *rsp)
{
	u16 pcifunc = req->pcifunc;
	int nixlf, blkaddr, err;
	struct msg_req rst_req;
	struct msg_rsp rst_rsp;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return 0;

	if (req->reset) {
		rst_req.hdr.pcifunc = pcifunc;
		return rvu_mbox_handler_nix_stats_rst(rvu, &rst_req, &rst_rsp);
	}
	rsp->rx.octs = RVU_LF_RX_STATS(RX_OCTS);
	rsp->rx.ucast = RVU_LF_RX_STATS(RX_UCAST);
	rsp->rx.bcast = RVU_LF_RX_STATS(RX_BCAST);
	rsp->rx.mcast = RVU_LF_RX_STATS(RX_MCAST);
	rsp->rx.drop = RVU_LF_RX_STATS(RX_DROP);
	rsp->rx.err = RVU_LF_RX_STATS(RX_ERR);
	rsp->rx.drop_octs = RVU_LF_RX_STATS(RX_DROP_OCTS);
	rsp->rx.drop_mcast = RVU_LF_RX_STATS(RX_DRP_MCAST);
	rsp->rx.drop_bcast = RVU_LF_RX_STATS(RX_DRP_BCAST);

	rsp->tx.octs = RVU_LF_TX_STATS(TX_OCTS);
	rsp->tx.ucast = RVU_LF_TX_STATS(TX_UCAST);
	rsp->tx.bcast = RVU_LF_TX_STATS(TX_BCAST);
	rsp->tx.mcast = RVU_LF_TX_STATS(TX_MCAST);
	rsp->tx.drop = RVU_LF_TX_STATS(TX_DROP);

	rsp->pcifunc = req->pcifunc;
	return 0;
}

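/* Map a PF/VF pcifunc to its representor index; this index is also used
 * as the VLAN id that tags traffic between representee and representor.
 */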
static u16 rvu_rep_get_vlan_id(struct rvu *rvu, u16 pcifunc)
{
	int id;

	for (id = 0; id < rvu->rep_cnt; id++)
		if (rvu->rep2pfvf_map[id] == pcifunc)
			return id;
	return 0;
}

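/* Set up TX VTAG0 insertion of an 802.1Q tag carrying @vlan_tci for
 * @pcifunc and return the allocated vtag index via @vidx.
 */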
static int rvu_rep_tx_vlan_cfg(struct rvu *rvu, u16 pcifunc,
			       u16 vlan_tci, int *vidx)
{
	struct nix_vtag_config_rsp rsp = {};
	struct nix_vtag_config req = {};
	u64 etype = ETH_P_8021Q;
	int err;

	/* Insert vlan tag */
	req.hdr.pcifunc = pcifunc;
	req.vtag_size = VTAGSIZE_T4;
	req.cfg_type = 0; /* tx vlan cfg */
	req.tx.cfg_vtag0 = true;
	req.tx.vtag0 = FIELD_PREP(NIX_VLAN_ETYPE_MASK, etype) | vlan_tci;

	err = rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
	if (err) {
		dev_err(rvu->dev, "Tx vlan config failed\n");
		return err;
	}
	*vidx = rsp.vtag0_idx;
	return 0;
}

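/* Configure RX VTAG TYPE0 for @pcifunc to strip, without capturing, the
 * outer VLAN tag.
 */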
static int rvu_rep_rx_vlan_cfg(struct rvu *rvu, u16 pcifunc)
{
	struct nix_vtag_config req = {};
	struct nix_vtag_config_rsp rsp;

	/* config strip, capture and size */
	req.hdr.pcifunc = pcifunc;
	req.vtag_size = VTAGSIZE_T4;
	req.cfg_type = 1; /* rx vlan cfg */
	req.rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req.rx.strip_vtag = true;
	req.rx.capture_vtag = false;

	return rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
}

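/* Install an NPC rule on the LBK RX interface matching the per-rep VLAN
 * tag: with @rte set, traffic is steered to the representor PF (unicast
 * to rep_id); otherwise it takes the representee's default RX action.
 */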
static int rvu_rep_install_rx_rule(struct rvu *rvu, u16 pcifunc,
				   u16 entry, bool rte)
{
	struct npc_install_flow_req req = {};
	struct npc_install_flow_rsp rsp = {};
	struct rvu_pfvf *pfvf;
	u16 vlan_tci, rep_id;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* To steer the traffic from Representee to Representor */
	rep_id = rvu_rep_get_vlan_id(rvu, pcifunc);
	if (rte) {
		vlan_tci = rep_id | BIT_ULL(8);
		req.vf = rvu->rep_pcifunc;
		req.op = NIX_RX_ACTIONOP_UCAST;
		req.index = rep_id;
	} else {
		vlan_tci = rep_id;
		req.vf = pcifunc;
		req.op = NIX_RX_ACTION_DEFAULT;
	}

	rvu_rep_rx_vlan_cfg(rvu, req.vf);
	req.entry = entry;
	req.hdr.pcifunc = 0; /* AF is requester */
	req.features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_VLAN_ETYPE_CTAG);
	req.vtag0_valid = true;
	req.vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req.packet.vlan_etype = cpu_to_be16(ETH_P_8021Q);
	req.mask.vlan_etype = cpu_to_be16(ETH_P_8021Q);
	req.packet.vlan_tci = cpu_to_be16(vlan_tci);
	req.mask.vlan_tci = cpu_to_be16(0xffff);

	req.channel = RVU_SWITCH_LBK_CHAN;
	req.chan_mask = 0xffff;
	req.intf = pfvf->nix_rx_intf;

	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

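/* Install the TX-side NPC rule: packets from the representee (@rte set)
 * or from the representor PF's matching SQ (@rte clear) get the per-rep
 * VLAN tag inserted via VTAG0 and are forwarded to the LBK channel.
 */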
static int rvu_rep_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry,
				   bool rte)
{
	struct npc_install_flow_req req = {};
	struct npc_install_flow_rsp rsp = {};
	struct rvu_pfvf *pfvf;
	int vidx, err;
	u16 vlan_tci;
	u8 lbkid;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	vlan_tci = rvu_rep_get_vlan_id(rvu, pcifunc);
	if (rte)
		vlan_tci |= BIT_ULL(8);

	err = rvu_rep_tx_vlan_cfg(rvu, pcifunc, vlan_tci, &vidx);
	if (err)
		return err;

	lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
	req.hdr.pcifunc = 0; /* AF is requester */
	if (rte) {
		req.vf = pcifunc;
	} else {
		req.vf = rvu->rep_pcifunc;
		req.packet.sq_id = vlan_tci;
		req.mask.sq_id = 0xffff;
	}

	req.entry = entry;
	req.intf = pfvf->nix_tx_intf;
	req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
	req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
	req.set_cntr = 1;
	req.vtag0_def = vidx;
	req.vtag0_op = 1;
	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

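/* Install RX and TX rule pairs for every CGX-mapped PF and each of its
 * VFs with a NIX LF attached, and set up the workqueue used to relay
 * representee events.
 */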
int rvu_rep_install_mcam_rules(struct rvu *rvu)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	u16 start = rswitch->start_entry;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc, entry = 0;
	int pf, vf, numvfs;
	int err, nixlf, i;
	u8 rep;

	for (pf = 1; pf < hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pcifunc = pf << RVU_PFVF_PF_SHIFT;
		rvu_get_nix_blkaddr(rvu, pcifunc);
		rep = true;
		for (i = 0; i < 2; i++) {
			err = rvu_rep_install_rx_rule(rvu, pcifunc,
						      start + entry, rep);
			if (err)
				return err;
			rswitch->entry2pcifunc[entry++] = pcifunc;

			err = rvu_rep_install_tx_rule(rvu, pcifunc,
						      start + entry, rep);
			if (err)
				return err;
			rswitch->entry2pcifunc[entry++] = pcifunc;
			rep = false;
		}

		rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = pf << RVU_PFVF_PF_SHIFT |
				  ((vf + 1) & RVU_PFVF_FUNC_MASK);
			rvu_get_nix_blkaddr(rvu, pcifunc);

			/* Skip installing rules if nixlf is not attached */
			err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
			if (err)
				continue;
			rep = true;
			for (i = 0; i < 2; i++) {
				err = rvu_rep_install_rx_rule(rvu, pcifunc,
							      start + entry,
							      rep);
				if (err)
					return err;
				rswitch->entry2pcifunc[entry++] = pcifunc;

				err = rvu_rep_install_tx_rule(rvu, pcifunc,
							      start + entry,
							      rep);
				if (err)
					return err;
				rswitch->entry2pcifunc[entry++] = pcifunc;
				rep = false;
			}
		}
	}

	/* Initialize the wq for handling REP events */
	spin_lock_init(&rvu->rep_evtq_lock);
	INIT_LIST_HEAD(&rvu->rep_evtq_head);
	INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler);
	rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", 0, 0);
	if (!rvu->rep_evt_wq) {
		dev_err(rvu->dev, "REP workqueue allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}

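/* Enable or disable the LBK link and every switch MCAM entry installed
 * for @pcifunc.
 */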
void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u32 max = rswitch->used_entries;
	int blkaddr;
	u16 entry;

	if (!rswitch->used_entries)
		return;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	if (blkaddr < 0)
		return;

	rvu_switch_enable_lbk_link(rvu, pcifunc, ena);
	mutex_lock(&mcam->lock);
	for (entry = 0; entry < max; entry++) {
		if (rswitch->entry2pcifunc[entry] == pcifunc)
			npc_enable_mcam_entry(rvu, mcam, blkaddr, entry, ena);
	}
	mutex_unlock(&mcam->lock);
}

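/* Initialize the representor PF: mark its NIX LF as initialized, enable
 * its LBK link and configure RX VLAN stripping.
 */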
int rvu_rep_pf_init(struct rvu *rvu)
{
	u16 pcifunc = rvu->rep_pcifunc;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
	rvu_switch_enable_lbk_link(rvu, pcifunc, true);
	rvu_rep_rx_vlan_cfg(rvu, pcifunc);
	return 0;
}

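/* Mbox handler for ESW_CFG: let the representor PF enable or disable
 * eswitch (rep) mode; leaving rep mode frees its MCAM entries.
 */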
int rvu_mbox_handler_esw_cfg(struct rvu *rvu, struct esw_cfg_req *req,
			     struct msg_rsp *rsp)
{
	if (req->hdr.pcifunc != rvu->rep_pcifunc)
		return 0;

	rvu->rep_mode = req->ena;

	if (!rvu->rep_mode)
		rvu_npc_free_mcam_entries(rvu, req->hdr.pcifunc, -1);

	return 0;
}

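/* Mbox handler for GET_REP_CNT: record the caller as the representor PF,
 * report how many representors are needed (one per CGX-mapped PF/VF) and
 * build the rep index to pcifunc map returned to the caller.
 */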
int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req,
				 struct get_rep_cnt_rsp *rsp)
{
	int pf, vf, numvfs, hwvf, rep = 0;
	u16 pcifunc;

	rvu->rep_pcifunc = req->hdr.pcifunc;
	rsp->rep_cnt = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
	rvu->rep_cnt = rsp->rep_cnt;

	rvu->rep2pfvf_map = devm_kzalloc(rvu->dev, rvu->rep_cnt *
					 sizeof(u16), GFP_KERNEL);
	if (!rvu->rep2pfvf_map)
		return -ENOMEM;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;
		pcifunc = pf << RVU_PFVF_PF_SHIFT;
		rvu->rep2pfvf_map[rep] = pcifunc;
		rsp->rep_pf_map[rep] = pcifunc;
		rep++;
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++) {
			rvu->rep2pfvf_map[rep] = pcifunc |
				((vf + 1) & RVU_PFVF_FUNC_MASK);
			rsp->rep_pf_map[rep] = rvu->rep2pfvf_map[rep];
			rep++;
		}
	}
	return 0;
}
469