xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c (revision 79790b6818e96c58fe2bffee1b418c16e64e7b80)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell CN10K MCS driver
3  *
4  * Copyright (C) 2022 Marvell.
5  */
6 
7 #include <linux/types.h>
8 #include <linux/device.h>
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 
12 #include "mcs.h"
13 #include "rvu.h"
14 #include "mcs_reg.h"
15 #include "lmac_common.h"
16 
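/* Expand MBOX_UP_MCS_MESSAGES into otx2_mbox_alloc_msg_<name>() helpers.
 * Each helper allocates a request on the AF->PF "up" mailbox and fills in
 * the common header (signature and message ID).
 */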
17 #define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
18 static struct _req_type __maybe_unused					\
19 *otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
20 {									\
21 	struct _req_type *req;						\
22 									\
23 	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
24 		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
25 		sizeof(struct _rsp_type));				\
26 	if (!req)							\
27 		return NULL;						\
28 	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
29 	req->hdr.id = _id;						\
30 	return req;							\
31 }
32 
33 MBOX_UP_MCS_MESSAGES
34 #undef M
35 
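/* Configure MCS PEX to skip (or stop skipping) the 8B header that RPM
 * adds to RX packets when PTP timestamping is enabled on this LMAC.
 */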
36 void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena)
37 {
38 	struct mcs *mcs;
39 	u64 cfg;
40 	u8 port;
41 
42 	if (!rvu->mcs_blk_cnt)
43 		return;
44 
45 	/* When PTP is enabled, RPM adds an 8B header to all
46 	 * RX packets. MCS PEX needs to be configured to skip
47 	 * these 8 bytes during packet parsing.
48 	 */
49 
50 	/* CNF10K-B */
51 	if (rvu->mcs_blk_cnt > 1) {
52 		mcs = mcs_get_pdata(rpm_id);
53 		cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
54 		if (ena)
55 			cfg |= BIT_ULL(lmac_id);
56 		else
57 			cfg &= ~BIT_ULL(lmac_id);
58 		mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, cfg);
59 		return;
60 	}
61 	/* CN10K-B */
62 	mcs = mcs_get_pdata(0);
63 	port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id;
64 	cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port));
65 	if (ena)
66 		cfg |= BIT_ULL(0);
67 	else
68 		cfg &= ~BIT_ULL(0);
69 	mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port), cfg);
70 }
71 
72 int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
73 				       struct mcs_set_lmac_mode *req,
74 				       struct msg_rsp *rsp)
75 {
76 	struct mcs *mcs;
77 
78 	if (req->mcs_id >= rvu->mcs_blk_cnt)
79 		return MCS_AF_ERR_INVALID_MCSID;
80 
81 	mcs = mcs_get_pdata(req->mcs_id);
82 
83 	if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
84 		mcs_set_lmac_mode(mcs, req->lmac_id, req->mode);
85 
86 	return 0;
87 }
88 
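/* Queue an MCS interrupt event for the owning PF/VF and schedule the
 * interrupt work; events the PF/VF has not subscribed to are dropped.
 */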
89 int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
90 {
91 	struct mcs_intrq_entry *qentry;
92 	u16 pcifunc = event->pcifunc;
93 	struct rvu *rvu = mcs->rvu;
94 	struct mcs_pfvf *pfvf;
95 
96 	/* Check if it is PF or VF */
97 	if (pcifunc & RVU_PFVF_FUNC_MASK)
98 		pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
99 	else
100 		pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
101 
102 	event->intr_mask &= pfvf->intr_mask;
103 
104 	/* Check if PF/VF interrupt notification is enabled */
105 	if (!(pfvf->intr_mask && event->intr_mask))
106 		return 0;
107 
108 	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
109 	if (!qentry)
110 		return -ENOMEM;
111 
112 	qentry->intr_event = *event;
113 	spin_lock(&rvu->mcs_intrq_lock);
114 	list_add_tail(&qentry->node, &rvu->mcs_intrq_head);
115 	spin_unlock(&rvu->mcs_intrq_lock);
116 	queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work);
117 
118 	return 0;
119 }
120 
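/* Forward an MCS interrupt event to the owning PF over the AF->PF up mailbox. */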
121 static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
122 {
123 	struct mcs_intr_info *req;
124 	int pf;
125 
126 	pf = rvu_get_pf(event->pcifunc);
127 
128 	mutex_lock(&rvu->mbox_lock);
129 
130 	req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
131 	if (!req) {
132 		mutex_unlock(&rvu->mbox_lock);
133 		return -ENOMEM;
134 	}
135 
136 	req->mcs_id = event->mcs_id;
137 	req->intr_mask = event->intr_mask;
138 	req->sa_id = event->sa_id;
139 	req->hdr.pcifunc = event->pcifunc;
140 	req->lmac_id = event->lmac_id;
141 
142 	otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
143 
144 	otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
145 
146 	mutex_unlock(&rvu->mbox_lock);
147 
148 	return 0;
149 }
150 
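/* Work handler: drain the interrupt event queue and notify the owning PFs. */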
151 static void mcs_intr_handler_task(struct work_struct *work)
152 {
153 	struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work);
154 	struct mcs_intrq_entry *qentry;
155 	struct mcs_intr_event *event;
156 	unsigned long flags;
157 
158 	do {
159 		spin_lock_irqsave(&rvu->mcs_intrq_lock, flags);
160 		qentry = list_first_entry_or_null(&rvu->mcs_intrq_head,
161 						  struct mcs_intrq_entry,
162 						  node);
163 		if (qentry)
164 			list_del(&qentry->node);
165 
166 		spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags);
167 		if (!qentry)
168 			break; /* nothing more to process */
169 
170 		event = &qentry->intr_event;
171 
172 		mcs_notify_pfvf(event, rvu);
173 		kfree(qentry);
174 	} while (1);
175 }
176 
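/* Record which MCS interrupt events the requesting PF/VF wants to be notified of. */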
177 int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
178 				  struct mcs_intr_cfg *req,
179 				  struct msg_rsp *rsp)
180 {
181 	u16 pcifunc = req->hdr.pcifunc;
182 	struct mcs_pfvf *pfvf;
183 	struct mcs *mcs;
184 
185 	if (req->mcs_id >= rvu->mcs_blk_cnt)
186 		return MCS_AF_ERR_INVALID_MCSID;
187 
188 	mcs = mcs_get_pdata(req->mcs_id);
189 
190 	/* Check if it is PF or VF */
191 	if (pcifunc & RVU_PFVF_FUNC_MASK)
192 		pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
193 	else
194 		pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
195 
196 	mcs->pf_map[0] = pcifunc;
197 	pfvf->intr_mask = req->intr_mask;
198 
199 	return 0;
200 }
201 
202 int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu,
203 				     struct msg_req *req,
204 				     struct mcs_hw_info *rsp)
205 {
206 	struct mcs *mcs;
207 
208 	if (!rvu->mcs_blk_cnt)
209 		return MCS_AF_ERR_NOT_MAPPED;
210 
211 	/* MCS resources are the same across all blocks */
212 	mcs = mcs_get_pdata(0);
213 	rsp->num_mcs_blks = rvu->mcs_blk_cnt;
214 	rsp->tcam_entries = mcs->hw->tcam_entries;
215 	rsp->secy_entries = mcs->hw->secy_entries;
216 	rsp->sc_entries = mcs->hw->sc_entries;
217 	rsp->sa_entries = mcs->hw->sa_entries;
218 	return 0;
219 }
220 
221 int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req,
222 				    struct msg_rsp *rsp)
223 {
224 	struct mcs *mcs;
225 
226 	if (req->mcs_id >= rvu->mcs_blk_cnt)
227 		return MCS_AF_ERR_INVALID_MCSID;
228 
229 	mcs = mcs_get_pdata(req->mcs_id);
230 
231 	mcs_reset_port(mcs, req->port_id, req->reset);
232 
233 	return 0;
234 }
235 
236 int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu,
237 				     struct mcs_clear_stats *req,
238 				     struct msg_rsp *rsp)
239 {
240 	u16 pcifunc = req->hdr.pcifunc;
241 	struct mcs *mcs;
242 
243 	if (req->mcs_id >= rvu->mcs_blk_cnt)
244 		return MCS_AF_ERR_INVALID_MCSID;
245 
246 	mcs = mcs_get_pdata(req->mcs_id);
247 
248 	mutex_lock(&mcs->stats_lock);
249 	if (req->all)
250 		mcs_clear_all_stats(mcs, pcifunc, req->dir);
251 	else
252 		mcs_clear_stats(mcs, req->type, req->id, req->dir);
253 
254 	mutex_unlock(&mcs->stats_lock);
255 	return 0;
256 }
257 
258 int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu,
259 					  struct mcs_stats_req *req,
260 					  struct mcs_flowid_stats *rsp)
261 {
262 	struct mcs *mcs;
263 
264 	if (req->mcs_id >= rvu->mcs_blk_cnt)
265 		return MCS_AF_ERR_INVALID_MCSID;
266 
267 	mcs = mcs_get_pdata(req->mcs_id);
268 
269 	/* In CNF10K-B, before reading the statistics,
270 	 * MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set
271 	 * to get accurate statistics
272 	 */
273 	if (mcs->hw->mcs_blks > 1)
274 		mcs_set_force_clk_en(mcs, true);
275 
276 	mutex_lock(&mcs->stats_lock);
277 	mcs_get_flowid_stats(mcs, rsp, req->id, req->dir);
278 	mutex_unlock(&mcs->stats_lock);
279 
280 	/* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading
281 	 * the statistics
282 	 */
283 	if (mcs->hw->mcs_blks > 1)
284 		mcs_set_force_clk_en(mcs, false);
285 
286 	return 0;
287 }
288 
289 int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu,
290 					struct mcs_stats_req *req,
291 					struct mcs_secy_stats *rsp)
292 {
	struct mcs *mcs;
293 
294 	if (req->mcs_id >= rvu->mcs_blk_cnt)
295 		return MCS_AF_ERR_INVALID_MCSID;
296 
297 	mcs = mcs_get_pdata(req->mcs_id);
298 
299 	if (mcs->hw->mcs_blks > 1)
300 		mcs_set_force_clk_en(mcs, true);
301 
302 	mutex_lock(&mcs->stats_lock);
303 
304 	if (req->dir == MCS_RX)
305 		mcs_get_rx_secy_stats(mcs, rsp, req->id);
306 	else
307 		mcs_get_tx_secy_stats(mcs, rsp, req->id);
308 
309 	mutex_unlock(&mcs->stats_lock);
310 
311 	if (mcs->hw->mcs_blks > 1)
312 		mcs_set_force_clk_en(mcs, false);
313 
314 	return 0;
315 }
316 
317 int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu,
318 				      struct mcs_stats_req *req,
319 				      struct mcs_sc_stats *rsp)
320 {
321 	struct mcs *mcs;
322 
323 	if (req->mcs_id >= rvu->mcs_blk_cnt)
324 		return MCS_AF_ERR_INVALID_MCSID;
325 
326 	mcs = mcs_get_pdata(req->mcs_id);
327 
328 	if (mcs->hw->mcs_blks > 1)
329 		mcs_set_force_clk_en(mcs, true);
330 
331 	mutex_lock(&mcs->stats_lock);
332 	mcs_get_sc_stats(mcs, rsp, req->id, req->dir);
333 	mutex_unlock(&mcs->stats_lock);
334 
335 	if (mcs->hw->mcs_blks > 1)
336 		mcs_set_force_clk_en(mcs, false);
337 
338 	return 0;
339 }
340 
341 int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu,
342 				      struct mcs_stats_req *req,
343 				      struct mcs_sa_stats *rsp)
344 {
345 	struct mcs *mcs;
346 
347 	if (req->mcs_id >= rvu->mcs_blk_cnt)
348 		return MCS_AF_ERR_INVALID_MCSID;
349 
350 	mcs = mcs_get_pdata(req->mcs_id);
351 
352 	if (mcs->hw->mcs_blks > 1)
353 		mcs_set_force_clk_en(mcs, true);
354 
355 	mutex_lock(&mcs->stats_lock);
356 	mcs_get_sa_stats(mcs, rsp, req->id, req->dir);
357 	mutex_unlock(&mcs->stats_lock);
358 
359 	if (mcs->hw->mcs_blks > 1)
360 		mcs_set_force_clk_en(mcs, false);
361 
362 	return 0;
363 }
364 
365 int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu,
366 					struct mcs_stats_req *req,
367 					struct mcs_port_stats *rsp)
368 {
369 	struct mcs *mcs;
370 
371 	if (req->mcs_id >= rvu->mcs_blk_cnt)
372 		return MCS_AF_ERR_INVALID_MCSID;
373 
374 	mcs = mcs_get_pdata(req->mcs_id);
375 
376 	if (mcs->hw->mcs_blks > 1)
377 		mcs_set_force_clk_en(mcs, true);
378 
379 	mutex_lock(&mcs->stats_lock);
380 	mcs_get_port_stats(mcs, rsp, req->id, req->dir);
381 	mutex_unlock(&mcs->stats_lock);
382 
383 	if (mcs->hw->mcs_blks > 1)
384 		mcs_set_force_clk_en(mcs, false);
385 
386 	return 0;
387 }
388 
389 int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu,
390 					 struct mcs_set_active_lmac *req,
391 					 struct msg_rsp *rsp)
392 {
393 	struct mcs *mcs;
394 
395 	if (req->mcs_id >= rvu->mcs_blk_cnt)
396 		return MCS_AF_ERR_INVALID_MCSID;
397 
398 	mcs = mcs_get_pdata(req->mcs_id);
399 	if (!mcs)
400 		return MCS_AF_ERR_NOT_MAPPED;
401 
402 	mcs->hw->lmac_bmap = req->lmac_bmap;
403 	mcs_set_lmac_channels(req->mcs_id, req->chan_base);
404 	return 0;
405 }
406 
407 int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req,
408 				      struct msg_rsp *rsp)
409 {
410 	struct mcs *mcs;
411 
412 	if (req->mcs_id >= rvu->mcs_blk_cnt)
413 		return MCS_AF_ERR_INVALID_MCSID;
414 
415 	mcs = mcs_get_pdata(req->mcs_id);
416 
417 	if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
418 		return -EINVAL;
419 
420 	mcs_set_port_cfg(mcs, req);
421 
422 	return 0;
423 }
424 
425 int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req,
426 				      struct mcs_port_cfg_get_rsp *rsp)
427 {
428 	struct mcs *mcs;
429 
430 	if (req->mcs_id >= rvu->mcs_blk_cnt)
431 		return MCS_AF_ERR_INVALID_MCSID;
432 
433 	mcs = mcs_get_pdata(req->mcs_id);
434 
435 	if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
436 		return -EINVAL;
437 
438 	mcs_get_port_cfg(mcs, req, rsp);
439 
440 	return 0;
441 }
442 
443 int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req,
444 					    struct mcs_custom_tag_cfg_get_rsp *rsp)
445 {
446 	struct mcs *mcs;
447 
448 	if (req->mcs_id >= rvu->mcs_blk_cnt)
449 		return MCS_AF_ERR_INVALID_MCSID;
450 
451 	mcs = mcs_get_pdata(req->mcs_id);
452 
453 	mcs_get_custom_tag_cfg(mcs, req, rsp);
454 
455 	return 0;
456 }
457 
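/* FLR handler: free all RX and TX MCS resources held by the given PF/VF
 * across every MCS block.
 */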
458 int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc)
459 {
460 	struct mcs *mcs;
461 	int mcs_id;
462 
463 	/* CNF10K-B: mcs0-6 are mapped to RPM2-8 */
464 	if (rvu->mcs_blk_cnt > 1) {
465 		for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
466 			mcs = mcs_get_pdata(mcs_id);
467 			mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
468 			mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
469 		}
470 	} else {
471 		/* CN10K-B has only one mcs block */
472 		mcs = mcs_get_pdata(0);
473 		mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
474 		mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
475 	}
476 	return 0;
477 }
478 
479 int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu,
480 					  struct mcs_flowid_ena_dis_entry *req,
481 					  struct msg_rsp *rsp)
482 {
483 	struct mcs *mcs;
484 
485 	if (req->mcs_id >= rvu->mcs_blk_cnt)
486 		return MCS_AF_ERR_INVALID_MCSID;
487 
488 	mcs = mcs_get_pdata(req->mcs_id);
489 	mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena);
490 	return 0;
491 }
492 
493 int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu,
494 					struct mcs_pn_table_write_req *req,
495 					struct msg_rsp *rsp)
496 {
497 	struct mcs *mcs;
498 
499 	if (req->mcs_id >= rvu->mcs_blk_cnt)
500 		return MCS_AF_ERR_INVALID_MCSID;
501 
502 	mcs = mcs_get_pdata(req->mcs_id);
503 	mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir);
504 	return 0;
505 }
506 
507 int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu,
508 					  struct mcs_set_pn_threshold *req,
509 					  struct msg_rsp *rsp)
510 {
511 	struct mcs *mcs;
512 
513 	if (req->mcs_id >= rvu->mcs_blk_cnt)
514 		return MCS_AF_ERR_INVALID_MCSID;
515 
516 	mcs = mcs_get_pdata(req->mcs_id);
517 
518 	mcs_pn_threshold_set(mcs, req);
519 
520 	return 0;
521 }
522 
523 int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu,
524 					    struct mcs_rx_sc_sa_map *req,
525 					    struct msg_rsp *rsp)
526 {
527 	struct mcs *mcs;
528 
529 	if (req->mcs_id >= rvu->mcs_blk_cnt)
530 		return MCS_AF_ERR_INVALID_MCSID;
531 
532 	mcs = mcs_get_pdata(req->mcs_id);
533 	mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req);
534 	return 0;
535 }
536 
537 int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu,
538 					    struct mcs_tx_sc_sa_map *req,
539 					    struct msg_rsp *rsp)
540 {
541 	struct mcs *mcs;
542 
543 	if (req->mcs_id >= rvu->mcs_blk_cnt)
544 		return MCS_AF_ERR_INVALID_MCSID;
545 
546 	mcs = mcs_get_pdata(req->mcs_id);
547 	mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req);
548 	mcs->tx_sa_active[req->sc_id] = req->tx_sa_active;
549 
550 	return 0;
551 }
552 
553 int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu,
554 				       struct mcs_sa_plcy_write_req *req,
555 				       struct msg_rsp *rsp)
556 {
557 	struct mcs *mcs;
558 	int i;
559 
560 	if (req->mcs_id >= rvu->mcs_blk_cnt)
561 		return MCS_AF_ERR_INVALID_MCSID;
562 
563 	mcs = mcs_get_pdata(req->mcs_id);
564 
565 	for (i = 0; i < req->sa_cnt; i++)
566 		mcs_sa_plcy_write(mcs, &req->plcy[i][0],
567 				  req->sa_index[i], req->dir);
568 	return 0;
569 }
570 
571 int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu,
572 					 struct mcs_rx_sc_cam_write_req *req,
573 					 struct msg_rsp *rsp)
574 {
575 	struct mcs *mcs;
576 
577 	if (req->mcs_id >= rvu->mcs_blk_cnt)
578 		return MCS_AF_ERR_INVALID_MCSID;
579 
580 	mcs = mcs_get_pdata(req->mcs_id);
581 	mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id);
582 	return 0;
583 }
584 
585 int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu,
586 					 struct mcs_secy_plcy_write_req *req,
587 					 struct msg_rsp *rsp)
588 {
	struct mcs *mcs;
589 
590 	if (req->mcs_id >= rvu->mcs_blk_cnt)
591 		return MCS_AF_ERR_INVALID_MCSID;
592 
593 	mcs = mcs_get_pdata(req->mcs_id);
594 
595 	mcs_secy_plcy_write(mcs, req->plcy,
596 			    req->secy_id, req->dir);
597 	return 0;
598 }
599 
600 int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu,
601 					    struct mcs_flowid_entry_write_req *req,
602 					    struct msg_rsp *rsp)
603 {
604 	struct secy_mem_map map;
605 	struct mcs *mcs;
606 
607 	if (req->mcs_id >= rvu->mcs_blk_cnt)
608 		return MCS_AF_ERR_INVALID_MCSID;
609 
610 	mcs = mcs_get_pdata(req->mcs_id);
611 
612 	/* TODO validate the flowid */
613 	mcs_flowid_entry_write(mcs, req->data, req->mask,
614 			       req->flow_id, req->dir);
615 	map.secy = req->secy_id;
616 	map.sc = req->sc_id;
617 	map.ctrl_pkt = req->ctrl_pkt;
618 	map.flow_id = req->flow_id;
619 	map.sci = req->sci;
620 	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir);
621 	if (req->ena)
622 		mcs_ena_dis_flowid_entry(mcs, req->flow_id,
623 					 req->dir, true);
624 	return 0;
625 }
626 
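/* Free a single flow ID, SecY, SC or SA entry owned by the PF/VF, or all
 * of its entries when req->all is set; the associated hardware entry is
 * disabled or cleared where applicable.
 */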
627 int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
628 					struct mcs_free_rsrc_req *req,
629 					struct msg_rsp *rsp)
630 {
631 	u16 pcifunc = req->hdr.pcifunc;
632 	struct mcs_rsrc_map *map;
633 	struct mcs *mcs;
634 	int rc = 0;
635 
636 	if (req->mcs_id >= rvu->mcs_blk_cnt)
637 		return MCS_AF_ERR_INVALID_MCSID;
638 
639 	mcs = mcs_get_pdata(req->mcs_id);
640 
641 	if (req->dir == MCS_RX)
642 		map = &mcs->rx;
643 	else
644 		map = &mcs->tx;
645 
646 	mutex_lock(&rvu->rsrc_lock);
647 	/* Free all the cam resources mapped to PF/VF */
648 	if (req->all) {
649 		rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc);
650 		goto exit;
651 	}
652 
653 	switch (req->rsrc_type) {
654 	case MCS_RSRC_TYPE_FLOWID:
655 		rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc);
656 		mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false);
657 		break;
658 	case MCS_RSRC_TYPE_SECY:
659 		rc =  mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc);
660 		mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir);
661 		break;
662 	case MCS_RSRC_TYPE_SC:
663 		rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc);
664 		/* Disable SC CAM only on RX side */
665 		if (req->dir == MCS_RX)
666 			mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false);
667 		break;
668 	case MCS_RSRC_TYPE_SA:
669 		rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc);
670 		break;
671 	}
672 exit:
673 	mutex_unlock(&rvu->rsrc_lock);
674 	return rc;
675 }
676 
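/* Allocate MCS table entries (flow IDs, SecYs, SCs or SAs) for the
 * requesting PF/VF; with req->all set, one entry of each type plus a
 * pair of SAs is reserved in a single call.
 */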
677 int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu,
678 					 struct mcs_alloc_rsrc_req *req,
679 					 struct mcs_alloc_rsrc_rsp *rsp)
680 {
681 	u16 pcifunc = req->hdr.pcifunc;
682 	struct mcs_rsrc_map *map;
683 	struct mcs *mcs;
684 	int rsrc_id, i;
685 
686 	if (req->mcs_id >= rvu->mcs_blk_cnt)
687 		return MCS_AF_ERR_INVALID_MCSID;
688 
689 	mcs = mcs_get_pdata(req->mcs_id);
690 
691 	if (req->dir == MCS_RX)
692 		map = &mcs->rx;
693 	else
694 		map = &mcs->tx;
695 
696 	mutex_lock(&rvu->rsrc_lock);
697 
698 	if (req->all) {
699 		rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0],
700 					     &rsp->secy_ids[0],
701 					     &rsp->sc_ids[0],
702 					     &rsp->sa_ids[0],
703 					     &rsp->sa_ids[1],
704 					     pcifunc, req->dir);
705 		goto exit;
706 	}
707 
708 	switch (req->rsrc_type) {
709 	case MCS_RSRC_TYPE_FLOWID:
710 		for (i = 0; i < req->rsrc_cnt; i++) {
711 			rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
712 			if (rsrc_id < 0)
713 				goto exit;
714 			rsp->flow_ids[i] = rsrc_id;
715 			rsp->rsrc_cnt++;
716 		}
717 		break;
718 	case MCS_RSRC_TYPE_SECY:
719 		for (i = 0; i < req->rsrc_cnt; i++) {
720 			rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
721 			if (rsrc_id < 0)
722 				goto exit;
723 			rsp->secy_ids[i] = rsrc_id;
724 			rsp->rsrc_cnt++;
725 		}
726 		break;
727 	case MCS_RSRC_TYPE_SC:
728 		for (i = 0; i < req->rsrc_cnt; i++) {
729 			rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
730 			if (rsrc_id < 0)
731 				goto exit;
732 			rsp->sc_ids[i] = rsrc_id;
733 			rsp->rsrc_cnt++;
734 		}
735 		break;
736 	case MCS_RSRC_TYPE_SA:
737 		for (i = 0; i < req->rsrc_cnt; i++) {
738 			rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
739 			if (rsrc_id < 0)
740 				goto exit;
741 			rsp->sa_ids[i] = rsrc_id;
742 			rsp->rsrc_cnt++;
743 		}
744 		break;
745 	}
746 
747 	rsp->rsrc_type = req->rsrc_type;
748 	rsp->dir = req->dir;
749 	rsp->mcs_id = req->mcs_id;
750 	rsp->all = req->all;
751 
752 exit:
753 	if (rsrc_id < 0)
754 		dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc);
755 	mutex_unlock(&rvu->rsrc_lock);
756 	return 0;
757 }
758 
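/* Reserve a control packet classification rule of the requested type
 * (ethertype, DA, DA range, combo or MAC) for the PF/VF.
 */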
759 int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu,
760 					     struct mcs_alloc_ctrl_pkt_rule_req *req,
761 					     struct mcs_alloc_ctrl_pkt_rule_rsp *rsp)
762 {
763 	u16 pcifunc = req->hdr.pcifunc;
764 	struct mcs_rsrc_map *map;
765 	struct mcs *mcs;
766 	int rsrc_id;
767 	u16 offset;
768 
769 	if (req->mcs_id >= rvu->mcs_blk_cnt)
770 		return MCS_AF_ERR_INVALID_MCSID;
771 
772 	mcs = mcs_get_pdata(req->mcs_id);
773 
774 	map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
775 
776 	mutex_lock(&rvu->rsrc_lock);
777 
778 	switch (req->rule_type) {
779 	case MCS_CTRL_PKT_RULE_TYPE_ETH:
780 		offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET;
781 		break;
782 	case MCS_CTRL_PKT_RULE_TYPE_DA:
783 		offset = MCS_CTRLPKT_DA_RULE_OFFSET;
784 		break;
785 	case MCS_CTRL_PKT_RULE_TYPE_RANGE:
786 		offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
787 		break;
788 	case MCS_CTRL_PKT_RULE_TYPE_COMBO:
789 		offset = MCS_CTRLPKT_COMBO_RULE_OFFSET;
790 		break;
791 	case MCS_CTRL_PKT_RULE_TYPE_MAC:
792 		offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
793 		break;
794 	}
795 
796 	rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset,
797 					pcifunc);
798 	if (rsrc_id < 0)
799 		goto exit;
800 
801 	rsp->rule_idx = rsrc_id;
802 	rsp->rule_type = req->rule_type;
803 	rsp->dir = req->dir;
804 	rsp->mcs_id = req->mcs_id;
805 
806 	mutex_unlock(&rvu->rsrc_lock);
807 	return 0;
808 exit:
809 	if (rsrc_id < 0)
810 		dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n",
811 			pcifunc);
812 	mutex_unlock(&rvu->rsrc_lock);
813 	return rsrc_id;
814 }
815 
816 int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu,
817 					    struct mcs_free_ctrl_pkt_rule_req *req,
818 					    struct msg_rsp *rsp)
819 {
820 	struct mcs *mcs;
821 	int rc;
822 
823 	if (req->mcs_id >= rvu->mcs_blk_cnt)
824 		return MCS_AF_ERR_INVALID_MCSID;
825 
826 	mcs = mcs_get_pdata(req->mcs_id);
827 
828 	mutex_lock(&rvu->rsrc_lock);
829 
830 	rc = mcs_free_ctrlpktrule(mcs, req);
831 
832 	mutex_unlock(&rvu->rsrc_lock);
833 
834 	return rc;
835 }
836 
837 int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu,
838 					     struct mcs_ctrl_pkt_rule_write_req *req,
839 					     struct msg_rsp *rsp)
840 {
841 	struct mcs *mcs;
842 	int rc;
843 
844 	if (req->mcs_id >= rvu->mcs_blk_cnt)
845 		return MCS_AF_ERR_INVALID_MCSID;
846 
847 	mcs = mcs_get_pdata(req->mcs_id);
848 
849 	rc = mcs_ctrlpktrule_write(mcs, req);
850 
851 	return rc;
852 }
853 
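/* Build the active LMAC bitmap for MCS0 from the LMACs that are valid on
 * the underlying CGX/RPM blocks.
 */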
854 static void rvu_mcs_set_lmac_bmap(struct rvu *rvu)
855 {
856 	struct mcs *mcs = mcs_get_pdata(0);
857 	unsigned long lmac_bmap;
858 	int cgx, lmac, port;
859 
860 	for (port = 0; port < mcs->hw->lmac_cnt; port++) {
861 		cgx = port / rvu->hw->lmac_per_cgx;
862 		lmac = port % rvu->hw->lmac_per_cgx;
863 		if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac))
864 			continue;
865 		set_bit(port, &lmac_bmap);
866 	}
867 	mcs->hw->lmac_bmap = lmac_bmap;
868 }
869 
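/* One-time MCS setup: map LMAC channels and the active LMAC bitmap on
 * CN10K-B, install the default TCAM bypass entry and put LMACs in
 * operational mode on every MCS block, allocate per-PF/VF state and
 * create the interrupt handling workqueue.
 */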
870 int rvu_mcs_init(struct rvu *rvu)
871 {
872 	struct rvu_hwinfo *hw = rvu->hw;
873 	int lmac, err = 0, mcs_id;
874 	struct mcs *mcs;
875 
876 	rvu->mcs_blk_cnt = mcs_get_blkcnt();
877 
878 	if (!rvu->mcs_blk_cnt)
879 		return 0;
880 
881 	/* Needed only for CN10K-B */
882 	if (rvu->mcs_blk_cnt == 1) {
883 		err = mcs_set_lmac_channels(0, hw->cgx_chan_base);
884 		if (err)
885 			return err;
886 		/* Set active lmacs */
887 		rvu_mcs_set_lmac_bmap(rvu);
888 	}
889 
890 	/* Install default tcam bypass entry and set port to operational mode */
891 	for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
892 		mcs = mcs_get_pdata(mcs_id);
893 		mcs_install_flowid_bypass_entry(mcs);
894 		for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
895 			mcs_set_lmac_mode(mcs, lmac, 0);
896 
897 		mcs->rvu = rvu;
898 
899 		/* Allocate memory for PFVF data */
900 		mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs,
901 				       sizeof(struct mcs_pfvf), GFP_KERNEL);
902 		if (!mcs->pf)
903 			return -ENOMEM;
904 
905 		mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs,
906 				       sizeof(struct mcs_pfvf), GFP_KERNEL);
907 		if (!mcs->vf)
908 			return -ENOMEM;
909 	}
910 
911 	/* Initialize the wq for handling mcs interrupts */
912 	INIT_LIST_HEAD(&rvu->mcs_intrq_head);
913 	INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
914 	rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
915 	if (!rvu->mcs_intr_wq) {
916 		dev_err(rvu->dev, "mcs alloc workqueue failed\n");
917 		return -ENOMEM;
918 	}
919 
920 	return err;
921 }
922 
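/* Flush and destroy the MCS interrupt workqueue. */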
923 void rvu_mcs_exit(struct rvu *rvu)
924 {
925 	if (!rvu->mcs_intr_wq)
926 		return;
927 
928 	flush_workqueue(rvu->mcs_intr_wq);
929 	destroy_workqueue(rvu->mcs_intr_wq);
930 	rvu->mcs_intr_wq = NULL;
931 }
932