1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell CN10K MCS driver
3  *
4  * Copyright (C) 2022 Marvell.
5  */
6 
7 #include <linux/types.h>
8 #include <linux/device.h>
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 
12 #include "mcs.h"
13 #include "rvu.h"
14 #include "mcs_reg.h"
15 #include "lmac_common.h"
16 
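/* The M() macro below expands MBOX_UP_MCS_MESSAGES to generate an
 * otx2_mbox_alloc_msg_<name>() helper per AF-to-PF "up" message: each
 * helper allocates the request on the AF->PF up mailbox and fills in
 * the message header (signature and id).
 */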
17 #define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
18 static struct _req_type __maybe_unused					\
19 *otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
20 {									\
21 	struct _req_type *req;						\
22 									\
23 	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
24 		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
25 		sizeof(struct _rsp_type));				\
26 	if (!req)							\
27 		return NULL;						\
28 	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
29 	req->hdr.id = _id;						\
30 	return req;							\
31 }
32 
33 MBOX_UP_MCS_MESSAGES
34 #undef M
35 
36 void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena)
37 {
38 	struct mcs *mcs;
39 	u64 cfg;
40 	u8 port;
41 
42 	if (!rvu->mcs_blk_cnt)
43 		return;
44 
45 	/* When PTP is enabled, RPM adds an 8B header to all RX
46 	 * packets. The MCS PEX block needs to be configured to skip
47 	 * these 8 bytes during packet parsing.
48 	 */
49 
50 	/* CNF10K-B */
51 	if (rvu->mcs_blk_cnt > 1) {
52 		mcs = mcs_get_pdata(rpm_id);
53 		cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
54 		if (ena)
55 			cfg |= BIT_ULL(lmac_id);
56 		else
57 			cfg &= ~BIT_ULL(lmac_id);
58 		mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, cfg);
59 		return;
60 	}
61 	/* CN10KB */
62 	mcs = mcs_get_pdata(0);
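	/* With a single MCS block the port index is global across RPMs:
	 * port = rpm_id * LMACs-per-RPM + lmac_id.
	 */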
63 	port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id;
64 	cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port));
65 	if (ena)
66 		cfg |= BIT_ULL(0);
67 	else
68 		cfg &= ~BIT_ULL(0);
69 	mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port), cfg);
70 }
71 
72 int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
73 				       struct mcs_set_lmac_mode *req,
74 				       struct msg_rsp *rsp)
75 {
76 	struct mcs *mcs;
77 
78 	if (req->mcs_id >= rvu->mcs_blk_cnt)
79 		return MCS_AF_ERR_INVALID_MCSID;
80 
81 	mcs = mcs_get_pdata(req->mcs_id);
82 
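	/* Program only LMACs that are active on this MCS block */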
83 	if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
84 		mcs_set_lmac_mode(mcs, req->lmac_id, req->mode);
85 
86 	return 0;
87 }
88 
89 int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
90 {
91 	struct mcs_intrq_entry *qentry;
92 	u16 pcifunc = event->pcifunc;
93 	struct rvu *rvu = mcs->rvu;
94 	struct mcs_pfvf *pfvf;
95 
96 	/* Check if it is PF or VF */
97 	if (pcifunc & RVU_PFVF_FUNC_MASK)
98 		pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
99 	else
100 		pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)];
101 
102 	event->intr_mask &= pfvf->intr_mask;
103 
104 	/* Check whether PF/VF interrupt notification is enabled for this event */
105 	if (!(pfvf->intr_mask && event->intr_mask))
106 		return 0;
107 
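	/* Queue the event and let the workqueue deliver the mailbox
	 * notification; GFP_ATOMIC is used since this path can be
	 * called from interrupt context.
	 */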
108 	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
109 	if (!qentry)
110 		return -ENOMEM;
111 
112 	qentry->intr_event = *event;
113 	spin_lock(&rvu->mcs_intrq_lock);
114 	list_add_tail(&qentry->node, &rvu->mcs_intrq_head);
115 	spin_unlock(&rvu->mcs_intrq_lock);
116 	queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work);
117 
118 	return 0;
119 }
120 
121 static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
122 {
123 	struct mcs_intr_info *req;
124 	int pf;
125 
126 	pf = rvu_get_pf(rvu->pdev, event->pcifunc);
127 
128 	mutex_lock(&rvu->mbox_lock);
129 
130 	req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
131 	if (!req) {
132 		mutex_unlock(&rvu->mbox_lock);
133 		return -ENOMEM;
134 	}
135 
136 	req->mcs_id = event->mcs_id;
137 	req->intr_mask = event->intr_mask;
138 	req->sa_id = event->sa_id;
139 	req->hdr.pcifunc = event->pcifunc;
140 	req->lmac_id = event->lmac_id;
141 
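	/* Wait for any pending up-message to this PF to be consumed,
	 * send the notification, then wait for the PF's response.
	 */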
142 	otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
143 
144 	otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
145 
146 	otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
147 
148 	mutex_unlock(&rvu->mbox_lock);
149 
150 	return 0;
151 }
152 
153 static void mcs_intr_handler_task(struct work_struct *work)
154 {
155 	struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work);
156 	struct mcs_intrq_entry *qentry;
157 	struct mcs_intr_event *event;
158 	unsigned long flags;
159 
160 	do {
161 		spin_lock_irqsave(&rvu->mcs_intrq_lock, flags);
162 		qentry = list_first_entry_or_null(&rvu->mcs_intrq_head,
163 						  struct mcs_intrq_entry,
164 						  node);
165 		if (qentry)
166 			list_del(&qentry->node);
167 
168 		spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags);
169 		if (!qentry)
170 			break; /* nothing more to process */
171 
172 		event = &qentry->intr_event;
173 
174 		mcs_notify_pfvf(event, rvu);
175 		kfree(qentry);
176 	} while (1);
177 }
178 
179 int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
180 				  struct mcs_intr_cfg *req,
181 				  struct msg_rsp *rsp)
182 {
183 	u16 pcifunc = req->hdr.pcifunc;
184 	struct mcs_pfvf *pfvf;
185 	struct mcs *mcs;
186 
187 	if (req->mcs_id >= rvu->mcs_blk_cnt)
188 		return MCS_AF_ERR_INVALID_MCSID;
189 
190 	mcs = mcs_get_pdata(req->mcs_id);
191 
192 	/* Check if it is PF or VF */
193 	if (pcifunc & RVU_PFVF_FUNC_MASK)
194 		pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
195 	else
196 		pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)];
197 
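	/* Only the most recent requester is tracked per MCS block */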
198 	mcs->pf_map[0] = pcifunc;
199 	pfvf->intr_mask = req->intr_mask;
200 
201 	return 0;
202 }
203 
204 int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu,
205 				     struct msg_req *req,
206 				     struct mcs_hw_info *rsp)
207 {
208 	struct mcs *mcs;
209 
210 	if (!rvu->mcs_blk_cnt)
211 		return MCS_AF_ERR_NOT_MAPPED;
212 
213 	/* MCS resources are the same across all blocks */
214 	mcs = mcs_get_pdata(0);
215 	rsp->num_mcs_blks = rvu->mcs_blk_cnt;
216 	rsp->tcam_entries = mcs->hw->tcam_entries;
217 	rsp->secy_entries = mcs->hw->secy_entries;
218 	rsp->sc_entries = mcs->hw->sc_entries;
219 	rsp->sa_entries = mcs->hw->sa_entries;
220 	return 0;
221 }
222 
223 int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req,
224 				    struct msg_rsp *rsp)
225 {
226 	struct mcs *mcs;
227 
228 	if (req->mcs_id >= rvu->mcs_blk_cnt)
229 		return MCS_AF_ERR_INVALID_MCSID;
230 
231 	mcs = mcs_get_pdata(req->mcs_id);
232 
233 	mcs_reset_port(mcs, req->port_id, req->reset);
234 
235 	return 0;
236 }
237 
238 int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu,
239 				     struct mcs_clear_stats *req,
240 				     struct msg_rsp *rsp)
241 {
242 	u16 pcifunc = req->hdr.pcifunc;
243 	struct mcs *mcs;
244 
245 	if (req->mcs_id >= rvu->mcs_blk_cnt)
246 		return MCS_AF_ERR_INVALID_MCSID;
247 
248 	mcs = mcs_get_pdata(req->mcs_id);
249 
250 	mutex_lock(&mcs->stats_lock);
251 	if (req->all)
252 		mcs_clear_all_stats(mcs, pcifunc, req->dir);
253 	else
254 		mcs_clear_stats(mcs, req->type, req->id, req->dir);
255 
256 	mutex_unlock(&mcs->stats_lock);
257 	return 0;
258 }
259 
260 int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu,
261 					  struct mcs_stats_req *req,
262 					  struct mcs_flowid_stats *rsp)
263 {
264 	struct mcs *mcs;
265 
266 	if (req->mcs_id >= rvu->mcs_blk_cnt)
267 		return MCS_AF_ERR_INVALID_MCSID;
268 
269 	mcs = mcs_get_pdata(req->mcs_id);
270 
271 	/* In CNF10K-B, before reading the statistics,
272 	 * MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set
273 	 * to get accurate statistics
274 	 */
275 	if (mcs->hw->mcs_blks > 1)
276 		mcs_set_force_clk_en(mcs, true);
277 
278 	mutex_lock(&mcs->stats_lock);
279 	mcs_get_flowid_stats(mcs, rsp, req->id, req->dir);
280 	mutex_unlock(&mcs->stats_lock);
281 
282 	/* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading
283 	 * the statistics
284 	 */
285 	if (mcs->hw->mcs_blks > 1)
286 		mcs_set_force_clk_en(mcs, false);
287 
288 	return 0;
289 }
290 
291 int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu,
292 					struct mcs_stats_req *req,
293 					struct mcs_secy_stats *rsp)
294 {
	struct mcs *mcs;
295 
296 	if (req->mcs_id >= rvu->mcs_blk_cnt)
297 		return MCS_AF_ERR_INVALID_MCSID;
298 
299 	mcs = mcs_get_pdata(req->mcs_id);
300 
301 	if (mcs->hw->mcs_blks > 1)
302 		mcs_set_force_clk_en(mcs, true);
303 
304 	mutex_lock(&mcs->stats_lock);
305 
306 	if (req->dir == MCS_RX)
307 		mcs_get_rx_secy_stats(mcs, rsp, req->id);
308 	else
309 		mcs_get_tx_secy_stats(mcs, rsp, req->id);
310 
311 	mutex_unlock(&mcs->stats_lock);
312 
313 	if (mcs->hw->mcs_blks > 1)
314 		mcs_set_force_clk_en(mcs, false);
315 
316 	return 0;
317 }
318 
319 int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu,
320 				      struct mcs_stats_req *req,
321 				      struct mcs_sc_stats *rsp)
322 {
323 	struct mcs *mcs;
324 
325 	if (req->mcs_id >= rvu->mcs_blk_cnt)
326 		return MCS_AF_ERR_INVALID_MCSID;
327 
328 	mcs = mcs_get_pdata(req->mcs_id);
329 
330 	if (mcs->hw->mcs_blks > 1)
331 		mcs_set_force_clk_en(mcs, true);
332 
333 	mutex_lock(&mcs->stats_lock);
334 	mcs_get_sc_stats(mcs, rsp, req->id, req->dir);
335 	mutex_unlock(&mcs->stats_lock);
336 
337 	if (mcs->hw->mcs_blks > 1)
338 		mcs_set_force_clk_en(mcs, false);
339 
340 	return 0;
341 }
342 
343 int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu,
344 				      struct mcs_stats_req *req,
345 				      struct mcs_sa_stats *rsp)
346 {
347 	struct mcs *mcs;
348 
349 	if (req->mcs_id >= rvu->mcs_blk_cnt)
350 		return MCS_AF_ERR_INVALID_MCSID;
351 
352 	mcs = mcs_get_pdata(req->mcs_id);
353 
354 	if (mcs->hw->mcs_blks > 1)
355 		mcs_set_force_clk_en(mcs, true);
356 
357 	mutex_lock(&mcs->stats_lock);
358 	mcs_get_sa_stats(mcs, rsp, req->id, req->dir);
359 	mutex_unlock(&mcs->stats_lock);
360 
361 	if (mcs->hw->mcs_blks > 1)
362 		mcs_set_force_clk_en(mcs, false);
363 
364 	return 0;
365 }
366 
367 int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu,
368 					struct mcs_stats_req *req,
369 					struct mcs_port_stats *rsp)
370 {
371 	struct mcs *mcs;
372 
373 	if (req->mcs_id >= rvu->mcs_blk_cnt)
374 		return MCS_AF_ERR_INVALID_MCSID;
375 
376 	mcs = mcs_get_pdata(req->mcs_id);
377 
378 	if (mcs->hw->mcs_blks > 1)
379 		mcs_set_force_clk_en(mcs, true);
380 
381 	mutex_lock(&mcs->stats_lock);
382 	mcs_get_port_stats(mcs, rsp, req->id, req->dir);
383 	mutex_unlock(&mcs->stats_lock);
384 
385 	if (mcs->hw->mcs_blks > 1)
386 		mcs_set_force_clk_en(mcs, false);
387 
388 	return 0;
389 }
390 
391 int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu,
392 					 struct mcs_set_active_lmac *req,
393 					 struct msg_rsp *rsp)
394 {
395 	struct mcs *mcs;
396 
397 	if (req->mcs_id >= rvu->mcs_blk_cnt)
398 		return MCS_AF_ERR_INVALID_MCSID;
399 
400 	mcs = mcs_get_pdata(req->mcs_id);
401 	if (!mcs)
402 		return MCS_AF_ERR_NOT_MAPPED;
403 
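	/* Record the active LMACs of this block and map their base channel */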
404 	mcs->hw->lmac_bmap = req->lmac_bmap;
405 	mcs_set_lmac_channels(req->mcs_id, req->chan_base);
406 	return 0;
407 }
408 
409 int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req,
410 				      struct msg_rsp *rsp)
411 {
412 	struct mcs *mcs;
413 
414 	if (req->mcs_id >= rvu->mcs_blk_cnt)
415 		return MCS_AF_ERR_INVALID_MCSID;
416 
417 	mcs = mcs_get_pdata(req->mcs_id);
418 
419 	if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
420 		return -EINVAL;
421 
422 	mcs_set_port_cfg(mcs, req);
423 
424 	return 0;
425 }
426 
427 int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req,
428 				      struct mcs_port_cfg_get_rsp *rsp)
429 {
430 	struct mcs *mcs;
431 
432 	if (req->mcs_id >= rvu->mcs_blk_cnt)
433 		return MCS_AF_ERR_INVALID_MCSID;
434 
435 	mcs = mcs_get_pdata(req->mcs_id);
436 
437 	if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
438 		return -EINVAL;
439 
440 	mcs_get_port_cfg(mcs, req, rsp);
441 
442 	return 0;
443 }
444 
445 int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req,
446 					    struct mcs_custom_tag_cfg_get_rsp *rsp)
447 {
448 	struct mcs *mcs;
449 
450 	if (req->mcs_id >= rvu->mcs_blk_cnt)
451 		return MCS_AF_ERR_INVALID_MCSID;
452 
453 	mcs = mcs_get_pdata(req->mcs_id);
454 
455 	mcs_get_custom_tag_cfg(mcs, req, rsp);
456 
457 	return 0;
458 }
459 
460 int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc)
461 {
462 	struct mcs *mcs;
463 	int mcs_id;
464 
465 	/* CNF10K-B: mcs0-6 are mapped to RPM2-8 */
466 	if (rvu->mcs_blk_cnt > 1) {
467 		for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
468 			mcs = mcs_get_pdata(mcs_id);
469 			mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
470 			mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
471 		}
472 	} else {
473 		/* CN10K-B has only one mcs block */
474 		mcs = mcs_get_pdata(0);
475 		mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
476 		mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
477 	}
478 	return 0;
479 }
480 
481 int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu,
482 					  struct mcs_flowid_ena_dis_entry *req,
483 					  struct msg_rsp *rsp)
484 {
485 	struct mcs *mcs;
486 
487 	if (req->mcs_id >= rvu->mcs_blk_cnt)
488 		return MCS_AF_ERR_INVALID_MCSID;
489 
490 	mcs = mcs_get_pdata(req->mcs_id);
491 	mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena);
492 	return 0;
493 }
494 
495 int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu,
496 					struct mcs_pn_table_write_req *req,
497 					struct msg_rsp *rsp)
498 {
499 	struct mcs *mcs;
500 
501 	if (req->mcs_id >= rvu->mcs_blk_cnt)
502 		return MCS_AF_ERR_INVALID_MCSID;
503 
504 	mcs = mcs_get_pdata(req->mcs_id);
505 	mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir);
506 	return 0;
507 }
508 
509 int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu,
510 					  struct mcs_set_pn_threshold *req,
511 					  struct msg_rsp *rsp)
512 {
513 	struct mcs *mcs;
514 
515 	if (req->mcs_id >= rvu->mcs_blk_cnt)
516 		return MCS_AF_ERR_INVALID_MCSID;
517 
518 	mcs = mcs_get_pdata(req->mcs_id);
519 
520 	mcs_pn_threshold_set(mcs, req);
521 
522 	return 0;
523 }
524 
525 int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu,
526 					    struct mcs_rx_sc_sa_map *req,
527 					    struct msg_rsp *rsp)
528 {
529 	struct mcs *mcs;
530 
531 	if (req->mcs_id >= rvu->mcs_blk_cnt)
532 		return MCS_AF_ERR_INVALID_MCSID;
533 
534 	mcs = mcs_get_pdata(req->mcs_id);
535 	mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req);
536 	return 0;
537 }
538 
539 int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu,
540 					    struct mcs_tx_sc_sa_map *req,
541 					    struct msg_rsp *rsp)
542 {
543 	struct mcs *mcs;
544 
545 	if (req->mcs_id >= rvu->mcs_blk_cnt)
546 		return MCS_AF_ERR_INVALID_MCSID;
547 
548 	mcs = mcs_get_pdata(req->mcs_id);
549 	mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req);
550 	mcs->tx_sa_active[req->sc_id] = req->tx_sa_active;
551 
552 	return 0;
553 }
554 
555 int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu,
556 				       struct mcs_sa_plcy_write_req *req,
557 				       struct msg_rsp *rsp)
558 {
559 	struct mcs *mcs;
560 	int i;
561 
562 	if (req->mcs_id >= rvu->mcs_blk_cnt)
563 		return MCS_AF_ERR_INVALID_MCSID;
564 
565 	mcs = mcs_get_pdata(req->mcs_id);
566 
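	/* Write each requested SA policy to hardware at its SA index */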
567 	for (i = 0; i < req->sa_cnt; i++)
568 		mcs_sa_plcy_write(mcs, &req->plcy[i][0],
569 				  req->sa_index[i], req->dir);
570 	return 0;
571 }
572 
573 int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu,
574 					 struct mcs_rx_sc_cam_write_req *req,
575 					 struct msg_rsp *rsp)
576 {
577 	struct mcs *mcs;
578 
579 	if (req->mcs_id >= rvu->mcs_blk_cnt)
580 		return MCS_AF_ERR_INVALID_MCSID;
581 
582 	mcs = mcs_get_pdata(req->mcs_id);
583 	mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id);
584 	return 0;
585 }
586 
587 int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu,
588 					 struct mcs_secy_plcy_write_req *req,
589 					 struct msg_rsp *rsp)
590 {
	struct mcs *mcs;
591 
592 	if (req->mcs_id >= rvu->mcs_blk_cnt)
593 		return MCS_AF_ERR_INVALID_MCSID;
594 
595 	mcs = mcs_get_pdata(req->mcs_id);
596 
597 	mcs_secy_plcy_write(mcs, req->plcy,
598 			    req->secy_id, req->dir);
599 	return 0;
600 }
601 
602 int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu,
603 					    struct mcs_flowid_entry_write_req *req,
604 					    struct msg_rsp *rsp)
605 {
606 	struct secy_mem_map map;
607 	struct mcs *mcs;
608 
609 	if (req->mcs_id >= rvu->mcs_blk_cnt)
610 		return MCS_AF_ERR_INVALID_MCSID;
611 
612 	mcs = mcs_get_pdata(req->mcs_id);
613 
614 	/* TODO validate the flowid */
615 	mcs_flowid_entry_write(mcs, req->data, req->mask,
616 			       req->flow_id, req->dir);
617 	map.secy = req->secy_id;
618 	map.sc = req->sc_id;
619 	map.ctrl_pkt = req->ctrl_pkt;
620 	map.flow_id = req->flow_id;
621 	map.sci = req->sci;
622 	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir);
623 	if (req->ena)
624 		mcs_ena_dis_flowid_entry(mcs, req->flow_id,
625 					 req->dir, true);
626 	return 0;
627 }
628 
629 int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
630 					struct mcs_free_rsrc_req *req,
631 					struct msg_rsp *rsp)
632 {
633 	u16 pcifunc = req->hdr.pcifunc;
634 	struct mcs_rsrc_map *map;
635 	struct mcs *mcs;
636 	int rc = 0;
637 
638 	if (req->mcs_id >= rvu->mcs_blk_cnt)
639 		return MCS_AF_ERR_INVALID_MCSID;
640 
641 	mcs = mcs_get_pdata(req->mcs_id);
642 
643 	if (req->dir == MCS_RX)
644 		map = &mcs->rx;
645 	else
646 		map = &mcs->tx;
647 
648 	mutex_lock(&rvu->rsrc_lock);
649 	/* Free all the cam resources mapped to PF/VF */
650 	if (req->all) {
651 		rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc);
652 		goto exit;
653 	}
654 
655 	switch (req->rsrc_type) {
656 	case MCS_RSRC_TYPE_FLOWID:
657 		rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc);
658 		mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false);
659 		break;
660 	case MCS_RSRC_TYPE_SECY:
661 		rc =  mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc);
662 		mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir);
663 		break;
664 	case MCS_RSRC_TYPE_SC:
665 		rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc);
666 		/* Disable SC CAM only on RX side */
667 		if (req->dir == MCS_RX)
668 			mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false);
669 		break;
670 	case MCS_RSRC_TYPE_SA:
671 		rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc);
672 		break;
673 	}
674 exit:
675 	mutex_unlock(&rvu->rsrc_lock);
676 	return rc;
677 }
678 
679 int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu,
680 					 struct mcs_alloc_rsrc_req *req,
681 					 struct mcs_alloc_rsrc_rsp *rsp)
682 {
683 	u16 pcifunc = req->hdr.pcifunc;
684 	struct mcs_rsrc_map *map;
685 	struct mcs *mcs;
686 	int rsrc_id, i;
687 
688 	if (req->mcs_id >= rvu->mcs_blk_cnt)
689 		return MCS_AF_ERR_INVALID_MCSID;
690 
691 	mcs = mcs_get_pdata(req->mcs_id);
692 
693 	if (req->dir == MCS_RX)
694 		map = &mcs->rx;
695 	else
696 		map = &mcs->tx;
697 
698 	mutex_lock(&rvu->rsrc_lock);
699 
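	/* 'all' allocates one flow id, one SecY, one SC and a pair of SAs */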
700 	if (req->all) {
701 		rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0],
702 					     &rsp->secy_ids[0],
703 					     &rsp->sc_ids[0],
704 					     &rsp->sa_ids[0],
705 					     &rsp->sa_ids[1],
706 					     pcifunc, req->dir);
707 		goto exit;
708 	}
709 
710 	switch (req->rsrc_type) {
711 	case MCS_RSRC_TYPE_FLOWID:
712 		for (i = 0; i < req->rsrc_cnt; i++) {
713 			rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
714 			if (rsrc_id < 0)
715 				goto exit;
716 			rsp->flow_ids[i] = rsrc_id;
717 			rsp->rsrc_cnt++;
718 		}
719 		break;
720 	case MCS_RSRC_TYPE_SECY:
721 		for (i = 0; i < req->rsrc_cnt; i++) {
722 			rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
723 			if (rsrc_id < 0)
724 				goto exit;
725 			rsp->secy_ids[i] = rsrc_id;
726 			rsp->rsrc_cnt++;
727 		}
728 		break;
729 	case MCS_RSRC_TYPE_SC:
730 		for (i = 0; i < req->rsrc_cnt; i++) {
731 			rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
732 			if (rsrc_id < 0)
733 				goto exit;
734 			rsp->sc_ids[i] = rsrc_id;
735 			rsp->rsrc_cnt++;
736 		}
737 		break;
738 	case MCS_RSRC_TYPE_SA:
739 		for (i = 0; i < req->rsrc_cnt; i++) {
740 			rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
741 			if (rsrc_id < 0)
742 				goto exit;
743 			rsp->sa_ids[i] = rsrc_id;
744 			rsp->rsrc_cnt++;
745 		}
746 		break;
747 	}
748 
749 	rsp->rsrc_type = req->rsrc_type;
750 	rsp->dir = req->dir;
751 	rsp->mcs_id = req->mcs_id;
752 	rsp->all = req->all;
753 
754 exit:
755 	if (rsrc_id < 0)
756 		dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc);
757 	mutex_unlock(&rvu->rsrc_lock);
758 	return 0;
759 }
760 
761 int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu,
762 					     struct mcs_alloc_ctrl_pkt_rule_req *req,
763 					     struct mcs_alloc_ctrl_pkt_rule_rsp *rsp)
764 {
765 	u16 pcifunc = req->hdr.pcifunc;
766 	struct mcs_rsrc_map *map;
767 	struct mcs *mcs;
768 	int rsrc_id;
769 	u16 offset;
770 
771 	if (req->mcs_id >= rvu->mcs_blk_cnt)
772 		return MCS_AF_ERR_INVALID_MCSID;
773 
774 	mcs = mcs_get_pdata(req->mcs_id);
775 
776 	map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
777 
778 	mutex_lock(&rvu->rsrc_lock);
779 
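	/* Each rule type owns a region of the ctrl pkt rule bitmap;
	 * pick the base offset for the requested type.
	 */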
780 	switch (req->rule_type) {
781 	case MCS_CTRL_PKT_RULE_TYPE_ETH:
782 		offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET;
783 		break;
784 	case MCS_CTRL_PKT_RULE_TYPE_DA:
785 		offset = MCS_CTRLPKT_DA_RULE_OFFSET;
786 		break;
787 	case MCS_CTRL_PKT_RULE_TYPE_RANGE:
788 		offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
789 		break;
790 	case MCS_CTRL_PKT_RULE_TYPE_COMBO:
791 		offset = MCS_CTRLPKT_COMBO_RULE_OFFSET;
792 		break;
793 	case MCS_CTRL_PKT_RULE_TYPE_MAC:
794 		offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
795 		break;
796 	}
797 
798 	rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset,
799 					pcifunc);
800 	if (rsrc_id < 0)
801 		goto exit;
802 
803 	rsp->rule_idx = rsrc_id;
804 	rsp->rule_type = req->rule_type;
805 	rsp->dir = req->dir;
806 	rsp->mcs_id = req->mcs_id;
807 
808 	mutex_unlock(&rvu->rsrc_lock);
809 	return 0;
810 exit:
811 	if (rsrc_id < 0)
812 		dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n",
813 			pcifunc);
814 	mutex_unlock(&rvu->rsrc_lock);
815 	return rsrc_id;
816 }
817 
818 int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu,
819 					    struct mcs_free_ctrl_pkt_rule_req *req,
820 					    struct msg_rsp *rsp)
821 {
822 	struct mcs *mcs;
823 	int rc;
824 
825 	if (req->mcs_id >= rvu->mcs_blk_cnt)
826 		return MCS_AF_ERR_INVALID_MCSID;
827 
828 	mcs = mcs_get_pdata(req->mcs_id);
829 
830 	mutex_lock(&rvu->rsrc_lock);
831 
832 	rc = mcs_free_ctrlpktrule(mcs, req);
833 
834 	mutex_unlock(&rvu->rsrc_lock);
835 
836 	return rc;
837 }
838 
839 int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu,
840 					     struct mcs_ctrl_pkt_rule_write_req *req,
841 					     struct msg_rsp *rsp)
842 {
843 	struct mcs *mcs;
844 	int rc;
845 
846 	if (req->mcs_id >= rvu->mcs_blk_cnt)
847 		return MCS_AF_ERR_INVALID_MCSID;
848 
849 	mcs = mcs_get_pdata(req->mcs_id);
850 
851 	rc = mcs_ctrlpktrule_write(mcs, req);
852 
853 	return rc;
854 }
855 
856 static void rvu_mcs_set_lmac_bmap(struct rvu *rvu)
857 {
858 	struct mcs *mcs = mcs_get_pdata(0);
859 	unsigned long lmac_bmap = 0;
860 	int cgx, lmac, port;
861 
862 	for (port = 0; port < mcs->hw->lmac_cnt; port++) {
863 		cgx = port / rvu->hw->lmac_per_cgx;
864 		lmac = port % rvu->hw->lmac_per_cgx;
865 		if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac))
866 			continue;
867 		set_bit(port, &lmac_bmap);
868 	}
869 	mcs->hw->lmac_bmap = lmac_bmap;
870 }
871 
872 int rvu_mcs_init(struct rvu *rvu)
873 {
874 	struct rvu_hwinfo *hw = rvu->hw;
875 	int lmac, err = 0, mcs_id;
876 	struct mcs *mcs;
877 
878 	rvu->mcs_blk_cnt = mcs_get_blkcnt();
879 
880 	if (!rvu->mcs_blk_cnt)
881 		return 0;
882 
883 	/* Needed only for CN10K-B */
884 	if (rvu->mcs_blk_cnt == 1) {
885 		err = mcs_set_lmac_channels(0, hw->cgx_chan_base);
886 		if (err)
887 			return err;
888 		/* Set active lmacs */
889 		rvu_mcs_set_lmac_bmap(rvu);
890 	}
891 
892 	/* Install the default TCAM bypass entry and set ports to operational mode */
893 	for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
894 		mcs = mcs_get_pdata(mcs_id);
895 		mcs_install_flowid_bypass_entry(mcs);
896 		for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
897 			mcs_set_lmac_mode(mcs, lmac, 0);
898 
899 		mcs->rvu = rvu;
900 
901 		/* Allocate memory for PF/VF data */
902 		mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs,
903 				       sizeof(struct mcs_pfvf), GFP_KERNEL);
904 		if (!mcs->pf)
905 			return -ENOMEM;
906 
907 		mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs,
908 				       sizeof(struct mcs_pfvf), GFP_KERNEL);
909 		if (!mcs->vf)
910 			return -ENOMEM;
911 	}
912 
913 	/* Initialize the wq for handling mcs interrupts */
914 	INIT_LIST_HEAD(&rvu->mcs_intrq_head);
915 	INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
916 	rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
917 	if (!rvu->mcs_intr_wq) {
918 		dev_err(rvu->dev, "mcs alloc workqueue failed\n");
919 		return -ENOMEM;
920 	}
921 
922 	return err;
923 }
924 
925 void rvu_mcs_exit(struct rvu *rvu)
926 {
927 	if (!rvu->mcs_intr_wq)
928 		return;
929 
930 	destroy_workqueue(rvu->mcs_intr_wq);
931 	rvu->mcs_intr_wq = NULL;
932 }
933