xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c (revision fcee7d82f27d6a8b1ddc5bbefda59b4e441e9bc0)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2026 Marvell.
5  *
6  */
7 #include <linux/xarray.h>
8 #include <linux/bitfield.h>
9 
10 #include "rvu.h"
11 #include "npc.h"
12 #include "npc_profile.h"
13 #include "rvu_npc_hash.h"
14 #include "rvu_npc.h"
15 #include "cn20k/npc.h"
16 #include "cn20k/reg.h"
17 #include "rvu_npc_fs.h"
18 
/* Driver-wide CN20K NPC private state: bank geometry and the
 * idx <-> vidx translation xarrays live here (exposed via npc_priv_get()).
 */
static struct npc_priv_t npc_priv = {
	.num_banks = MAX_NUM_BANKS,
};
22 
/* Human-readable names for the MCAM key-width types, indexed by the
 * NPC_MCAM_KEY_* enum (used in log/debug output).
 */
static const char *npc_kw_name[NPC_MCAM_KEY_MAX] = {
	[NPC_MCAM_KEY_DYN] = "DYNAMIC",
	[NPC_MCAM_KEY_X2] = "X2",
	[NPC_MCAM_KEY_X4] = "X4",
};
28 
/* Human-readable names for the per-pcifunc default MCAM rules,
 * indexed by the NPC_DFT_RULE_*_ID enum.
 */
static const char *npc_dft_rule_name[NPC_DFT_RULE_MAX_ID] = {
	[NPC_DFT_RULE_PROMISC_ID] = "Promisc",
	[NPC_DFT_RULE_MCAST_ID] = "Mcast",
	[NPC_DFT_RULE_BCAST_ID] = "Bcast",
	[NPC_DFT_RULE_UCAST_ID] = "Ucast",
};
35 
/* Build a KEX extractor config word: byte count minus one (<<16),
 * header offset (<<8), enable bit (<<7) and a 6-bit key offset.
 */
#define KEX_EXTR_CFG(bytesm1, hdr_ofs, ena, key_ofs)		\
		     (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
		     ((key_ofs) & 0x3F))

/* Default-rule id encoding: pcifunc in bits [15:0], rule id in [31:16]. */
#define NPC_DFT_RULE_ID_MK(pcifunc, id) \
	((pcifunc) | FIELD_PREP(GENMASK_ULL(31, 16), id))

/* Extract the pcifunc half of a default-rule id. */
#define NPC_DFT_RULE_ID_2_PCIFUNC(rid) \
	FIELD_GET(GENMASK_ULL(15, 0), rid)

/* Extract the rule-id half of a default-rule id. */
#define NPC_DFT_RULE_ID_2_ID(rid) \
	FIELD_GET(GENMASK_ULL(31, 16), rid)

/* MCAM priority used for the default (promisc/mcast/bcast/ucast) rules. */
#define NPC_DFT_RULE_PRIO 127
50 
/* Name of the built-in MKEX profile, used when no override is requested. */
static const char cn20k_def_pfl_name[] = "default";
52 
/* Built-in MKEX (MCAM key extraction) profile, used when the firmware
 * database does not provide a matching one.  keyx_cfg selects the key
 * format and parse-nibble set per interface; intf_extr_lid picks the
 * layer id each extractor looks at, and intf_extr_lt gives, per
 * extractor and ltype, which header bytes land where in the MCAM key
 * (see KEX_EXTR_CFG()).
 */
static struct npc_mcam_kex_extr npc_mkex_extr_default = {
	.mkex_sign = MKEX_CN20K_SIGN,
	.name = "default",
	.kpu_version = NPC_KPU_PROFILE_VER,
	.keyx_cfg = {
		/* nibble: LA..LE (ltype only) + Error code + Channel */
		[NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_DYN << 32) |
			NPC_PARSE_NIBBLE_INTF_RX |
				 NPC_CN20K_PARSE_NIBBLE_ERRCODE,

		/* nibble: LA..LE (ltype only) */
		[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) |
			NPC_CN20K_PARSE_NIBBLE_INTF_TX,
	},
	.intf_extr_lid = {
	/* Default RX MCAM KEX profile */
	[NIX_INTF_RX] = { NPC_LID_LA, NPC_LID_LA, NPC_LID_LB, NPC_LID_LB,
			  NPC_LID_LC, NPC_LID_LC, NPC_LID_LD },
	[NIX_INTF_TX] = { NPC_LID_LA, NPC_LID_LA, NPC_LID_LB, NPC_LID_LB,
			  NPC_LID_LC, NPC_LID_LD },
	},
	.intf_extr_lt = {
	/* Default RX MCAM KEX profile */
	[NIX_INTF_RX] = {
		[0] = {
			/* Layer A: Ethernet: */
			[NPC_LT_LA_ETHER] =
				/* DMAC: 6 bytes, KW1[63:15] */
				KEX_EXTR_CFG(0x05, 0x0, 0x1,
					     NPC_KEXOF_DMAC + 1),
			[NPC_LT_LA_CPT_HDR] =
				/* DMAC: 6 bytes, KW1[63:15] */
				KEX_EXTR_CFG(0x05, 0x0, 0x1,
					     NPC_KEXOF_DMAC + 1),
		},
		[1] = {
			/* Layer A: Ethernet: */
			[NPC_LT_LA_ETHER] =
				/* Ethertype: 2 bytes, KW0[63:48] */
				KEX_EXTR_CFG(0x01, 0xc, 0x1, 0x6),
			[NPC_LT_LA_CPT_HDR] =
				/* Ethertype: 2 bytes, KW0[63:48] */
				KEX_EXTR_CFG(0x01, 0xc, 0x1, 0x6),
		},
		[2] = {
			/* Layer B: Single VLAN (CTAG) */
			[NPC_LT_LB_CTAG] =
				/* CTAG VLAN: 2 bytes, KW1[15:0] */
				KEX_EXTR_CFG(0x01, 0x2, 0x1, 0x8),
			/* Layer B: Stacked VLAN (STAG|QinQ) */
			[NPC_LT_LB_STAG_QINQ] =
				/* Outer VLAN: 2 bytes, KW1[15:0] */
				KEX_EXTR_CFG(0x01, 0x2, 0x1, 0x8),
			[NPC_LT_LB_FDSA] =
				/* SWITCH PORT: 1 byte, KW1[7:0] */
				KEX_EXTR_CFG(0x0, 0x1, 0x1, 0x8),
		},
		[3] = {
			[NPC_LT_LB_CTAG] =
				/* Ethertype: 2 bytes, KW0[63:48] */
				KEX_EXTR_CFG(0x01, 0x4, 0x1, 0x6),
			[NPC_LT_LB_STAG_QINQ] =
				/* Ethertype: 2 bytes, KW0[63:48] */
				KEX_EXTR_CFG(0x01, 0x8, 0x1, 0x6),
			[NPC_LT_LB_FDSA] =
				/* Ethertype: 2 bytes, KW0[63:48] */
				KEX_EXTR_CFG(0x01, 0x4, 0x1, 0x6),
		},
		[4] = {
			/* Layer C: IPv4 */
			[NPC_LT_LC_IP] =
				/* SIP+DIP: 8 bytes, KW3[7:0], KW2[63:8] */
				KEX_EXTR_CFG(0x07, 0xc, 0x1, 0x11),
			/* Layer C: IPv6 */
			[NPC_LT_LC_IP6] =
				/* Everything up to SADDR: 8 bytes, KW3[7:0],
				 * KW2[63:8]
				 */
				KEX_EXTR_CFG(0x07, 0x0, 0x1, 0x11),
		},
		[5] = {
			[NPC_LT_LC_IP] =
				/* TOS: 1 byte, KW2[7:0] */
				KEX_EXTR_CFG(0x0, 0x1, 0x1, 0x10),
		},
		[6] = {
			/* Layer D:UDP */
			[NPC_LT_LD_UDP] =
				/* SPORT+DPORT: 4 bytes, KW3[39:8] */
				KEX_EXTR_CFG(0x3, 0x0, 0x1, 0x19),
			/* Layer D:TCP */
			[NPC_LT_LD_TCP] =
				/* SPORT+DPORT: 4 bytes, KW3[39:8] */
				KEX_EXTR_CFG(0x3, 0x0, 0x1, 0x19),
		},
	},
	/* Default TX MCAM KEX profile */
	[NIX_INTF_TX] = {
		[0] = {
			/* Layer A: NIX_INST_HDR_S + Ethernet */
			/* NIX appends 8 bytes of NIX_INST_HDR_S at the
			 * start of each TX packet supplied to NPC.
			 */
			[NPC_LT_LA_IH_NIX_ETHER] =
				/* PF_FUNC: 2B , KW0 [47:32] */
				KEX_EXTR_CFG(0x01, 0x0, 0x1, 0x4),
			/* Layer A: HiGig2: */
			[NPC_LT_LA_IH_NIX_HIGIG2_ETHER] =
				/* PF_FUNC: 2B , KW0 [47:32] */
				KEX_EXTR_CFG(0x01, 0x0, 0x1, 0x4),
		},
		[1] = {
			[NPC_LT_LA_IH_NIX_ETHER] =
				/* SQ_ID 3 bytes, KW1[63:16] */
				KEX_EXTR_CFG(0x02, 0x02, 0x1, 0xa),
			[NPC_LT_LA_IH_NIX_HIGIG2_ETHER] =
				/* VID: 2 bytes, KW1[31:16] */
				KEX_EXTR_CFG(0x01, 0x10, 0x1, 0xa),
		},
		[2] = {
			/* Layer B: Single VLAN (CTAG) */
			[NPC_LT_LB_CTAG] =
				/* CTAG VLAN[2..3] KW0[63:48] */
				KEX_EXTR_CFG(0x01, 0x2, 0x1, 0x6),
			/* Layer B: Stacked VLAN (STAG|QinQ) */
			[NPC_LT_LB_STAG_QINQ] =
				/* Outer VLAN: 2 bytes, KW0[63:48] */
				KEX_EXTR_CFG(0x01, 0x2, 0x1, 0x6),
		},
		[3] = {
			[NPC_LT_LB_CTAG] =
				/* CTAG VLAN[2..3] KW1[15:0] */
				KEX_EXTR_CFG(0x01, 0x4, 0x1, 0x8),
			[NPC_LT_LB_STAG_QINQ] =
				/* Outer VLAN: 2 Bytes, KW1[15:0] */
				KEX_EXTR_CFG(0x01, 0x8, 0x1, 0x8),
		},
		[4] = {
			/* Layer C: IPv4 */
			[NPC_LT_LC_IP] =
				/* SIP+DIP: 8 bytes, KW2[63:0] */
				KEX_EXTR_CFG(0x07, 0xc, 0x1, 0x10),
			/* Layer C: IPv6 */
			[NPC_LT_LC_IP6] =
				/* Everything up to SADDR: 8 bytes, KW2[63:0] */
				KEX_EXTR_CFG(0x07, 0x0, 0x1, 0x10),
		},
		[5] = {
			/* Layer D:UDP */
			[NPC_LT_LD_UDP] =
				/* SPORT+DPORT: 4 bytes, KW3[31:0] */
				KEX_EXTR_CFG(0x3, 0x0, 0x1, 0x18),
			/* Layer D:TCP */
			[NPC_LT_LD_TCP] =
				/* SPORT+DPORT: 4 bytes, KW3[31:0] */
				KEX_EXTR_CFG(0x3, 0x0, 0x1, 0x18),
		},
	},
	},
};
213 
/* Return the built-in default MKEX extractor profile. */
struct npc_mcam_kex_extr *npc_mkex_extr_default_get(void)
{
	return &npc_mkex_extr_default;
}
218 
/* Translate a hardware MCAM index into its virtual index.  Falls back
 * to the hardware index itself when no mapping exists or the stored
 * mapping is the -1 "invalid" marker.
 */
static u16 npc_idx2vidx(u16 idx)
{
	void *entry;
	int mapped;

	entry = xa_load(&npc_priv.xa_idx2vidx_map, idx);
	if (!entry)
		return idx;

	mapped = xa_to_value(entry);
	if (mapped == -1)
		return idx;

	return mapped;
}
242 
/* True if @vidx lies in the virtual index range, which starts right
 * after the last hardware MCAM index (bank_depth * 2 entries).
 */
static bool npc_is_vidx(u16 vidx)
{
	return vidx >= npc_priv.bank_depth * 2;
}
247 
/* Translate a virtual MCAM index back to the hardware index it maps
 * to.  Falls back to @vidx itself when no mapping exists or the stored
 * mapping is the -1 "invalid" marker, mirroring npc_idx2vidx().
 */
static u16 npc_vidx2idx(u16 vidx)
{
	void *map;
	int val;

	map = xa_load(&npc_priv.xa_vidx2idx_map, vidx);
	if (!map)
		return vidx;

	val = xa_to_value(map);
	/* Compare against -1 directly: the old "unsigned long sentinel"
	 * forced a signed int vs unsigned long comparison that only
	 * worked via sign extension; this also matches npc_idx2vidx().
	 */
	if (val == -1)
		return vidx;

	return val;
}
272 
/* Resolve @idx to a hardware MCAM index.  Before init is complete, or
 * when @idx is already in the hardware range, it is returned as-is.
 */
u16 npc_cn20k_vidx2idx(u16 idx)
{
	if (npc_priv.init_done && npc_is_vidx(idx))
		return npc_vidx2idx(idx);

	return idx;
}
283 
/* Resolve @idx to a virtual MCAM index.  Before init is complete, or
 * when @idx is already in the virtual range, it is returned as-is.
 */
u16 npc_cn20k_idx2vidx(u16 idx)
{
	if (npc_priv.init_done && !npc_is_vidx(idx))
		return npc_idx2vidx(idx);

	return idx;
}
294 
/* Remove the vidx <-> mcam-index pair for @vidx from both translation
 * maps.  On success the hardware index that was mapped is optionally
 * returned via @old_midx.
 *
 * Returns 0 on success, -ESRCH when @vidx is outside the virtual range
 * or not present in the maps.  If only the reverse map erase fails,
 * the forward map entry is restored so the two maps stay consistent.
 */
static int npc_vidx_maps_del_entry(struct rvu *rvu, u16 vidx, u16 *old_midx)
{
	u16 mcam_idx;
	void *map;

	if (!npc_is_vidx(vidx)) {
		dev_err(rvu->dev,
			"%s: vidx(%u) does not map to proper mcam idx\n",
			__func__, vidx);
		return -ESRCH;
	}

	mcam_idx = npc_vidx2idx(vidx);

	map = xa_erase(&npc_priv.xa_vidx2idx_map, vidx);
	if (!map) {
		dev_err(rvu->dev,
			"%s: vidx(%u) does not map to proper mcam idx\n",
			__func__, vidx);
		return -ESRCH;
	}

	map = xa_erase(&npc_priv.xa_idx2vidx_map, mcam_idx);
	if (!map) {
		dev_err(rvu->dev,
			"%s: mcam idx(%u) for vidx(%u) is not valid\n",
			__func__, mcam_idx, vidx);
		/* Keep the maps consistent: restore the vidx2idx entry
		 * erased above (best effort, failure is only logged).
		 */
		if (xa_insert(&npc_priv.xa_vidx2idx_map, vidx,
			      xa_mk_value(mcam_idx), GFP_KERNEL))
			dev_err(rvu->dev,
				"%s: failed to roll back vidx2idx vidx=%u\n",
				__func__, vidx);
		return -ESRCH;
	}

	if (old_midx)
		*old_midx = mcam_idx;

	return 0;
}
330 
/* Re-point virtual index @vidx at hardware entry @new_midx by updating
 * both translation maps.  On any failure, previously applied steps are
 * unwound (best effort; roll-back failures are only logged) so the two
 * maps stay consistent with each other.
 *
 * Returns 0 on success or a negative errno.
 */
static int npc_vidx_maps_modify(struct rvu *rvu, u16 vidx, u16 new_midx)
{
	u16 old_midx;
	void *map;
	int rc;

	if (!npc_is_vidx(vidx)) {
		dev_err(rvu->dev,
			"%s: vidx(%u) does not map to proper mcam idx\n",
			__func__, vidx);
		return -ESRCH;
	}

	/* Step 1: drop the old vidx -> idx mapping, remembering the old
	 * hardware index for the reverse-map update below.
	 */
	map = xa_erase(&npc_priv.xa_vidx2idx_map, vidx);
	if (!map) {
		dev_err(rvu->dev,
			"%s: vidx(%u) could not be deleted from vidx2idx map\n",
			__func__, vidx);
		return -ESRCH;
	}

	old_midx = xa_to_value(map);

	/* Step 2: map vidx to the new hardware index. */
	rc = xa_insert(&npc_priv.xa_vidx2idx_map, vidx,
		       xa_mk_value(new_midx), GFP_KERNEL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: vidx(%u) cannot be added to vidx2idx map\n",
			__func__, vidx);
		goto fail1;
	}

	/* Step 3: drop the old reverse mapping.
	 * NOTE(review): the message below says "cannot be added" but
	 * this path is actually an erase failure of old_midx.
	 */
	map = xa_erase(&npc_priv.xa_idx2vidx_map, old_midx);
	if (!map) {
		dev_err(rvu->dev,
			"%s: old_midx(%u, vidx(%u)) cannot be added to idx2vidx map\n",
			__func__, old_midx, vidx);
		rc = -ESRCH;
		goto fail2;
	}

	/* Step 4: add the new reverse mapping. */
	rc = xa_insert(&npc_priv.xa_idx2vidx_map, new_midx,
		       xa_mk_value(vidx), GFP_KERNEL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: new_midx(%u, vidx(%u)) cannot be added to idx2vidx map\n",
			__func__, new_midx, vidx);
		goto fail3;
	}

	return 0;

	/* Unwind in reverse order of the steps above. */
fail3:
	/* Restore vidx at old_midx location */
	if (xa_insert(&npc_priv.xa_idx2vidx_map, old_midx,
		      xa_mk_value(vidx), GFP_KERNEL))
		dev_err(rvu->dev,
			"%s: Error to roll back idx2vidx old_midx=%u vidx=%u\n",
			__func__, old_midx, vidx);
fail2:
	/* Erase new_midx inserted at vidx */
	if (!xa_erase(&npc_priv.xa_vidx2idx_map, vidx))
		dev_err(rvu->dev,
			"%s: Failed to roll back vidx2idx vidx=%u\n",
			__func__, vidx);

fail1:
	/* Restore old_midx at vidx location */
	if (xa_insert(&npc_priv.xa_vidx2idx_map, vidx,
		      xa_mk_value(old_midx), GFP_KERNEL))
		dev_err(rvu->dev,
			"%s: Failed to roll back vidx2idx to old_midx=%u, vidx=%u\n",
			__func__, old_midx, vidx);

	return rc;
}
407 
/* Allocate a virtual index for hardware entry @mcam_idx and record the
 * pair in both translation maps.  The allocated virtual index is
 * optionally returned via @vidx.
 *
 * Returns 0 on success or a negative errno.
 */
static int npc_vidx_maps_add_entry(struct rvu *rvu, u16 mcam_idx, int pcifunc,
				   u16 *vidx)
{
	int min_id, max_id, rc;
	u32 new_vidx;

	/* Virtual indices live just above the hardware index space
	 * (maximum mcam index + 1 onwards).
	 */
	min_id = npc_priv.bank_depth * 2;
	max_id = npc_priv.bank_depth * 2 * 2 - 1;

	rc = xa_alloc(&npc_priv.xa_vidx2idx_map, &new_vidx,
		      xa_mk_value(mcam_idx),
		      XA_LIMIT(min_id, max_id), GFP_KERNEL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to add to vidx2idx map (%u)\n",
			__func__, mcam_idx);
		return rc;
	}

	rc = xa_insert(&npc_priv.xa_idx2vidx_map, mcam_idx,
		       xa_mk_value(new_vidx), GFP_KERNEL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to add to idx2vidx map (%u)\n",
			__func__, mcam_idx);
		/* Undo the forward-map allocation. */
		xa_erase(&npc_priv.xa_vidx2idx_map, new_vidx);
		return rc;
	}

	if (vidx)
		*vidx = new_vidx;

	return 0;
}
447 
/* Program one KPM CAM entry from a KPU profile record.  CAM1 carries
 * the match value and CAM0 its complement, each restricted to the
 * profile's mask bits so unmasked bit positions become don't-care
 * (0 in both words).
 */
static void npc_config_kpmcam(struct rvu *rvu, int blkaddr,
			      const struct npc_kpu_profile_cam *kpucam,
			      int kpm, int entry)
{
	struct npc_kpu_cam cam0 = {0};
	struct npc_kpu_cam cam1 = {0};

	cam1.state = kpucam->state & kpucam->state_mask;
	cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask;
	cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask;
	cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask;

	cam0.state = ~kpucam->state & kpucam->state_mask;
	cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask;
	cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask;
	cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask;

	/* Registers take the raw 64-bit image of the bitfield struct. */
	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPMX_ENTRYX_CAMX(kpm, entry, 0), *(u64 *)&cam0);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPMX_ENTRYX_CAMX(kpm, entry, 1), *(u64 *)&cam1);
}
470 
/* Translate a KPU profile action into ACTION0/ACTION1 register images
 * and program them either into a PKIND slot (@pkind == true, @entry is
 * the pkind number) or into entry @entry of KPM @kpm.
 */
static void
npc_config_kpmaction(struct rvu *rvu, int blkaddr,
		     const struct npc_kpu_profile_action *kpuaction,
		     int kpm, int entry, bool pkind)
{
	struct npc_kpm_action0 action0 = {0};
	struct npc_kpu_action1 action1 = {0};
	u64 reg;

	action1.errlev = kpuaction->errlev;
	action1.errcode = kpuaction->errcode;
	action1.dp0_offset = kpuaction->dp0_offset;
	action1.dp1_offset = kpuaction->dp1_offset;
	action1.dp2_offset = kpuaction->dp2_offset;

	if (pkind)
		reg = NPC_AF_PKINDX_ACTION1(entry);
	else
		reg = NPC_AF_KPMX_ENTRYX_ACTION1(kpm, entry);

	rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1);

	/* ACTION0 fields are narrower than the profile's; mask each
	 * value down to the register field width.
	 */
	action0.byp_count = kpuaction->bypass_count & 0x7;
	action0.capture_ena = kpuaction->cap_ena & 1;
	action0.parse_done = kpuaction->parse_done & 1;
	action0.next_state = kpuaction->next_state & 0xf;
	action0.capture_lid = kpuaction->lid & 0x7;

	/* Parser functionality will work correctly even though
	 * upper flag bits are silently discarded
	 */
	action0.capture_ltype = kpuaction->ltype & 0xf;
	action0.capture_flags = kpuaction->flags & 0xf;
	action0.ptr_advance = kpuaction->ptr_advance;

	action0.var_len_offset = kpuaction->offset;
	action0.var_len_mask = kpuaction->mask;
	action0.var_len_right = kpuaction->right & 1;
	action0.var_len_shift = kpuaction->shift & 1;

	if (pkind)
		reg = NPC_AF_PKINDX_ACTION0(entry);
	else
		reg = NPC_AF_KPMX_ENTRYX_ACTION0(kpm, entry);

	rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0);
}
518 
519 static void
npc_program_single_kpm_profile(struct rvu * rvu,int blkaddr,int kpm,int start_entry,const struct npc_kpu_profile * profile)520 npc_program_single_kpm_profile(struct rvu *rvu, int blkaddr,
521 			       int kpm, int start_entry,
522 			       const struct npc_kpu_profile *profile)
523 {
524 	int entry, num_entries, max_entries;
525 	u64 idx;
526 
527 	if (profile->cam_entries != profile->action_entries) {
528 		dev_err(rvu->dev,
529 			"kpm%d: CAM and action entries [%d != %d] not equal\n",
530 			kpm, profile->cam_entries, profile->action_entries);
531 
532 		WARN(1, "Fatal error\n");
533 		return;
534 	}
535 
536 	max_entries = rvu->hw->npc_kpu_entries / 2;
537 	entry = start_entry;
538 	/* Program CAM match entries for previous kpm extracted data */
539 	num_entries = min_t(int, profile->cam_entries, max_entries);
540 	for (idx = 0; entry < num_entries + start_entry; entry++, idx++)
541 		npc_config_kpmcam(rvu, blkaddr, &profile->cam[idx],
542 				  kpm, entry);
543 
544 	entry = start_entry;
545 	/* Program this kpm's actions */
546 	num_entries = min_t(int, profile->action_entries, max_entries);
547 	for (idx = 0; entry < num_entries + start_entry; entry++, idx++)
548 		npc_config_kpmaction(rvu, blkaddr, &profile->action[idx],
549 				     kpm, entry, false);
550 }
551 
552 static void
npc_enable_kpm_entry(struct rvu * rvu,int blkaddr,int kpm,int num_entries)553 npc_enable_kpm_entry(struct rvu *rvu, int blkaddr, int kpm, int num_entries)
554 {
555 	u64 entry_mask;
556 
557 	entry_mask = npc_enable_mask(num_entries);
558 	/* Disable first KPU_CN20K_MAX_CST_ENT entries for built-in profile */
559 	if (!rvu->kpu.custom)
560 		entry_mask |= GENMASK_ULL(KPU_CN20K_MAX_CST_ENT - 1, 0);
561 	rvu_write64(rvu, blkaddr,
562 		    NPC_AF_KPMX_ENTRY_DISX(kpm, 0), entry_mask);
563 	if (num_entries <= 64) {
564 		/* Disable all the entries in W1, W2 and W3 */
565 		rvu_write64(rvu, blkaddr,
566 			    NPC_AF_KPMX_ENTRY_DISX(kpm, 1),
567 			    npc_enable_mask(0));
568 		rvu_write64(rvu, blkaddr,
569 			    NPC_AF_KPMX_ENTRY_DISX(kpm, 2),
570 			    npc_enable_mask(0));
571 		rvu_write64(rvu, blkaddr,
572 			    NPC_AF_KPMX_ENTRY_DISX(kpm, 3),
573 			    npc_enable_mask(0));
574 		return;
575 	}
576 
577 	num_entries = num_entries - 64;
578 	entry_mask = npc_enable_mask(num_entries);
579 	rvu_write64(rvu, blkaddr,
580 		    NPC_AF_KPMX_ENTRY_DISX(kpm, 1), entry_mask);
581 	if (num_entries <= 64) {
582 		/* Disable all the entries in W2 and W3 */
583 		rvu_write64(rvu, blkaddr,
584 			    NPC_AF_KPMX_ENTRY_DISX(kpm, 2),
585 			    npc_enable_mask(0));
586 		rvu_write64(rvu, blkaddr,
587 			    NPC_AF_KPMX_ENTRY_DISX(kpm, 3),
588 			    npc_enable_mask(0));
589 		return;
590 	}
591 
592 	num_entries = num_entries - 64;
593 	entry_mask = npc_enable_mask(num_entries);
594 	rvu_write64(rvu, blkaddr,
595 		    NPC_AF_KPMX_ENTRY_DISX(kpm, 2), entry_mask);
596 	if (num_entries <= 64) {
597 		/* Disable all the entries in W3 */
598 		rvu_write64(rvu, blkaddr,
599 			    NPC_AF_KPMX_ENTRY_DISX(kpm, 3),
600 			    npc_enable_mask(0));
601 		return;
602 	}
603 
604 	num_entries = num_entries - 64;
605 	entry_mask = npc_enable_mask(num_entries);
606 	rvu_write64(rvu, blkaddr,
607 		    NPC_AF_KPMX_ENTRY_DISX(kpm, 3), entry_mask);
608 }
609 
610 #define KPU_OFFSET	8
npc_program_kpm_profile(struct rvu * rvu,int blkaddr,int num_kpms)611 static void npc_program_kpm_profile(struct rvu *rvu, int blkaddr, int num_kpms)
612 {
613 	const struct npc_kpu_profile *profile1, *profile2;
614 	int idx, total_cam_entries;
615 
616 	for (idx = 0; idx < num_kpms; idx++) {
617 		profile1 = &rvu->kpu.kpu[idx];
618 		npc_program_single_kpm_profile(rvu, blkaddr, idx, 0, profile1);
619 		profile2 = &rvu->kpu.kpu[idx + KPU_OFFSET];
620 		npc_program_single_kpm_profile(rvu, blkaddr, idx,
621 					       profile1->cam_entries,
622 					       profile2);
623 		total_cam_entries = profile1->cam_entries +
624 			profile2->cam_entries;
625 		npc_enable_kpm_entry(rvu, blkaddr, idx, total_cam_entries);
626 		rvu_write64(rvu, blkaddr, NPC_AF_KPMX_PASS2_OFFSET(idx),
627 			    profile1->cam_entries);
628 		/* Enable the KPUs associated with this KPM */
629 		rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x01);
630 		rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx + KPU_OFFSET),
631 			    0x01);
632 	}
633 }
634 
npc_cn20k_parser_profile_init(struct rvu * rvu,int blkaddr)635 void npc_cn20k_parser_profile_init(struct rvu *rvu, int blkaddr)
636 {
637 	struct rvu_hwinfo *hw = rvu->hw;
638 	int num_pkinds, idx;
639 
640 	/* Disable all KPMs and their entries */
641 	for (idx = 0; idx < hw->npc_kpms; idx++) {
642 		rvu_write64(rvu, blkaddr,
643 			    NPC_AF_KPMX_ENTRY_DISX(idx, 0), ~0ULL);
644 		rvu_write64(rvu, blkaddr,
645 			    NPC_AF_KPMX_ENTRY_DISX(idx, 1), ~0ULL);
646 		rvu_write64(rvu, blkaddr,
647 			    NPC_AF_KPMX_ENTRY_DISX(idx, 2), ~0ULL);
648 		rvu_write64(rvu, blkaddr,
649 			    NPC_AF_KPMX_ENTRY_DISX(idx, 3), ~0ULL);
650 	}
651 
652 	for (idx = 0; idx < hw->npc_kpus; idx++)
653 		rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
654 
655 	/* Load and customize KPU profile. */
656 	npc_load_kpu_profile(rvu);
657 
658 	/* Configure KPU and KPM mapping for second pass */
659 	rvu_write64(rvu, blkaddr, NPC_AF_KPM_PASS2_CFG, 0x76543210);
660 
661 	/* First program IKPU profile i.e PKIND configs.
662 	 * Check HW max count to avoid configuring junk or
663 	 * writing to unsupported CSR addresses.
664 	 */
665 	num_pkinds = rvu->kpu.pkinds;
666 	num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);
667 
668 	for (idx = 0; idx < num_pkinds; idx++)
669 		npc_config_kpmaction(rvu, blkaddr, &rvu->kpu.ikpu[idx],
670 				     0, idx, true);
671 
672 	/* Program KPM CAM and Action profiles */
673 	npc_program_kpm_profile(rvu, blkaddr, hw->npc_kpms);
674 }
675 
/* Accessor for the file-local CN20K NPC private state. */
struct npc_priv_t *npc_priv_get(void)
{
	return &npc_priv;
}
680 
/* Program the RX key-extraction configuration for interface @intf:
 * key format, per-extractor layer-id selection and per-ltype
 * extraction rules.  No-op when @intf is a TX interface.
 */
static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
				struct npc_mcam_kex_extr *mkex_extr,
				u8 intf)
{
	u8 num_extr = rvu->hw->npc_kex_extr;
	int extr, lt;

	if (is_npc_intf_tx(intf))
		return;

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
		    mkex_extr->keyx_cfg[NIX_INTF_RX]);

	/* Program EXTRACTOR */
	for (extr = 0; extr < num_extr; extr++)
		rvu_write64(rvu, blkaddr,
			    NPC_AF_INTFX_EXTRACTORX_CFG(intf, extr),
			    mkex_extr->intf_extr_lid[intf][extr]);

	/* Program EXTRACTOR_LTYPE */
	for (extr = 0; extr < num_extr; extr++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			u64 ltcfg = mkex_extr->intf_extr_lt[intf][extr][lt];

			CN20K_SET_EXTR_LT(intf, extr, lt, ltcfg);
		}
	}
}
709 
/* Program the TX key-extraction configuration for interface @intf:
 * key format, per-extractor layer-id selection and per-ltype
 * extraction rules.  No-op when @intf is an RX interface.
 */
static void npc_program_mkex_tx(struct rvu *rvu, int blkaddr,
				struct npc_mcam_kex_extr *mkex_extr,
				u8 intf)
{
	u8 num_extr = rvu->hw->npc_kex_extr;
	int extr, lt;

	if (is_npc_intf_rx(intf))
		return;

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
		    mkex_extr->keyx_cfg[NIX_INTF_TX]);

	/* Program EXTRACTOR */
	for (extr = 0; extr < num_extr; extr++)
		rvu_write64(rvu, blkaddr,
			    NPC_AF_INTFX_EXTRACTORX_CFG(intf, extr),
			    mkex_extr->intf_extr_lid[intf][extr]);

	/* Program EXTRACTOR_LTYPE */
	for (extr = 0; extr < num_extr; extr++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			u64 ltcfg = mkex_extr->intf_extr_lt[intf][extr][lt];

			CN20K_SET_EXTR_LT(intf, extr, lt, ltcfg);
		}
	}
}
738 
npc_program_mkex_profile(struct rvu * rvu,int blkaddr,struct npc_mcam_kex_extr * mkex_extr)739 static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
740 				     struct npc_mcam_kex_extr *mkex_extr)
741 {
742 	struct rvu_hwinfo *hw = rvu->hw;
743 	u8 intf;
744 
745 	for (intf = 0; intf < hw->npc_intfs; intf++) {
746 		npc_program_mkex_rx(rvu, blkaddr, mkex_extr, intf);
747 		npc_program_mkex_tx(rvu, blkaddr, mkex_extr, intf);
748 	}
749 
750 	/* Programme mkex hash profile */
751 	npc_program_mkex_hash(rvu, blkaddr);
752 }
753 
/* Select and program the MKEX profile named @mkex_profile.  The
 * firmware-database image is scanned for a matching CN20K profile
 * record; when firmware already supplied KPU data, the requested name
 * is "default", or no match is found, the previously selected profile
 * (the built-in default) is programmed instead.
 */
void npc_cn20k_load_mkex_profile(struct rvu *rvu, int blkaddr,
				 const char *mkex_profile)
{
	struct npc_mcam_kex_extr *mcam_kex_extr;
	struct device *dev = &rvu->pdev->dev;
	void __iomem *mkex_prfl_addr = NULL;
	u64 prfl_sz;
	int ret;

	/* If user not selected mkex profile */
	if (rvu->kpu_fwdata_sz ||
	    !strncmp(mkex_profile, cn20k_def_pfl_name, MKEX_NAME_LEN))
		goto program_mkex_extr;

	/* Setting up the mapping for mkex profile image */
	ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz);
	if (ret < 0)
		goto program_mkex_extr;

	mcam_kex_extr = (struct npc_mcam_kex_extr __force *)mkex_prfl_addr;

	/* Walk the concatenated profile records until the end marker or
	 * the mapped region is exhausted.
	 */
	while (((s64)prfl_sz > 0) &&
	       (mcam_kex_extr->mkex_sign != MKEX_END_SIGN)) {
		/* Compare with mkex mod_param name string */
		if (mcam_kex_extr->mkex_sign == MKEX_CN20K_SIGN &&
		    !strncmp(mcam_kex_extr->name, mkex_profile,
			     MKEX_NAME_LEN)) {
			rvu->kpu.mcam_kex_prfl.mkex_extr = mcam_kex_extr;
			goto program_mkex_extr;
		}

		mcam_kex_extr++;
		prfl_sz -= sizeof(struct npc_mcam_kex_extr);
	}
	dev_warn(dev, "Failed to load requested profile: %s\n", mkex_profile);
	rvu->kpu.mcam_kex_prfl.mkex_extr = npc_mkex_extr_default_get();

program_mkex_extr:
	dev_info(rvu->dev, "Using %s mkex profile\n",
		 rvu->kpu.mcam_kex_prfl.mkex_extr->name);
	/* Program selected mkex profile */
	npc_program_mkex_profile(rvu, blkaddr,
				 rvu->kpu.mcam_kex_prfl.mkex_extr);
	if (mkex_prfl_addr)
		iounmap(mkex_prfl_addr);
}
800 
801 int
npc_cn20k_enable_mcam_entry(struct rvu * rvu,int blkaddr,int index,bool enable)802 npc_cn20k_enable_mcam_entry(struct rvu *rvu, int blkaddr,
803 			    int index, bool enable)
804 {
805 	struct npc_mcam *mcam = &rvu->hw->mcam;
806 	int mcam_idx = index % mcam->banksize;
807 	int bank = index / mcam->banksize;
808 	u64 cfg, hw_prio;
809 	u8 kw_type;
810 
811 	if (index < 0 || index >= mcam->total_entries)
812 		return -EINVAL;
813 
814 	if (npc_mcam_idx_2_key_type(rvu, index, &kw_type))
815 		return -EINVAL;
816 
817 	if (kw_type == NPC_MCAM_KEY_X2) {
818 		cfg = rvu_read64(rvu, blkaddr,
819 				 NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(mcam_idx,
820 								   bank));
821 		hw_prio = cfg & GENMASK_ULL(30, 24);
822 		cfg = enable ? 1 : 0;
823 		cfg |= hw_prio;
824 		rvu_write64(rvu, blkaddr,
825 			    NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(mcam_idx, bank),
826 			    cfg);
827 		return 0;
828 	}
829 
830 	/* For NPC_CN20K_MCAM_KEY_X4 keys, both the banks
831 	 * need to be programmed with the same value.
832 	 */
833 	for (bank = 0; bank < mcam->banks_per_entry; bank++) {
834 		cfg = rvu_read64(rvu, blkaddr,
835 				 NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(mcam_idx,
836 								   bank));
837 		hw_prio = cfg & GENMASK_ULL(30, 24);
838 		cfg = enable ? 1 : 0;
839 		cfg |= hw_prio;
840 		rvu_write64(rvu, blkaddr,
841 			    NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(mcam_idx, bank),
842 			    cfg);
843 	}
844 
845 	return 0;
846 }
847 
/* Zero one bank of an MCAM entry: both CAM words (match and
 * complement) of the INTF and W0..W3 registers, plus the entry's
 * stats counter.
 */
static void
npc_clear_x2_entry(struct rvu *rvu, int blkaddr, int bank, int index)
{
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_INTF_EXT(index, bank, 1),
		    0);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_INTF_EXT(index, bank, 0),
		    0);

	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W0_EXT(index, bank, 1), 0);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W0_EXT(index, bank, 0), 0);

	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W1_EXT(index, bank, 1), 0);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W1_EXT(index, bank, 0), 0);

	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W2_EXT(index, bank, 1), 0);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W2_EXT(index, bank, 0), 0);

	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W3_EXT(index, bank, 1), 0);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W3_EXT(index, bank, 0), 0);

	/* Clear corresponding stats register */
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_STAT_EXT(index, bank), 0);
}
882 
883 int
npc_cn20k_clear_mcam_entry(struct rvu * rvu,int blkaddr,int mcam_idx)884 npc_cn20k_clear_mcam_entry(struct rvu *rvu, int blkaddr, int mcam_idx)
885 {
886 	struct npc_mcam *mcam = &rvu->hw->mcam;
887 	int bank = npc_get_bank(mcam, mcam_idx);
888 	u8 kw_type;
889 	int index;
890 
891 	if (npc_mcam_idx_2_key_type(rvu, mcam_idx, &kw_type))
892 		return -EINVAL;
893 
894 	index = mcam_idx & (mcam->banksize - 1);
895 
896 	if (kw_type == NPC_MCAM_KEY_X2) {
897 		npc_clear_x2_entry(rvu, blkaddr, bank, index);
898 		return 0;
899 	}
900 
901 	/* For NPC_MCAM_KEY_X4 keys, both the banks
902 	 * need to be programmed with the same value.
903 	 */
904 	for (bank = 0; bank < mcam->banks_per_entry; bank++)
905 		npc_clear_x2_entry(rvu, blkaddr, bank, index);
906 
907 	return 0;
908 }
909 
/* Split keyword @idx of @entry into the CAM0/CAM1 register pair:
 * CAM1 holds the match value and CAM0 its complement, both restricted
 * to the keyword mask so unmasked bits are don't-care (0 in both).
 *
 * The two banks of every MCAM entry are used as a single double-wide
 * entry that is compared with the search key as follows:
 *
 * NPC_AF_MCAME()_BANK(0)_CAM(0..1)_W0_EXT[MD] ->NPC_MCAM_KEY_X4_S[KW0]
 * NPC_AF_MCAME()_BANK(0)_CAM(0..1)_W1_EXT[MD] ->NPC_MCAM_KEY_X4_S[KW1]
 * NPC_AF_MCAME()_BANK(0)_CAM(0..1)_W2_EXT[MD] ->NPC_MCAM_KEY_X4_S[KW2]
 * NPC_AF_MCAME()_BANK(0)_CAM(0..1)_W3_EXT[MD] ->NPC_MCAM_KEY_X4_S[KW3]
 * NPC_AF_MCAME()_BANK(1)_CAM(0..1)_W0_EXT[MD] ->NPC_MCAM_KEY_X4_S[KW4]
 * NPC_AF_MCAME()_BANK(1)_CAM(0..1)_W1_EXT[MD] ->NPC_MCAM_KEY_X4_S[KW5]
 * NPC_AF_MCAME()_BANK(1)_CAM(0..1)_W2_EXT[MD] ->NPC_MCAM_KEY_X4_S[KW6]
 * NPC_AF_MCAME()_BANK(1)_CAM(0..1)_W3_EXT[MD] ->NPC_MCAM_KEY_X4_S[KW7]
 */
static void npc_cn20k_get_keyword(struct cn20k_mcam_entry *entry, int idx,
				  u64 *cam0, u64 *cam1)
{
	u64 mask = entry->kw_mask[idx];

	*cam1 = entry->kw[idx] & mask;
	*cam0 = ~entry->kw[idx] & mask;
}
932 
933 /*-------------------------------------------------------------------------
934  *Kex type|  mcam	|  cam1	|cam0   | req_kwtype||<----- output >	  |
935  * in     |		|	|	|	    ||		|	  |
936  * profile|  len	|	|	|	    ||len	| type    |
937  *-------------------------------------------------------------------------
938  *X2	|  256 (X2)	|  001b	|110b	| 0	    ||X2	| X2      |
939  *------------------------------------------------------------------------|
940  *X4	|  256 (X2)	|  000b	|000b	| 0	    ||X2	| DYN     |
941  *------------------------------------------------------------------------|
942  *X4	|  512 (X4)	|  010b	|101b	| 0	    ||X4	| X4      |
943  *------------------------------------------------------------------------|
944  *DYN	|  256 (X2)	|  000b	|000b	| 0	    ||X2	| DYN     |
945  *------------------------------------------------------------------------|
946  *DYN	|  512 (X4)	|  010b	|101b	| 0	    ||X4	| X4      |
947  *------------------------------------------------------------------------|
948  *X4	|  256 (X2)	|  000b	|000b	| X2	    ||DYN	| DYN     |
949  *------------------------------------------------------------------------|
950 *DYN	|  256 (X2)	|  000b	|000b	| X2	    ||DYN	| DYN     |
951  *------------------------------------------------------------------------|
952  * X2	|  512 (X4)	|  xxxb	|xxxb	| X4	    ||INVAL	| INVAL   |
953  *------------------------------------------------------------------------|
954  */
/* npc_cn20k_config_kw_x2() - program one X2 half of an MCAM entry.
 *
 * Writes the interface/key-type match bits (CAMX_INTF_EXT) and four
 * 64-bit keywords (CAMX_W0..W3_EXT, taken from @entry starting at
 * keyword @kw) into @bank of MCAM row @index.  For every register pair,
 * CAM index 1 carries the compare value and CAM index 0 the mask
 * (same CAM0/CAM1 encoding as documented in
 * npc_cn20k_config_mcam_entry()).  See the table above for how
 * @kw_type/@req_kw_type map onto the key-type CAM bits.
 */
static void npc_cn20k_config_kw_x2(struct rvu *rvu, struct npc_mcam *mcam,
				   int blkaddr, int index, u8 intf,
				   struct cn20k_mcam_entry *entry,
				   int bank, u8 kw_type, int kw,
				   u8 req_kw_type)
{
	u64 intf_ext = 0, intf_ext_mask = 0;
	u8 tx_intf_mask = ~intf & 0x3;
	u8 tx_intf = intf, kex_type;
	u8 kw_type_mask = ~kw_type;
	u64 cam0, cam1, kex_cfg;

	if (is_npc_intf_tx(intf)) {
		/* Last bit must be set and rest don't care
		 * for TX interfaces
		 */
		tx_intf_mask = 0x1;
		tx_intf = intf & tx_intf_mask;
		tx_intf_mask = ~tx_intf & tx_intf_mask;
	}

	/* Key type the interface is configured for: KEX_CFG bits 34:32 */
	kex_cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
	kex_type = (kex_cfg & GENMASK_ULL(34, 32)) >> 32;
	/* On a DYN/X4 interface an X2 entry must not match on the key-type
	 * bits, so turn them into don't-care.
	 */
	if ((kex_type == NPC_MCAM_KEY_DYN || kex_type == NPC_MCAM_KEY_X4) &&
	    kw_type == NPC_MCAM_KEY_X2) {
		kw_type = 0;
		kw_type_mask = 0;
	}

	/* Say, we need to write x2 keyword in an x4 subbank.
	 * req_kw_type will be x2, and kw_type will be x4.
	 * So in the case ignore kw bits in mcam.
	 */
	if (kw_type == NPC_MCAM_KEY_X4 && req_kw_type == NPC_MCAM_KEY_X2) {
		kw_type = 0;
		kw_type_mask = 0;
	}

	/* Key type occupies bits 18:16, interface the low bits */
	intf_ext = ((u64)kw_type << 16) | tx_intf;
	intf_ext_mask = (((u64)kw_type_mask  << 16) & GENMASK_ULL(18, 16)) |
		tx_intf_mask;
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_INTF_EXT(index, bank, 1),
		    intf_ext);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_INTF_EXT(index, bank, 0),
		    intf_ext_mask);

	/* Set the match key */
	npc_cn20k_get_keyword(entry, kw, &cam0, &cam1);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W0_EXT(index, bank, 1),
		    cam1);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W0_EXT(index, bank, 0),
		    cam0);

	npc_cn20k_get_keyword(entry, kw + 1, &cam0, &cam1);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W1_EXT(index, bank, 1),
		    cam1);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W1_EXT(index, bank, 0),
		    cam0);

	npc_cn20k_get_keyword(entry, kw + 2, &cam0, &cam1);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W2_EXT(index, bank, 1),
		    cam1);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W2_EXT(index, bank, 0),
		    cam0);

	npc_cn20k_get_keyword(entry, kw + 3, &cam0, &cam1);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W3_EXT(index, bank, 1),
		    cam1);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W3_EXT(index, bank, 0),
		    cam0);
}
1036 
/* npc_cn20k_config_kw_x4() - program an entry that spans all banks.
 *
 * Each bank of the entry holds one X2 half, i.e. four 64-bit keywords,
 * so bank N consumes keywords starting at N * 4.
 */
static void npc_cn20k_config_kw_x4(struct rvu *rvu, struct npc_mcam *mcam,
				   int blkaddr, int index, u8 intf,
				   struct cn20k_mcam_entry *entry,
				   u8 kw_type, u8 req_kw_type)
{
	int bank;

	for (bank = 0; bank < mcam->banks_per_entry; bank++)
		npc_cn20k_config_kw_x2(rvu, mcam, blkaddr, index, intf,
				       entry, bank, kw_type, bank * 4,
				       req_kw_type);
}
1050 
/* npc_cn20k_config_mcam_entry() - write a complete MCAM entry to HW.
 *
 * @index is the physical MCAM index.  The entry is disabled before the
 * update and set to @enable afterwards.  X2 entries occupy a single
 * bank; any other key type is programmed across all banks of the entry.
 *
 * Return: 0 on success, -EINVAL for a bad index/key type or when the
 * entry could not be disabled/enabled.
 */
int npc_cn20k_config_mcam_entry(struct rvu *rvu, int blkaddr, int index,
				u8 intf, struct cn20k_mcam_entry *entry,
				bool enable, u8 hw_prio, u8 req_kw_type)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int mcam_idx = index % mcam->banksize;
	int bank = index / mcam->banksize;
	/* hw_prio lands in bits 30:24 of the bank CFG; the enable bit is
	 * handled separately via npc_cn20k_enable_mcam_entry().
	 */
	u64 bank_cfg = (u64)hw_prio << 24;
	int kw = 0;
	u8 kw_type;

	if (index < 0 || index >= mcam->total_entries)
		return -EINVAL;

	if (npc_mcam_idx_2_key_type(rvu, index, &kw_type))
		return -EINVAL;

	/* Disable before mcam entry update */
	if (npc_cn20k_enable_mcam_entry(rvu, blkaddr, index, false))
		return -EINVAL;

	/* CAM1 takes the comparison value and
	 * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
	 * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
	 * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1
	 * CAM1<n> = 0 & CAM0<n> = 0 => always match i.e dontcare.
	 */
	if (kw_type == NPC_MCAM_KEY_X2) {
		/* Clear mcam entry to avoid writes being suppressed by NPC */
		npc_clear_x2_entry(rvu, blkaddr, bank, mcam_idx);
		npc_cn20k_config_kw_x2(rvu, mcam, blkaddr,
				       mcam_idx, intf, entry,
				       bank, kw_type, kw, req_kw_type);
		/* Set 'action' */
		rvu_write64(rvu, blkaddr,
			    NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(mcam_idx,
								  bank, 0),
			    entry->action);

		/* Set 'action2' for inline receive */
		rvu_write64(rvu, blkaddr,
			    NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(mcam_idx,
								  bank, 2),
			    entry->action2);

		/* Set TAG 'action' */
		rvu_write64(rvu, blkaddr,
			    NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(mcam_idx,
								  bank, 1),
			    entry->vtag_action);

		/* Set HW priority */
		rvu_write64(rvu, blkaddr,
			    NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(mcam_idx, bank),
			    bank_cfg);

	} else {
		/* Clear mcam entry to avoid writes being suppressed by NPC */
		npc_clear_x2_entry(rvu, blkaddr, 0, mcam_idx);
		npc_clear_x2_entry(rvu, blkaddr, 1, mcam_idx);

		npc_cn20k_config_kw_x4(rvu, mcam, blkaddr,
				       mcam_idx, intf, entry,
				       kw_type, req_kw_type);
		/* Actions and priority are replicated into every bank of
		 * the wide entry.
		 */
		for (bank = 0; bank < mcam->banks_per_entry; bank++) {
			/* Set 'action' */
			rvu_write64(rvu, blkaddr,
				    NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(mcam_idx,
									  bank, 0),
				    entry->action);

			/* Set TAG 'action' */
			rvu_write64(rvu, blkaddr,
				    NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(mcam_idx,
									  bank, 1),
				    entry->vtag_action);

			/* Set 'action2' for inline receive */
			rvu_write64(rvu, blkaddr,
				    NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(mcam_idx,
									  bank, 2),
				    entry->action2);

			/* Set HW priority */
			rvu_write64(rvu, blkaddr,
				    NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(mcam_idx, bank),
				    bank_cfg);
		}
	}

	/* TODO: */
	/* PF installing VF rule */
	if (npc_cn20k_enable_mcam_entry(rvu, blkaddr, index, enable))
		return -EINVAL;

	return 0;
}
1148 
/* npc_cn20k_copy_mcam_entry() - duplicate MCAM entry @src into @dest.
 *
 * Both indices are physical MCAM indices and must resolve to the same
 * key type.  All CAM match registers, the three action registers and
 * the bank configuration are copied bank by bank; X2 entries occupy a
 * single bank, so the loop stops after the first iteration for them.
 *
 * Return: 0 on success, -EINVAL on bad indices or mismatched key types.
 */
int npc_cn20k_copy_mcam_entry(struct rvu *rvu, int blkaddr, u16 src, u16 dest)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 cfg, sreg, dreg, soff, doff;
	u8 src_kwtype, dest_kwtype;
	int bank, i, sb, db;
	int dbank, sbank;

	if (src >= mcam->total_entries || dest >= mcam->total_entries)
		return -EINVAL;

	dbank = npc_get_bank(mcam, dest);
	sbank = npc_get_bank(mcam, src);

	if (npc_mcam_idx_2_key_type(rvu, src, &src_kwtype))
		return -EINVAL;

	if (npc_mcam_idx_2_key_type(rvu, dest, &dest_kwtype))
		return -EINVAL;

	if (src_kwtype != dest_kwtype)
		return -EINVAL;

	/* Reduce both indices to their per-bank offset */
	src &= (mcam->banksize - 1);
	dest &= (mcam->banksize - 1);

	/* Copy INTF's, W0's, W1's, W2's, W3s CAM0 and CAM1 configuration */
	for (bank = 0; bank < mcam->banks_per_entry; bank++) {
		sb = sbank + bank;
		sreg = NPC_AF_CN20K_MCAMEX_BANKX_CAMX_INTF_EXT(src, sb, 0);
		db = dbank + bank;
		dreg = NPC_AF_CN20K_MCAMEX_BANKX_CAMX_INTF_EXT(dest, db, 0);
		/* 10 consecutive 64-bit registers: {INTF, W0..W3} for each
		 * of CAM0/CAM1, starting at the CAM0 INTF register.
		 */
		for (i = 0; i < 10; i++) {
			cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8));
			rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg);
		}

		/* Copy action (ACTIONX 0..2: action, tag action, action2) */
		for (i = 0; i < 3; i++) {
			soff = NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(src,
								     sb, i);
			cfg = rvu_read64(rvu, blkaddr, soff);

			doff = NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(dest, db,
								     i);
			rvu_write64(rvu, blkaddr, doff, cfg);
		}

		/* Copy bank configuration */
		cfg = rvu_read64(rvu, blkaddr,
				 NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(src, sb));
		rvu_write64(rvu, blkaddr,
			    NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(dest, db), cfg);
		/* X2 entries live in a single bank */
		if (src_kwtype == NPC_MCAM_KEY_X2)
			break;
	}

	return 0;
}
1208 
/* Convert one CAM0/CAM1 register pair back into a keyword and mask.
 * CAM1 holds the match value; bits where CAM0 and CAM1 differ are the
 * "care" bits of the mask.
 */
static void npc_cn20k_fill_entryword(struct cn20k_mcam_entry *entry, int idx,
				     u64 cam0, u64 cam1)
{
	u64 key = cam1;

	entry->kw_mask[idx] = key ^ cam0;
	entry->kw[idx] = key;
}
1215 
/* npc_cn20k_read_mcam_entry() - read an MCAM entry back from hardware.
 *
 * Fills @entry (actions, keywords and masks) and reports the entry's
 * interface (@intf), enable bit (@ena) and hardware priority (@hw_prio).
 * X2 entries are read from a single bank; any other key type is read
 * from all banks of the entry, four keywords per bank.
 *
 * Return: 0 on success, -EINVAL for a bad index or key type.
 */
int npc_cn20k_read_mcam_entry(struct rvu *rvu, int blkaddr, u16 index,
			      struct cn20k_mcam_entry *entry,
			      u8 *intf, u8 *ena, u8 *hw_prio)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 cam0, cam1, bank_cfg, cfg;
	int kw = 0, bank;
	u8 kw_type;

	if (index >= mcam->total_entries)
		return -EINVAL;

	if (npc_mcam_idx_2_key_type(rvu, index, &kw_type))
		return -EINVAL;

	/* Split index into bank + per-bank offset */
	bank = npc_get_bank(mcam, index);
	index &= (mcam->banksize - 1);

	/* ACTIONX indices 0/1/2 hold action, tag action and action2 */
	cfg = rvu_read64(rvu, blkaddr,
			 NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(index, bank, 0));
	entry->action = cfg;

	cfg = rvu_read64(rvu, blkaddr,
			 NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(index, bank, 1));
	entry->vtag_action = cfg;

	cfg = rvu_read64(rvu, blkaddr,
			 NPC_AF_CN20K_MCAMEX_BANKX_ACTIONX_EXT(index, bank, 2));
	entry->action2 = cfg;

	/* Interface is in the low two bits of the CAM1 INTF register */
	cfg = rvu_read64(rvu, blkaddr,
			 NPC_AF_CN20K_MCAMEX_BANKX_CAMX_INTF_EXT(index,
								 bank, 1)) & 3;
	*intf = cfg;

	/* Bank CFG: bit 0 = enable, bits 30:24 = hardware priority */
	bank_cfg = rvu_read64(rvu, blkaddr,
			      NPC_AF_CN20K_MCAMEX_BANKX_CFG_EXT(index, bank));
	*ena = bank_cfg & 0x1;
	*hw_prio = (bank_cfg & GENMASK_ULL(30, 24)) >> 24;
	if (kw_type == NPC_MCAM_KEY_X2) {
		cam1 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W0_EXT(index,
									bank,
									1));
		cam0 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W0_EXT(index,
									bank,
									0));
		npc_cn20k_fill_entryword(entry, kw, cam0, cam1);

		cam1 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W1_EXT(index,
									bank,
									1));
		cam0 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W1_EXT(index,
									bank,
									0));
		npc_cn20k_fill_entryword(entry, kw + 1, cam0, cam1);

		cam1 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W2_EXT(index,
									bank,
									1));
		cam0 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W2_EXT(index,
									bank,
									0));
		npc_cn20k_fill_entryword(entry, kw + 2, cam0, cam1);

		cam1 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W3_EXT(index,
									bank,
									1));
		cam0 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W3_EXT(index,
									bank,
									0));
		npc_cn20k_fill_entryword(entry, kw + 3, cam0, cam1);
		return 0;
	}

	/* Wide keys: gather four keywords from each bank of the entry */
	for (bank = 0; bank < mcam->banks_per_entry; bank++, kw = kw + 4) {
		cam1 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W0_EXT(index,
									bank,
									1));
		cam0 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W0_EXT(index,
									bank,
									0));
		npc_cn20k_fill_entryword(entry, kw, cam0, cam1);

		cam1 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W1_EXT(index,
									bank,
									1));
		cam0 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W1_EXT(index,
									bank,
									0));
		npc_cn20k_fill_entryword(entry, kw + 1, cam0, cam1);

		cam1 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W2_EXT(index,
									bank,
									1));
		cam0 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W2_EXT(index,
									bank,
									0));
		npc_cn20k_fill_entryword(entry, kw + 2, cam0, cam1);

		cam1 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W3_EXT(index,
									bank,
									1));
		cam0 = rvu_read64(rvu, blkaddr,
				  NPC_AF_CN20K_MCAMEX_BANKX_CAMX_W3_EXT(index,
									bank,
									0));
		npc_cn20k_fill_entryword(entry, kw + 3, cam0, cam1);
	}

	return 0;
}
1342 
rvu_mbox_handler_npc_cn20k_mcam_write_entry(struct rvu * rvu,struct npc_cn20k_mcam_write_entry_req * req,struct msg_rsp * rsp)1343 int rvu_mbox_handler_npc_cn20k_mcam_write_entry(struct rvu *rvu,
1344 						struct npc_cn20k_mcam_write_entry_req *req,
1345 						struct msg_rsp *rsp)
1346 {
1347 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1348 	struct npc_mcam *mcam = &rvu->hw->mcam;
1349 	u16 pcifunc = req->hdr.pcifunc;
1350 	int blkaddr, rc;
1351 	u8 nix_intf;
1352 
1353 	req->entry = npc_cn20k_vidx2idx(req->entry);
1354 
1355 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1356 	if (blkaddr < 0)
1357 		return NPC_MCAM_INVALID_REQ;
1358 
1359 	mutex_lock(&mcam->lock);
1360 	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
1361 	if (rc)
1362 		goto exit;
1363 
1364 	if (!is_npc_interface_valid(rvu, req->intf)) {
1365 		rc = NPC_MCAM_INVALID_REQ;
1366 		goto exit;
1367 	}
1368 
1369 	if (is_npc_intf_tx(req->intf))
1370 		nix_intf = pfvf->nix_tx_intf;
1371 	else
1372 		nix_intf = pfvf->nix_rx_intf;
1373 
1374 	/* For AF installed rules, the nix_intf should be set to target NIX */
1375 	if (is_pffunc_af(req->hdr.pcifunc))
1376 		nix_intf = req->intf;
1377 
1378 	rc = npc_cn20k_config_mcam_entry(rvu, blkaddr, req->entry, nix_intf,
1379 					 &req->entry_data, req->enable_entry,
1380 					 req->hw_prio, req->req_kw_type);
1381 
1382 exit:
1383 	mutex_unlock(&mcam->lock);
1384 	return rc;
1385 }
1386 
rvu_mbox_handler_npc_cn20k_mcam_read_entry(struct rvu * rvu,struct npc_mcam_read_entry_req * req,struct npc_cn20k_mcam_read_entry_rsp * rsp)1387 int rvu_mbox_handler_npc_cn20k_mcam_read_entry(struct rvu *rvu,
1388 					       struct npc_mcam_read_entry_req *req,
1389 					       struct npc_cn20k_mcam_read_entry_rsp *rsp)
1390 {
1391 	struct npc_mcam *mcam = &rvu->hw->mcam;
1392 	u16 pcifunc = req->hdr.pcifunc;
1393 	int blkaddr, rc;
1394 
1395 	req->entry = npc_cn20k_vidx2idx(req->entry);
1396 
1397 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1398 	if (blkaddr < 0)
1399 		return NPC_MCAM_INVALID_REQ;
1400 
1401 	mutex_lock(&mcam->lock);
1402 	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
1403 	if (rc)
1404 		goto fail;
1405 
1406 	rc = npc_cn20k_read_mcam_entry(rvu, blkaddr, req->entry,
1407 				       &rsp->entry_data, &rsp->intf,
1408 				       &rsp->enable, &rsp->hw_prio);
1409 fail:
1410 	mutex_unlock(&mcam->lock);
1411 	return rc;
1412 }
1413 
/* Mailbox handler: allocate a single MCAM entry and write
 * @req->entry_data into it.  If programming the entry fails, the newly
 * allocated entry is freed again.  On success the (virtual) index of
 * the allocated entry is returned in @rsp->entry.
 */
int rvu_mbox_handler_npc_cn20k_mcam_alloc_and_write_entry(struct rvu *rvu,
							  struct npc_cn20k_mcam_alloc_and_write_entry_req *req,
							  struct npc_mcam_alloc_and_write_entry_rsp *rsp)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct npc_mcam_free_entry_req free_req = { 0 };
	/* NOTE(review): entry_req is not zero-initialized; only the fields
	 * set below are defined — confirm the alloc handler reads no others.
	 */
	struct npc_mcam_alloc_entry_req entry_req;
	struct npc_mcam_alloc_entry_rsp entry_rsp;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 entry = NPC_MCAM_ENTRY_INVALID;
	struct msg_rsp free_rsp;
	int blkaddr, rc, err;
	u8 nix_intf;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	if (!is_npc_interface_valid(rvu, req->intf))
		return NPC_MCAM_INVALID_REQ;

	/* Try to allocate a MCAM entry */
	entry_req.hdr.pcifunc = req->hdr.pcifunc;
	entry_req.contig = true;
	entry_req.ref_prio = req->ref_prio;
	entry_req.ref_entry = req->ref_entry;
	entry_req.count = 1;
	entry_req.virt = req->virt;

	rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu,
						   &entry_req, &entry_rsp);
	if (rc)
		return rc;

	if (!entry_rsp.count)
		return NPC_MCAM_ALLOC_FAILED;

	/* entry_req.count is 1, so single entry is allocated */
	entry = npc_cn20k_vidx2idx(entry_rsp.entry);

	mutex_lock(&mcam->lock);

	if (is_npc_intf_tx(req->intf))
		nix_intf = pfvf->nix_tx_intf;
	else
		nix_intf = pfvf->nix_rx_intf;

	rc = npc_cn20k_config_mcam_entry(rvu, blkaddr, entry, nix_intf,
					 &req->entry_data, req->enable_entry,
					 req->hw_prio, req->req_kw_type);

	mutex_unlock(&mcam->lock);

	/* Roll back the allocation if the entry could not be programmed */
	if (rc) {
		free_req.hdr.pcifunc = req->hdr.pcifunc;
		free_req.entry = entry_rsp.entry;
		err = rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &free_rsp);
		if (err)
			dev_err(rvu->dev,
				"%s: Error to free mcam idx %u\n",
				__func__, entry_rsp.entry);
		return rc;
	}

	rsp->entry = entry_rsp.entry;
	return 0;
}
1481 
/* LBK VFs use the promisc rule as their base steering rule; everyone
 * else uses the default ucast rule.
 */
static int rvu_npc_get_base_steer_rule_type(struct rvu *rvu, u16 pcifunc)
{
	return is_lbk_vf(rvu, pcifunc) ? NIXLF_PROMISC_ENTRY :
					 NIXLF_UCAST_ENTRY;
}
1489 
/* Mailbox handler: return the base packet-steering rule for the
 * requester.  For a PF only the RX channel is reported in kw[0]; for a
 * VF the PF-installed steering rule is read back, falling back to the
 * default ucast/promisc entry when no steering rule exists.
 */
int rvu_mbox_handler_npc_cn20k_read_base_steer_rule(struct rvu *rvu,
						    struct msg_req *req,
						    struct npc_cn20k_mcam_read_base_rule_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int index, blkaddr, nixlf, rc = 0;
	u16 pcifunc = req->hdr.pcifunc;
	u8 intf, enable, hw_prio;
	struct rvu_pfvf *pfvf;
	int rl_type;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	/* Return the channel number in case of PF */
	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
		pfvf = rvu_get_pfvf(rvu, pcifunc);
		rsp->entry.kw[0] = pfvf->rx_chan_base;
		rsp->entry.kw_mask[0] = 0xFFFULL;
		goto out;
	}

	/* Find the pkt steering rule installed by PF to this VF */
	mutex_lock(&mcam->lock);
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2target_pffunc[index] == pcifunc)
			goto read_entry;
	}

	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (rc < 0) {
		mutex_unlock(&mcam->lock);
		goto out;
	}

	rl_type = rvu_npc_get_base_steer_rule_type(rvu, pcifunc);

	/* Read the default ucast entry if there is no pkt steering rule */
	index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, rl_type);
	if (index < 0) {
		mutex_unlock(&mcam->lock);
		goto out;
	}

read_entry:
	/* Read the mcam entry */
	rc = npc_cn20k_read_mcam_entry(rvu, blkaddr, index,
				       &rsp->entry, &intf,
				       &enable, &hw_prio);
	mutex_unlock(&mcam->lock);
out:
	return rc;
}
1544 
/* Translate a legacy LC-layer KPU parse flag to its CN20K encoding.
 * Returns 0xff (with a WARN) for any flag without a mapping.
 */
static u8 npc_map2cn20k_flag(u8 flag)
{
	if (flag == NPC_F_LC_U_IP_FRAG)
		return NPC_CN20K_F_LC_L_IP_FRAG;
	if (flag == NPC_F_LC_U_IP6_FRAG)
		return NPC_CN20K_F_LC_L_IP6_FRAG;
	if (flag == NPC_F_LC_L_6TO4)
		return NPC_CN20K_F_LC_L_6TO4;
	if (flag == NPC_F_LC_L_MPLS_IN_IP)
		return NPC_CN20K_F_LC_U_MPLS_IN_IP;
	if (flag == NPC_F_LC_L_IP6_TUN_IP6)
		return NPC_CN20K_F_LC_U_IP6_TUN_IP6;
	if (flag == NPC_F_LC_L_IP6_MPLS_IN_IP)
		return NPC_CN20K_F_LC_U_IP6_MPLS_IN_IP;

	WARN(1, "%s: Invalid flag=%u\n", __func__, flag);
	return 0xff;
}
1573 
1574 void
npc_cn20k_update_action_entries_n_flags(struct rvu * rvu,struct npc_kpu_profile_adapter * pfl)1575 npc_cn20k_update_action_entries_n_flags(struct rvu *rvu,
1576 					struct npc_kpu_profile_adapter *pfl)
1577 {
1578 	struct npc_kpu_profile_action *action;
1579 	int entries, ltype;
1580 	u8 flags, val;
1581 
1582 	for (int i = 0; i < pfl->kpus; i++) {
1583 		action = pfl->kpu[i].action;
1584 		entries = pfl->kpu[i].action_entries;
1585 
1586 		for (int j = 0; j < entries; j++) {
1587 			if (action[j].lid != NPC_LID_LC)
1588 				continue;
1589 
1590 			ltype = action[j].ltype;
1591 
1592 			if (ltype != NPC_LT_LC_IP &&
1593 			    ltype != NPC_LT_LC_IP6 &&
1594 			    ltype != NPC_LT_LC_IP_OPT &&
1595 			    ltype != NPC_LT_LC_IP6_EXT)
1596 				continue;
1597 
1598 			flags = action[j].flags;
1599 
1600 			switch (flags) {
1601 			case NPC_F_LC_U_IP_FRAG:
1602 			case NPC_F_LC_U_IP6_FRAG:
1603 			case NPC_F_LC_L_6TO4:
1604 			case NPC_F_LC_L_MPLS_IN_IP:
1605 			case NPC_F_LC_L_IP6_TUN_IP6:
1606 			case NPC_F_LC_L_IP6_MPLS_IN_IP:
1607 				val = npc_map2cn20k_flag(flags);
1608 				if (val == 0xFF) {
1609 					dev_err(rvu->dev,
1610 						"%s: Error to get flag value\n",
1611 						__func__);
1612 					return;
1613 				}
1614 
1615 				action[j].flags = val;
1616 				break;
1617 			default:
1618 				break;
1619 			}
1620 		}
1621 	}
1622 }
1623 
npc_cn20k_apply_custom_kpu(struct rvu * rvu,struct npc_kpu_profile_adapter * profile)1624 int npc_cn20k_apply_custom_kpu(struct rvu *rvu,
1625 			       struct npc_kpu_profile_adapter *profile)
1626 {
1627 	size_t hdr_sz = sizeof(struct npc_cn20k_kpu_profile_fwdata);
1628 	struct npc_cn20k_kpu_profile_fwdata *fw = rvu->kpu_fwdata;
1629 	struct npc_kpu_profile_action *action;
1630 	struct npc_kpu_profile_cam *cam;
1631 	struct npc_kpu_fwdata *fw_kpu;
1632 	size_t offset = 0;
1633 	u16 kpu, entry;
1634 	int entries;
1635 
1636 	hdr_sz = sizeof(struct npc_cn20k_kpu_profile_fwdata);
1637 
1638 	if (rvu->kpu_fwdata_sz < hdr_sz) {
1639 		dev_warn(rvu->dev, "Invalid KPU profile size\n");
1640 		return -EINVAL;
1641 	}
1642 
1643 	if (le64_to_cpu(fw->signature) != KPU_SIGN) {
1644 		dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n",
1645 			 fw->signature);
1646 		return -EINVAL;
1647 	}
1648 
1649 	/* Verify if the using known profile structure */
1650 	if (NPC_KPU_VER_MAJ(profile->version) >
1651 	    NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) {
1652 		dev_warn(rvu->dev, "Not supported Major version: %d > %d\n",
1653 			 NPC_KPU_VER_MAJ(profile->version),
1654 			 NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER));
1655 		return -EINVAL;
1656 	}
1657 
1658 	/* Verify if profile is aligned with the required kernel changes */
1659 	if (NPC_KPU_VER_MIN(profile->version) <
1660 	    NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) {
1661 		dev_warn(rvu->dev,
1662 			 "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n",
1663 			 NPC_KPU_VER_MAJ(profile->version),
1664 			 NPC_KPU_VER_MIN(profile->version),
1665 			 NPC_KPU_VER_PATCH(profile->version),
1666 			 NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER),
1667 			 NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER),
1668 			 NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER));
1669 		return -EINVAL;
1670 	}
1671 
1672 	/* Verify if profile fits the HW */
1673 	if (fw->kpus > profile->kpus) {
1674 		dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus,
1675 			 profile->kpus);
1676 		return -EINVAL;
1677 	}
1678 
1679 	profile->mcam_kex_prfl.mkex_extr = &fw->mkex;
1680 	if (profile->mcam_kex_prfl.mkex_extr->mkex_sign != MKEX_CN20K_SIGN) {
1681 		dev_warn(rvu->dev, "Invalid MKEX profile signature:%llx\n",
1682 			 profile->mcam_kex_prfl.mkex_extr->mkex_sign);
1683 		return -EINVAL;
1684 	}
1685 
1686 	profile->custom = 1;
1687 	profile->name = fw->name;
1688 	profile->version = le64_to_cpu(fw->version);
1689 	profile->lt_def = &fw->lt_def;
1690 
1691 	for (kpu = 0; kpu < fw->kpus; kpu++) {
1692 		fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset);
1693 		if (fw_kpu->entries > KPU_CN20K_MAX_CST_ENT)
1694 			dev_warn(rvu->dev,
1695 				 "Too many custom entries on KPU%d: %d > %d\n",
1696 				 kpu, fw_kpu->entries, KPU_CN20K_MAX_CST_ENT);
1697 		entries = min(fw_kpu->entries, KPU_CN20K_MAX_CST_ENT);
1698 		cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
1699 		offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
1700 		action = (struct npc_kpu_profile_action *)(fw->data + offset);
1701 		offset += fw_kpu->entries * sizeof(*action);
1702 		if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
1703 			dev_warn(rvu->dev,
1704 				 "Profile size mismatch on KPU%i parsing.\n",
1705 				 kpu + 1);
1706 			return -EINVAL;
1707 		}
1708 
1709 		for (entry = 0; entry < entries; entry++) {
1710 			profile->kpu[kpu].cam[entry] = cam[entry];
1711 			profile->kpu[kpu].action[entry] = action[entry];
1712 		}
1713 	}
1714 	npc_cn20k_update_action_entries_n_flags(rvu, profile);
1715 
1716 	return 0;
1717 }
1718 
/* npc_mcam_idx_2_key_type() - resolve the key type configured for the
 * subbank that physical MCAM index @mcam_idx falls into.
 *
 * Returns 0 and fills @key_type on success, -EINVAL for an index or
 * subbank out of range.
 */
int npc_mcam_idx_2_key_type(struct rvu *rvu, u16 mcam_idx, u8 *key_type)
{
	int per_bank_off, sb_id;

	/* Valid indices span two banks worth of entries */
	if (mcam_idx >= npc_priv.bank_depth * 2) {
		dev_err(rvu->dev, "%s: bad params\n",
			__func__);
		return -EINVAL;
	}

	/* Offset within a single bank, then the subbank it belongs to */
	per_bank_off = mcam_idx & (npc_priv.bank_depth - 1);
	sb_id = per_bank_off / npc_priv.subbank_depth;

	if (sb_id >= npc_priv.num_subbanks) {
		dev_err(rvu->dev, "%s: invalid subbank %d\n",
			__func__, sb_id);
		return -EINVAL;
	}

	*key_type = npc_priv.sb[sb_id].key_type;

	return 0;
}
1752 
/* npc_subbank_idx_2_mcam_idx() - convert a subbank-relative offset to a
 * physical MCAM index.
 *
 * X4 subbanks hold subbank_depth entries; X2 subbanks hold twice that,
 * with the upper half living in bank1 (base sb->b1b).
 * Returns 0 and fills @mcam_idx, or -EINVAL for an out-of-range offset.
 */
static int npc_subbank_idx_2_mcam_idx(struct rvu *rvu, struct npc_subbank *sb,
				      u16 sub_off, u16 *mcam_idx)
{
	int sbd = npc_priv.subbank_depth;
	int base;

	if (sb->key_type == NPC_MCAM_KEY_X4 && sub_off >= sbd) {
		dev_err(rvu->dev,
			"%s: Failed to get mcam idx (x4) sb->idx=%u sub_off=%u",
			__func__, sb->idx, sub_off);
		return -EINVAL;
	}

	if (sb->key_type == NPC_MCAM_KEY_X2 && sub_off >= sbd * 2) {
		dev_err(rvu->dev,
			"%s: Failed to get mcam idx (x2) sb->idx=%u sub_off=%u",
			__func__, sb->idx, sub_off);
		return -EINVAL;
	}

	/* Pick the bank0 or bank1 base, then add the in-subbank offset */
	base = sub_off >= sbd ? sb->b1b : sb->b0b;
	*mcam_idx = base + (sub_off & (sbd - 1));

	return 0;
}
1791 
/* npc_mcam_idx_2_subbank_idx() - map a physical MCAM index to its
 * subbank and subbank-relative offset.
 *
 * Offsets for indices in bank1 get subbank_depth added so the caller
 * can tell the two bank halves of a subbank apart.
 * Returns 0 on success, -EINVAL for an index or subbank out of range.
 */
static int npc_mcam_idx_2_subbank_idx(struct rvu *rvu, u16 mcam_idx,
				      struct npc_subbank **sb,
				      int *sb_off)
{
	int per_bank_off, sb_id;

	/* Valid indices span two banks worth of entries */
	if (mcam_idx >= npc_priv.bank_depth * 2) {
		dev_err(rvu->dev, "%s: Invalid mcam idx %u\n",
			__func__, mcam_idx);
		return -EINVAL;
	}

	per_bank_off = mcam_idx & (npc_priv.bank_depth - 1);
	sb_id = per_bank_off / npc_priv.subbank_depth;

	if (sb_id >= npc_priv.num_subbanks) {
		dev_err(rvu->dev, "%s: invalid subbank %d\n",
			__func__, sb_id);
		return -EINVAL;
	}

	*sb = &npc_priv.sb[sb_id];
	*sb_off = per_bank_off % npc_priv.subbank_depth;

	/* Bank1 indices land in the upper half of the subbank */
	if (mcam_idx >= npc_priv.bank_depth)
		*sb_off += npc_priv.subbank_depth;

	return 0;
}
1833 
/* __npc_subbank_contig_alloc() - find @count contiguous free slots in
 * one bank bitmap of subbank @sb inside the window [@sidx, @eidx].
 *
 * @b is the bottom absolute index of the window and converts absolute
 * indices into bitmap positions (@t is only used for debug output).
 * Chosen subbank-relative offsets are stored in @save; slots taken from
 * bank1 are reported with subbank_depth added (@delta).  The bitmap is
 * not modified here; marking the slots happens separately.
 *
 * For LOWER/ANY priority the search runs upward from @sidx, for HIGHER
 * priority downward from @eidx.
 * Return: 0 on success, -EFAULT when no contiguous run was found.
 */
static int __npc_subbank_contig_alloc(struct rvu *rvu,
				      struct npc_subbank *sb,
				      int key_type, int sidx,
				      int eidx, int prio,
				      int count, int t, int b,
				      unsigned long *bmap,
				      u16 *save)
{
	int k, offset, delta = 0;
	int cnt = 0, sbd;

	sbd = npc_priv.subbank_depth;

	/* Window starts in bank1: report offsets in the upper half */
	if (sidx >= npc_priv.bank_depth)
		delta = sbd;

	switch (prio) {
	case NPC_MCAM_LOWER_PRIO:
	case NPC_MCAM_ANY_PRIO:
		/* Find an area of size 'count' from sidx to eidx */
		offset = bitmap_find_next_zero_area(bmap, sbd, sidx - b,
						    count, 0);

		if (offset >= sbd) {
			dev_err(rvu->dev,
				"%s: Could not find contiguous(%d) entries\n",
				__func__, count);
			return -EFAULT;
		}

		dev_dbg(rvu->dev,
			"%s: sidx=%d eidx=%d t=%d b=%d offset=%d count=%d delta=%d\n",
			__func__, sidx, eidx, t, b, offset,
			count, delta);

		for (cnt = 0; cnt < count; cnt++)
			save[cnt] = offset + cnt + delta;

		break;

	case NPC_MCAM_HIGHER_PRIO:
		/* Find an area of 'count' from eidx to sidx */
		for (k = eidx - b; cnt < count && k >= (sidx - b); k--) {
			/* If an intermediate slot is not free,
			 * reset the counter (cnt) to zero as
			 * request is for contiguous.
			 */
			if (test_bit(k, bmap)) {
				cnt = 0;
				continue;
			}

			save[cnt++] = k + delta;
		}
		break;
	}

	/* Found 'count' number of free slots */
	if (cnt == count)
		return 0;

	dev_dbg(rvu->dev,
		"%s: Could not find contiguous(%d) entries in subbank=%u\n",
		__func__, count, sb->idx);
	return -EFAULT;
}
1900 
/* __npc_subbank_non_contig_alloc() - collect up to @count free (not
 * necessarily contiguous) slots from one bank bitmap of subbank @sb
 * inside the window [@sidx, @eidx].
 *
 * @b converts absolute indices to bitmap positions.  Chosen
 * subbank-relative offsets go into @save; bank1 slots are reported with
 * subbank_depth added (@delta).  The number of slots actually found is
 * returned through @alloc_cnt; the bitmap itself is not modified here.
 *
 * For LOWER/ANY priority the scan runs upward from @sidx, for HIGHER
 * priority downward from @eidx.
 * Return: 0 when @count slots were found, or when @max_alloc is set and
 * at least one slot was found; -EFAULT otherwise.
 */
static int __npc_subbank_non_contig_alloc(struct rvu *rvu,
					  struct npc_subbank *sb,
					  int key_type, int sidx,
					  int eidx, int prio,
					  int t, int b,
					  unsigned long *bmap,
					  int count, u16 *save,
					  bool max_alloc, int *alloc_cnt)
{
	unsigned long index;
	int cnt = 0, delta;
	int k, sbd;

	sbd = npc_priv.subbank_depth;
	delta = sidx >= npc_priv.bank_depth ? sbd : 0;

	switch (prio) {
		/* Find an area of size 'count' from sidx to eidx */
	case NPC_MCAM_LOWER_PRIO:
	case NPC_MCAM_ANY_PRIO:
		index = find_next_zero_bit(bmap, sbd, sidx - b);
		if (index >= sbd) {
			/* No free bit at all in this bank bitmap */
			dev_err(rvu->dev,
				"%s: Error happened to alloc %u, bitmap_weight=%u, sb->idx=%u\n",
				__func__, count,
				bitmap_weight(bmap, sbd),
				sb->idx);
			break;
		}

		for (k = index; cnt < count && k <= (eidx - b); k++) {
			/* Skip used slots */
			if (test_bit(k, bmap))
				continue;

			save[cnt++] = k + delta;
		}
		break;

		/* Find an area of 'count' from eidx to sidx */
	case NPC_MCAM_HIGHER_PRIO:
		for (k = eidx - b; cnt < count && k >= (sidx - b); k--) {
			/* Skip used slots */
			if (test_bit(k, bmap))
				continue;

			save[cnt++] = k + delta;
		}
		break;
	}

	/* Update allocated 'cnt' to alloc_cnt */
	*alloc_cnt = cnt;

	/* Successfully allocated requested count slots */
	if (cnt == count)
		return 0;

	/* Allocation successful for cnt < count */
	if (max_alloc && cnt > 0)
		return 0;

	dev_dbg(rvu->dev,
		"%s: Could not find non contiguous entries(%u) in subbank(%u) cnt=%d max_alloc=%d\n",
		__func__, count, sb->idx, cnt, max_alloc);

	return -EFAULT;
}
1969 
__npc_subbank_sboff_2_off(struct rvu * rvu,struct npc_subbank * sb,int sb_off,unsigned long ** bmap,int * off)1970 static void __npc_subbank_sboff_2_off(struct rvu *rvu, struct npc_subbank *sb,
1971 				      int sb_off, unsigned long **bmap,
1972 				      int *off)
1973 {
1974 	int sbd;
1975 
1976 	sbd = npc_priv.subbank_depth;
1977 
1978 	*off = sb_off & (sbd - 1);
1979 	*bmap = (sb_off >= sbd) ? sb->b1map : sb->b0map;
1980 }
1981 
1982 /* set/clear bitmap */
__npc_subbank_mark_slot(struct rvu * rvu,struct npc_subbank * sb,int sb_off,bool set)1983 static bool __npc_subbank_mark_slot(struct rvu *rvu,
1984 				    struct npc_subbank *sb,
1985 				    int sb_off, bool set)
1986 {
1987 	unsigned long *bmap;
1988 	int off;
1989 
1990 	/* if sb_off >= subbank.depth, then slots are in
1991 	 * bank1
1992 	 */
1993 	__npc_subbank_sboff_2_off(rvu, sb, sb_off, &bmap, &off);
1994 
1995 	dev_dbg(rvu->dev,
1996 		"%s: Marking set=%d sb_off=%d sb->idx=%d off=%d\n",
1997 		__func__, set, sb_off, sb->idx, off);
1998 
1999 	if (set) {
2000 		/* Slot is already used */
2001 		if (test_bit(off, bmap))
2002 			return false;
2003 
2004 		sb->free_cnt--;
2005 		set_bit(off, bmap);
2006 		return true;
2007 	}
2008 
2009 	/* Slot is already free */
2010 	if (!test_bit(off, bmap))
2011 		return false;
2012 
2013 	sb->free_cnt++;
2014 	clear_bit(off, bmap);
2015 	return true;
2016 }
2017 
__npc_subbank_mark_free(struct rvu * rvu,struct npc_subbank * sb)2018 static int __npc_subbank_mark_free(struct rvu *rvu, struct npc_subbank *sb)
2019 {
2020 	int rc, blkaddr;
2021 
2022 	sb->flags = NPC_SUBBANK_FLAG_FREE;
2023 	sb->key_type = 0;
2024 
2025 	bitmap_clear(sb->b0map, 0, npc_priv.subbank_depth);
2026 	bitmap_clear(sb->b1map, 0, npc_priv.subbank_depth);
2027 
2028 	if (!xa_erase(&npc_priv.xa_sb_used, sb->arr_idx)) {
2029 		dev_err(rvu->dev,
2030 			"%s: Error to delete from xa_sb_used array\n",
2031 			__func__);
2032 		return -EFAULT;
2033 	}
2034 
2035 	rc = xa_insert(&npc_priv.xa_sb_free, sb->arr_idx,
2036 		       xa_mk_value(sb->idx), GFP_KERNEL);
2037 	if (rc) {
2038 		rc = xa_insert(&npc_priv.xa_sb_used, sb->arr_idx,
2039 			       xa_mk_value(sb->idx), GFP_KERNEL);
2040 		if (rc)
2041 			dev_err(rvu->dev,
2042 				"%s: Failed to roll back sb(%u) arr_idx=%d\n",
2043 				__func__, sb->idx, sb->arr_idx);
2044 
2045 		dev_err(rvu->dev,
2046 			"%s: Error to add sb(%u) to xa_sb_free array at arr_idx=%d\n",
2047 			__func__, sb->idx, sb->arr_idx);
2048 		return rc;
2049 	}
2050 
2051 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2052 	rvu_write64(rvu, blkaddr,
2053 		    NPC_AF_MCAM_SECTIONX_CFG_EXT(sb->idx),
2054 		    NPC_MCAM_KEY_X2);
2055 
2056 	return rc;
2057 }
2058 
__npc_subbank_mark_used(struct rvu * rvu,struct npc_subbank * sb,int key_type)2059 static int __npc_subbank_mark_used(struct rvu *rvu, struct npc_subbank *sb,
2060 				   int key_type)
2061 {
2062 	int rc;
2063 
2064 	sb->flags = NPC_SUBBANK_FLAG_USED;
2065 	sb->key_type = key_type;
2066 	if (key_type == NPC_MCAM_KEY_X4)
2067 		sb->free_cnt = npc_priv.subbank_depth;
2068 	else
2069 		sb->free_cnt = 2 * npc_priv.subbank_depth;
2070 
2071 	bitmap_clear(sb->b0map, 0, npc_priv.subbank_depth);
2072 	bitmap_clear(sb->b1map, 0, npc_priv.subbank_depth);
2073 
2074 	if (!xa_erase(&npc_priv.xa_sb_free, sb->arr_idx)) {
2075 		dev_err(rvu->dev,
2076 			"%s: Error to delete from xa_sb_free array\n",
2077 			__func__);
2078 		return -EFAULT;
2079 	}
2080 
2081 	rc = xa_insert(&npc_priv.xa_sb_used, sb->arr_idx,
2082 		       xa_mk_value(sb->idx), GFP_KERNEL);
2083 	if (rc)
2084 		dev_err(rvu->dev,
2085 			"%s: Error to add to xa_sb_used array\n", __func__);
2086 
2087 	return rc;
2088 }
2089 
__npc_subbank_free(struct rvu * rvu,struct npc_subbank * sb,u16 sb_off)2090 static bool __npc_subbank_free(struct rvu *rvu, struct npc_subbank *sb,
2091 			       u16 sb_off)
2092 {
2093 	bool deleted = false;
2094 	unsigned long *bmap;
2095 	int rc, off;
2096 
2097 	deleted = __npc_subbank_mark_slot(rvu, sb, sb_off, false);
2098 	if (!deleted)
2099 		goto done;
2100 
2101 	__npc_subbank_sboff_2_off(rvu, sb, sb_off, &bmap, &off);
2102 
2103 	/* Check whether we can mark whole subbank as free */
2104 	if (sb->key_type == NPC_MCAM_KEY_X4) {
2105 		if (sb->free_cnt < npc_priv.subbank_depth)
2106 			goto done;
2107 	} else {
2108 		if (sb->free_cnt < 2 * npc_priv.subbank_depth)
2109 			goto done;
2110 	}
2111 
2112 	/* All slots in subbank are unused. Mark the subbank as free
2113 	 * and add to free pool
2114 	 */
2115 	rc = __npc_subbank_mark_free(rvu, sb);
2116 	if (rc)
2117 		dev_err(rvu->dev, "%s: Error to free subbank\n", __func__);
2118 
2119 done:
2120 	return deleted;
2121 }
2122 
2123 static int
npc_subbank_free(struct rvu * rvu,struct npc_subbank * sb,u16 sb_off)2124 npc_subbank_free(struct rvu *rvu, struct npc_subbank *sb, u16 sb_off)
2125 {
2126 	bool deleted;
2127 
2128 	mutex_lock(&sb->lock);
2129 	deleted = __npc_subbank_free(rvu, sb, sb_off);
2130 	mutex_unlock(&sb->lock);
2131 
2132 	return deleted ? 0 : -EFAULT;
2133 }
2134 
/* Core allocator for MCAM entries inside a single subbank.
 *
 * Allocates @count entries of @key_type between @ref and @limit (which
 * must fall in the same bank half of @sb), honouring @prio ordering.
 * Resulting hardware MCAM indexes are written into @mcam_idx (capacity
 * @idx_sz).  With @max_alloc set (non-contiguous only) a partial
 * allocation is accepted; the number actually allocated is returned via
 * *@alloc_cnt.
 *
 * A free subbank is claimed for @key_type and its hardware section type
 * is programmed accordingly.  On any failure every partially-made change
 * (marked slots, newly-claimed subbank) is rolled back and *@alloc_cnt
 * is reset to 0.
 *
 * Caller must hold sb->lock (see npc_subbank_alloc()).
 * Returns 0 on success, negative errno otherwise.
 */
static int __npc_subbank_alloc(struct rvu *rvu, struct npc_subbank *sb,
			       int key_type, int ref, int limit, int prio,
			       bool contig, int count, u16 *mcam_idx,
			       int idx_sz, bool max_alloc, int *alloc_cnt)
{
	int cnt, t, b, i, blkaddr;
	bool new_sub_bank = false;
	unsigned long *bmap;
	u16 *save = NULL;
	int sidx, eidx;
	bool diffbank;
	int bw, bfree;
	int rc = 0;
	bool ret;

	/* Check if enough space is there to return requested number of
	 * mcam indexes in case of contiguous allocation
	 */
	if (!max_alloc && count > idx_sz) {
		dev_err(rvu->dev,
			"%s: Less space, count=%d idx_sz=%d sb_id=%d\n",
			__func__, count, idx_sz, sb->idx);
		return -ENOSPC;
	}

	/* Allocation on multiple subbank is not supported by this function.
	 * it means that ref and limit should be on same subbank.
	 *
	 * ref and limit values should be validated w.r.t prio as below.
	 * say ref = 100, limit = 200,
	 * if NPC_MCAM_LOWER_PRIO, allocate index 100
	 * if NPC_MCAM_HIGHER_PRIO, below sanity test returns error.
	 * if NPC_MCAM_ANY_PRIO, allocate index 100
	 *
	 * say ref = 200, limit = 100
	 * if NPC_MCAM_LOWER_PRIO, below sanity test returns error.
	 * if NPC_MCAM_HIGHER_PRIO, allocate index 200
	 * if NPC_MCAM_ANY_PRIO, allocate index 100
	 *
	 * Please note that NPC_MCAM_ANY_PRIO does not have any restriction
	 * on "ref" and "limit" values. ie, ref > limit and limit > ref
	 * are valid cases.
	 */
	if ((prio == NPC_MCAM_LOWER_PRIO && ref > limit) ||
	    (prio == NPC_MCAM_HIGHER_PRIO && ref < limit)) {
		dev_err(rvu->dev, "%s: Wrong ref_enty(%d) or limit(%d)\n",
			__func__, ref, limit);
		return -EINVAL;
	}

	/* x4 indexes are from 0 to bank size as it combines two x2 banks */
	if (key_type == NPC_MCAM_KEY_X4 &&
	    (ref >= npc_priv.bank_depth || limit >= npc_priv.bank_depth)) {
		dev_err(rvu->dev,
			"%s: Wrong ref_enty(%d) or limit(%d) for x4\n",
			__func__, ref, limit);
		return -EINVAL;
	}

	/* This function is called either bank0 or bank1 portion of a subbank.
	 * so ref and limit should be on same bank.
	 */
	diffbank = !!((ref & npc_priv.bank_depth) ^
		      (limit & npc_priv.bank_depth));
	if (diffbank) {
		dev_err(rvu->dev,
			"%s: request ref and limit should be from same bank\n",
			__func__);
		return -EINVAL;
	}

	sidx = min_t(int, limit, ref);
	eidx = max_t(int, limit, ref);

	/* Find total number of slots available; both used and free */
	cnt = eidx - sidx + 1;
	if (contig && cnt < count) {
		dev_err(rvu->dev,
			"%s: Wrong ref_enty(%d) or limit(%d) for count(%d)\n",
			__func__, ref, limit, count);
		return -EINVAL;
	}

	/* If subbank is free, check if requested number of indexes is less than
	 * or equal to mcam entries available in the subbank if contig.
	 */
	if (sb->flags & NPC_SUBBANK_FLAG_FREE) {
		if (contig && count > npc_priv.subbank_depth) {
			dev_err(rvu->dev, "%s: Less number of entries\n",
				__func__);
			return -ENOSPC;
		}

		new_sub_bank = true;
		goto process;
	}

	/* Flag should be set for all used subbanks */
	WARN_ONCE(!(sb->flags & NPC_SUBBANK_FLAG_USED),
		  "Used flag is not set(%#x)\n", sb->flags);

	/* If subbank key type does not match with requested key_type,
	 * return error
	 */
	if (sb->key_type != key_type) {
		dev_dbg(rvu->dev, "%s: subbank key_type mismatch\n", __func__);
		return -EINVAL;
	}

process:
	/* if ref or limit >= npc_priv.bank_depth, index are in bank1.
	 * else bank0.
	 */
	if (ref >= npc_priv.bank_depth) {
		bmap = sb->b1map;
		t = sb->b1t;
		b = sb->b1b;
	} else {
		bmap = sb->b0map;
		t = sb->b0t;
		b = sb->b0b;
	}

	/* Calculate free slots */
	bw = bitmap_weight(bmap, npc_priv.subbank_depth);
	bfree = npc_priv.subbank_depth - bw;

	if (!bfree) {
		dev_dbg(rvu->dev, "%s: subbank is full\n", __func__);
		return -ENOSPC;
	}

	/* If request is for contiguous , then max we can allocate is
	 * equal to subbank_depth
	 */
	if (contig && bfree < count) {
		dev_dbg(rvu->dev, "%s: no space for entry\n", __func__);
		return -ENOSPC;
	}

	/* 'save' array stores available indexes temporarily before
	 * marking it as allocated
	 */
	save = kcalloc(count, sizeof(u16), GFP_KERNEL);
	if (!save) {
		rc = -ENOMEM;
		goto err1;
	}

	if (contig) {
		rc =  __npc_subbank_contig_alloc(rvu, sb, key_type,
						 sidx, eidx, prio,
						 count, t, b,
						 bmap, save);
		/* contiguous allocation success means that
		 * requested number of free slots got
		 * allocated
		 */
		if (!rc)
			*alloc_cnt = count;

	} else {
		rc =  __npc_subbank_non_contig_alloc(rvu, sb, key_type,
						     sidx, eidx, prio,
						     t, b, bmap,
						     count, save,
						     max_alloc, alloc_cnt);
	}

	if (rc)
		goto err1;

	/* Mark new subbank bank as used */
	if (new_sub_bank) {
		blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
		if (blkaddr < 0) {
			dev_err(rvu->dev,
				"%s: NPC block not implemented\n", __func__);
			rc = -EFAULT;
			goto err1;
		}

		rc =  __npc_subbank_mark_used(rvu, sb, key_type);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Error to mark subbank as used\n",
				__func__);
			goto err2;
		}

		/* Configure section type to key_type */
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAM_SECTIONX_CFG_EXT(sb->idx),
			    key_type);
	}

	/* Commit: translate saved subbank offsets to hardware MCAM
	 * indexes and mark each slot used in the bitmap.
	 */
	for (i = 0; i < *alloc_cnt; i++) {
		rc = npc_subbank_idx_2_mcam_idx(rvu, sb, save[i],
						&mcam_idx[i]);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Error to find mcam idx for %u\n",
				__func__, save[i]);
			/* TODO: handle err case gracefully */
			goto err3;
		}

		/* Mark all slots as used */
		ret = __npc_subbank_mark_slot(rvu, sb, save[i], true);
		if (!ret) {
			dev_err(rvu->dev, "%s: Error to mark mcam_idx %u\n",
				__func__, mcam_idx[i]);
			rc = -EFAULT;
			goto err3;
		}
	}
	kfree(save);
	return 0;

err3:
	/* Undo the slots already marked by the commit loop above */
	for (int j = 0; j < i; j++)
		__npc_subbank_mark_slot(rvu, sb, save[j], false);
err2:
	/* Release the subbank claimed in this call, if any */
	if (new_sub_bank)
		__npc_subbank_mark_free(rvu, sb);
err1:
	kfree(save);
	*alloc_cnt = 0;
	return rc;
}
2365 
/* Locked wrapper around __npc_subbank_alloc(); see that function for
 * parameter semantics.  Serializes allocation against concurrent
 * alloc/free activity on the same subbank.
 */
static int
npc_subbank_alloc(struct rvu *rvu, struct npc_subbank *sb,
		  int key_type, int ref, int limit, int prio,
		  bool contig, int count, u16 *mcam_idx,
		  int idx_sz, bool max_alloc, int *alloc_cnt)
{
	int rc;

	mutex_lock(&sb->lock);
	rc = __npc_subbank_alloc(rvu, sb, key_type, ref, limit, prio,
				 contig, count, mcam_idx, idx_sz,
				 max_alloc, alloc_cnt);
	mutex_unlock(&sb->lock);

	return rc;
}
2382 
/* Remove @mcam_idx ownership records: erase it from the reverse
 * xa_idx2pf_map (index -> pcifunc) and from the owning PF's
 * xa_pf2idx_map array (selected via xa_pf_map).  The xa_pf_map entry
 * for the pcifunc itself is left in place.
 *
 * Returns 0 on success; -EFAULT when an erase finds no entry, -ESRCH
 * when the pcifunc has no xa_pf_map entry.
 */
static int
npc_del_from_pf_maps(struct rvu *rvu, u16 mcam_idx)
{
	int pcifunc, idx;
	void *map;

	map = xa_erase(&npc_priv.xa_idx2pf_map, mcam_idx);
	if (!map) {
		dev_err(rvu->dev,
			"%s: failed to erase mcam_idx(%u) from xa_idx2pf map\n",
			__func__, mcam_idx);
		return -EFAULT;
	}

	pcifunc = xa_to_value(map);
	/* xa_pf_map maps pcifunc -> slot index of its pf2idx array */
	map = xa_load(&npc_priv.xa_pf_map, pcifunc);
	if (!map) {
		dev_err(rvu->dev,
			"%s: failed to find entry for (%u) from xa_pf_map, mcam=%u\n",
			__func__, pcifunc, mcam_idx);
		return -ESRCH;
	}

	idx = xa_to_value(map);

	map = xa_erase(&npc_priv.xa_pf2idx_map[idx], mcam_idx);
	if (!map) {
		dev_err(rvu->dev,
			"%s: failed to erase mcam_idx(%u) from xa_pf2idx_map map\n",
			__func__, mcam_idx);
		return -EFAULT;
	}
	return 0;
}
2417 
2418 static int
npc_add_to_pf_maps(struct rvu * rvu,u16 mcam_idx,int pcifunc)2419 npc_add_to_pf_maps(struct rvu *rvu, u16 mcam_idx, int pcifunc)
2420 {
2421 	int rc, idx;
2422 	void *map;
2423 
2424 	dev_dbg(rvu->dev,
2425 		"%s: add2maps mcam_idx(%u) to xa_idx2pf map pcifunc=%#x\n",
2426 		__func__, mcam_idx, pcifunc);
2427 
2428 	rc = xa_insert(&npc_priv.xa_idx2pf_map, mcam_idx,
2429 		       xa_mk_value(pcifunc), GFP_KERNEL);
2430 
2431 	if (rc) {
2432 		map = xa_load(&npc_priv.xa_idx2pf_map, mcam_idx);
2433 		dev_err(rvu->dev,
2434 			"%s: failed to insert mcam_idx(%u) to xa_idx2pf map, existing value=%lu\n",
2435 			__func__, mcam_idx, xa_to_value(map));
2436 		return -EFAULT;
2437 	}
2438 
2439 	map = xa_load(&npc_priv.xa_pf_map, pcifunc);
2440 	if (!map) {
2441 		dev_err(rvu->dev,
2442 			"%s: failed to find pf map entry for pcifunc=%#x, mcam=%u\n",
2443 			__func__, pcifunc, mcam_idx);
2444 		return -ESRCH;
2445 	}
2446 
2447 	idx = xa_to_value(map);
2448 
2449 	rc = xa_insert(&npc_priv.xa_pf2idx_map[idx], mcam_idx,
2450 		       xa_mk_value(pcifunc), GFP_KERNEL);
2451 
2452 	if (rc) {
2453 		map = xa_load(&npc_priv.xa_pf2idx_map[idx], mcam_idx);
2454 		xa_erase(&npc_priv.xa_idx2pf_map, mcam_idx);
2455 		dev_err(rvu->dev,
2456 			"%s: failed to insert mcam_idx(%u) to xa_pf2idx_map map, earlier value=%lu idx=%u\n",
2457 			__func__, mcam_idx, xa_to_value(map), idx);
2458 
2459 		return -EFAULT;
2460 	}
2461 
2462 	return 0;
2463 }
2464 
2465 static bool
npc_subbank_suits(struct npc_subbank * sb,int key_type)2466 npc_subbank_suits(struct npc_subbank *sb, int key_type)
2467 {
2468 	mutex_lock(&sb->lock);
2469 
2470 	if (!sb->key_type) {
2471 		mutex_unlock(&sb->lock);
2472 		return true;
2473 	}
2474 
2475 	if (sb->key_type == key_type) {
2476 		mutex_unlock(&sb->lock);
2477 		return true;
2478 	}
2479 
2480 	mutex_unlock(&sb->lock);
2481 	return false;
2482 }
2483 
/* Round 'val' up to the base of the next subbank (subbank_depth is a
 * power of two).  Note: an already-aligned value still advances by one
 * full subbank.
 */
#define SB_ALIGN_UP(val)   (((val) + npc_priv.subbank_depth) & \
			    ~((npc_priv.subbank_depth) - 1))
/* Round 'val' down to the base index of its subbank */
#define SB_ALIGN_DOWN(val) ALIGN_DOWN((val), npc_priv.subbank_depth)
2487 
/* One step of the downward (NPC_MCAM_HIGHER_PRIO) subbank range
 * iterator.  The first call (*start == true) yields the range from
 * @ref down to the bottom of its subbank; each later call yields the
 * next whole subbank below.  *cur_ref/*cur_limit are the OUT range to
 * try (cur_ref >= cur_limit).  *stop is set once the range reaches
 * @limit, clamping *cur_limit to it.  @rvu is unused here.
 */
static void npc_subbank_iter_down(struct rvu *rvu,
				  int ref, int limit,
				  int *cur_ref, int *cur_limit,
				  bool *start, bool *stop)
{
	int align;

	*stop = false;

	/* ALIGN_DOWN the limit to current subbank boundary bottom index */
	if (*start) {
		*start = false;
		*cur_ref = ref;
		align = SB_ALIGN_DOWN(ref);
		if (align < limit) {
			*stop = true;
			*cur_limit = limit;
			return;
		}
		*cur_limit = align;
		return;
	}

	/* Continue just below the previous range, one subbank at a time */
	*cur_ref = *cur_limit - 1;
	align = *cur_ref - npc_priv.subbank_depth + 1;
	if (align <= limit) {
		*stop = true;
		*cur_limit = limit;
		return;
	}

	*cur_limit = align;
}
2521 
/* One step of the upward subbank range iterator (LOWER/ANY prio).
 * The first call (*start == true) yields the range from @ref up to the
 * top of its subbank; each later call yields the next whole subbank
 * above.  *cur_ref/*cur_limit are the OUT range to try
 * (cur_ref <= cur_limit).  *stop is set once the range reaches @limit,
 * clamping *cur_limit to it.  @rvu is unused here.
 */
static void npc_subbank_iter_up(struct rvu *rvu,
				int ref, int limit,
				int *cur_ref, int *cur_limit,
				bool *start, bool *stop)
{
	int align;

	*stop = false;

	/* ALIGN_UP the limit to current subbank boundary top index */
	if (*start) {
		*start = false;
		*cur_ref = ref;

		/* Find next lower prio subbank's bottom index */
		align = SB_ALIGN_UP(ref);

		/* Crosses limit ? */
		if (align - 1 > limit) {
			*stop = true;
			*cur_limit = limit;
			return;
		}

		/* Current subbank's top index */
		*cur_limit = align - 1;
		return;
	}

	/* Continue just above the previous range, one subbank at a time */
	*cur_ref = *cur_limit + 1;
	align = *cur_ref + npc_priv.subbank_depth - 1;

	if (align >= limit) {
		*stop = true;
		*cur_limit = limit;
		return;
	}

	*cur_limit = align;
}
2562 
2563 static int
npc_subbank_iter(struct rvu * rvu,int key_type,int ref,int limit,int prio,int * cur_ref,int * cur_limit,bool * start,bool * stop)2564 npc_subbank_iter(struct rvu *rvu, int key_type,
2565 		 int ref, int limit, int prio,
2566 		 int *cur_ref, int *cur_limit,
2567 		 bool *start, bool *stop)
2568 {
2569 	if (prio != NPC_MCAM_HIGHER_PRIO)
2570 		npc_subbank_iter_up(rvu, ref, limit,
2571 				    cur_ref, cur_limit,
2572 				    start, stop);
2573 	else
2574 		npc_subbank_iter_down(rvu, ref, limit,
2575 				      cur_ref, cur_limit,
2576 				      start, stop);
2577 
2578 	/* limit and ref should < bank_depth for x4 */
2579 	if (key_type == NPC_MCAM_KEY_X4) {
2580 		if (*cur_ref >= npc_priv.bank_depth)
2581 			return -EINVAL;
2582 
2583 		if (*cur_limit >= npc_priv.bank_depth)
2584 			return -EINVAL;
2585 	}
2586 	/* limit and ref should < 2 * bank_depth, for x2 */
2587 	if (*cur_ref >= 2 * npc_priv.bank_depth)
2588 		return -EINVAL;
2589 
2590 	if (*cur_limit >= 2 * npc_priv.bank_depth)
2591 		return -EINVAL;
2592 
2593 	return 0;
2594 }
2595 
/* Free @count MCAM entries listed in @mcam_idx (each may be given as a
 * plain index or a virtual index) and, when @maps_del is set, drop the
 * corresponding PF ownership and vidx map entries.
 *
 * Fixes vs. previous version: npc_subbank_free() returns an int errno,
 * which was being stored in a bool and then reported with a copy-pasted
 * "Failed to find subbank info" message; the error is now kept in an
 * int, reported accurately, and propagated instead of being replaced
 * by -EINVAL.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int npc_idx_free(struct rvu *rvu, u16 *mcam_idx, int count,
			bool maps_del)
{
	struct npc_subbank *sb;
	u16 vidx, midx;
	int sb_off, i;
	int rc;

	/* First pass: verify every index resolves to a subbank so the
	 * free loop below is unlikely to fail half way through.
	 */
	for (i = 0; i < count; i++) {
		rc =  npc_mcam_idx_2_subbank_idx(rvu, npc_vidx2idx(mcam_idx[i]),
						 &sb, &sb_off);
		if (rc) {
			dev_err(rvu->dev,
				"Failed to free mcam idx=%u\n", mcam_idx[i]);
			return rc;
		}
	}

	for (i = 0; i < count; i++) {
		/* Derive both the virtual and the real view of the index */
		if (npc_is_vidx(mcam_idx[i])) {
			vidx = mcam_idx[i];
			midx = npc_vidx2idx(vidx);
		} else {
			midx = mcam_idx[i];
			vidx = npc_idx2vidx(midx);
		}

		if (midx >= npc_priv.bank_depth * npc_priv.num_banks) {
			dev_err(rvu->dev,
				"%s: Invalid mcam_idx=%u cannot be deleted\n",
				__func__, mcam_idx[i]);
			return -EINVAL;
		}

		rc =  npc_mcam_idx_2_subbank_idx(rvu, midx,
						 &sb, &sb_off);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to find subbank info for vidx=%u\n",
				__func__, vidx);
			return rc;
		}

		rc = npc_subbank_free(rvu, sb, sb_off);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to free subbank slot for vidx=%u\n",
				__func__, vidx);
			return rc;
		}

		if (!maps_del)
			continue;

		rc = npc_del_from_pf_maps(rvu, midx);
		if (rc)
			return rc;

		/* If there is no vidx mapping; continue */
		if (vidx == midx)
			continue;

		rc = npc_vidx_maps_del_entry(rvu, vidx, NULL);
		if (rc)
			return rc;
	}

	return 0;
}
2667 
/* Allocate @count MCAM entries walking index-by-index from @ref towards
 * @limit, possibly crossing several subbanks.  The walk goes upwards
 * unless @prio is NPC_MCAM_HIGHER_PRIO, in which case it goes downwards.
 * Free subbanks met on the way are claimed for @key_type; subbanks bound
 * to a different key type are skipped as a whole.  For @contig requests
 * any occupied slot restarts the run, returning the slots taken so far.
 * On overall failure everything allocated is freed and -ENOSPC is
 * returned.  Allocated indexes are written to @mcam_idx.
 */
static int npc_multi_subbank_ref_alloc(struct rvu *rvu, int key_type,
				       int ref, int limit, int prio,
				       bool contig, int count,
				       u16 *mcam_idx)
{
	struct npc_subbank *sb;
	unsigned long *bmap;
	int sb_off, off, rc;
	int cnt = 0;
	bool bitset;

	if (prio != NPC_MCAM_HIGHER_PRIO) {
		while (ref <= limit) {
			/* Calculate subbank and subbank index */
			rc =  npc_mcam_idx_2_subbank_idx(rvu, ref,
							 &sb, &sb_off);
			if (rc)
				goto err;

			/* If subbank is not suitable for requested key type
			 * restart search from next subbank
			 */
			if (!npc_subbank_suits(sb, key_type)) {
				ref = SB_ALIGN_UP(ref);
				if (contig) {
					rc = npc_idx_free(rvu, mcam_idx,
							  cnt, false);
					if (rc)
						return rc;
					cnt = 0;
				}
				continue;
			}

			mutex_lock(&sb->lock);

			/* If subbank is free; mark it as used */
			if (sb->flags & NPC_SUBBANK_FLAG_FREE) {
				rc =  __npc_subbank_mark_used(rvu, sb,
							      key_type);
				if (rc) {
					mutex_unlock(&sb->lock);
					dev_err(rvu->dev,
						"%s:Error to add to use array\n",
						__func__);
					goto err;
				}
			}

			/* Find correct bmap */
			__npc_subbank_sboff_2_off(rvu, sb, sb_off, &bmap, &off);

			/* if bit is already set, reset 'cnt' */
			bitset = test_bit(off, bmap);
			if (bitset) {
				mutex_unlock(&sb->lock);
				if (contig) {
					rc = npc_idx_free(rvu, mcam_idx,
							  cnt, false);
					if (rc)
						return rc;
					cnt = 0;
				}

				ref++;
				continue;
			}

			/* Claim the slot under sb->lock */
			set_bit(off, bmap);
			sb->free_cnt--;
			mcam_idx[cnt++] = ref;
			mutex_unlock(&sb->lock);

			if (cnt == count)
				return 0;
			ref++;
		}

		/* Could not allocate request count slots */
		goto err;
	}

	/* Downward walk for NPC_MCAM_HIGHER_PRIO; mirror of the loop above */
	while (ref >= limit) {
		rc =  npc_mcam_idx_2_subbank_idx(rvu, ref,
						 &sb, &sb_off);
		if (rc)
			goto err;

		if (!npc_subbank_suits(sb, key_type)) {
			/* Jump below the current subbank's base index */
			ref = SB_ALIGN_DOWN(ref) - 1;
			if (contig) {
				rc = npc_idx_free(rvu, mcam_idx, cnt, false);
				if (rc)
					return rc;

				cnt = 0;
			}
			continue;
		}

		mutex_lock(&sb->lock);

		if (sb->flags & NPC_SUBBANK_FLAG_FREE) {
			rc =  __npc_subbank_mark_used(rvu, sb, key_type);
			if (rc) {
				mutex_unlock(&sb->lock);
				dev_err(rvu->dev,
					"%s:Error to add to use array\n",
					__func__);
				goto err;
			}
		}

		__npc_subbank_sboff_2_off(rvu, sb, sb_off, &bmap, &off);
		bitset = test_bit(off, bmap);
		if (bitset) {
			mutex_unlock(&sb->lock);
			if (contig) {
				rc = npc_idx_free(rvu, mcam_idx, cnt, false);
				if (rc)
					return rc;

				cnt = 0;
			}
			ref--;
			continue;
		}

		mcam_idx[cnt++] = ref;
		sb->free_cnt--;
		set_bit(off, bmap);
		mutex_unlock(&sb->lock);

		if (cnt == count)
			return 0;
		ref--;
	}

err:
	/* Roll back whatever was allocated before the failure */
	rc = npc_idx_free(rvu, mcam_idx, cnt, false);
	if (rc)
		dev_err(rvu->dev,
			"%s: Error happened while freeing cnt=%u indexes\n",
			__func__, cnt);

	return -ENOSPC;
}
2814 
npc_subbank_free_cnt(struct rvu * rvu,struct npc_subbank * sb,int key_type)2815 static int npc_subbank_free_cnt(struct rvu *rvu, struct npc_subbank *sb,
2816 				int key_type)
2817 {
2818 	int cnt, spd;
2819 
2820 	spd = npc_priv.subbank_depth;
2821 	mutex_lock(&sb->lock);
2822 
2823 	if (sb->flags & NPC_SUBBANK_FLAG_FREE)
2824 		cnt = key_type == NPC_MCAM_KEY_X4 ? spd : 2 * spd;
2825 	else
2826 		cnt = sb->free_cnt;
2827 
2828 	mutex_unlock(&sb->lock);
2829 	return cnt;
2830 }
2831 
/* Allocate @count entries of @key_type between @ref and @limit by
 * iterating one subbank-sized range at a time (direction chosen by
 * @prio) and delegating each range to npc_subbank_alloc().  For
 * non-contiguous requests partial results accumulate across subbanks;
 * on failure they are all freed.  Allocated indexes go to @mcam_idx.
 *
 * Fix vs. previous version: the "limit subbank" debug print logged
 * sb_idx1 (the ref's in-subbank offset) as the subbank id; it now logs
 * sb2->idx as intended.
 *
 * Returns 0 on success, -EFAULT otherwise.
 */
static int npc_subbank_ref_alloc(struct rvu *rvu, int key_type,
				 int ref, int limit, int prio,
				 bool contig, int count,
				 u16 *mcam_idx)
{
	struct npc_subbank *sb1, *sb2;
	bool max_alloc, start, stop;
	int r, l, sb_idx1, sb_idx2;
	int tot = 0, rc;
	int alloc_cnt;

	/* Partial per-subbank allocations are fine when non-contiguous */
	max_alloc = !contig;

	start = true;
	stop = false;

	/* Loop until we cross the ref/limit boundary */
	while (!stop) {
		rc = npc_subbank_iter(rvu, key_type, ref, limit, prio,
				      &r, &l, &start, &stop);

		dev_dbg(rvu->dev,
			"%s: ref=%d limit=%d r=%d l=%d start=%d stop=%d tot=%d count=%d rc=%d\n",
			__func__, ref, limit, r, l,
			start, stop, tot, count, rc);

		if (rc)
			goto err;

		/* Find subbank and subbank index for ref */
		rc = npc_mcam_idx_2_subbank_idx(rvu, r, &sb1,
						&sb_idx1);
		if (rc)
			goto err;

		dev_dbg(rvu->dev,
			"%s: ref subbank=%d off=%d\n",
			__func__, sb1->idx, sb_idx1);

		/* Skip subbank if it is not available for the keytype */
		if (!npc_subbank_suits(sb1, key_type)) {
			dev_dbg(rvu->dev,
				"%s: not suitable sb=%d key_type=%d\n",
				__func__, sb1->idx, key_type);
			continue;
		}

		/* Find subbank and subbank index for limit */
		rc = npc_mcam_idx_2_subbank_idx(rvu, l, &sb2,
						&sb_idx2);
		if (rc)
			goto err;

		dev_dbg(rvu->dev,
			"%s: limit subbank=%d off=%d\n",
			__func__, sb2->idx, sb_idx2);

		/* subbank of ref and limit should be same */
		if (sb1 != sb2) {
			dev_err(rvu->dev,
				"%s: l(%d) and r(%d) are not in same subbank\n",
				__func__, r, l);
			goto err;
		}

		/* Contiguous request cannot span subbanks; skip if short */
		if (contig &&
		    npc_subbank_free_cnt(rvu, sb1, key_type) < count) {
			dev_dbg(rvu->dev, "%s: less count =%d\n",
				__func__,
				npc_subbank_free_cnt(rvu, sb1, key_type));
			continue;
		}

		/* Try in one bank of a subbank */
		alloc_cnt = 0;
		rc =  npc_subbank_alloc(rvu, sb1, key_type,
					r, l, prio, contig,
					count - tot, mcam_idx + tot,
					count - tot, max_alloc,
					&alloc_cnt);

		tot += alloc_cnt;

		dev_dbg(rvu->dev, "%s: Allocated tot=%d alloc_cnt=%d\n",
			__func__, tot, alloc_cnt);

		if (!rc && count == tot)
			return 0;
	}
err:
	dev_dbg(rvu->dev, "%s: Error to allocate\n",
		__func__);

	/* non contiguous allocation fails. We need to do clean up */
	if (max_alloc) {
		rc = npc_idx_free(rvu, mcam_idx, tot, false);
		if (rc)
			dev_err(rvu->dev,
				"%s: failed to free %u indexes\n",
				__func__, tot);
	}

	return -EFAULT;
}
2936 
/* Minimize allocation from the bottom and top subbanks for noref
 * allocations.  Default allocations are ref based and are served from the
 * top (least priority) subbanks.  Since default allocation happens at a
 * very early stage of the kernel netdev probes, these subbanks are moved
 * to the used-subbanks list, which would otherwise attract noref
 * allocations.  So skip these top and bottom subbanks first and try a
 * free subbank next.  If no slot is available elsewhere, come back and
 * search in these subbanks.
 */
2946 
/* Subbank ids to de-prioritize for noref allocations (see note above) */
static int npc_subbank_restricted_idxs[2];
/* NOTE(review): never cleared within this view - presumably toggled
 * elsewhere once the restricted subbanks become eligible; confirm.
 */
static bool restrict_valid = true;
2949 
npc_subbank_restrict_usage(struct rvu * rvu,int index)2950 static bool npc_subbank_restrict_usage(struct rvu *rvu, int index)
2951 {
2952 	int i;
2953 
2954 	if (!restrict_valid)
2955 		return false;
2956 
2957 	for (i = 0; i < ARRAY_SIZE(npc_subbank_restricted_idxs); i++) {
2958 		if (index == npc_subbank_restricted_idxs[i])
2959 			return true;
2960 	}
2961 
2962 	return false;
2963 }
2964 
/* Try to allocate from both banks of @sb, accumulating into @tot.
 * Returns true once the full @count has been satisfied. X4 entries
 * exist in bank 0 only, so bank 1 is attempted just for X2 keys.
 * Non-contiguous requests may allocate fewer entries than asked for;
 * whatever was obtained is reflected in @tot.
 */
static bool npc_subbank_noref_try_banks(struct rvu *rvu,
					struct npc_subbank *sb,
					int key_type, bool contig, int count,
					u16 *mcam_idx, bool max_alloc,
					int *tot)
{
	int alloc_cnt = 0;
	int rc;

	/* try in bank 0. Try passing ref and limit equal to
	 * subbank boundaries
	 */
	rc = npc_subbank_alloc(rvu, sb, key_type,
			       sb->b0b, sb->b0t, 0,
			       contig, count - *tot,
			       mcam_idx + *tot,
			       count - *tot,
			       max_alloc, &alloc_cnt);

	/* Non contiguous allocation may allocate less than
	 * requested 'count'.
	 */
	*tot += alloc_cnt;

	dev_dbg(rvu->dev,
		"%s: Allocated %d from subbank %d, tot=%d count=%d\n",
		__func__, alloc_cnt, sb->idx, *tot, count);

	/* Successfully allocated */
	if (!rc && count == *tot)
		return true;

	/* x4 entries can be allocated from bank 0 only */
	if (key_type == NPC_MCAM_KEY_X4)
		return false;

	/* try in bank 1 for x2 */
	alloc_cnt = 0;
	rc = npc_subbank_alloc(rvu, sb, key_type,
			       sb->b1b, sb->b1t, 0,
			       contig, count - *tot,
			       mcam_idx + *tot,
			       count - *tot,
			       max_alloc, &alloc_cnt);

	*tot += alloc_cnt;

	dev_dbg(rvu->dev,
		"%s: Allocated %d from subbank %d, tot=%d count=%d\n",
		__func__, alloc_cnt, sb->idx, *tot, count);

	return !rc && count == *tot;
}

/* Allocate @count MCAM entries of @key_type with no reference index.
 * Search order: partially used subbanks first, then completely free
 * subbanks, and the restricted (edge) subbanks only as a last resort.
 * On failure, any partial non-contiguous allocation is rolled back.
 * Returns 0 on success, -EFAULT when the request cannot be met.
 */
static int npc_subbank_noref_alloc(struct rvu *rvu, int key_type, bool contig,
				   int count, u16 *mcam_idx)
{
	struct npc_subbank *sb;
	unsigned long index;
	bool max_alloc;
	int tot = 0;
	int idx, i;
	void *val;

	/* Non-contiguous requests may be satisfied piecemeal across
	 * several subbanks.
	 */
	max_alloc = !contig;

	/* Check used subbanks for free slots */
	xa_for_each(&npc_priv.xa_sb_used, index, val) {
		idx = xa_to_value(val);

		/* Minimize allocation from restricted subbanks
		 * in noref allocations.
		 */
		if (npc_subbank_restrict_usage(rvu, idx))
			continue;

		sb = &npc_priv.sb[idx];

		/* Skip if not suitable subbank */
		if (!npc_subbank_suits(sb, key_type))
			continue;

		if (contig && npc_subbank_free_cnt(rvu, sb, key_type) < count)
			continue;

		if (npc_subbank_noref_try_banks(rvu, sb, key_type, contig,
						count, mcam_idx, max_alloc,
						&tot))
			return 0;
	}

	/* Allocate in free subbanks */
	xa_for_each(&npc_priv.xa_sb_free, index, val) {
		idx = xa_to_value(val);
		sb = &npc_priv.sb[idx];

		/* Minimize allocation from restricted subbanks
		 * in noref allocations.
		 */
		if (npc_subbank_restrict_usage(rvu, idx))
			continue;

		if (!npc_subbank_suits(sb, key_type))
			continue;

		if (npc_subbank_noref_try_banks(rvu, sb, key_type, contig,
						count, mcam_idx, max_alloc,
						&tot))
			return 0;
	}

	/* Allocate from restricted subbanks */
	for (i = 0; restrict_valid &&
	     (i < ARRAY_SIZE(npc_subbank_restricted_idxs)); i++) {
		idx = npc_subbank_restricted_idxs[i];
		sb = &npc_priv.sb[idx];

		/* Skip if not suitable subbank */
		if (!npc_subbank_suits(sb, key_type))
			continue;

		if (contig && npc_subbank_free_cnt(rvu, sb, key_type) < count)
			continue;

		if (npc_subbank_noref_try_banks(rvu, sb, key_type, contig,
						count, mcam_idx, max_alloc,
						&tot))
			return 0;
	}

	/* non contiguous allocation fails. We need to do clean up */
	if (max_alloc)
		npc_idx_free(rvu, mcam_idx, tot, false);

	dev_dbg(rvu->dev, "%s: non-contig allocation fails\n",
		__func__);

	return -EFAULT;
}
3168 
/* Public wrapper to free @count MCAM indexes. The final argument is
 * 'true' here, unlike the internal rollback call in
 * npc_subbank_noref_alloc() which passes 'false'; the flag's exact
 * semantics are defined by npc_idx_free().
 */
int npc_cn20k_idx_free(struct rvu *rvu, u16 *mcam_idx, int count)
{
	return npc_idx_free(rvu, mcam_idx, count, true);
}
3173 
/* Allocate @count MCAM indexes of @key_type for @pcifunc, optionally
 * relative to a reference index.
 *
 * @prio:   NPC_MCAM_HIGHER_PRIO or NPC_MCAM_LOWER_PRIO - side of @ref
 *          on which the new entries must land.
 * @ref/@limit: reference index and search boundary; both zero means a
 *          "no reference" request (except the 0th-entry special case
 *          below).
 * @contig: request one contiguous run instead of scattered indexes.
 * @virt:   non-ref, non-contig allocations with @virt set are handed
 *          out as virtual indexes and become defrag candidates.
 *
 * On success the allocated (possibly virtual) indexes are written into
 * @mcam_idx and each is registered in the PF ownership maps.
 */
int npc_cn20k_ref_idx_alloc(struct rvu *rvu, int pcifunc, int key_type,
			    int prio, u16 *mcam_idx, int ref, int limit,
			    bool contig, int count, bool virt)
{
	bool defrag_candidate = false;
	int i, eidx, rc, bd;
	bool ref_valid;
	u16 vidx;

	bd = npc_priv.bank_depth;

	/* Special case: ref == 0 && limit= 0 && prio == HIGH && count == 1
	 * Here user wants to allocate 0th entry
	 */
	if (!ref && !limit && prio == NPC_MCAM_HIGHER_PRIO &&
	    count == 1) {
		rc = npc_subbank_ref_alloc(rvu, key_type, ref, limit,
					   prio, contig, count, mcam_idx);

		if (rc)
			return rc;
		goto add2map;
	}

	ref_valid = !!(limit || ref);
	defrag_candidate = !ref_valid && !contig && virt;
	if (!ref_valid) {
		/* Requests larger than one subbank cannot be satisfied
		 * by the single-subbank path when contiguous.
		 */
		if (contig && count > npc_priv.subbank_depth)
			goto try_noref_multi_subbank;

		rc = npc_subbank_noref_alloc(rvu, key_type, contig,
					     count, mcam_idx);
		if (!rc)
			goto add2map;

try_noref_multi_subbank:
		/* Highest usable index: X4 keys live in bank 0 only,
		 * X2 keys may span both banks.
		 */
		eidx = (key_type == NPC_MCAM_KEY_X4) ? bd - 1 : 2 * bd - 1;

		if (prio == NPC_MCAM_HIGHER_PRIO)
			rc = npc_multi_subbank_ref_alloc(rvu, key_type,
							 eidx, 0,
							 NPC_MCAM_HIGHER_PRIO,
							 contig, count,
							 mcam_idx);
		else
			rc = npc_multi_subbank_ref_alloc(rvu, key_type,
							 0, eidx,
							 NPC_MCAM_LOWER_PRIO,
							 contig, count,
							 mcam_idx);

		if (!rc)
			goto add2map;

		return rc;
	}

	/* The search window must open in the direction of @prio */
	if ((prio == NPC_MCAM_LOWER_PRIO && ref > limit) ||
	    (prio == NPC_MCAM_HIGHER_PRIO && ref < limit)) {
		dev_err(rvu->dev, "%s: Wrong ref_enty(%d) or limit(%d)\n",
			__func__, ref, limit);
		return -EINVAL;
	}

	/* Both bounds must lie inside the addressable range for the
	 * key type (bank 0 only for X4, both banks for X2).
	 */
	if ((key_type == NPC_MCAM_KEY_X4 && (ref >= bd || limit >= bd)) ||
	    (key_type == NPC_MCAM_KEY_X2 &&
	     (ref >= 2 * bd || limit >= 2 * bd))) {
		dev_err(rvu->dev, "%s: Wrong ref_enty(%d) or limit(%d)\n",
			__func__, ref, limit);
		return -EINVAL;
	}

	if (contig && count > npc_priv.subbank_depth)
		goto try_ref_multi_subbank;

	rc = npc_subbank_ref_alloc(rvu, key_type, ref, limit,
				   prio, contig, count, mcam_idx);
	if (!rc)
		goto add2map;

try_ref_multi_subbank:
	rc = npc_multi_subbank_ref_alloc(rvu, key_type,
					 ref, limit, prio,
					 contig, count, mcam_idx);
	if (!rc)
		goto add2map;

	return rc;

add2map:
	for (i = 0; i < count; i++) {
		rc = npc_add_to_pf_maps(rvu, mcam_idx[i], pcifunc);
		if (rc)
			goto err;

		if (!defrag_candidate)
			continue;

		rc = npc_vidx_maps_add_entry(rvu, mcam_idx[i], pcifunc, &vidx);
		if (rc) {
			npc_del_from_pf_maps(rvu, mcam_idx[i]);
			goto err;
		}

		/* Return vidx to caller */
		mcam_idx[i] = vidx;
	}

	return 0;
err:
	/* Unwind the map registrations done so far.
	 * NOTE(review): npc_vidx2idx() is applied to mcam_idx[j] even
	 * for non-defrag entries that were never converted to a vidx -
	 * confirm it passes plain MCAM indexes through unchanged.
	 */
	for (int j = 0; j < i; j++) {
		npc_del_from_pf_maps(rvu, npc_vidx2idx(mcam_idx[j]));

		if (!defrag_candidate)
			continue;

		npc_vidx_maps_del_entry(rvu, mcam_idx[j], NULL);
	}

	return rc;

}
3296 
npc_cn20k_subbank_calc_free(struct rvu * rvu,int * x2_free,int * x4_free,int * sb_free)3297 void npc_cn20k_subbank_calc_free(struct rvu *rvu, int *x2_free,
3298 				 int *x4_free, int *sb_free)
3299 {
3300 	struct npc_subbank *sb;
3301 	int i;
3302 
3303 	/* Reset all stats to zero */
3304 	*x2_free = 0;
3305 	*x4_free = 0;
3306 	*sb_free = 0;
3307 
3308 	for (i = 0; i < npc_priv.num_subbanks; i++) {
3309 		sb = &npc_priv.sb[i];
3310 		mutex_lock(&sb->lock);
3311 
3312 		/* Count number of free subbanks */
3313 		if (sb->flags & NPC_SUBBANK_FLAG_FREE) {
3314 			(*sb_free)++;
3315 			goto next;
3316 		}
3317 
3318 		/* Sumup x4 free count */
3319 		if (sb->key_type == NPC_MCAM_KEY_X4) {
3320 			(*x4_free) += sb->free_cnt;
3321 			goto next;
3322 		}
3323 
3324 		/* Sumup x2 free counts */
3325 		(*x2_free) += sb->free_cnt;
3326 next:
3327 		mutex_unlock(&sb->lock);
3328 	}
3329 }
3330 
/* Mbox handler: report free X2/X4 entry counts and number of free
 * subbanks to the requesting PF/VF.
 */
int
rvu_mbox_handler_npc_cn20k_get_fcnt(struct rvu *rvu,
				    struct msg_req *req,
				    struct npc_cn20k_get_fcnt_rsp *rsp)
{
	npc_cn20k_subbank_calc_free(rvu, &rsp->free_x2,
				    &rsp->free_x4, &rsp->free_subbanks);
	return 0;
}
3340 
/* Mbox handler: dump the active KEX (key extraction) configuration -
 * per-interface key config, extractor LID and per-LTYPE extractor
 * settings - plus the loaded MKEX profile name.
 */
int
rvu_mbox_handler_npc_cn20k_get_kex_cfg(struct rvu *rvu,
				       struct msg_req *req,
				       struct npc_cn20k_get_kex_cfg_rsp *rsp)
{
	int extr, lt;

	rsp->rx_keyx_cfg = CN20K_GET_KEX_CFG(NIX_INTF_RX);
	rsp->tx_keyx_cfg = CN20K_GET_KEX_CFG(NIX_INTF_TX);

	/* Get EXTRACTOR LID */
	for (extr = 0; extr < NPC_MAX_EXTRACTOR; extr++) {
		rsp->intf_extr_lid[NIX_INTF_RX][extr] =
			CN20K_GET_EXTR_LID(NIX_INTF_RX, extr);
		rsp->intf_extr_lid[NIX_INTF_TX][extr] =
			CN20K_GET_EXTR_LID(NIX_INTF_TX, extr);
	}

	/* Get EXTRACTOR LTYPE */
	for (extr = 0; extr < NPC_MAX_EXTRACTOR; extr++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			rsp->intf_extr_lt[NIX_INTF_RX][extr][lt] =
				CN20K_GET_EXTR_LT(NIX_INTF_RX, extr, lt);
			rsp->intf_extr_lt[NIX_INTF_TX][extr][lt] =
				CN20K_GET_EXTR_LT(NIX_INTF_TX, extr, lt);
		}
	}

	memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN);
	return 0;
}
3372 
/* Maps subbank index -> position in the free-subbank xarray; built by
 * npc_create_srch_order() and consumed by npc_subbank_init().
 */
static int *subbank_srch_order;
3374 
/* Mark the last and the first subbank as restricted so that generic
 * (no-reference) allocations touch them only as a last resort.
 */
static void npc_populate_restricted_idxs(int num_subbanks)
{
	npc_subbank_restricted_idxs[0] = num_subbanks - 1;
	npc_subbank_restricted_idxs[1] = 0;
}
3380 
npc_create_srch_order(int cnt)3381 static int npc_create_srch_order(int cnt)
3382 {
3383 	int val = 0;
3384 
3385 	subbank_srch_order = kcalloc(cnt, sizeof(int),
3386 				     GFP_KERNEL);
3387 	if (!subbank_srch_order)
3388 		return -ENOMEM;
3389 
3390 	/* cnt(subbank depth) is always a power of 2. There is a check in
3391 	 * npc_priv_init() to check the same.
3392 	 */
3393 	for (int i = 0; i < cnt; i += 2) {
3394 		subbank_srch_order[i] = cnt / 2 - val - 1;
3395 		subbank_srch_order[i + 1] = cnt / 2 + 1 + val;
3396 		val++;
3397 	}
3398 
3399 	subbank_srch_order[cnt - 1] = cnt / 2;
3400 	return 0;
3401 }
3402 
/* Initialize subbank @idx: compute the slot range it covers in each of
 * the two MCAM banks, mark it free and insert it into the free-subbank
 * xarray at its precomputed search-order position.
 */
static void npc_subbank_init(struct rvu *rvu, struct npc_subbank *sb, int idx)
{
	mutex_init(&sb->lock);

	/* Bank 0 slot range [b0b, b0t] for this subbank */
	sb->b0b = idx * npc_priv.subbank_depth;
	sb->b0t = sb->b0b + npc_priv.subbank_depth - 1;

	/* Mirrored range in bank 1, offset by one full bank depth */
	sb->b1b = npc_priv.bank_depth + idx * npc_priv.subbank_depth;
	sb->b1t = sb->b1b + npc_priv.subbank_depth - 1;

	sb->flags = NPC_SUBBANK_FLAG_FREE;
	sb->idx = idx;
	/* Position in the walk order built by npc_create_srch_order() */
	sb->arr_idx = subbank_srch_order[idx];

	dev_dbg(rvu->dev, "%s: sb->idx=%u sb->arr_idx=%u\n",
		__func__, sb->idx, sb->arr_idx);

	/* Keep first and last subbank at end of free array; so that
	 * it will be used at last
	 */
	xa_store(&npc_priv.xa_sb_free, sb->arr_idx,
		 xa_mk_value(sb->idx), GFP_KERNEL);
}
3426 
/* Build the pcifunc -> sequential-id map (xa_pf_map) covering every
 * enabled PF and all of its VFs. Returns the number of functions
 * mapped.
 *
 * NOTE(review): xa_store() return values are ignored; a store can fail
 * with ENOMEM - confirm that is acceptable on this init path.
 */
static int npc_pcifunc_map_create(struct rvu *rvu)
{
	int pf, vf, numvfs;
	int cnt = 0;
	u16 pcifunc;
	u64 cfg;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* Bits 19:12 of the PF config hold its VF count */
		numvfs = (cfg >> 12) & 0xFF;

		/* Skip not enabled PFs */
		if (!(cfg & BIT_ULL(20)))
			goto chk_vfs;

		/* If Admin function, check on VFs */
		if (cfg & BIT_ULL(21))
			goto chk_vfs;

		/* pcifunc encoding: PF number in bits 15:9, func 0 = PF */
		pcifunc = pf << 9;

		xa_store(&npc_priv.xa_pf_map, (unsigned long)pcifunc,
			 xa_mk_value(cnt), GFP_KERNEL);

		cnt++;

chk_vfs:
		/* VFs are numbered 1..numvfs in the low 9 bits */
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = (pf << 9) | (vf + 1);

			xa_store(&npc_priv.xa_pf_map, (unsigned long)pcifunc,
				 xa_mk_value(cnt), GFP_KERNEL);
			cnt++;
		}
	}

	return cnt;
}
3465 
/* Per-subbank bookkeeping used while running defragmentation */
struct npc_defrag_node {
	u8 idx;			/* subbank index */
	u8 key_type;		/* NPC_MCAM_KEY_X2 or NPC_MCAM_KEY_X4 */
	bool valid;		/* false for free or restricted subbanks */
	bool refs;		/* subbank also holds ref-allocated entries */
	u16 free_cnt;		/* free slots remaining in this subbank */
	u16 vidx_cnt;		/* number of entries in vidx[] */
	u16 *vidx;		/* virtual indexes resident in this subbank */
	struct list_head list;	/* linkage on the x2 or x4 work list */
};
3476 
npc_defrag_skip_restricted_sb(int sb_id)3477 static bool npc_defrag_skip_restricted_sb(int sb_id)
3478 {
3479 	int i;
3480 
3481 	if (!restrict_valid)
3482 		return false;
3483 
3484 	for (i = 0; i < ARRAY_SIZE(npc_subbank_restricted_idxs); i++)
3485 		if (sb_id == npc_subbank_restricted_idxs[i])
3486 			return true;
3487 	return false;
3488 }
3489 
3490 /* Find subbank with minimum number of virtual indexes */
npc_subbank_min_vidx(struct list_head * lh)3491 static struct npc_defrag_node *npc_subbank_min_vidx(struct list_head *lh)
3492 {
3493 	struct npc_defrag_node *node, *tnode = NULL;
3494 	int min = INT_MAX;
3495 
3496 	list_for_each_entry(node, lh, list) {
3497 		if (!node->valid)
3498 			continue;
3499 
3500 		/* if subbank has ref allocated mcam indexes, that subbank
3501 		 * is not a good candidate to move out indexes.
3502 		 */
3503 		if (node->refs)
3504 			continue;
3505 
3506 		if (min > node->vidx_cnt) {
3507 			min = node->vidx_cnt;
3508 			tnode = node;
3509 		}
3510 	}
3511 
3512 	return tnode;
3513 }
3514 
3515 /* Find subbank with maximum number of free spaces */
npc_subbank_max_free(struct list_head * lh)3516 static struct npc_defrag_node *npc_subbank_max_free(struct list_head *lh)
3517 {
3518 	struct npc_defrag_node *node, *tnode = NULL;
3519 	int max = INT_MIN;
3520 
3521 	list_for_each_entry(node, lh, list) {
3522 		if (!node->valid)
3523 			continue;
3524 
3525 		if (max < node->free_cnt) {
3526 			max = node->free_cnt;
3527 			tnode = node;
3528 		}
3529 	}
3530 
3531 	return tnode;
3532 }
3533 
/* Reserve @cnt free slots in the recipient subbank @f for the pending
 * defrag move, filling @save with the allocated MCAM indexes. Bank 0
 * is tried first, bank 1 for any remainder. On shortfall, everything
 * allocated so far is released and -ENOSPC is returned.
 *
 * NOTE(review): key type is hard-coded to NPC_MCAM_KEY_X2 although
 * npc_defrag_process() is also run on the X4 list - presumably
 * __npc_subbank_alloc() only uses it for slot accounting; confirm the
 * behavior for X4 subbanks.
 */
static int npc_defrag_alloc_free_slots(struct rvu *rvu,
				       struct npc_defrag_node *f,
				       int cnt, u16 *save)
{
	int alloc_cnt1, alloc_cnt2;
	struct npc_subbank *sb;
	int rc, sb_off, i, err;
	bool deleted;

	sb = &npc_priv.sb[f->idx];

	alloc_cnt1 = 0;
	alloc_cnt2 = 0;

	/* First pass: bank 0 of the recipient subbank */
	rc = __npc_subbank_alloc(rvu, sb,
				 NPC_MCAM_KEY_X2, sb->b0b,
				 sb->b0t,
				 NPC_MCAM_LOWER_PRIO,
				 false, cnt, save, cnt, true,
				 &alloc_cnt1);

	/* Second pass: bank 1 for whatever bank 0 couldn't supply */
	if (alloc_cnt1 < cnt) {
		rc = __npc_subbank_alloc(rvu, sb,
					 NPC_MCAM_KEY_X2, sb->b1b,
					 sb->b1t,
					 NPC_MCAM_LOWER_PRIO,
					 false, cnt - alloc_cnt1,
					 save + alloc_cnt1,
					 cnt - alloc_cnt1,
					 true, &alloc_cnt2);
	}

	if (alloc_cnt1 + alloc_cnt2 != cnt) {
		dev_err(rvu->dev,
			"%s: Failed to alloc cnt=%u alloc_cnt1=%u alloc_cnt2=%u\n",
			__func__, cnt, alloc_cnt1, alloc_cnt2);
		rc = -ENOSPC;
		goto fail_free_alloc;
	}

	return 0;

fail_free_alloc:
	/* Roll back: return every slot allocated above to its subbank */
	for (i = 0; i < alloc_cnt1 + alloc_cnt2; i++) {
		err =  npc_mcam_idx_2_subbank_idx(rvu, save[i],
						  &sb, &sb_off);
		if (err) {
			dev_err(rvu->dev,
				"%s: Error to find subbank for mcam idx=%u\n",
				__func__, save[i]);
			break;
		}

		deleted = __npc_subbank_free(rvu, sb, sb_off);
		if (!deleted) {
			dev_err(rvu->dev,
				"%s: Error to free mcam idx=%u\n",
				__func__, save[i]);
			break;
		}
	}

	return rc;
}
3598 
/* Record one defrag relocation (old -> new MCAM index for @vidx) on
 * the global show list (npc_priv.defrag_lh) so it can be reported
 * later. Returns 0 on success, -ENOMEM on allocation failure.
 */
static int npc_defrag_add_2_show_list(struct rvu *rvu, u16 old_midx,
				      u16 new_midx, u16 vidx)
{
	struct npc_defrag_show_node *node;

	/* kzalloc() is the idiomatic allocator for a single zeroed
	 * object; kcalloc(1, ...) is meant for arrays.
	 */
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->old_midx = old_midx;
	node->new_midx = new_midx;
	node->vidx = vidx;
	INIT_LIST_HEAD(&node->list);

	mutex_lock(&npc_priv.lock);
	list_add_tail(&node->list, &npc_priv.defrag_lh);
	mutex_unlock(&npc_priv.lock);

	return 0;
}
3619 
/* Move the last @cnt virtual indexes of donor subbank @v into the free
 * slots listed in @save (reserved from recipient subbank @f by
 * npc_defrag_alloc_free_slots()). For each entry: remap the vidx,
 * migrate the hardware MCAM entry and its stats counter register,
 * free the old slot, and fix up every software map (PF maps, mcam
 * entry/counter/target maps, rule list). Runs with mcam->lock and all
 * subbank locks held (taken in npc_cn20k_defrag()).
 *
 * NOTE(review): blkaddr from rvu_get_blkaddr() is not checked for < 0
 * before being used in register accesses - confirm NPC is guaranteed
 * present on this path.
 */
static
int npc_defrag_move_vdx_to_free(struct rvu *rvu,
				struct npc_defrag_node *f,
				struct npc_defrag_node *v,
				int cnt, u16 *save)
{
	u16 new_midx, old_midx, vidx, target_pf;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule, *tmp;
	int i, vidx_cnt, rc, sb_off;
	struct npc_subbank *sb;
	bool deleted;
	u16 pcifunc;
	int blkaddr;
	void *map;
	u8 bank;
	u16 midx;
	u64 stats;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	vidx_cnt = v->vidx_cnt;
	for (i = 0; i < cnt; i++) {
		/* Pair the donor's highest remaining vidx with the
		 * last unused reserved slot.
		 */
		vidx = v->vidx[vidx_cnt - i - 1];
		old_midx = npc_vidx2idx(vidx);
		new_midx = save[cnt - i - 1];

		dev_dbg(rvu->dev,
			"%s: Moving %u ---> %u  (vidx=%u)\n",
			__func__,
			old_midx, new_midx, vidx);

		/* Best effort: a failed show-list entry does not stop
		 * the move itself.
		 */
		rc = npc_defrag_add_2_show_list(rvu, old_midx, new_midx, vidx);
		if (rc)
			dev_err(rvu->dev,
				"%s: Error happened to add to show list vidx=%u\n",
				__func__, vidx);

		/* Modify vidx to point to new mcam idx */
		rc = npc_vidx_maps_modify(rvu, vidx, new_midx);
		if (rc)
			return rc;

		/* Snapshot the old entry's stats register before the
		 * entry is torn down.
		 */
		midx = old_midx % mcam->banksize;
		bank = old_midx / mcam->banksize;
		stats = rvu_read64(rvu, blkaddr,
				   NPC_AF_CN20K_MCAMEX_BANKX_STAT_EXT(midx,
								      bank));

		/* If bug happened during copy/enable mcam, then there is a bug in allocation
		 * algorithm itself. There is no point in rewinding and returning, as it
		 * will face further issue. Return error after printing error
		 */
		if (npc_cn20k_enable_mcam_entry(rvu, blkaddr, old_midx, false)) {
			dev_err(rvu->dev,
				"%s: Error happened while disabling old_mid=%u\n",
				__func__, old_midx);
			return -EFAULT;
		}

		if (npc_cn20k_copy_mcam_entry(rvu, blkaddr, old_midx, new_midx)) {
			dev_err(rvu->dev,
				"%s: Error happened while copying old_midx=%u new_midx=%u\n",
				__func__, old_midx, new_midx);
			return -EFAULT;
		}

		if (npc_cn20k_enable_mcam_entry(rvu, blkaddr, new_midx, true)) {
			dev_err(rvu->dev,
				"%s: Error happened while enabling new_mid=%u\n",
				__func__, new_midx);
			return -EFAULT;
		}

		/* Restore the snapshotted stats onto the new slot */
		midx = new_midx % mcam->banksize;
		bank = new_midx / mcam->banksize;
		rvu_write64(rvu, blkaddr,
			    NPC_AF_CN20K_MCAMEX_BANKX_STAT_EXT(midx, bank),
			    stats);

		/* Free the old mcam idx */
		rc =  npc_mcam_idx_2_subbank_idx(rvu, old_midx,
						 &sb, &sb_off);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Unable to calculate subbank off for mcamidx=%u\n",
				__func__, old_midx);
			return rc;
		}

		deleted = __npc_subbank_free(rvu, sb, sb_off);
		if (!deleted) {
			dev_err(rvu->dev,
				"%s:  Failed to free mcamidx=%u sb=%u sb_off=%u\n",
				__func__, old_midx, sb->idx, sb_off);
			return -EFAULT;
		}

		/* save pcifunc */
		map = xa_load(&npc_priv.xa_idx2pf_map, old_midx);
		pcifunc = xa_to_value(map);

		/* delete from pf maps */
		rc =  npc_del_from_pf_maps(rvu, old_midx);
		if (rc) {
			dev_err(rvu->dev,
				"%s:  Failed to delete pf maps for mcamidx=%u\n",
				__func__, old_midx);
			return rc;
		}

		/* add new mcam_idx to pf map */
		rc = npc_add_to_pf_maps(rvu, new_midx, pcifunc);
		if (rc) {
			dev_err(rvu->dev,
				"%s:  Failed to add pf maps for mcamidx=%u\n",
				__func__, new_midx);
			return rc;
		}

		/* Remove from mcam maps */
		mcam->entry2pfvf_map[old_midx] = NPC_MCAM_INVALID_MAP;
		mcam->entry2cntr_map[old_midx] = NPC_MCAM_INVALID_MAP;
		npc_mcam_clear_bit(mcam, old_midx);

		mcam->entry2pfvf_map[new_midx] = pcifunc;
		/* Counter is not preserved */
		mcam->entry2cntr_map[new_midx] = new_midx;
		target_pf = mcam->entry2target_pffunc[old_midx];
		mcam->entry2target_pffunc[new_midx] = target_pf;
		mcam->entry2target_pffunc[old_midx] = NPC_MCAM_INVALID_MAP;

		npc_mcam_set_bit(mcam, new_midx);

		/* Note: list order is not functionally required for mcam_rules */
		list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
			if (rule->entry != old_midx)
				continue;

			rule->entry = new_midx;
			break;
		}

		/* Mark as invalid */
		v->vidx[vidx_cnt - i - 1] = -1;
		save[cnt - i - 1] = -1;

		f->free_cnt--;
		v->vidx_cnt--;
	}

	return 0;
}
3773 
/* Core defrag loop for one key-type work list: repeatedly pick the
 * subbank with the fewest movable (virtual) entries as donor @v and
 * the subbank with the most free slots as recipient @f, then migrate
 * min(v->vidx_cnt, f->free_cnt) entries from @v into @f. Subbanks drop
 * off the list once drained (donor) or full (recipient); the loop ends
 * when no donor/recipient pair remains.
 */
static int npc_defrag_process(struct rvu *rvu, struct list_head *lh)
{
	struct npc_defrag_node *v = NULL;
	struct npc_defrag_node *f = NULL;
	int rc = 0, cnt;
	u16 *save;

	while (1) {
		/* Find subbank with minimum vidx */
		if (!v) {
			v = npc_subbank_min_vidx(lh);
			if (!v)
				break;
		}

		/* Find subbank with maximum free slots */
		if (!f) {
			f = npc_subbank_max_free(lh);
			if (!f)
				break;
		}

		/* Donor has nothing left to move */
		if (!v->vidx_cnt) {
			list_del_init(&v->list);
			v = NULL;
			continue;
		}

		/* Recipient has no room left */
		if (!f->free_cnt) {
			list_del_init(&f->list);
			f = NULL;
			continue;
		}

		/* If both subbanks are same, choose vidx and
		 * search for free list again
		 */
		if (f == v) {
			list_del_init(&f->list);
			f = NULL;
			continue;
		}

		/* Calculate minimum free slots needs to be allocated */
		cnt = f->free_cnt > v->vidx_cnt ? v->vidx_cnt :
			f->free_cnt;

		dev_dbg(rvu->dev,
			"%s: cnt=%u free_cnt=%u(sb=%u) vidx_cnt=%u(sb=%u)\n",
			__func__, cnt, f->free_cnt, f->idx,
			v->vidx_cnt, v->idx);

		/* Allocate an array to store newly allocated
		 * free slots (mcam indexes)
		 */
		save = kcalloc(cnt, sizeof(*save), GFP_KERNEL);
		if (!save) {
			rc = -ENOMEM;
			goto err;
		}

		/* Alloc free slots for existing vidx */
		rc = npc_defrag_alloc_free_slots(rvu, f, cnt, save);
		if (rc) {
			kfree(save);
			goto err;
		}

		/* Move vidx to free slots; update pf_map and vidx maps,
		 * and free existing vidx mcam slots
		 */
		rc = npc_defrag_move_vdx_to_free(rvu, f, v, cnt, save);
		if (rc) {
			kfree(save);
			goto err;
		}

		kfree(save);

		if (!f->free_cnt) {
			list_del_init(&f->list);
			f = NULL;
		}

		if (!v->vidx_cnt) {
			list_del_init(&v->list);
			v = NULL;
		}
	}

err:
	/* Whole defragmentation process is done within locks. if there
	 * is an error, it would be hard to roll back as index remove/add
	 * can fail again if it failed before. This would mean that there
	 * is bug in the index management algorithm.
	 * Return from here than rolling back.
	 */
	return rc;
}
3873 
npc_defrag_list_clear(void)3874 static void npc_defrag_list_clear(void)
3875 {
3876 	struct npc_defrag_show_node *node, *next;
3877 
3878 	mutex_lock(&npc_priv.lock);
3879 	list_for_each_entry_safe(node, next, &npc_priv.defrag_lh, list) {
3880 		list_del_init(&node->list);
3881 		kfree(node);
3882 	}
3883 
3884 	mutex_unlock(&npc_priv.lock);
3885 }
3886 
/* Take every subbank mutex in ascending index order; paired with
 * npc_unlock_all_subbank(), which releases in reverse order.
 */
static void npc_lock_all_subbank(void)
{
	int i;

	for (i = 0; i < npc_priv.num_subbanks; i++)
		mutex_lock(&npc_priv.sb[i].lock);
}
3894 
/* Release every subbank mutex in the reverse of the order taken by
 * npc_lock_all_subbank().
 */
static void npc_unlock_all_subbank(void)
{
	int i;

	for (i = npc_priv.num_subbanks - 1; i >= 0; i--)
		mutex_unlock(&npc_priv.sb[i].lock);
}
3902 
/* Only non-ref non-contigous mcam indexes
 * are picked for defrag process
 */
/* Run MCAM defragmentation: build a per-subbank work node array,
 * partition nodes into X2 and X4 lists, collect each subbank's
 * virtual indexes from xa_vidx2idx_map, flag subbanks that also hold
 * ref-allocated entries, then compact both lists via
 * npc_defrag_process(). The whole run holds mcam->lock and every
 * subbank lock.
 */
int npc_cn20k_defrag(struct rvu *rvu)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_defrag_node *node, *tnode;
	struct list_head x4lh, x2lh, *lh;
	int rc = 0, i, sb_off, tot;
	struct npc_subbank *sb;
	unsigned long index;
	void *map;
	u16 midx;

	/* Free previous show list */
	npc_defrag_list_clear();

	INIT_LIST_HEAD(&x4lh);
	INIT_LIST_HEAD(&x2lh);

	node = kcalloc(npc_priv.num_subbanks, sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	/* Lock mcam */
	mutex_lock(&mcam->lock);
	npc_lock_all_subbank();

	/* Fill in node with subbank properties */
	for (i = 0; i < npc_priv.num_subbanks; i++) {
		sb = &npc_priv.sb[i];

		node[i].idx = i;
		node[i].key_type = sb->key_type;
		node[i].free_cnt = sb->free_cnt;
		/* Worst case: every slot in both banks is a vidx */
		node[i].vidx = kcalloc(npc_priv.subbank_depth * 2,
				       sizeof(*node[i].vidx),
				       GFP_KERNEL);
		if (!node[i].vidx) {
			rc = -ENOMEM;
			goto free_vidx;
		}

		/* If subbank is empty, dont include it in defrag
		 * process
		 */
		if (sb->flags & NPC_SUBBANK_FLAG_FREE) {
			node[i].valid = false;
			continue;
		}

		if (npc_defrag_skip_restricted_sb(i)) {
			node[i].valid = false;
			continue;
		}

		node[i].valid = true;
		INIT_LIST_HEAD(&node[i].list);

		/* Add node to x2 or x4 list */
		lh = sb->key_type == NPC_MCAM_KEY_X2 ? &x2lh : &x4lh;
		list_add_tail(&node[i].list, lh);
	}

	/* Filling vidx[] array with all vidx in that subbank */
	xa_for_each_start(&npc_priv.xa_vidx2idx_map, index, map,
			  npc_priv.bank_depth * 2) {
		midx = xa_to_value(map);
		rc =  npc_mcam_idx_2_subbank_idx(rvu, midx,
						 &sb, &sb_off);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Error to get mcam_idx for vidx=%lu\n",
				__func__, index);
			goto free_vidx;
		}

		tnode = &node[sb->idx];
		tnode->vidx[tnode->vidx_cnt] = index;
		tnode->vidx_cnt++;
	}

	/* Mark all subbank which has ref allocation */
	for (i = 0; i < npc_priv.num_subbanks; i++) {
		tnode = &node[i];

		if (!tnode->valid)
			continue;

		/* Total slots this subbank offers for its key type */
		tot = (tnode->key_type == NPC_MCAM_KEY_X2) ?
			npc_priv.subbank_depth * 2 : npc_priv.subbank_depth;

		/* Used slots not accounted as vidx must be ref-allocated */
		if (node[i].vidx_cnt != tot - tnode->free_cnt)
			tnode->refs = true;
	}

	rc =  npc_defrag_process(rvu, &x2lh);
	if (rc)
		goto free_vidx;

	rc =  npc_defrag_process(rvu, &x4lh);
	if (rc)
		goto free_vidx;

free_vidx:
	/* Shared success/error exit: unlock, then free all work arrays */
	npc_unlock_all_subbank();
	mutex_unlock(&mcam->lock);
	for (i = 0; i < npc_priv.num_subbanks; i++)
		kfree(node[i].vidx);
	kfree(node);
	return rc;
}
4015 
/* Mbox handler: trigger a full MCAM defragmentation run */
int rvu_mbox_handler_npc_defrag(struct rvu *rvu, struct msg_req *req,
				struct msg_rsp *rsp)
{
	return npc_cn20k_defrag(rvu);
}
4021 
/* Look up the default-rule MCAM indexes installed for @pcifunc.
 * Output pointers may be NULL when the caller does not need that rule;
 * all supplied outputs are preset to USHRT_MAX. LBK VFs carry only a
 * promisc rule and other VFs only a ucast rule; everything else is
 * looked up per rule id. Returns 0 on success, -EINVAL when a required
 * output pointer is missing, -ESRCH when nothing was found.
 */
int npc_cn20k_dft_rules_idx_get(struct rvu *rvu, u16 pcifunc, u16 *bcast,
				u16 *mcast, u16 *promisc, u16 *ucast)
{
	/* ptr[] order must match NPC_DFT_RULE_*_ID numbering:
	 * promisc, mcast, bcast, ucast.
	 */
	u16 *ptr[4] = {promisc, mcast, bcast, ucast};
	unsigned long idx;
	bool set = false;
	void *val;
	int i, j;

	for (i = 0; i < ARRAY_SIZE(ptr); i++) {
		if (!ptr[i])
			continue;

		*ptr[i] = USHRT_MAX;
	}

	if (!npc_priv.init_done)
		return 0;

	if (is_lbk_vf(rvu, pcifunc)) {
		if (!ptr[0])
			return -EINVAL;

		idx = NPC_DFT_RULE_ID_MK(pcifunc, NPC_DFT_RULE_PROMISC_ID);
		val = xa_load(&npc_priv.xa_pf2dfl_rmap, idx);
		if (!val) {
			pr_debug("%s: Failed to find %s index for pcifunc=%#x\n",
				 __func__,
				 npc_dft_rule_name[NPC_DFT_RULE_PROMISC_ID],
				 pcifunc);

			return -ESRCH;
		}

		*ptr[0] = xa_to_value(val);
		return 0;
	}

	if (is_vf(pcifunc)) {
		if (!ptr[3])
			return -EINVAL;

		idx = NPC_DFT_RULE_ID_MK(pcifunc, NPC_DFT_RULE_UCAST_ID);
		val = xa_load(&npc_priv.xa_pf2dfl_rmap, idx);
		if (!val) {
			pr_debug("%s: Failed to find %s index for pcifunc=%#x\n",
				 __func__,
				 npc_dft_rule_name[NPC_DFT_RULE_UCAST_ID],
				 pcifunc);

			return -ESRCH;
		}

		*ptr[3] = xa_to_value(val);
		return 0;
	}

	/* PF case: walk every default rule id; report success if at
	 * least one lookup hit.
	 */
	for (i = NPC_DFT_RULE_START_ID, j = 0; i < NPC_DFT_RULE_MAX_ID; i++,
	     j++) {
		if (!ptr[j])
			continue;

		idx = NPC_DFT_RULE_ID_MK(pcifunc, i);
		val = xa_load(&npc_priv.xa_pf2dfl_rmap, idx);
		if (!val) {
			pr_debug("%s: Failed to find %s index for pcifunc=%#x\n",
				 __func__,
				 npc_dft_rule_name[i], pcifunc);

			continue;
		}

		*ptr[j] = xa_to_value(val);
		set = true;
	}

	return  set ? 0 : -ESRCH;
}
4100 
/* Mbox handler: report MCAM profile geometry - key-width type and the
 * bank depth (number of X4-capable slots per bank). cn20k only.
 */
int rvu_mbox_handler_npc_get_pfl_info(struct rvu *rvu, struct msg_req *req,
				      struct npc_get_pfl_info_rsp *rsp)
{
	if (!is_cn20k(rvu->pdev)) {
		dev_err(rvu->dev, "Mbox support is only for cn20k\n");
		return -EOPNOTSUPP;
	}

	rsp->kw_type = npc_priv.kw;
	rsp->x4_slots = npc_priv.bank_depth;
	return 0;
}
4113 
/* Mbox handler: compute how many 64-bit key words (KWs) the given flow
 * spec consumes by building its MCAM key/mask and locating the most
 * significant non-zero word. cn20k only.
 */
int rvu_mbox_handler_npc_get_num_kws(struct rvu *rvu,
				     struct npc_get_num_kws_req *req,
				     struct npc_get_num_kws_rsp *rsp)
{
	u64 kw_mask[NPC_KWS_IN_KEY_SZ_MAX] = { 0 };
	u64 kw[NPC_KWS_IN_KEY_SZ_MAX] = { 0 };
	struct rvu_npc_mcam_rule dummy = { 0 };
	struct mcam_entry_mdata mdata = { };
	struct npc_install_flow_req *fl;
	int i, cnt = 0, blkaddr;

	if (!is_cn20k(rvu->pdev)) {
		dev_err(rvu->dev, "Mbox support is only for cn20k\n");
		return -EOPNOTSUPP;
	}

	fl = &req->fl;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return NPC_MCAM_INVALID_REQ;
	}

	/* Build the key into local scratch buffers; the dummy rule
	 * absorbs the bookkeeping side effects of npc_update_flow().
	 */
	mdata.kw = kw;
	mdata.kw_mask = kw_mask;

	npc_update_flow(rvu, &mdata, fl->features, &fl->packet,
			&fl->mask, &dummy, fl->intf, blkaddr);

	/* Find the most significant word valid. Traverse from
	 * MSB to LSB, check if cam0 or cam1 is set
	 */
	for (i = NPC_KWS_IN_KEY_SZ_MAX - 1; i >= 0; i--) {
		if (kw[i] || kw_mask[i]) {
			cnt = i + 1;
			break;
		}
	}

	rsp->kws = cnt;

	return 0;
}
4158 
rvu_mbox_handler_npc_get_dft_rl_idxs(struct rvu * rvu,struct msg_req * req,struct npc_get_dft_rl_idxs_rsp * rsp)4159 int rvu_mbox_handler_npc_get_dft_rl_idxs(struct rvu *rvu, struct msg_req *req,
4160 					 struct npc_get_dft_rl_idxs_rsp *rsp)
4161 {
4162 	u16 bcast, mcast, promisc, ucast;
4163 	u16 pcifunc;
4164 	int rc;
4165 
4166 	if (!is_cn20k(rvu->pdev)) {
4167 		dev_err(rvu->dev, "Mbox support is only for cn20k\n");
4168 		return -EOPNOTSUPP;
4169 	}
4170 
4171 	pcifunc = req->hdr.pcifunc;
4172 
4173 	rc = npc_cn20k_dft_rules_idx_get(rvu, pcifunc, &bcast, &mcast,
4174 					 &promisc, &ucast);
4175 	if (rc)
4176 		return rc;
4177 
4178 	rsp->bcast = bcast;
4179 	rsp->mcast = mcast;
4180 	rsp->promisc = promisc;
4181 	rsp->ucast = ucast;
4182 	return 0;
4183 }
4184 
npc_is_cgx_or_lbk(struct rvu * rvu,u16 pcifunc)4185 bool npc_is_cgx_or_lbk(struct rvu *rvu, u16 pcifunc)
4186 {
4187 	return is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)) ||
4188 		is_lbk_vf(rvu, pcifunc);
4189 }
4190 
/* Tear down the default MCAM rules of @pcifunc: drop the pcifunc->index
 * mappings from the xarray, disable/free the MCAM entries and purge any
 * matching software rule objects. Which mappings exist depends on the
 * device class: LBK VF has only a promisc rule, a regular VF only a
 * ucast rule, a PF has the full set.
 */
void npc_cn20k_dft_rules_free(struct rvu *rvu, u16 pcifunc)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	/* USHRT_MAX marks "index not found"; only valid slots are freed */
	u16 ptr[4] = {[0 ... 3] = USHRT_MAX};
	struct rvu_npc_mcam_rule *rule, *tmp;
	unsigned long index;
	int blkaddr, rc, i;
	void *map;

	if (!npc_priv.init_done)
		return;

	if (!npc_is_cgx_or_lbk(rvu, pcifunc)) {
		dev_dbg(rvu->dev,
			"%s: dft rule allocation is only for cgx mapped device, pcifunc=%#x\n",
			__func__, pcifunc);
		return;
	}

	/* Collect bcast/mcast/promisc/ucast indices; nothing to free if
	 * none were ever recorded for this pcifunc.
	 */
	rc = npc_cn20k_dft_rules_idx_get(rvu, pcifunc, &ptr[0], &ptr[1],
					 &ptr[2], &ptr[3]);
	if (rc)
		return;

	/* LBK: only a promisc mapping exists */
	if (is_lbk_vf(rvu, pcifunc)) {
		index = NPC_DFT_RULE_ID_MK(pcifunc, NPC_DFT_RULE_PROMISC_ID);
		map = xa_erase(&npc_priv.xa_pf2dfl_rmap, index);
		if (!map)
			dev_dbg(rvu->dev,
				"%s: Err from delete %s mcam idx from xarray (pcifunc=%#x\n",
				__func__,
				npc_dft_rule_name[NPC_DFT_RULE_PROMISC_ID],
				pcifunc);

		goto free_rules;
	}

	/* VF: only a ucast mapping exists */
	if (is_vf(pcifunc)) {
		index = NPC_DFT_RULE_ID_MK(pcifunc, NPC_DFT_RULE_UCAST_ID);
		map = xa_erase(&npc_priv.xa_pf2dfl_rmap, index);
		if (!map)
			dev_dbg(rvu->dev,
				"%s: Err from delete %s mcam idx from xarray (pcifunc=%#x\n",
				__func__,
				npc_dft_rule_name[NPC_DFT_RULE_UCAST_ID],
				pcifunc);

		goto free_rules;
	}

	/* PF: erase every default-rule mapping */
	for (i = NPC_DFT_RULE_START_ID; i < NPC_DFT_RULE_MAX_ID; i++)  {
		index = NPC_DFT_RULE_ID_MK(pcifunc, i);
		map = xa_erase(&npc_priv.xa_pf2dfl_rmap, index);
		if (!map)
			dev_dbg(rvu->dev,
				"%s: Err from delete %s mcam idx from xarray (pcifunc=%#x\n",
				__func__, npc_dft_rule_name[i],
				pcifunc);
	}

free_rules:
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;
	for (int i = 0; i < 4; i++) {
		if (ptr[i] == USHRT_MAX)
			continue;

		/* Clear mcam bookkeeping and disable the hw entry under
		 * the mcam lock, then release the index itself.
		 */
		mutex_lock(&mcam->lock);
		npc_mcam_clear_bit(mcam, ptr[i]);
		mcam->entry2pfvf_map[ptr[i]] = NPC_MCAM_INVALID_MAP;
		npc_cn20k_enable_mcam_entry(rvu, blkaddr, ptr[i], false);
		mcam->entry2target_pffunc[ptr[i]] = 0x0;
		mutex_unlock(&mcam->lock);

		rc = npc_cn20k_idx_free(rvu, &ptr[i], 1);
		if (rc) {
			/* Non recoverable error. Let us WARN and return. Keep system alive to
			 * enable debugging
			 */
			WARN(1, "%s Error deleting default entries (pcifunc=%#x) mcam_idx=%u\n",
			     __func__, pcifunc, ptr[i]);
			return;
		}
	}

	/* Drop any software rule objects that still reference the freed
	 * entries.
	 */
	mutex_lock(&mcam->lock);
	list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
		for (int i = 0; i < 4; i++) {
			if (ptr[i] != rule->entry)
				continue;

			list_del(&rule->list);
			kfree(rule);
			break;
		}
	}
	mutex_unlock(&mcam->lock);
}
4293 
/* Allocate MCAM indices for the default rules of @pcifunc and record the
 * pcifunc->index mappings in npc_priv.xa_pf2dfl_rmap. A PF gets the full
 * bcast/mcast/promisc/ucast set; a VF (including LBK) gets a single entry.
 * VF entries are allocated at higher priority than the parent PF's ucast
 * entry when that exists; otherwise a lower-priority slot is used and the
 * pfvf hw_prio is raised to compensate. On mapping failure all entries
 * allocated here are freed again.
 *
 * Returns 0 on success (also when rules already exist or the device is
 * not eligible), a negative error code otherwise.
 */
int npc_cn20k_dft_rules_alloc(struct rvu *rvu, u16 pcifunc)
{
	struct npc_mcam_free_entry_req free_req = { 0 };
	u16 mcam_idx[4] = { 0 }, pf_ucast, pf_pcifunc;
	struct npc_mcam_alloc_entry_req req = { 0 };
	struct npc_mcam_alloc_entry_rsp rsp = { 0 };
	int ret, eidx, i, k, pf, cnt;
	struct rvu_pfvf *pfvf;
	unsigned long index;
	struct msg_rsp free_rsp;
	u16 b, m, p, u;

	if (!npc_priv.init_done)
		return 0;

	if (!npc_is_cgx_or_lbk(rvu, pcifunc)) {
		dev_dbg(rvu->dev,
			"%s: dft rule allocation is only for cgx mapped device, pcifunc=%#x\n",
			__func__, pcifunc);
		return 0;
	}

	/* Check if default rules are already alloced for this pcifunc */
	ret =  npc_cn20k_dft_rules_idx_get(rvu, pcifunc, &b, &m, &p, &u);
	if (!ret) {
		dev_dbg(rvu->dev,
			"%s: default rules are already installed (pcifunc=%#x)\n",
			__func__, pcifunc);
		dev_dbg(rvu->dev,
			"%s: bcast(%u) mcast(%u) promisc(%u) ucast(%u)\n",
			__func__, b, m, p, u);
		return 0;
	}

	/* Set ref index as lowest priority index */
	eidx = 2 * npc_priv.bank_depth - 1;

	/* Install only UCAST for VF */
	cnt = is_vf(pcifunc) ? 1 : ARRAY_SIZE(mcam_idx);

	/* For VF pcifunc, allocate default mcam indexes by taking
	 * ref as PF's ucast index.
	 */
	if (is_vf(pcifunc)) {
		pf = rvu_get_pf(rvu->pdev, pcifunc);
		pf_pcifunc = pf << RVU_CN20K_PFVF_PF_SHIFT;

		/* Get PF's ucast entry index */
		ret = npc_cn20k_dft_rules_idx_get(rvu, pf_pcifunc, NULL,
						  NULL, NULL, &pf_ucast);

		/* There is no PF rules installed; and VF installation comes
		 * first. PF may come later.
		 * TODO: Install PF rules before installing VF rules.
		 */

		/* Set PF's ucast as ref entry */
		if (!ret)
			eidx = pf_ucast;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	pfvf->hw_prio = NPC_DFT_RULE_PRIO;

	/* First attempt: non-contiguous X2 entries at higher priority than
	 * the reference index.
	 */
	req.contig = false;
	req.ref_prio = NPC_MCAM_HIGHER_PRIO;
	req.ref_entry = eidx;
	req.kw_type = NPC_MCAM_KEY_X2;
	req.count = cnt;
	req.hdr.pcifunc = pcifunc;

	ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &req, &rsp);

	/* successfully allocated index */
	if (!ret) {
		/* Copy indexes to local array */
		for (i = 0; i < cnt; i++)
			mcam_idx[i] = rsp.entry_list[i];

		goto chk_sanity;
	}

	/* If there is no slots available and request is for PF,
	 * return error.
	 */
	if (!is_vf(pcifunc)) {
		dev_err(rvu->dev,
			"%s: Default index allocation failed for pcifunc=%#x\n",
			__func__, pcifunc);
		return ret;
	}

	/* We could not find an index with higher priority index for VF.
	 * Find rule with lower priority index and set hardware priority
	 * as NPC_DFT_RULE_PRIO - 1 (higher hw priority)
	 */
	req.contig = false;
	req.kw_type = NPC_MCAM_KEY_X2;
	req.count = cnt;
	req.hdr.pcifunc = pcifunc;
	req.ref_prio = NPC_MCAM_LOWER_PRIO;
	req.ref_entry = eidx + 1;
	ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &req, &rsp);
	if (ret) {
		dev_err(rvu->dev,
			"%s: Default index allocation failed for pcifunc=%#x\n",
			__func__, pcifunc);
		return ret;
	}

	/* Copy indexes to local array */
	for (i = 0; i < cnt; i++)
		mcam_idx[i] = rsp.entry_list[i];

	pfvf->hw_prio = NPC_DFT_RULE_PRIO - 1;

chk_sanity:
	/* Record the allocated indices in the xarray; key encodes both
	 * pcifunc and the rule id (see NPC_DFT_RULE_ID_MK).
	 */
	/* LBK */
	if (is_lbk_vf(rvu, pcifunc)) {
		index = NPC_DFT_RULE_ID_MK(pcifunc, NPC_DFT_RULE_PROMISC_ID);
		ret = xa_insert(&npc_priv.xa_pf2dfl_rmap, index,
				xa_mk_value(mcam_idx[0]), GFP_KERNEL);
		if (ret) {
			dev_err(rvu->dev,
				"%s: Err to insert %s mcam idx to xarray pcifunc=%#x\n",
				__func__,
				npc_dft_rule_name[NPC_DFT_RULE_PROMISC_ID],
				pcifunc);
			goto err;
		}

		goto done;
	}

	/* VF */
	if (is_vf(pcifunc)) {
		index = NPC_DFT_RULE_ID_MK(pcifunc, NPC_DFT_RULE_UCAST_ID);
		ret = xa_insert(&npc_priv.xa_pf2dfl_rmap, index,
				xa_mk_value(mcam_idx[0]), GFP_KERNEL);
		if (ret) {
			dev_err(rvu->dev,
				"%s: Err to insert %s mcam idx to xarray pcifunc=%#x\n",
				__func__,
				npc_dft_rule_name[NPC_DFT_RULE_UCAST_ID],
				pcifunc);
			goto err;
		}

		goto done;
	}

	/* PF */
	for (i = NPC_DFT_RULE_START_ID, k = 0; i < NPC_DFT_RULE_MAX_ID &&
	     k < cnt; i++, k++) {
		index = NPC_DFT_RULE_ID_MK(pcifunc, i);
		ret = xa_insert(&npc_priv.xa_pf2dfl_rmap, index,
				xa_mk_value(mcam_idx[k]), GFP_KERNEL);
		if (ret) {
			dev_err(rvu->dev,
				"%s: Err to insert %s mcam idx to xarray pcifunc=%#x\n",
				__func__, npc_dft_rule_name[i],
				pcifunc);
			/* Roll back mappings inserted so far.
			 * NOTE(review): loop variable shadows the outer
			 * 'u16 p' — harmless here, but worth renaming.
			 */
			for (int p = NPC_DFT_RULE_START_ID; p < i; p++) {
				index = NPC_DFT_RULE_ID_MK(pcifunc, p);
				xa_erase(&npc_priv.xa_pf2dfl_rmap, index);
			}
			goto err;
		}
	}

done:
	return 0;

err:
	/* Mapping failed: release every MCAM entry owned by this pcifunc */
	free_req.hdr.pcifunc = pcifunc;
	free_req.all = 1;
	ret = rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &free_rsp);
	if (ret)
		dev_err(rvu->dev,
			"%s: Error deleting default entries (pcifunc=%#x\n",
			__func__, pcifunc);

	return -EFAULT;
}
4478 
/* One-time initialization of the file-scope npc_priv state: read bank and
 * subbank geometry from NPC_AF_CONST1/2, read the configured key type,
 * allocate the subbank array and per-PF xarrays, and build the subbank
 * search order. On failure everything allocated here is released again.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int npc_priv_init(struct rvu *rvu)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, num_banks, bank_depth;
	int num_subbanks, subbank_depth;
	u64 npc_const1, npc_const2 = 0;
	struct npc_subbank *sb;
	u64 cfg;
	int i;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n",
			__func__);
		return -ENODEV;
	}

	/* CONST2 is only valid when CONST1 bit 63 is set */
	npc_const1 = rvu_read64(rvu, blkaddr, NPC_AF_CONST1);
	if (npc_const1 & BIT_ULL(63))
		npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);

	num_banks = mcam->banks;
	bank_depth = mcam->banksize;

	/* Subbank count lives in CONST2[39:32]; zero means CONST2 was
	 * invalid or the hw reported nothing usable.
	 */
	num_subbanks = FIELD_GET(GENMASK_ULL(39, 32), npc_const2);
	if (!num_subbanks) {
		dev_err(rvu->dev, "Number of subbanks is zero\n");
		return -EFAULT;
	}

	if (num_subbanks & (num_subbanks - 1)) {
		dev_err(rvu->dev,
			"subbanks cnt(%u) should be a power of 2\n",
			num_subbanks);
		return -EINVAL;
	}

	npc_priv.num_subbanks = num_subbanks;

	subbank_depth =	bank_depth / num_subbanks;

	npc_priv.bank_depth = bank_depth;
	npc_priv.subbank_depth = subbank_depth;

	/* Get kex configured key size */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(0));
	npc_priv.kw = FIELD_GET(GENMASK_ULL(34, 32), cfg);

	/* NOTE(review): npc_kw_name[] is only populated for the known key
	 * types; presumably the 3-bit field never exceeds them — confirm.
	 */
	dev_info(rvu->dev,
		 "banks=%u depth=%u, subbanks=%u depth=%u, key type=%s\n",
		 num_banks, bank_depth, num_subbanks, subbank_depth,
		 npc_kw_name[npc_priv.kw]);

	npc_priv.sb = kcalloc(num_subbanks, sizeof(struct npc_subbank),
			      GFP_KERNEL);
	if (!npc_priv.sb)
		return -ENOMEM;

	xa_init_flags(&npc_priv.xa_sb_used, XA_FLAGS_ALLOC);
	xa_init_flags(&npc_priv.xa_sb_free, XA_FLAGS_ALLOC);
	xa_init_flags(&npc_priv.xa_idx2pf_map, XA_FLAGS_ALLOC);
	xa_init_flags(&npc_priv.xa_pf_map, XA_FLAGS_ALLOC);
	xa_init_flags(&npc_priv.xa_pf2dfl_rmap, XA_FLAGS_ALLOC);
	xa_init_flags(&npc_priv.xa_idx2vidx_map, XA_FLAGS_ALLOC);
	xa_init_flags(&npc_priv.xa_vidx2idx_map, XA_FLAGS_ALLOC);

	if (npc_create_srch_order(num_subbanks))
		goto fail1;

	npc_populate_restricted_idxs(num_subbanks);

	/* Initialize subbanks */
	for (i = 0, sb = npc_priv.sb; i < num_subbanks; i++, sb++)
		npc_subbank_init(rvu, sb, i);

	/* Get number of pcifuncs in the system */
	npc_priv.pf_cnt = npc_pcifunc_map_create(rvu);
	npc_priv.xa_pf2idx_map = kcalloc(npc_priv.pf_cnt,
					 sizeof(struct xarray),
					 GFP_KERNEL);
	if (!npc_priv.xa_pf2idx_map)
		goto fail2;

	for (i = 0; i < npc_priv.pf_cnt; i++)
		xa_init_flags(&npc_priv.xa_pf2idx_map[i], XA_FLAGS_ALLOC);

	INIT_LIST_HEAD(&npc_priv.defrag_lh);
	mutex_init(&npc_priv.lock);

	return 0;

	/* Unwind in reverse acquisition order */
fail2:
	kfree(subbank_srch_order);
	subbank_srch_order = NULL;

fail1:
	xa_destroy(&npc_priv.xa_sb_used);
	xa_destroy(&npc_priv.xa_sb_free);
	xa_destroy(&npc_priv.xa_idx2pf_map);
	xa_destroy(&npc_priv.xa_pf_map);
	xa_destroy(&npc_priv.xa_pf2dfl_rmap);
	xa_destroy(&npc_priv.xa_idx2vidx_map);
	xa_destroy(&npc_priv.xa_vidx2idx_map);
	kfree(npc_priv.sb);
	npc_priv.sb = NULL;
	return -ENOMEM;
}
4586 
npc_cn20k_deinit(struct rvu * rvu)4587 void npc_cn20k_deinit(struct rvu *rvu)
4588 {
4589 	int i;
4590 
4591 	xa_destroy(&npc_priv.xa_sb_used);
4592 	xa_destroy(&npc_priv.xa_sb_free);
4593 	xa_destroy(&npc_priv.xa_idx2pf_map);
4594 	xa_destroy(&npc_priv.xa_pf_map);
4595 	xa_destroy(&npc_priv.xa_pf2dfl_rmap);
4596 	xa_destroy(&npc_priv.xa_idx2vidx_map);
4597 	xa_destroy(&npc_priv.xa_vidx2idx_map);
4598 
4599 	for (i = 0; i < npc_priv.pf_cnt; i++)
4600 		xa_destroy(&npc_priv.xa_pf2idx_map[i]);
4601 
4602 	kfree(npc_priv.xa_pf2idx_map);
4603 	/* No need to destroy mutex lock as it is
4604 	 * part of subbank structure
4605 	 */
4606 	kfree(npc_priv.sb);
4607 	kfree(subbank_srch_order);
4608 }
4609 
npc_setup_mcam_section(struct rvu * rvu,int key_type)4610 static int npc_setup_mcam_section(struct rvu *rvu, int key_type)
4611 {
4612 	int blkaddr, sec;
4613 
4614 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
4615 	if (blkaddr < 0) {
4616 		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
4617 		return -ENODEV;
4618 	}
4619 
4620 	for (sec = 0; sec < npc_priv.num_subbanks; sec++)
4621 		rvu_write64(rvu, blkaddr,
4622 			    NPC_AF_MCAM_SECTIONX_CFG_EXT(sec), key_type);
4623 
4624 	return 0;
4625 }
4626 
npc_cn20k_init(struct rvu * rvu)4627 int npc_cn20k_init(struct rvu *rvu)
4628 {
4629 	int err;
4630 
4631 	err = npc_priv_init(rvu);
4632 	if (err) {
4633 		dev_err(rvu->dev, "%s: Error to init\n",
4634 			__func__);
4635 		return err;
4636 	}
4637 
4638 	err = npc_setup_mcam_section(rvu, NPC_MCAM_KEY_X2);
4639 	if (err) {
4640 		dev_err(rvu->dev, "%s: mcam section configuration failure\n",
4641 			__func__);
4642 		return err;
4643 	}
4644 
4645 	npc_priv.init_done = true;
4646 
4647 	return 0;
4648 }
4649