/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/parman.h>

#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_actions.h"
#include "spectrum_mr.h"

struct mlxsw_sp_mr_tcam_region {
	struct mlxsw_sp *mlxsw_sp;
	enum mlxsw_reg_rtar_key_type rtar_key_type;
	struct parman *parman;
	struct parman_prio *parman_prios;
};

struct mlxsw_sp_mr_tcam {
	struct mlxsw_sp_mr_tcam_region ipv4_tcam_region;
};

/* This struct maps to one RIGR2 register entry */
struct mlxsw_sp_mr_erif_sublist {
	struct list_head list;
	u32 rigr2_kvdl_index;
	int num_erifs;
	u16 erif_indices[MLXSW_REG_RIGR2_MAX_ERIFS];
	bool synced;
};

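/* An erif_list is a chain of such sublists: each sublist holds a bounded
 * number of egress RIF indices (capped by the MC_ERIF_LIST_ENTRIES
 * resource) and, unless it is the last one, points to the KVDL index of
 * the next sublist, which the device follows when replicating a packet.
 */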
struct mlxsw_sp_mr_tcam_erif_list {
	struct list_head erif_sublists;
	u32 kvdl_index;
};

static bool
mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
	int erif_list_entries = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   MC_ERIF_LIST_ENTRIES);

	return erif_sublist->num_erifs == erif_list_entries;
}

static void
mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	INIT_LIST_HEAD(&erif_list->erif_sublists);
}

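/* A single KVD linear entry is allocated for each RIGR2 sublist. */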
#define MLXSW_SP_KVDL_RIGR2_SIZE 1

static struct mlxsw_sp_mr_erif_sublist *
mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *erif_sublist;
	int err;

	erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
	if (!erif_sublist)
		return ERR_PTR(-ENOMEM);
	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
				  &erif_sublist->rigr2_kvdl_index);
	if (err) {
		kfree(erif_sublist);
		return ERR_PTR(err);
	}

	list_add_tail(&erif_sublist->list, &erif_list->erif_sublists);
	return erif_sublist;
}

static void
mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
	list_del(&erif_sublist->list);
	mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
	kfree(erif_sublist);
}

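/* Append an eRIF to the erif_list, opening a new sublist if the list is
 * empty or its last sublist is full. The change takes effect in hardware
 * only once the list is committed.
 */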
static int
mlxsw_sp_mr_erif_list_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_mr_tcam_erif_list *erif_list,
			  u16 erif_index)
{
	struct mlxsw_sp_mr_erif_sublist *sublist;

	/* If there is no erif sublist yet, or the last one is full, allocate
	 * a new one.
	 */
	if (list_empty(&erif_list->erif_sublists)) {
		sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, erif_list);
		if (IS_ERR(sublist))
			return PTR_ERR(sublist);
		erif_list->kvdl_index = sublist->rigr2_kvdl_index;
	} else {
		sublist = list_last_entry(&erif_list->erif_sublists,
					  struct mlxsw_sp_mr_erif_sublist,
					  list);
		sublist->synced = false;
		if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp, sublist)) {
			sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp,
								  erif_list);
			if (IS_ERR(sublist))
				return PTR_ERR(sublist);
		}
	}

	/* Add the eRIF at the last sublist's next free index */
	sublist->erif_indices[sublist->num_erifs++] = erif_index;
	return 0;
}

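/* Destroy all sublists of an erif_list, releasing their KVDL entries. */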
static void
mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *erif_sublist, *tmp;

	list_for_each_entry_safe(erif_sublist, tmp, &erif_list->erif_sublists,
				 list)
		mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp, erif_sublist);
}

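/* Write every not-yet-synced sublist to the device via the RIGR2 register,
 * linking each sublist to its successor's KVDL index.
 */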
static int
mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *curr_sublist;
	char rigr2_pl[MLXSW_REG_RIGR2_LEN];
	int err;
	int i;

	list_for_each_entry(curr_sublist, &erif_list->erif_sublists, list) {
		if (curr_sublist->synced)
			continue;

		/* The last sublist does not point to a next one; any other
		 * sublist packs the next sublist's KVDL index as well.
		 */
		if (list_is_last(&curr_sublist->list,
				 &erif_list->erif_sublists)) {
			mlxsw_reg_rigr2_pack(rigr2_pl,
					     curr_sublist->rigr2_kvdl_index,
					     false, 0);
		} else {
			struct mlxsw_sp_mr_erif_sublist *next_sublist;

			next_sublist = list_next_entry(curr_sublist, list);
			mlxsw_reg_rigr2_pack(rigr2_pl,
					     curr_sublist->rigr2_kvdl_index,
					     true,
					     next_sublist->rigr2_kvdl_index);
		}

		/* Pack all the erifs */
		for (i = 0; i < curr_sublist->num_erifs; i++) {
			u16 erif_index = curr_sublist->erif_indices[i];

			mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true,
							erif_index);
		}

		/* Write the entry */
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rigr2),
				      rigr2_pl);
		if (err)
			/* No rollback is needed here, as this hardware
			 * entry is not yet pointed to by any TCAM rule.
			 */
			return err;
		curr_sublist->synced = true;
	}
	return 0;
}

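/* Transfer ownership of all sublists from one erif_list to another. */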
static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to,
				       struct mlxsw_sp_mr_tcam_erif_list *from)
{
	list_splice(&from->erif_sublists, &to->erif_sublists);
	to->kvdl_index = from->kvdl_index;
}

struct mlxsw_sp_mr_tcam_route {
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	u32 counter_index;
	struct parman_item parman_item;
	struct parman_prio *parman_prio;
	enum mlxsw_sp_mr_route_action action;
	struct mlxsw_sp_mr_route_key key;
	u16 irif_index;
	u16 min_mtu;
};

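/* Build and commit the flexible action block for a route: a counter action,
 * followed by either a trap action or a multicast router action (possibly
 * combined with trap-and-forward), depending on the route action.
 */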
static struct mlxsw_afa_block *
mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
				  enum mlxsw_sp_mr_route_action route_action,
				  u16 irif_index, u32 counter_index,
				  u16 min_mtu,
				  struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_afa_block *afa_block;
	int err;

	afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
	if (!afa_block)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_afa_block_append_counter(afa_block, counter_index);
	if (err)
		goto err;

	switch (route_action) {
	case MLXSW_SP_MR_ROUTE_ACTION_TRAP:
		err = mlxsw_afa_block_append_trap(afa_block,
						  MLXSW_TRAP_ID_ACL1);
		if (err)
			goto err;
		break;
	case MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD:
	case MLXSW_SP_MR_ROUTE_ACTION_FORWARD:
		/* If we are about to append a multicast router action, commit
		 * the erif_list.
		 */
		err = mlxsw_sp_mr_erif_list_commit(mlxsw_sp, erif_list);
		if (err)
			goto err;

		err = mlxsw_afa_block_append_mcrouter(afa_block, irif_index,
						      min_mtu, false,
						      erif_list->kvdl_index);
		if (err)
			goto err;

		if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD) {
			err = mlxsw_afa_block_append_trap_and_forward(afa_block,
								      MLXSW_TRAP_ID_ACL2);
			if (err)
				goto err;
		}
		break;
	default:
		err = -EINVAL;
		goto err;
	}

	err = mlxsw_afa_block_commit(afa_block);
	if (err)
		goto err;
	return afa_block;
err:
	mlxsw_afa_block_destroy(afa_block);
	return ERR_PTR(err);
}

static void
mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
{
	mlxsw_afa_block_destroy(afa_block);
}

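/* Write (or overwrite) the RMFT2 rule for a route at the TCAM index that
 * parman assigned to it.
 */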
static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
					  struct parman_item *parman_item,
					  struct mlxsw_sp_mr_route_key *key,
					  struct mlxsw_afa_block *afa_block)
{
	char rmft2_pl[MLXSW_REG_RMFT2_LEN];

	switch (key->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
					  key->vrid,
					  MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
					  ntohl(key->group.addr4),
					  ntohl(key->group_mask.addr4),
					  ntohl(key->source.addr4),
					  ntohl(key->source_mask.addr4),
					  mlxsw_afa_block_first_set(afa_block));
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON_ONCE(1);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}

static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
					 struct parman_item *parman_item)
{
	char rmft2_pl[MLXSW_REG_RMFT2_LEN];

	mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, vrid,
				  0, 0, 0, 0, 0, 0, NULL);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}

static int
mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_mr_tcam_erif_list *erif_list,
			       struct mlxsw_sp_mr_route_info *route_info)
{
	int err;
	int i;

	for (i = 0; i < route_info->erif_num; i++) {
		u16 erif_index = route_info->erif_indices[i];

		err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, erif_list,
						erif_index);
		if (err)
			return err;
	}
	return 0;
}

static int
mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
				       struct mlxsw_sp_mr_tcam_route *route,
				       enum mlxsw_sp_mr_route_prio prio)
{
	struct parman_prio *parman_prio = NULL;
	int err;

	switch (route->key.proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		parman_prio = &mr_tcam->ipv4_tcam_region.parman_prios[prio];
		err = parman_item_add(mr_tcam->ipv4_tcam_region.parman,
				      parman_prio, &route->parman_item);
		if (err)
			return err;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON_ONCE(1);
	}
	route->parman_prio = parman_prio;
	return 0;
}

static void
mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
					  struct mlxsw_sp_mr_tcam_route *route)
{
	switch (route->key.proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		parman_item_remove(mr_tcam->ipv4_tcam_region.parman,
				   route->parman_prio, &route->parman_item);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON_ONCE(1);
	}
}

static int
mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
			      void *route_priv,
			      struct mlxsw_sp_mr_route_params *route_params)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;
	int err;

	route->key = route_params->key;
	route->irif_index = route_params->value.irif_index;
	route->min_mtu = route_params->value.min_mtu;
	route->action = route_params->value.route_action;

	/* Create the egress RIFs list */
	mlxsw_sp_mr_erif_list_init(&route->erif_list);
	err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list,
					     &route_params->value);
	if (err)
		goto err_erif_populate;

	/* Create the flow counter */
	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index);
	if (err)
		goto err_counter_alloc;

	/* Create the flexible action block */
	route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
							     route->action,
							     route->irif_index,
							     route->counter_index,
							     route->min_mtu,
							     &route->erif_list);
	if (IS_ERR(route->afa_block)) {
		err = PTR_ERR(route->afa_block);
		goto err_afa_block_create;
	}

	/* Allocate a slot in the TCAM */
	err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
						     route_params->prio);
	if (err)
		goto err_parman_item_add;

	/* Write the route to the TCAM */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, route->afa_block);
	if (err)
		goto err_route_replace;
	return 0;

err_route_replace:
	mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
err_parman_item_add:
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
err_afa_block_create:
	mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
err_erif_populate:
err_counter_alloc:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	return err;
}

static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
					   void *priv, void *route_priv)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;

	mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
				      &route->parman_item);
	mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
}

static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
					void *route_priv, u64 *packets,
					u64 *bytes)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
					 packets, bytes);
}

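/* The update helpers below work in a make-before-break fashion: a new
 * flexible action block is created and written to the TCAM entry first,
 * and only then is the old block destroyed, so a valid rule is in place
 * throughout the update.
 */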
static int
mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
				     void *route_priv,
				     enum mlxsw_sp_mr_route_action route_action)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new flexible action block */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route_action,
						      route->irif_index,
						      route->counter_index,
						      route->min_mtu,
						      &route->erif_list);
	if (IS_ERR(afa_block))
		return PTR_ERR(afa_block);

	/* Update the TCAM route entry */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, afa_block);
	if (err)
		goto err;

	/* Delete the old one */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	route->afa_block = afa_block;
	route->action = route_action;
	return 0;
err:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
	return err;
}

static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
						 void *route_priv, u16 min_mtu)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new flexible action block */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
						      route->action,
						      route->irif_index,
						      route->counter_index,
						      min_mtu,
						      &route->erif_list);
	if (IS_ERR(afa_block))
		return PTR_ERR(afa_block);

	/* Update the TCAM route entry */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, afa_block);
	if (err)
		goto err;

	/* Delete the old one */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	route->afa_block = afa_block;
	route->min_mtu = min_mtu;
	return 0;
err:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
	return err;
}

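/* The ingress RIF of a forwarding route is encoded in its flexible action
 * block, so the irif can only be updated in place while the route action
 * is TRAP.
 */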
static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp *mlxsw_sp,
					      void *route_priv, u16 irif_index)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;

	if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return -EINVAL;
	route->irif_index = irif_index;
	return 0;
}

static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
					   void *route_priv, u16 erif_index)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	int err;

	err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list,
					erif_index);
	if (err)
		return err;

	/* Commit the updated erif_list only if the route action is not TRAP,
	 * as a trapping route does not use the erif_list.
	 */
	if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return mlxsw_sp_mr_erif_list_commit(mlxsw_sp,
						    &route->erif_list);
	return 0;
}

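/* Since sublists only support appending, eRIF removal is done by building
 * a new erif_list without the removed eRIF and switching the route over
 * to it.
 */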
static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
					   void *route_priv, u16 erif_index)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_erif_sublist *erif_sublist;
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	int err;
	int i;

	/* Create a copy of the original erif_list without the deleted entry */
	mlxsw_sp_mr_erif_list_init(&erif_list);
	list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists,
			    list) {
		for (i = 0; i < erif_sublist->num_erifs; i++) {
			u16 curr_erif = erif_sublist->erif_indices[i];

			if (curr_erif == erif_index)
				continue;
			err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list,
							curr_erif);
			if (err)
				goto err_erif_list_add;
		}
	}

	/* Create the flexible action block pointing to the new erif_list */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action,
						      route->irif_index,
						      route->counter_index,
						      route->min_mtu,
						      &erif_list);
	if (IS_ERR(afa_block)) {
		err = PTR_ERR(afa_block);
		goto err_afa_block_create;
	}

	/* Update the TCAM route entry */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, afa_block);
	if (err)
		goto err_route_write;

	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	route->afa_block = afa_block;
	mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
	return 0;

err_route_write:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_list_add:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
	return err;
}

static int
mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
			      struct mlxsw_sp_mr_route_info *route_info)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new erif_list */
	mlxsw_sp_mr_erif_list_init(&erif_list);
	err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &erif_list, route_info);
	if (err)
		goto err_erif_populate;

	/* Create the flexible action block pointing to the new erif_list */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
						      route_info->route_action,
						      route_info->irif_index,
						      route->counter_index,
						      route_info->min_mtu,
						      &erif_list);
	if (IS_ERR(afa_block)) {
		err = PTR_ERR(afa_block);
		goto err_afa_block_create;
	}

	/* Update the TCAM route entry */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, afa_block);
	if (err)
		goto err_route_write;

	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	route->afa_block = afa_block;
	mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
	route->action = route_info->route_action;
	route->irif_index = route_info->irif_index;
	route->min_mtu = route_info->min_mtu;
	return 0;

err_route_write:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_populate:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
	return err;
}

#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16

static int
mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rtar_pl[MLXSW_REG_RTAR_LEN];

	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
			    mr_tcam_region->rtar_key_type,
			    MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}

static void
mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rtar_pl[MLXSW_REG_RTAR_LEN];

	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
			    mr_tcam_region->rtar_key_type, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}

static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
						 unsigned long new_count)
{
	struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rtar_pl[MLXSW_REG_RTAR_LEN];
	u64 max_tcam_rules;

	max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
	if (new_count > max_tcam_rules)
		return -EINVAL;
	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
			    mr_tcam_region->rtar_key_type, new_count);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}

static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
						unsigned long from_index,
						unsigned long to_index,
						unsigned long count)
{
	struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rrcr_pl[MLXSW_REG_RRCR_LEN];

	mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
			    from_index, count,
			    mr_tcam_region->rtar_key_type, to_index);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
}

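/* parman, the priority array manager library, keeps the region's rules
 * ordered by priority, growing the region in RESIZE_STEP increments and
 * relocating existing rules through the move callback when needed.
 */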
static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
	.base_count	= MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
	.resize_step	= MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
	.resize		= mlxsw_sp_mr_tcam_region_parman_resize,
	.move		= mlxsw_sp_mr_tcam_region_parman_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};

static int
mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
			     enum mlxsw_reg_rtar_key_type rtar_key_type)
{
	struct parman_prio *parman_prios;
	struct parman *parman;
	int err;
	int i;

	mr_tcam_region->rtar_key_type = rtar_key_type;
	mr_tcam_region->mlxsw_sp = mlxsw_sp;

	err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
	if (err)
		return err;

	parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
			       mr_tcam_region);
	if (!parman) {
		err = -ENOMEM;
		goto err_parman_create;
	}
	mr_tcam_region->parman = parman;

	parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
				     sizeof(*parman_prios), GFP_KERNEL);
	if (!parman_prios) {
		err = -ENOMEM;
		goto err_parman_prios_alloc;
	}
	mr_tcam_region->parman_prios = parman_prios;

	for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
		parman_prio_init(mr_tcam_region->parman,
				 &mr_tcam_region->parman_prios[i], i);
	return 0;

err_parman_prios_alloc:
	parman_destroy(parman);
err_parman_create:
	mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
	return err;
}

static void
mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
	int i;

	for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
		parman_prio_fini(&mr_tcam_region->parman_prios[i]);
	kfree(mr_tcam_region->parman_prios);
	parman_destroy(mr_tcam_region->parman);
	mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
}

static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
		return -EIO;

	return mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
					    &mr_tcam->ipv4_tcam_region,
					    MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST);
}

static void mlxsw_sp_mr_tcam_fini(void *priv)
{
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;

	mlxsw_sp_mr_tcam_region_fini(&mr_tcam->ipv4_tcam_region);
}

const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
	.priv_size = sizeof(struct mlxsw_sp_mr_tcam),
	.route_priv_size = sizeof(struct mlxsw_sp_mr_tcam_route),
	.init = mlxsw_sp_mr_tcam_init,
	.route_create = mlxsw_sp_mr_tcam_route_create,
	.route_update = mlxsw_sp_mr_tcam_route_update,
	.route_stats = mlxsw_sp_mr_tcam_route_stats,
	.route_action_update = mlxsw_sp_mr_tcam_route_action_update,
	.route_min_mtu_update = mlxsw_sp_mr_tcam_route_min_mtu_update,
	.route_irif_update = mlxsw_sp_mr_tcam_route_irif_update,
	.route_erif_add = mlxsw_sp_mr_tcam_route_erif_add,
	.route_erif_del = mlxsw_sp_mr_tcam_route_erif_del,
	.route_destroy = mlxsw_sp_mr_tcam_route_destroy,
	.fini = mlxsw_sp_mr_tcam_fini,
};
839