/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/parman.h>

#include "spectrum_mr_tcam.h"
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_actions.h"
#include "spectrum_mr.h"

struct mlxsw_sp_mr_tcam_region {
	struct mlxsw_sp *mlxsw_sp;
	enum mlxsw_reg_rtar_key_type rtar_key_type;
	struct parman *parman;
	struct parman_prio *parman_prios;
};

struct mlxsw_sp_mr_tcam {
	struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
};

/* This struct maps to one RIGR2 register entry */
struct mlxsw_sp_mr_erif_sublist {
	struct list_head list;
	u32 rigr2_kvdl_index;
	int num_erifs;
	u16 erif_indices[MLXSW_REG_RIGR2_MAX_ERIFS];
	bool synced;
};

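/* A full eRIF list is kept as a chain of sublists, each mapping to one
 * RIGR2 register entry. kvdl_index caches the KVDL index of the first
 * sublist, which is the one a forwarding action points at; the following
 * sublists are reached through the next-entry index packed into each
 * RIGR2 entry when the list is committed.
 */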
struct mlxsw_sp_mr_tcam_erif_list {
	struct list_head erif_sublists;
	u32 kvdl_index;
};

static bool
mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
	int erif_list_entries = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   MC_ERIF_LIST_ENTRIES);

	return erif_sublist->num_erifs == erif_list_entries;
}

static void
mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	INIT_LIST_HEAD(&erif_list->erif_sublists);
}

#define MLXSW_SP_KVDL_RIGR2_SIZE 1

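/* Allocate a new sublist backed by a single KVDL entry for one RIGR2
 * register and link it at the tail of the eRIF list.
 */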
static struct mlxsw_sp_mr_erif_sublist *
mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *erif_sublist;
	int err;

	erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
	if (!erif_sublist)
		return ERR_PTR(-ENOMEM);
	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
				  &erif_sublist->rigr2_kvdl_index);
	if (err) {
		kfree(erif_sublist);
		return ERR_PTR(err);
	}

	list_add_tail(&erif_sublist->list, &erif_list->erif_sublists);
	return erif_sublist;
}

static void
mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
	list_del(&erif_sublist->list);
	mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
	kfree(erif_sublist);
}

static int
mlxsw_sp_mr_erif_list_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_mr_tcam_erif_list *erif_list,
			  u16 erif_index)
{
	struct mlxsw_sp_mr_erif_sublist *sublist;

	/* If there is no eRIF sublist yet, or the last one is full, allocate
	 * a new one.
	 */
	if (list_empty(&erif_list->erif_sublists)) {
		sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, erif_list);
		if (IS_ERR(sublist))
			return PTR_ERR(sublist);
		erif_list->kvdl_index = sublist->rigr2_kvdl_index;
	} else {
		sublist = list_last_entry(&erif_list->erif_sublists,
					  struct mlxsw_sp_mr_erif_sublist,
					  list);
		sublist->synced = false;
		if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp, sublist)) {
			sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp,
								  erif_list);
			if (IS_ERR(sublist))
				return PTR_ERR(sublist);
		}
	}

	/* Append the eRIF at the end of the last sublist */
	sublist->erif_indices[sublist->num_erifs++] = erif_index;
	return 0;
}

static void
mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *erif_sublist, *tmp;

	list_for_each_entry_safe(erif_sublist, tmp, &erif_list->erif_sublists,
				 list)
		mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp, erif_sublist);
}

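/* Write all not-yet-synced sublists to hardware. Each sublist is packed
 * into its RIGR2 register entry together with its eRIFs and, unless it is
 * the last sublist, with the KVDL index of the next sublist, chaining the
 * entries into one list.
 */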
static int
mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *curr_sublist;
	char rigr2_pl[MLXSW_REG_RIGR2_LEN];
	int err;
	int i;

	list_for_each_entry(curr_sublist, &erif_list->erif_sublists, list) {
		if (curr_sublist->synced)
			continue;

		/* If the sublist is not the last one, pack the next index */
		if (list_is_last(&curr_sublist->list,
				 &erif_list->erif_sublists)) {
			mlxsw_reg_rigr2_pack(rigr2_pl,
					     curr_sublist->rigr2_kvdl_index,
					     false, 0);
		} else {
			struct mlxsw_sp_mr_erif_sublist *next_sublist;

			next_sublist = list_next_entry(curr_sublist, list);
			mlxsw_reg_rigr2_pack(rigr2_pl,
					     curr_sublist->rigr2_kvdl_index,
					     true,
					     next_sublist->rigr2_kvdl_index);
		}

		/* Pack all the erifs */
		for (i = 0; i < curr_sublist->num_erifs; i++) {
			u16 erif_index = curr_sublist->erif_indices[i];

			mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true,
							erif_index);
		}

		/* Write the entry */
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rigr2),
				      rigr2_pl);
		if (err)
			/* No need for a rollback here because this
			 * hardware entry should not be pointed to yet.
			 */
			return err;
		curr_sublist->synced = true;
	}
	return 0;
}

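/* Splice all sublists of 'from' onto 'to' and carry over the KVDL index of
 * the first sublist; 'from' is left stale and is discarded by the callers.
 */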
static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to,
				       struct mlxsw_sp_mr_tcam_erif_list *from)
{
	list_splice(&from->erif_sublists, &to->erif_sublists);
	to->kvdl_index = from->kvdl_index;
}

struct mlxsw_sp_mr_tcam_route {
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	u32 counter_index;
	struct parman_item parman_item;
	struct parman_prio *parman_prio;
	enum mlxsw_sp_mr_route_action action;
	struct mlxsw_sp_mr_route_key key;
	u16 irif_index;
	u16 min_mtu;
};

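/* Build the flexible action block for a route: a counter bind first, then
 * either a trap action (TRAP) or a multicast router action pointing at the
 * committed eRIF list (FORWARD), optionally followed by a trap-and-forward
 * action (TRAP_AND_FORWARD).
 */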
static struct mlxsw_afa_block *
mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
				  enum mlxsw_sp_mr_route_action route_action,
				  u16 irif_index, u32 counter_index,
				  u16 min_mtu,
				  struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_afa_block *afa_block;
	int err;

	afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
	if (!afa_block)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_afa_block_append_allocated_counter(afa_block,
						       counter_index);
	if (err)
		goto err;

	switch (route_action) {
	case MLXSW_SP_MR_ROUTE_ACTION_TRAP:
		err = mlxsw_afa_block_append_trap(afa_block,
						  MLXSW_TRAP_ID_ACL1);
		if (err)
			goto err;
		break;
	case MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD:
	case MLXSW_SP_MR_ROUTE_ACTION_FORWARD:
		/* If we are about to append a multicast router action, commit
		 * the erif_list.
		 */
		err = mlxsw_sp_mr_erif_list_commit(mlxsw_sp, erif_list);
		if (err)
			goto err;

		err = mlxsw_afa_block_append_mcrouter(afa_block, irif_index,
						      min_mtu, false,
						      erif_list->kvdl_index);
		if (err)
			goto err;

		if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD) {
			err = mlxsw_afa_block_append_trap_and_forward(afa_block,
								      MLXSW_TRAP_ID_ACL2);
			if (err)
				goto err;
		}
		break;
	default:
		err = -EINVAL;
		goto err;
	}

	err = mlxsw_afa_block_commit(afa_block);
	if (err)
		goto err;
	return afa_block;
err:
	mlxsw_afa_block_destroy(afa_block);
	return ERR_PTR(err);
}

static void
mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
{
	mlxsw_afa_block_destroy(afa_block);
}

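/* Pack an RMFT2 register according to the route protocol and write it to
 * install or update the route at its parman-assigned TCAM index.
 */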
static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
					  struct parman_item *parman_item,
					  struct mlxsw_sp_mr_route_key *key,
					  struct mlxsw_afa_block *afa_block)
{
	char rmft2_pl[MLXSW_REG_RMFT2_LEN];

	switch (key->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
					  key->vrid,
					  MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
					  ntohl(key->group.addr4),
					  ntohl(key->group_mask.addr4),
					  ntohl(key->source.addr4),
					  ntohl(key->source_mask.addr4),
					  mlxsw_afa_block_first_set(afa_block));
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
					  key->vrid,
					  MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
					  key->group.addr6,
					  key->group_mask.addr6,
					  key->source.addr6,
					  key->source_mask.addr6,
					  mlxsw_afa_block_first_set(afa_block));
		break;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}

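/* Invalidate the route's TCAM entry by writing its RMFT2 register with the
 * valid bit cleared.
 */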
static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
					 struct mlxsw_sp_mr_route_key *key,
					 struct parman_item *parman_item)
{
	struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
	char rmft2_pl[MLXSW_REG_RMFT2_LEN];

	switch (key->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
					  vrid, 0, 0, 0, 0, 0, 0, NULL);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
					  vrid, 0, 0, zero_addr, zero_addr,
					  zero_addr, zero_addr, NULL);
		break;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}

static int
mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_mr_tcam_erif_list *erif_list,
			       struct mlxsw_sp_mr_route_info *route_info)
{
	int err;
	int i;

	for (i = 0; i < route_info->erif_num; i++) {
		u16 erif_index = route_info->erif_indices[i];

		err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, erif_list,
						erif_index);
		if (err)
			return err;
	}
	return 0;
}

static struct mlxsw_sp_mr_tcam_region *
mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam,
				 enum mlxsw_sp_l3proto proto)
{
	return &mr_tcam->tcam_regions[proto];
}

static int
mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
				       struct mlxsw_sp_mr_tcam_route *route,
				       enum mlxsw_sp_mr_route_prio prio)
{
	struct mlxsw_sp_mr_tcam_region *tcam_region;
	int err;

	tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
						       route->key.proto);
	err = parman_item_add(tcam_region->parman,
			      &tcam_region->parman_prios[prio],
			      &route->parman_item);
	if (err)
		return err;

	route->parman_prio = &tcam_region->parman_prios[prio];
	return 0;
}

static void
mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
					  struct mlxsw_sp_mr_tcam_route *route)
{
	struct mlxsw_sp_mr_tcam_region *tcam_region;

	tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
						       route->key.proto);

	parman_item_remove(tcam_region->parman,
			   route->parman_prio, &route->parman_item);
}

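/* Create a route in five steps: populate the eRIF list, allocate a flow
 * counter, build the flexible action block, allocate a TCAM slot via
 * parman and, finally, write the route to the TCAM. Errors unwind in
 * reverse order.
 */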
static int
mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
			      void *route_priv,
			      struct mlxsw_sp_mr_route_params *route_params)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;
	int err;

	route->key = route_params->key;
	route->irif_index = route_params->value.irif_index;
	route->min_mtu = route_params->value.min_mtu;
	route->action = route_params->value.route_action;

	/* Create the egress RIFs list */
	mlxsw_sp_mr_erif_list_init(&route->erif_list);
	err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list,
					     &route_params->value);
	if (err)
		goto err_erif_populate;

	/* Create the flow counter */
	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index);
	if (err)
		goto err_counter_alloc;

	/* Create the flexible action block */
	route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
							     route->action,
							     route->irif_index,
							     route->counter_index,
							     route->min_mtu,
							     &route->erif_list);
	if (IS_ERR(route->afa_block)) {
		err = PTR_ERR(route->afa_block);
		goto err_afa_block_create;
	}

	/* Allocate place in the TCAM */
	err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
						     route_params->prio);
	if (err)
		goto err_parman_item_add;

	/* Write the route to the TCAM */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, route->afa_block);
	if (err)
		goto err_route_replace;
	return 0;

err_route_replace:
	mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
err_parman_item_add:
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
err_afa_block_create:
	mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
err_erif_populate:
err_counter_alloc:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	return err;
}

static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
					   void *priv, void *route_priv)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;

	mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
				      &route->key, &route->parman_item);
	mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
}

static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
					void *route_priv, u64 *packets,
					u64 *bytes)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
					 packets, bytes);
}

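/* Route action updates follow a make-before-break pattern: build a new
 * flexible action block, replace the TCAM entry to point at it, and only
 * then destroy the old block. The MTU update below works the same way.
 */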
static int
mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
				     void *route_priv,
				     enum mlxsw_sp_mr_route_action route_action)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new flexible action block */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route_action,
						      route->irif_index,
						      route->counter_index,
						      route->min_mtu,
						      &route->erif_list);
	if (IS_ERR(afa_block))
		return PTR_ERR(afa_block);

	/* Update the TCAM route entry */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, afa_block);
	if (err)
		goto err;

	/* Delete the old one */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	route->afa_block = afa_block;
	route->action = route_action;
	return 0;
err:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
	return err;
}

static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
						 void *route_priv, u16 min_mtu)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new flexible action block */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
						      route->action,
						      route->irif_index,
						      route->counter_index,
						      min_mtu,
						      &route->erif_list);
	if (IS_ERR(afa_block))
		return PTR_ERR(afa_block);

	/* Update the TCAM route entry */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, afa_block);
	if (err)
		goto err;

	/* Delete the old one */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	route->afa_block = afa_block;
	route->min_mtu = min_mtu;
	return 0;
err:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
	return err;
}

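/* The ingress RIF can only be updated in place for trap routes; for
 * forwarding routes it is encoded in the multicast router action and
 * cannot be changed without rebuilding the action block.
 */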
static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp *mlxsw_sp,
					      void *route_priv, u16 irif_index)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;

	if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return -EINVAL;
	route->irif_index = irif_index;
	return 0;
}

static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
					   void *route_priv, u16 erif_index)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	int err;

	err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list,
					erif_index);
	if (err)
		return err;

	/* Commit the erif_list only if the route action is not TRAP, as only
	 * then does the hardware point at it.
	 */
	if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return mlxsw_sp_mr_erif_list_commit(mlxsw_sp,
						    &route->erif_list);
	return 0;
}

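/* Removing an eRIF is also done make-before-break: build a copy of the
 * eRIF list without the deleted entry, point a new flexible action block
 * at it, replace the TCAM entry and only then free the old block and list.
 */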
static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
					   void *route_priv, u16 erif_index)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_erif_sublist *erif_sublist;
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	int err;
	int i;

	/* Create a copy of the original erif_list without the deleted entry */
	mlxsw_sp_mr_erif_list_init(&erif_list);
	list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists,
			    list) {
		for (i = 0; i < erif_sublist->num_erifs; i++) {
			u16 curr_erif = erif_sublist->erif_indices[i];

			if (curr_erif == erif_index)
				continue;
			err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list,
							curr_erif);
			if (err)
				goto err_erif_list_add;
		}
	}

	/* Create the flexible action block pointing to the new erif_list */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action,
						      route->irif_index,
						      route->counter_index,
						      route->min_mtu,
						      &erif_list);
	if (IS_ERR(afa_block)) {
		err = PTR_ERR(afa_block);
		goto err_afa_block_create;
	}

	/* Update the TCAM route entry */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, afa_block);
	if (err)
		goto err_route_write;

	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	route->afa_block = afa_block;
	mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
	return 0;

err_route_write:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_list_add:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
	return err;
}

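/* Full route update: rebuild the eRIF list and the flexible action block
 * from the new route info, replace the TCAM entry and then release the old
 * block and list, keeping the route valid throughout.
 */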
static int
mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
			      struct mlxsw_sp_mr_route_info *route_info)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new erif_list */
	mlxsw_sp_mr_erif_list_init(&erif_list);
	err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &erif_list, route_info);
	if (err)
		goto err_erif_populate;

	/* Create the flexible action block pointing to the new erif_list */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
						      route_info->route_action,
						      route_info->irif_index,
						      route->counter_index,
						      route_info->min_mtu,
						      &erif_list);
	if (IS_ERR(afa_block)) {
		err = PTR_ERR(afa_block);
		goto err_afa_block_create;
	}

	/* Update the TCAM route entry */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, afa_block);
	if (err)
		goto err_route_write;

	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	route->afa_block = afa_block;
	mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
	route->action = route_info->route_action;
	route->irif_index = route_info->irif_index;
	route->min_mtu = route_info->min_mtu;
	return 0;

err_route_write:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_populate:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
	return err;
}

#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16

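/* Allocate the per-protocol multicast TCAM region in hardware via the RTAR
 * register, starting at the base rule count; parman resizes it on demand
 * through the callbacks below.
 */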
static int
mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rtar_pl[MLXSW_REG_RTAR_LEN];

	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
			    mr_tcam_region->rtar_key_type,
			    MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}

static void
mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rtar_pl[MLXSW_REG_RTAR_LEN];

	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
			    mr_tcam_region->rtar_key_type, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}

static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
						 unsigned long new_count)
{
	struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rtar_pl[MLXSW_REG_RTAR_LEN];
	u64 max_tcam_rules;

	max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
	if (new_count > max_tcam_rules)
		return -EINVAL;
	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
			    mr_tcam_region->rtar_key_type, new_count);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}

static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
						unsigned long from_index,
						unsigned long to_index,
						unsigned long count)
{
	struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rrcr_pl[MLXSW_REG_RRCR_LEN];

	mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
			    from_index, count,
			    mr_tcam_region->rtar_key_type, to_index);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
}

static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
	.base_count	= MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
	.resize_step	= MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
	.resize		= mlxsw_sp_mr_tcam_region_parman_resize,
	.move		= mlxsw_sp_mr_tcam_region_parman_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};

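/* Initialize one per-protocol region: allocate the hardware region, create
 * its parman instance and initialize one parman priority per route
 * priority.
 */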
static int
mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
			     enum mlxsw_reg_rtar_key_type rtar_key_type)
{
	struct parman_prio *parman_prios;
	struct parman *parman;
	int err;
	int i;

	mr_tcam_region->rtar_key_type = rtar_key_type;
	mr_tcam_region->mlxsw_sp = mlxsw_sp;

	err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
	if (err)
		return err;

	parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
			       mr_tcam_region);
	if (!parman) {
		err = -ENOMEM;
		goto err_parman_create;
	}
	mr_tcam_region->parman = parman;

	parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
				     sizeof(*parman_prios), GFP_KERNEL);
	if (!parman_prios) {
		err = -ENOMEM;
		goto err_parman_prios_alloc;
	}
	mr_tcam_region->parman_prios = parman_prios;

	for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
		parman_prio_init(mr_tcam_region->parman,
				 &mr_tcam_region->parman_prios[i], i);
	return 0;

err_parman_prios_alloc:
	parman_destroy(parman);
err_parman_create:
	mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
	return err;
}

static void
mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
	int i;

	for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
		parman_prio_fini(&mr_tcam_region->parman_prios[i]);
	kfree(mr_tcam_region->parman_prios);
	parman_destroy(mr_tcam_region->parman);
	mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
}

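/* Multicast TCAM initialization requires the MC_ERIF_LIST_ENTRIES and
 * ACL_MAX_TCAM_RULES resources and sets up one region per supported L3
 * protocol.
 */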
static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;
	struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
	u32 rtar_key;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
		return -EIO;

	rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
	err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
					   &region[MLXSW_SP_L3_PROTO_IPV4],
					   rtar_key);
	if (err)
		return err;

	rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
	err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
					   &region[MLXSW_SP_L3_PROTO_IPV6],
					   rtar_key);
	if (err)
		goto err_ipv6_region_init;

	return 0;

err_ipv6_region_init:
	mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
	return err;
}

static void mlxsw_sp_mr_tcam_fini(void *priv)
{
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;
	struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];

	mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
	mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
}

const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
	.priv_size = sizeof(struct mlxsw_sp_mr_tcam),
	.route_priv_size = sizeof(struct mlxsw_sp_mr_tcam_route),
	.init = mlxsw_sp_mr_tcam_init,
	.route_create = mlxsw_sp_mr_tcam_route_create,
	.route_update = mlxsw_sp_mr_tcam_route_update,
	.route_stats = mlxsw_sp_mr_tcam_route_stats,
	.route_action_update = mlxsw_sp_mr_tcam_route_action_update,
	.route_min_mtu_update = mlxsw_sp_mr_tcam_route_min_mtu_update,
	.route_irif_update = mlxsw_sp_mr_tcam_route_irif_update,
	.route_erif_add = mlxsw_sp_mr_tcam_route_erif_add,
	.route_erif_del = mlxsw_sp_mr_tcam_route_erif_del,
	.route_destroy = mlxsw_sp_mr_tcam_route_destroy,
	.fini = mlxsw_sp_mr_tcam_fini,
};