xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c (revision 0526b56cbc3c489642bd6a5fe4b718dea7ef0ee8)
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/mutex.h>
#include <linux/rhashtable.h>
#include <net/ipv6.h>

#include "spectrum_mr.h"
#include "spectrum_router.h"

struct mlxsw_sp_mr {
	const struct mlxsw_sp_mr_ops *mr_ops;
	void *catchall_route_priv;
	struct delayed_work stats_update_dw;
	struct list_head table_list;
	struct mutex table_list_lock; /* Protects table_list */
#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
	unsigned long priv[];
	/* priv must always be the last member */
};

struct mlxsw_sp_mr_vif;
struct mlxsw_sp_mr_vif_ops {
	bool (*is_regular)(const struct mlxsw_sp_mr_vif *vif);
};

struct mlxsw_sp_mr_vif {
	struct net_device *dev;
	const struct mlxsw_sp_rif *rif;
	unsigned long vif_flags;

	/* A list of route_vif_entry structs that point to routes in which
	 * this VIF instance is used as one of the egress VIFs
	 */
	struct list_head route_evif_list;

	/* A list of route_vif_entry structs that point to routes in which
	 * this VIF instance is used as the ingress VIF
	 */
	struct list_head route_ivif_list;

	/* Protocol specific operations for a VIF */
	const struct mlxsw_sp_mr_vif_ops *ops;
};

struct mlxsw_sp_mr_route_vif_entry {
	struct list_head vif_node;
	struct list_head route_node;
	struct mlxsw_sp_mr_vif *mr_vif;
	struct mlxsw_sp_mr_route *mr_route;
};

struct mlxsw_sp_mr_table;
struct mlxsw_sp_mr_table_ops {
	bool (*is_route_valid)(const struct mlxsw_sp_mr_table *mr_table,
			       const struct mr_mfc *mfc);
	void (*key_create)(struct mlxsw_sp_mr_table *mr_table,
			   struct mlxsw_sp_mr_route_key *key,
			   struct mr_mfc *mfc);
	bool (*is_route_starg)(const struct mlxsw_sp_mr_table *mr_table,
			       const struct mlxsw_sp_mr_route *mr_route);
};

struct mlxsw_sp_mr_table {
	struct list_head node;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp *mlxsw_sp;
	u32 vr_id;
	struct mlxsw_sp_mr_vif vifs[MAXVIFS];
	struct list_head route_list;
	struct mutex route_list_lock; /* Protects route_list */
	struct rhashtable route_ht;
	const struct mlxsw_sp_mr_table_ops *ops;
	char catchall_route_priv[];
	/* catchall_route_priv must always be the last member */
};

struct mlxsw_sp_mr_route {
	struct list_head node;
	struct rhash_head ht_node;
	struct mlxsw_sp_mr_route_key key;
	enum mlxsw_sp_mr_route_action route_action;
	u16 min_mtu;
	struct mr_mfc *mfc;
	void *route_priv;
	const struct mlxsw_sp_mr_table *mr_table;
	/* A list of route_vif_entry structs that point to the egress VIFs */
	struct list_head evif_list;
	/* A route_vif_entry struct that points to the ingress VIF */
	struct mlxsw_sp_mr_route_vif_entry ivif;
};

static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_mr_route_key),
	.key_offset = offsetof(struct mlxsw_sp_mr_route, key),
	.head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
	.automatic_shrinking = true,
};
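
/* Illustrative sketch (not part of the driver): with the parameters above,
 * route_ht hashes each route by its full mlxsw_sp_mr_route_key, so lookups
 * take a fully populated key, as done in mlxsw_sp_mr_route_add() and
 * mlxsw_sp_mr_route_del() below. The helper name here is hypothetical.
 */
static inline struct mlxsw_sp_mr_route *
example_mr_route_lookup(struct mlxsw_sp_mr_table *mr_table,
			struct mlxsw_sp_mr_route_key *key)
{
	return rhashtable_lookup_fast(&mr_table->route_ht, key,
				      mlxsw_sp_mr_route_ht_params);
}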

static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
{
	return vif->ops->is_regular(vif) && vif->dev && vif->rif;
}

static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
{
	return vif->dev;
}

static bool
mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
{
	vifi_t ivif = mr_route->mfc->mfc_parent;

	return mr_route->mfc->mfc_un.res.ttls[ivif] != 255;
}
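
/* Illustrative note (a sketch, not part of the driver): the check above
 * relies on the kernel's mfc_un.res.ttls[] convention, also used in
 * mlxsw_sp_mr_route_create() below - a TTL of 255 means the VIF is not an
 * egress interface for the route, any other value means it is. The helper
 * name is hypothetical.
 */
static inline bool example_mfc_vif_is_evif(const struct mr_mfc *mfc,
					   vifi_t vifi)
{
	return mfc->mfc_un.res.ttls[vifi] != 255;
}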

static int
mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;
	int valid_evifs;

	valid_evifs = 0;
	list_for_each_entry(rve, &mr_route->evif_list, route_node)
		if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
			valid_evifs++;
	return valid_evifs;
}

static enum mlxsw_sp_mr_route_action
mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	/* If the ingress VIF is not regular and resolved, trap the route */
	if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* The kernel does not match a (*,G) route whose ingress interface is
	 * not one of the egress interfaces, so trap such routes.
	 */
	if (mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
						    mr_route) &&
	    !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* If the route has no valid eVIFs, trap it. */
	if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* If one of the eVIFs has no RIF, trap-and-forward the route as there
	 * is some more routing to do in software too.
	 */
	list_for_each_entry(rve, &mr_route->evif_list, route_node)
		if (mlxsw_sp_mr_vif_exists(rve->mr_vif) && !rve->mr_vif->rif)
			return MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD;

	return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
}

static enum mlxsw_sp_mr_route_prio
mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
{
	return mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
						       mr_route) ?
		MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
}

static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
				       struct mlxsw_sp_mr_vif *mr_vif)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	rve = kzalloc(sizeof(*rve), GFP_KERNEL);
	if (!rve)
		return -ENOMEM;
	rve->mr_route = mr_route;
	rve->mr_vif = mr_vif;
	list_add_tail(&rve->route_node, &mr_route->evif_list);
	list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
	return 0;
}

static void
mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
{
	list_del(&rve->route_node);
	list_del(&rve->vif_node);
	kfree(rve);
}

static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
					struct mlxsw_sp_mr_vif *mr_vif)
{
	mr_route->ivif.mr_route = mr_route;
	mr_route->ivif.mr_vif = mr_vif;
	list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
}

static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
{
	list_del(&mr_route->ivif.vif_node);
}

static int
mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
			      struct mlxsw_sp_mr_route *mr_route,
			      struct mlxsw_sp_mr_route_info *route_info)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;
	u16 *erif_indices;
	u16 irif_index;
	u16 erif = 0;

	erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
				     GFP_KERNEL);
	if (!erif_indices)
		return -ENOMEM;

	list_for_each_entry(rve, &mr_route->evif_list, route_node) {
		if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
			u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);

			erif_indices[erif++] = rifi;
		}
	}

	if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
		irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
	else
		irif_index = 0;

	route_info->irif_index = irif_index;
	route_info->erif_indices = erif_indices;
	route_info->min_mtu = mr_route->min_mtu;
	route_info->route_action = mr_route->route_action;
	route_info->erif_num = erif;
	return 0;
}

static void
mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
{
	kfree(route_info->erif_indices);
}

static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route *mr_route,
				   bool replace)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr_route_info route_info;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	int err;

	err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
	if (err)
		return err;

	if (!replace) {
		struct mlxsw_sp_mr_route_params route_params;

		mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
					       GFP_KERNEL);
		if (!mr_route->route_priv) {
			err = -ENOMEM;
			goto out;
		}

		route_params.key = mr_route->key;
		route_params.value = route_info;
		route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
		err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
					       mr_route->route_priv,
					       &route_params);
		if (err)
			kfree(mr_route->route_priv);
	} else {
		err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
					       &route_info);
	}
out:
	mlxsw_sp_mr_route_info_destroy(&route_info);
	return err;
}

static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
				    struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
	kfree(mr_route->route_priv);
}

static struct mlxsw_sp_mr_route *
mlxsw_sp_mr_route_create(struct mlxsw_sp_mr_table *mr_table,
			 struct mr_mfc *mfc)
{
	struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
	struct mlxsw_sp_mr_route *mr_route;
	int err = 0;
	int i;

	/* Allocate and init a new route and fill it with parameters */
	mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
	if (!mr_route)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&mr_route->evif_list);

	/* Find min_mtu and link iVIF and eVIFs */
	mr_route->min_mtu = ETH_MAX_MTU;
	mr_cache_hold(mfc);
	mr_route->mfc = mfc;
	mr_table->ops->key_create(mr_table, &mr_route->key, mr_route->mfc);

	mr_route->mr_table = mr_table;
	for (i = 0; i < MAXVIFS; i++) {
		if (mfc->mfc_un.res.ttls[i] != 255) {
			err = mlxsw_sp_mr_route_evif_link(mr_route,
							  &mr_table->vifs[i]);
			if (err)
				goto err;
			if (mr_table->vifs[i].dev &&
			    mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
				mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
		}
	}
	mlxsw_sp_mr_route_ivif_link(mr_route,
				    &mr_table->vifs[mfc->mfc_parent]);

	mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
	return mr_route;
err:
	mr_cache_put(mfc);
	list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
		mlxsw_sp_mr_route_evif_unlink(rve);
	kfree(mr_route);
	return ERR_PTR(err);
}

static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
				      struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;

	mlxsw_sp_mr_route_ivif_unlink(mr_route);
	mr_cache_put(mr_route->mfc);
	list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
		mlxsw_sp_mr_route_evif_unlink(rve);
	kfree(mr_route);
}

static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
					bool offload)
{
	if (offload)
		mr_route->mfc->mfc_flags |= MFC_OFFLOAD;
	else
		mr_route->mfc->mfc_flags &= ~MFC_OFFLOAD;
}
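
/* Note (an assumption about generic kernel behavior, not something this
 * file defines): the MFC_OFFLOAD flag toggled above is visible to user
 * space, e.g. "ip mroute show" reports such entries as offloaded. An
 * illustrative check with a hypothetical name:
 */
static inline bool
example_mr_route_is_offloaded(const struct mlxsw_sp_mr_route *mr_route)
{
	return mr_route->mfc->mfc_flags & MFC_OFFLOAD;
}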

static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
{
	bool offload;

	offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
}

static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
				    struct mlxsw_sp_mr_route *mr_route)
{
	WARN_ON_ONCE(!mutex_is_locked(&mr_table->route_list_lock));

	mlxsw_sp_mr_mfc_offload_set(mr_route, false);
	rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
			       mlxsw_sp_mr_route_ht_params);
	list_del(&mr_route->node);
	mlxsw_sp_mr_route_erase(mr_table, mr_route);
	mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}

int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
			  struct mr_mfc *mfc, bool replace)
{
	struct mlxsw_sp_mr_route *mr_orig_route = NULL;
	struct mlxsw_sp_mr_route *mr_route;
	int err;

	if (!mr_table->ops->is_route_valid(mr_table, mfc))
		return -EINVAL;

	/* Create a new route */
	mr_route = mlxsw_sp_mr_route_create(mr_table, mfc);
	if (IS_ERR(mr_route))
		return PTR_ERR(mr_route);

	/* Find any route with a matching key */
	mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
					       &mr_route->key,
					       mlxsw_sp_mr_route_ht_params);
	if (replace) {
		/* In the replace case, make the new route point to the
		 * existing route_priv.
		 */
		if (WARN_ON(!mr_orig_route)) {
			err = -ENOENT;
			goto err_no_orig_route;
		}
		mr_route->route_priv = mr_orig_route->route_priv;
	} else if (mr_orig_route) {
		/* In the non-replace case, if another route with the same key
		 * was found, abort, as duplicate routes are used for proxy
		 * routes.
		 */
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		err = -EINVAL;
		goto err_duplicate_route;
	}

	/* Write the route to the hardware */
	err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
	if (err)
		goto err_mr_route_write;

	/* Put it in the table data structures */
	mutex_lock(&mr_table->route_list_lock);
	list_add_tail(&mr_route->node, &mr_table->route_list);
	mutex_unlock(&mr_table->route_list_lock);
	err = rhashtable_insert_fast(&mr_table->route_ht,
				     &mr_route->ht_node,
				     mlxsw_sp_mr_route_ht_params);
	if (err)
		goto err_rhashtable_insert;

	/* Destroy the original route */
	if (replace) {
		rhashtable_remove_fast(&mr_table->route_ht,
				       &mr_orig_route->ht_node,
				       mlxsw_sp_mr_route_ht_params);
		list_del(&mr_orig_route->node);
		mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);
	}

	mlxsw_sp_mr_mfc_offload_update(mr_route);
	return 0;

err_rhashtable_insert:
	mutex_lock(&mr_table->route_list_lock);
	list_del(&mr_route->node);
	mutex_unlock(&mr_table->route_list_lock);
	mlxsw_sp_mr_route_erase(mr_table, mr_route);
err_mr_route_write:
err_no_orig_route:
err_duplicate_route:
	mlxsw_sp_mr_route_destroy(mr_table, mr_route);
	return err;
}

void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
			   struct mr_mfc *mfc)
{
	struct mlxsw_sp_mr_route *mr_route;
	struct mlxsw_sp_mr_route_key key;

	mr_table->ops->key_create(mr_table, &key, mfc);
	mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
					  mlxsw_sp_mr_route_ht_params);
	if (mr_route) {
		mutex_lock(&mr_table->route_list_lock);
		__mlxsw_sp_mr_route_del(mr_table, mr_route);
		mutex_unlock(&mr_table->route_list_lock);
	}
}
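
/* Illustrative caller sketch (hypothetical, not part of this file): the
 * router code that receives MFC notifications would dispatch to the two
 * entry points above roughly like this:
 */
static int example_mfc_event(struct mlxsw_sp_mr_table *mr_table,
			     struct mr_mfc *mfc, bool add, bool replace)
{
	if (add)
		return mlxsw_sp_mr_route_add(mr_table, mfc, replace);
	mlxsw_sp_mr_route_del(mr_table, mfc);
	return 0;
}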

/* Should be called after the VIF struct is updated */
static int
mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
			       struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 irif_index;
	int err;

	route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return 0;

	/* rve->mr_vif->rif is guaranteed to be valid at this stage */
	irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
	err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
					    irif_index);
	if (err)
		return err;

	err = mr->mr_ops->route_action_update(mlxsw_sp,
					      rve->mr_route->route_priv,
					      route_action);
	if (err)
		/* No need to roll back here because the iRIF change only
		 * takes place after the action has been updated.
		 */
		return err;

	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
	return 0;
}

static void
mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				 struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
					MLXSW_SP_MR_ROUTE_ACTION_TRAP);
	rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}

/* Should be called after the RIF struct is updated */
static int
mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
			       struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 erif_index = 0;
	int err;

	/* Add the eRIF */
	if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
		erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
		err = mr->mr_ops->route_erif_add(mlxsw_sp,
						 rve->mr_route->route_priv,
						 erif_index);
		if (err)
			return err;
	}

	/* Update the route action, as the new eVIF can be a tunnel or a pimreg
	 * device, which would require updating the action.
	 */
	route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action != rve->mr_route->route_action) {
		err = mr->mr_ops->route_action_update(mlxsw_sp,
						      rve->mr_route->route_priv,
						      route_action);
		if (err)
			goto err_route_action_update;
	}

	/* Update the minimum MTU */
	if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
		rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
		err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
						       rve->mr_route->route_priv,
						       rve->mr_route->min_mtu);
		if (err)
			goto err_route_min_mtu_update;
	}

	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
	return 0;

err_route_min_mtu_update:
	if (route_action != rve->mr_route->route_action)
		mr->mr_ops->route_action_update(mlxsw_sp,
						rve->mr_route->route_priv,
						rve->mr_route->route_action);
err_route_action_update:
	if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
		mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
					   erif_index);
	return err;
}

/* Should be called before the RIF struct is updated */
static void
mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				 struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 rifi;

	/* If the unresolved RIF was not valid, no need to delete it */
	if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
		return;

	/* Update the route action: if there is only one valid eVIF in the
	 * route, set the action to trap, as the VIF deletion will lead to zero
	 * valid eVIFs. In any other case, use mlxsw_sp_mr_route_action() to
	 * determine the route action.
	 */
	if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
		route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	else
		route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action != rve->mr_route->route_action)
		mr->mr_ops->route_action_update(mlxsw_sp,
						rve->mr_route->route_priv,
						route_action);

	/* Delete the eRIF from the route */
	rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
	mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}

static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
				   struct net_device *dev,
				   struct mlxsw_sp_mr_vif *mr_vif,
				   unsigned long vif_flags,
				   const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
	int err;

	/* Update the VIF */
	mr_vif->dev = dev;
	mr_vif->rif = rif;
	mr_vif->vif_flags = vif_flags;

	/* Update all routes where this VIF is used as an unresolved iRIF */
	list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
		err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
		if (err)
			goto err_irif_unresolve;
	}

	/* Update all routes where this VIF is used as an unresolved eRIF */
	list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
		err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
		if (err)
			goto err_erif_unresolve;
	}
	return 0;

err_erif_unresolve:
	list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list,
					     vif_node)
		mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
err_irif_unresolve:
	list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list,
					     vif_node)
		mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
	mr_vif->rif = NULL;
	return err;
}

static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				      struct net_device *dev,
				      struct mlxsw_sp_mr_vif *mr_vif)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	/* Update all routes where this VIF is used as an eRIF */
	list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
		mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);

	/* Update all routes where this VIF is used as an iRIF */
	list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
		mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);

	/* Update the VIF */
	mr_vif->dev = dev;
	mr_vif->rif = NULL;
}

int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
			struct net_device *dev, vifi_t vif_index,
			unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];

	if (WARN_ON(vif_index >= MAXVIFS))
		return -EINVAL;
	if (mr_vif->dev)
		return -EEXIST;
	return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
}

void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
{
	struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];

	if (WARN_ON(vif_index >= MAXVIFS))
		return;
	if (WARN_ON(!mr_vif->dev))
		return;
	mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
}
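
/* Illustrative caller sketch (hypothetical, not part of this file): VIF
 * add/delete notifications from the kernel's multicast routing code map
 * onto the two entry points above. The RIF argument may be NULL if the
 * underlying device has no RIF yet; resolution then happens later through
 * mlxsw_sp_mr_rif_add() below.
 */
static int example_vif_event(struct mlxsw_sp_mr_table *mr_table,
			     struct net_device *dev, vifi_t vif_index,
			     unsigned long vif_flags,
			     const struct mlxsw_sp_rif *rif, bool add)
{
	if (add)
		return mlxsw_sp_mr_vif_add(mr_table, dev, vif_index,
					   vif_flags, rif);
	mlxsw_sp_mr_vif_del(mr_table, vif_index);
	return 0;
}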

static struct mlxsw_sp_mr_vif *
mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
			   const struct net_device *dev)
{
	vifi_t vif_index;

	for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
		if (mr_table->vifs[vif_index].dev == dev)
			return &mr_table->vifs[vif_index];
	return NULL;
}

int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
			const struct mlxsw_sp_rif *rif)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return 0;

	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return 0;
	return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
				       mr_vif->vif_flags, rif);
}

void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
			 const struct mlxsw_sp_rif *rif)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return;

	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return;
	mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
}

void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
				const struct mlxsw_sp_rif *rif, int mtu)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr_route_vif_entry *rve;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return;

	/* Search for a VIF that uses that RIF */
	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return;

	/* Update all the routes that use that VIF as an eVIF */
	list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
		if (mtu < rve->mr_route->min_mtu) {
			rve->mr_route->min_mtu = mtu;
			mr->mr_ops->route_min_mtu_update(mlxsw_sp,
							 rve->mr_route->route_priv,
							 mtu);
		}
	}
}

/* Protocol specific functions */
static bool
mlxsw_sp_mr_route4_validate(const struct mlxsw_sp_mr_table *mr_table,
			    const struct mr_mfc *c)
{
	struct mfc_cache *mfc = (struct mfc_cache *) c;

	/* If the route is a (*,*) route, abort, as such routes are used for
	 * proxy routes.
	 */
	if (mfc->mfc_origin == htonl(INADDR_ANY) &&
	    mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		return false;
	}
	return true;
}

static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route_key *key,
				   struct mr_mfc *c)
{
	const struct mfc_cache *mfc = (struct mfc_cache *) c;
	bool starg;

	starg = (mfc->mfc_origin == htonl(INADDR_ANY));

	memset(key, 0, sizeof(*key));
	key->vrid = mr_table->vr_id;
	key->proto = MLXSW_SP_L3_PROTO_IPV4;
	key->group.addr4 = mfc->mfc_mcastgrp;
	key->group_mask.addr4 = htonl(0xffffffff);
	key->source.addr4 = mfc->mfc_origin;
	key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
}

static bool mlxsw_sp_mr_route4_starg(const struct mlxsw_sp_mr_table *mr_table,
				     const struct mlxsw_sp_mr_route *mr_route)
{
	return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
}

static bool mlxsw_sp_mr_vif4_is_regular(const struct mlxsw_sp_mr_vif *vif)
{
	return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
}

static bool
mlxsw_sp_mr_route6_validate(const struct mlxsw_sp_mr_table *mr_table,
			    const struct mr_mfc *c)
{
	struct mfc6_cache *mfc = (struct mfc6_cache *) c;

	/* If the route is a (*,*) route, abort, as such routes are used for
	 * proxy routes.
	 */
	if (ipv6_addr_any(&mfc->mf6c_origin) &&
	    ipv6_addr_any(&mfc->mf6c_mcastgrp)) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		return false;
	}
	return true;
}

static void mlxsw_sp_mr_route6_key(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route_key *key,
				   struct mr_mfc *c)
{
	const struct mfc6_cache *mfc = (struct mfc6_cache *) c;

	memset(key, 0, sizeof(*key));
	key->vrid = mr_table->vr_id;
	key->proto = MLXSW_SP_L3_PROTO_IPV6;
	key->group.addr6 = mfc->mf6c_mcastgrp;
	memset(&key->group_mask.addr6, 0xff, sizeof(key->group_mask.addr6));
	key->source.addr6 = mfc->mf6c_origin;
	if (!ipv6_addr_any(&mfc->mf6c_origin))
		memset(&key->source_mask.addr6, 0xff,
		       sizeof(key->source_mask.addr6));
}

static bool mlxsw_sp_mr_route6_starg(const struct mlxsw_sp_mr_table *mr_table,
				     const struct mlxsw_sp_mr_route *mr_route)
{
	return ipv6_addr_any(&mr_route->key.source_mask.addr6);
}

static bool mlxsw_sp_mr_vif6_is_regular(const struct mlxsw_sp_mr_vif *vif)
{
	return !(vif->vif_flags & MIFF_REGISTER);
}

static struct
mlxsw_sp_mr_vif_ops mlxsw_sp_mr_vif_ops_arr[] = {
	{
		.is_regular = mlxsw_sp_mr_vif4_is_regular,
	},
	{
		.is_regular = mlxsw_sp_mr_vif6_is_regular,
	},
};

static struct
mlxsw_sp_mr_table_ops mlxsw_sp_mr_table_ops_arr[] = {
	{
		.is_route_valid = mlxsw_sp_mr_route4_validate,
		.key_create = mlxsw_sp_mr_route4_key,
		.is_route_starg = mlxsw_sp_mr_route4_starg,
	},
	{
		.is_route_valid = mlxsw_sp_mr_route6_validate,
		.key_create = mlxsw_sp_mr_route6_key,
		.is_route_starg = mlxsw_sp_mr_route6_starg,
	},
};
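
/* The two ops arrays above are indexed by enum mlxsw_sp_l3proto, with
 * MLXSW_SP_L3_PROTO_IPV4 selecting the first entry and
 * MLXSW_SP_L3_PROTO_IPV6 the second, as done in mlxsw_sp_mr_table_create()
 * below. An illustrative compile-time guard for that coupling (a sketch,
 * not present in the driver):
 */
static inline void example_mr_ops_arr_check(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(mlxsw_sp_mr_vif_ops_arr) !=
		     ARRAY_SIZE(mlxsw_sp_mr_table_ops_arr));
}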

struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
						   u32 vr_id,
						   enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_mr_route_params catchall_route_params = {
		.prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
		.key = {
			.vrid = vr_id,
			.proto = proto,
		},
		.value = {
			.route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
		}
	};
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	struct mlxsw_sp_mr_table *mr_table;
	int err;
	int i;

	mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
			   GFP_KERNEL);
	if (!mr_table)
		return ERR_PTR(-ENOMEM);

	mr_table->vr_id = vr_id;
	mr_table->mlxsw_sp = mlxsw_sp;
	mr_table->proto = proto;
	mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto];
	INIT_LIST_HEAD(&mr_table->route_list);
	mutex_init(&mr_table->route_list_lock);

	err = rhashtable_init(&mr_table->route_ht,
			      &mlxsw_sp_mr_route_ht_params);
	if (err)
		goto err_route_rhashtable_init;

	for (i = 0; i < MAXVIFS; i++) {
		INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
		INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
		mr_table->vifs[i].ops = &mlxsw_sp_mr_vif_ops_arr[proto];
	}

	err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
				       mr_table->catchall_route_priv,
				       &catchall_route_params);
	if (err)
		goto err_ops_route_create;
	mutex_lock(&mr->table_list_lock);
	list_add_tail(&mr_table->node, &mr->table_list);
	mutex_unlock(&mr->table_list_lock);
	return mr_table;

err_ops_route_create:
	rhashtable_destroy(&mr_table->route_ht);
err_route_rhashtable_init:
	mutex_destroy(&mr_table->route_list_lock);
	kfree(mr_table);
	return ERR_PTR(err);
}

void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
	mutex_lock(&mr->table_list_lock);
	list_del(&mr_table->node);
	mutex_unlock(&mr->table_list_lock);
	mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
				  &mr_table->catchall_route_priv);
	rhashtable_destroy(&mr_table->route_ht);
	mutex_destroy(&mr_table->route_list_lock);
	kfree(mr_table);
}

void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
{
	struct mlxsw_sp_mr_route *mr_route, *tmp;
	int i;

	mutex_lock(&mr_table->route_list_lock);
	list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
		__mlxsw_sp_mr_route_del(mr_table, mr_route);
	mutex_unlock(&mr_table->route_list_lock);

	for (i = 0; i < MAXVIFS; i++) {
		mr_table->vifs[i].dev = NULL;
		mr_table->vifs[i].rif = NULL;
	}
}

bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
{
	int i;

	for (i = 0; i < MAXVIFS; i++)
		if (mr_table->vifs[i].dev)
			return false;
	return list_empty(&mr_table->route_list);
}

static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u64 packets, bytes;

	if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return;

	mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
				&bytes);

	if (mr_route->mfc->mfc_un.res.pkt != packets)
		mr_route->mfc->mfc_un.res.lastuse = jiffies;
	mr_route->mfc->mfc_un.res.pkt = packets;
	mr_route->mfc->mfc_un.res.bytes = bytes;
}

static void mlxsw_sp_mr_stats_update(struct work_struct *work)
{
	struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
					      stats_update_dw.work);
	struct mlxsw_sp_mr_table *mr_table;
	struct mlxsw_sp_mr_route *mr_route;
	unsigned long interval;

	mutex_lock(&mr->table_list_lock);
	list_for_each_entry(mr_table, &mr->table_list, node) {
		mutex_lock(&mr_table->route_list_lock);
		list_for_each_entry(mr_route, &mr_table->route_list, node)
			mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
						       mr_route);
		mutex_unlock(&mr_table->route_list_lock);
	}
	mutex_unlock(&mr->table_list_lock);

	interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
	mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
}

int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
		     const struct mlxsw_sp_mr_ops *mr_ops)
{
	struct mlxsw_sp_mr *mr;
	unsigned long interval;
	int err;

	mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
	if (!mr)
		return -ENOMEM;
	mr->mr_ops = mr_ops;
	mlxsw_sp->mr = mr;
	INIT_LIST_HEAD(&mr->table_list);
	mutex_init(&mr->table_list_lock);

	err = mr_ops->init(mlxsw_sp, mr->priv);
	if (err)
		goto err;

	/* Create the delayed work for counter updates */
	INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
	interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
	mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
	return 0;
err:
	mutex_destroy(&mr->table_list_lock);
	kfree(mr);
	return err;
}

void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	cancel_delayed_work_sync(&mr->stats_update_dw);
	mr->mr_ops->fini(mlxsw_sp, mr->priv);
	mutex_destroy(&mr->table_list_lock);
	kfree(mr);
}
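
/* Illustrative lifecycle sketch (a hypothetical caller, not part of this
 * file): the expected ordering of the entry points above - init once per
 * ASIC, create per-VRF tables, feed route/VIF/RIF updates, then flush and
 * tear down. mlxsw_sp_mr_table_destroy() warns if the table is not empty.
 */
static int example_mr_lifecycle(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_mr_ops *mr_ops,
				u32 vr_id)
{
	struct mlxsw_sp_mr_table *mr_table;
	int err;

	err = mlxsw_sp_mr_init(mlxsw_sp, mr_ops);
	if (err)
		return err;

	mr_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr_id,
					    MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr_table)) {
		err = PTR_ERR(mr_table);
		goto err_table_create;
	}

	/* ... mlxsw_sp_mr_route_add()/del(), VIF and RIF updates ... */

	mlxsw_sp_mr_table_flush(mr_table);
	mlxsw_sp_mr_table_destroy(mr_table);
	mlxsw_sp_mr_fini(mlxsw_sp);
	return 0;

err_table_create:
	mlxsw_sp_mr_fini(mlxsw_sp);
	return err;
}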