xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c (revision 8f109e91b852f159b917f5c565bcf43c26d974e2)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/rhashtable.h>
7 #include <linux/bitops.h>
8 #include <linux/in6.h>
9 #include <linux/notifier.h>
10 #include <linux/inetdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/if_bridge.h>
13 #include <linux/socket.h>
14 #include <linux/route.h>
15 #include <linux/gcd.h>
16 #include <linux/if_macvlan.h>
17 #include <linux/refcount.h>
18 #include <linux/jhash.h>
19 #include <linux/net_namespace.h>
20 #include <linux/mutex.h>
21 #include <linux/genalloc.h>
22 #include <net/netevent.h>
23 #include <net/neighbour.h>
24 #include <net/arp.h>
25 #include <net/inet_dscp.h>
26 #include <net/ip_fib.h>
27 #include <net/ip6_fib.h>
28 #include <net/nexthop.h>
29 #include <net/fib_rules.h>
30 #include <net/ip_tunnels.h>
31 #include <net/l3mdev.h>
32 #include <net/addrconf.h>
33 #include <net/ndisc.h>
34 #include <net/ipv6.h>
35 #include <net/fib_notifier.h>
36 #include <net/switchdev.h>
37 
38 #include "spectrum.h"
39 #include "core.h"
40 #include "reg.h"
41 #include "spectrum_cnt.h"
42 #include "spectrum_dpipe.h"
43 #include "spectrum_ipip.h"
44 #include "spectrum_mr.h"
45 #include "spectrum_mr_tcam.h"
46 #include "spectrum_router.h"
47 #include "spectrum_span.h"
48 
49 struct mlxsw_sp_fib;
50 struct mlxsw_sp_vr;
51 struct mlxsw_sp_lpm_tree;
52 struct mlxsw_sp_rif_ops;
53 
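/* A CRIF tracks a netdevice that is of interest to the router. Nexthops
 * hang off the CRIF rather than off the RIF itself, so that they survive
 * destruction and re-creation of the RIF (see
 * mlxsw_sp_rif_migrate_destroy()). crif->rif points at the RIF currently
 * instantiated for the netdevice, if any.
 */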
54 struct mlxsw_sp_crif_key {
55 	struct net_device *dev;
56 };
57 
58 struct mlxsw_sp_crif {
59 	struct mlxsw_sp_crif_key key;
60 	struct rhash_head ht_node;
61 	bool can_destroy;
62 	struct list_head nexthop_list;
63 	struct mlxsw_sp_rif *rif;
64 };
65 
66 static const struct rhashtable_params mlxsw_sp_crif_ht_params = {
67 	.key_offset = offsetof(struct mlxsw_sp_crif, key),
68 	.key_len = sizeof_field(struct mlxsw_sp_crif, key),
69 	.head_offset = offsetof(struct mlxsw_sp_crif, ht_node),
70 };
71 
72 struct mlxsw_sp_rif {
73 	struct mlxsw_sp_crif *crif; /* NULL for underlay RIF */
74 	netdevice_tracker dev_tracker;
75 	struct list_head neigh_list;
76 	struct mlxsw_sp_fid *fid;
77 	unsigned char addr[ETH_ALEN];
78 	int mtu;
79 	u16 rif_index;
80 	u8 mac_profile_id;
81 	u8 rif_entries;
82 	u16 vr_id;
83 	const struct mlxsw_sp_rif_ops *ops;
84 	struct mlxsw_sp *mlxsw_sp;
85 
86 	unsigned int counter_ingress;
87 	bool counter_ingress_valid;
88 	unsigned int counter_egress;
89 	bool counter_egress_valid;
90 };
91 
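/* The RIF's netdevice is reached through its CRIF. The underlay RIF has no
 * CRIF and therefore no netdevice.
 */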
92 static struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
93 {
94 	if (!rif->crif)
95 		return NULL;
96 	return rif->crif->key.dev;
97 }
98 
99 struct mlxsw_sp_rif_params {
100 	struct net_device *dev;
101 	union {
102 		u16 system_port;
103 		u16 lag_id;
104 	};
105 	u16 vid;
106 	bool lag;
107 	bool double_entry;
108 };
109 
110 struct mlxsw_sp_rif_subport {
111 	struct mlxsw_sp_rif common;
112 	refcount_t ref_count;
113 	union {
114 		u16 system_port;
115 		u16 lag_id;
116 	};
117 	u16 vid;
118 	bool lag;
119 };
120 
121 struct mlxsw_sp_rif_ipip_lb {
122 	struct mlxsw_sp_rif common;
123 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
124 	u16 ul_vr_id;	/* Spectrum-1. */
125 	u16 ul_rif_id;	/* Spectrum-2+. */
126 };
127 
128 struct mlxsw_sp_rif_params_ipip_lb {
129 	struct mlxsw_sp_rif_params common;
130 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
131 };
132 
133 struct mlxsw_sp_rif_ops {
134 	enum mlxsw_sp_rif_type type;
135 	size_t rif_size;
136 
137 	void (*setup)(struct mlxsw_sp_rif *rif,
138 		      const struct mlxsw_sp_rif_params *params);
139 	int (*configure)(struct mlxsw_sp_rif *rif,
140 			 struct netlink_ext_ack *extack);
141 	void (*deconfigure)(struct mlxsw_sp_rif *rif);
142 	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
143 					 const struct mlxsw_sp_rif_params *params,
144 					 struct netlink_ext_ack *extack);
145 	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
146 };
147 
148 struct mlxsw_sp_rif_mac_profile {
149 	unsigned char mac_prefix[ETH_ALEN];
150 	refcount_t ref_count;
151 	u8 id;
152 };
153 
154 struct mlxsw_sp_router_ops {
155 	int (*init)(struct mlxsw_sp *mlxsw_sp);
156 	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
157 };
158 
159 static struct mlxsw_sp_rif *
160 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
161 			 const struct net_device *dev);
162 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
163 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
164 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
165 				  struct mlxsw_sp_lpm_tree *lpm_tree);
166 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
167 				     const struct mlxsw_sp_fib *fib,
168 				     u8 tree_id);
169 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
170 				       const struct mlxsw_sp_fib *fib);
171 
172 static unsigned int *
173 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
174 			   enum mlxsw_sp_rif_counter_dir dir)
175 {
176 	switch (dir) {
177 	case MLXSW_SP_RIF_COUNTER_EGRESS:
178 		return &rif->counter_egress;
179 	case MLXSW_SP_RIF_COUNTER_INGRESS:
180 		return &rif->counter_ingress;
181 	}
182 	return NULL;
183 }
184 
185 static bool
186 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
187 			       enum mlxsw_sp_rif_counter_dir dir)
188 {
189 	switch (dir) {
190 	case MLXSW_SP_RIF_COUNTER_EGRESS:
191 		return rif->counter_egress_valid;
192 	case MLXSW_SP_RIF_COUNTER_INGRESS:
193 		return rif->counter_ingress_valid;
194 	}
195 	return false;
196 }
197 
198 static void
199 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
200 			       enum mlxsw_sp_rif_counter_dir dir,
201 			       bool valid)
202 {
203 	switch (dir) {
204 	case MLXSW_SP_RIF_COUNTER_EGRESS:
205 		rif->counter_egress_valid = valid;
206 		break;
207 	case MLXSW_SP_RIF_COUNTER_INGRESS:
208 		rif->counter_ingress_valid = valid;
209 		break;
210 	}
211 }
212 
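/* Bind or unbind a counter to a RIF. RITR is first queried so that the
 * counter fields can be updated without clobbering the rest of the RIF's
 * configuration.
 */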
213 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
214 				     unsigned int counter_index, bool enable,
215 				     enum mlxsw_sp_rif_counter_dir dir)
216 {
217 	char ritr_pl[MLXSW_REG_RITR_LEN];
218 	bool is_egress = false;
219 	int err;
220 
221 	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
222 		is_egress = true;
223 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
224 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
225 	if (err)
226 		return err;
227 
228 	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
229 				    is_egress);
230 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
231 }
232 
233 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
234 				   struct mlxsw_sp_rif *rif,
235 				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
236 {
237 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
238 	unsigned int *p_counter_index;
239 	bool valid;
240 	int err;
241 
242 	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
243 	if (!valid)
244 		return -EINVAL;
245 
246 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
247 	if (!p_counter_index)
248 		return -EINVAL;
249 	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
250 			     MLXSW_REG_RICNT_OPCODE_NOP);
251 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
252 	if (err)
253 		return err;
254 	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
255 	return 0;
256 }
257 
258 struct mlxsw_sp_rif_counter_set_basic {
259 	u64 good_unicast_packets;
260 	u64 good_multicast_packets;
261 	u64 good_broadcast_packets;
262 	u64 good_unicast_bytes;
263 	u64 good_multicast_bytes;
264 	u64 good_broadcast_bytes;
265 	u64 error_packets;
266 	u64 discard_packets;
267 	u64 error_bytes;
268 	u64 discard_bytes;
269 };
270 
271 static int
272 mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
273 				 enum mlxsw_sp_rif_counter_dir dir,
274 				 struct mlxsw_sp_rif_counter_set_basic *set)
275 {
276 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
277 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
278 	unsigned int *p_counter_index;
279 	int err;
280 
281 	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
282 		return -EINVAL;
283 
284 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
285 	if (!p_counter_index)
286 		return -EINVAL;
287 
288 	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
289 			     MLXSW_REG_RICNT_OPCODE_CLEAR);
290 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
291 	if (err)
292 		return err;
293 
294 	if (!set)
295 		return 0;
296 
297 #define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME)				\
298 		(set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))
299 
300 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
301 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
302 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
303 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
304 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
305 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
306 	MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
307 	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
308 	MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
309 	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);
310 
311 #undef MLXSW_SP_RIF_COUNTER_EXTRACT
312 
313 	return 0;
314 }
315 
316 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
317 				      unsigned int counter_index)
318 {
319 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
320 
321 	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
322 			     MLXSW_REG_RICNT_OPCODE_CLEAR);
323 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
324 }
325 
326 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
327 			       enum mlxsw_sp_rif_counter_dir dir)
328 {
329 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
330 	unsigned int *p_counter_index;
331 	int err;
332 
333 	if (mlxsw_sp_rif_counter_valid_get(rif, dir))
334 		return 0;
335 
336 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
337 	if (!p_counter_index)
338 		return -EINVAL;
339 
340 	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
341 				     p_counter_index);
342 	if (err)
343 		return err;
344 
345 	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
346 	if (err)
347 		goto err_counter_clear;
348 
349 	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
350 					*p_counter_index, true, dir);
351 	if (err)
352 		goto err_counter_edit;
353 	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
354 	return 0;
355 
356 err_counter_edit:
357 err_counter_clear:
358 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
359 			      *p_counter_index);
360 	return err;
361 }
362 
363 void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
364 			       enum mlxsw_sp_rif_counter_dir dir)
365 {
366 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
367 	unsigned int *p_counter_index;
368 
369 	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
370 		return;
371 
372 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
373 	if (WARN_ON(!p_counter_index))
374 		return;
375 	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
376 				  *p_counter_index, false, dir);
377 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
378 			      *p_counter_index);
379 	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
380 }
381 
382 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
383 {
384 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
385 	struct devlink *devlink;
386 
387 	devlink = priv_to_devlink(mlxsw_sp->core);
388 	if (!devlink_dpipe_table_counter_enabled(devlink,
389 						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
390 		return;
391 	mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
392 }
393 
394 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
395 {
396 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
397 }
398 
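/* Prefix lengths 0 through 128 (the longest IPv6 prefix) are all valid,
 * hence the + 1.
 */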
399 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
400 
401 struct mlxsw_sp_prefix_usage {
402 	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
403 };
404 
405 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
406 	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
407 
408 static bool
409 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
410 			 struct mlxsw_sp_prefix_usage *prefix_usage2)
411 {
412 	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
413 }
414 
415 static void
416 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
417 			  struct mlxsw_sp_prefix_usage *prefix_usage2)
418 {
419 	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
420 }
421 
422 static void
423 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
424 			  unsigned char prefix_len)
425 {
426 	set_bit(prefix_len, prefix_usage->b);
427 }
428 
429 static void
430 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
431 			    unsigned char prefix_len)
432 {
433 	clear_bit(prefix_len, prefix_usage->b);
434 }
435 
436 struct mlxsw_sp_fib_key {
437 	unsigned char addr[sizeof(struct in6_addr)];
438 	unsigned char prefix_len;
439 };
440 
441 enum mlxsw_sp_fib_entry_type {
442 	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
443 	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
444 	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
445 	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
446 	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
447 
448 	/* This is a special case of local delivery, where a packet should be
449 	 * decapsulated on reception. Note that there is no corresponding ENCAP,
450 	 * because that's a type of next hop, not of FIB entry. (There can be
451 	 * several next hops in a REMOTE entry, and some of them may be
452 	 * encapsulating entries.)
453 	 */
454 	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
455 	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
456 };
457 
458 struct mlxsw_sp_nexthop_group_info;
459 struct mlxsw_sp_nexthop_group;
460 struct mlxsw_sp_fib_entry;
461 
462 struct mlxsw_sp_fib_node {
463 	struct mlxsw_sp_fib_entry *fib_entry;
464 	struct list_head list;
465 	struct rhash_head ht_node;
466 	struct mlxsw_sp_fib *fib;
467 	struct mlxsw_sp_fib_key key;
468 };
469 
470 struct mlxsw_sp_fib_entry_decap {
471 	struct mlxsw_sp_ipip_entry *ipip_entry;
472 	u32 tunnel_index;
473 };
474 
475 struct mlxsw_sp_fib_entry {
476 	struct mlxsw_sp_fib_node *fib_node;
477 	enum mlxsw_sp_fib_entry_type type;
478 	struct list_head nexthop_group_node;
479 	struct mlxsw_sp_nexthop_group *nh_group;
480 	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
481 };
482 
483 struct mlxsw_sp_fib4_entry {
484 	struct mlxsw_sp_fib_entry common;
485 	struct fib_info *fi;
486 	u32 tb_id;
487 	dscp_t dscp;
488 	u8 type;
489 };
490 
491 struct mlxsw_sp_fib6_entry {
492 	struct mlxsw_sp_fib_entry common;
493 	struct list_head rt6_list;
494 	unsigned int nrt6;
495 };
496 
497 struct mlxsw_sp_rt6 {
498 	struct list_head list;
499 	struct fib6_info *rt;
500 };
501 
502 struct mlxsw_sp_lpm_tree {
503 	u8 id; /* tree ID */
504 	refcount_t ref_count;
505 	enum mlxsw_sp_l3proto proto;
506 	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
507 	struct mlxsw_sp_prefix_usage prefix_usage;
508 };
509 
510 struct mlxsw_sp_fib {
511 	struct rhashtable ht;
512 	struct list_head node_list;
513 	struct mlxsw_sp_vr *vr;
514 	struct mlxsw_sp_lpm_tree *lpm_tree;
515 	enum mlxsw_sp_l3proto proto;
516 };
517 
518 struct mlxsw_sp_vr {
519 	u16 id; /* virtual router ID */
520 	u32 tb_id; /* kernel fib table id */
521 	unsigned int rif_count;
522 	struct mlxsw_sp_fib *fib4;
523 	struct mlxsw_sp_fib *fib6;
524 	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
525 	struct mlxsw_sp_rif *ul_rif;
526 	refcount_t ul_rif_refcnt;
527 };
528 
529 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
530 
531 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
532 						struct mlxsw_sp_vr *vr,
533 						enum mlxsw_sp_l3proto proto)
534 {
535 	struct mlxsw_sp_lpm_tree *lpm_tree;
536 	struct mlxsw_sp_fib *fib;
537 	int err;
538 
539 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
540 	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
541 	if (!fib)
542 		return ERR_PTR(-ENOMEM);
543 	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
544 	if (err)
545 		goto err_rhashtable_init;
546 	INIT_LIST_HEAD(&fib->node_list);
547 	fib->proto = proto;
548 	fib->vr = vr;
549 	fib->lpm_tree = lpm_tree;
550 	mlxsw_sp_lpm_tree_hold(lpm_tree);
551 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
552 	if (err)
553 		goto err_lpm_tree_bind;
554 	return fib;
555 
556 err_lpm_tree_bind:
557 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
558 err_rhashtable_init:
559 	kfree(fib);
560 	return ERR_PTR(err);
561 }
562 
563 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
564 				 struct mlxsw_sp_fib *fib)
565 {
566 	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
567 	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
568 	WARN_ON(!list_empty(&fib->node_list));
569 	rhashtable_destroy(&fib->ht);
570 	kfree(fib);
571 }
572 
573 static struct mlxsw_sp_lpm_tree *
574 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
575 {
576 	struct mlxsw_sp_lpm_tree *lpm_tree;
577 	int i;
578 
579 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
580 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
581 		if (refcount_read(&lpm_tree->ref_count) == 0)
582 			return lpm_tree;
583 	}
584 	return NULL;
585 }
586 
587 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
588 				   struct mlxsw_sp_lpm_tree *lpm_tree)
589 {
590 	char ralta_pl[MLXSW_REG_RALTA_LEN];
591 
592 	mlxsw_reg_ralta_pack(ralta_pl, true,
593 			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
594 			     lpm_tree->id);
595 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
596 }
597 
598 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
599 				   struct mlxsw_sp_lpm_tree *lpm_tree)
600 {
601 	char ralta_pl[MLXSW_REG_RALTA_LEN];
602 
603 	mlxsw_reg_ralta_pack(ralta_pl, false,
604 			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
605 			     lpm_tree->id);
606 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
607 }
608 
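/* Program the tree's structure: the root bin is the longest in-use prefix
 * length, and each bin's left child is the next shorter in-use one, so the
 * bins form a single chain from the longest prefix down to the shortest.
 */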
609 static int
610 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
611 				  struct mlxsw_sp_prefix_usage *prefix_usage,
612 				  struct mlxsw_sp_lpm_tree *lpm_tree)
613 {
614 	char ralst_pl[MLXSW_REG_RALST_LEN];
615 	u8 root_bin = 0;
616 	u8 prefix;
617 	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
618 
619 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
620 		root_bin = prefix;
621 
622 	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
623 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
624 		if (prefix == 0)
625 			continue;
626 		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
627 					 MLXSW_REG_RALST_BIN_NO_CHILD);
628 		last_prefix = prefix;
629 	}
630 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
631 }
632 
633 static struct mlxsw_sp_lpm_tree *
634 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
635 			 struct mlxsw_sp_prefix_usage *prefix_usage,
636 			 enum mlxsw_sp_l3proto proto)
637 {
638 	struct mlxsw_sp_lpm_tree *lpm_tree;
639 	int err;
640 
641 	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
642 	if (!lpm_tree)
643 		return ERR_PTR(-EBUSY);
644 	lpm_tree->proto = proto;
645 	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
646 	if (err)
647 		return ERR_PTR(err);
648 
649 	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
650 						lpm_tree);
651 	if (err)
652 		goto err_left_struct_set;
653 	memcpy(&lpm_tree->prefix_usage, prefix_usage,
654 	       sizeof(lpm_tree->prefix_usage));
655 	memset(&lpm_tree->prefix_ref_count, 0,
656 	       sizeof(lpm_tree->prefix_ref_count));
657 	refcount_set(&lpm_tree->ref_count, 1);
658 	return lpm_tree;
659 
660 err_left_struct_set:
661 	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
662 	return ERR_PTR(err);
663 }
664 
665 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
666 				      struct mlxsw_sp_lpm_tree *lpm_tree)
667 {
668 	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
669 }
670 
671 static struct mlxsw_sp_lpm_tree *
672 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
673 		      struct mlxsw_sp_prefix_usage *prefix_usage,
674 		      enum mlxsw_sp_l3proto proto)
675 {
676 	struct mlxsw_sp_lpm_tree *lpm_tree;
677 	int i;
678 
679 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
680 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
681 		if (refcount_read(&lpm_tree->ref_count) &&
682 		    lpm_tree->proto == proto &&
683 		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
684 					     prefix_usage)) {
685 			mlxsw_sp_lpm_tree_hold(lpm_tree);
686 			return lpm_tree;
687 		}
688 	}
689 	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
690 }
691 
692 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
693 {
694 	refcount_inc(&lpm_tree->ref_count);
695 }
696 
697 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
698 				  struct mlxsw_sp_lpm_tree *lpm_tree)
699 {
700 	if (!refcount_dec_and_test(&lpm_tree->ref_count))
701 		return;
702 	mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
703 }
704 
705 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
706 
707 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
708 {
709 	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
710 	struct mlxsw_sp_lpm_tree *lpm_tree;
711 	u64 max_trees;
712 	int err, i;
713 
714 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
715 		return -EIO;
716 
717 	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
718 	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
719 	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
720 					     sizeof(struct mlxsw_sp_lpm_tree),
721 					     GFP_KERNEL);
722 	if (!mlxsw_sp->router->lpm.trees)
723 		return -ENOMEM;
724 
725 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
726 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
727 		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
728 	}
729 
730 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
731 					 MLXSW_SP_L3_PROTO_IPV4);
732 	if (IS_ERR(lpm_tree)) {
733 		err = PTR_ERR(lpm_tree);
734 		goto err_ipv4_tree_get;
735 	}
736 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
737 
738 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
739 					 MLXSW_SP_L3_PROTO_IPV6);
740 	if (IS_ERR(lpm_tree)) {
741 		err = PTR_ERR(lpm_tree);
742 		goto err_ipv6_tree_get;
743 	}
744 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
745 
746 	return 0;
747 
748 err_ipv6_tree_get:
749 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
750 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
751 err_ipv4_tree_get:
752 	kfree(mlxsw_sp->router->lpm.trees);
753 	return err;
754 }
755 
756 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
757 {
758 	struct mlxsw_sp_lpm_tree *lpm_tree;
759 
760 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
761 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
762 
763 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
764 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
765 
766 	kfree(mlxsw_sp->router->lpm.trees);
767 }
768 
769 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
770 {
771 	return !!vr->fib4 || !!vr->fib6 ||
772 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
773 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
774 }
775 
776 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
777 {
778 	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
779 	struct mlxsw_sp_vr *vr;
780 	int i;
781 
782 	for (i = 0; i < max_vrs; i++) {
783 		vr = &mlxsw_sp->router->vrs[i];
784 		if (!mlxsw_sp_vr_is_used(vr))
785 			return vr;
786 	}
787 	return NULL;
788 }
789 
790 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
791 				     const struct mlxsw_sp_fib *fib, u8 tree_id)
792 {
793 	char raltb_pl[MLXSW_REG_RALTB_LEN];
794 
795 	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
796 			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
797 			     tree_id);
798 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
799 }
800 
801 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
802 				       const struct mlxsw_sp_fib *fib)
803 {
804 	char raltb_pl[MLXSW_REG_RALTB_LEN];
805 
806 	/* Bind to tree 0 which is default */
807 	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
808 			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
809 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
810 }
811 
812 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
813 {
814 	/* For our purpose, squash main, default and local tables into one */
815 	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
816 		tb_id = RT_TABLE_MAIN;
817 	return tb_id;
818 }
819 
820 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
821 					    u32 tb_id)
822 {
823 	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
824 	struct mlxsw_sp_vr *vr;
825 	int i;
826 
827 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
828 
829 	for (i = 0; i < max_vrs; i++) {
830 		vr = &mlxsw_sp->router->vrs[i];
831 		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
832 			return vr;
833 	}
834 	return NULL;
835 }
836 
837 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
838 				u16 *vr_id)
839 {
840 	struct mlxsw_sp_vr *vr;
841 	int err = 0;
842 
843 	mutex_lock(&mlxsw_sp->router->lock);
844 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
845 	if (!vr) {
846 		err = -ESRCH;
847 		goto out;
848 	}
849 	*vr_id = vr->id;
850 out:
851 	mutex_unlock(&mlxsw_sp->router->lock);
852 	return err;
853 }
854 
855 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
856 					    enum mlxsw_sp_l3proto proto)
857 {
858 	switch (proto) {
859 	case MLXSW_SP_L3_PROTO_IPV4:
860 		return vr->fib4;
861 	case MLXSW_SP_L3_PROTO_IPV6:
862 		return vr->fib6;
863 	}
864 	return NULL;
865 }
866 
867 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
868 					      u32 tb_id,
869 					      struct netlink_ext_ack *extack)
870 {
871 	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
872 	struct mlxsw_sp_fib *fib4;
873 	struct mlxsw_sp_fib *fib6;
874 	struct mlxsw_sp_vr *vr;
875 	int err;
876 
877 	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
878 	if (!vr) {
879 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
880 		return ERR_PTR(-EBUSY);
881 	}
882 	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
883 	if (IS_ERR(fib4))
884 		return ERR_CAST(fib4);
885 	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
886 	if (IS_ERR(fib6)) {
887 		err = PTR_ERR(fib6);
888 		goto err_fib6_create;
889 	}
890 	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
891 					     MLXSW_SP_L3_PROTO_IPV4);
892 	if (IS_ERR(mr4_table)) {
893 		err = PTR_ERR(mr4_table);
894 		goto err_mr4_table_create;
895 	}
896 	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
897 					     MLXSW_SP_L3_PROTO_IPV6);
898 	if (IS_ERR(mr6_table)) {
899 		err = PTR_ERR(mr6_table);
900 		goto err_mr6_table_create;
901 	}
902 
903 	vr->fib4 = fib4;
904 	vr->fib6 = fib6;
905 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
906 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
907 	vr->tb_id = tb_id;
908 	return vr;
909 
910 err_mr6_table_create:
911 	mlxsw_sp_mr_table_destroy(mr4_table);
912 err_mr4_table_create:
913 	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
914 err_fib6_create:
915 	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
916 	return ERR_PTR(err);
917 }
918 
919 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
920 				struct mlxsw_sp_vr *vr)
921 {
922 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
923 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
924 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
925 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
926 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
927 	vr->fib6 = NULL;
928 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
929 	vr->fib4 = NULL;
930 }
931 
932 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
933 					   struct netlink_ext_ack *extack)
934 {
935 	struct mlxsw_sp_vr *vr;
936 
937 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
938 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
939 	if (!vr)
940 		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
941 	return vr;
942 }
943 
944 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
945 {
946 	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
947 	    list_empty(&vr->fib6->node_list) &&
948 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
949 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
950 		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
951 }
952 
953 static bool
954 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
955 				    enum mlxsw_sp_l3proto proto, u8 tree_id)
956 {
957 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
958 
959 	if (!mlxsw_sp_vr_is_used(vr))
960 		return false;
961 	if (fib->lpm_tree->id == tree_id)
962 		return true;
963 	return false;
964 }
965 
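/* Rebind the FIB to a new LPM tree in a make-before-break manner: take a
 * hold on the new tree before binding, and fall back to the old tree if
 * the bind fails.
 */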
966 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
967 					struct mlxsw_sp_fib *fib,
968 					struct mlxsw_sp_lpm_tree *new_tree)
969 {
970 	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
971 	int err;
972 
973 	fib->lpm_tree = new_tree;
974 	mlxsw_sp_lpm_tree_hold(new_tree);
975 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
976 	if (err)
977 		goto err_tree_bind;
978 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
979 	return 0;
980 
981 err_tree_bind:
982 	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
983 	fib->lpm_tree = old_tree;
984 	return err;
985 }
986 
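/* Migrate all virtual routers that use the protocol's current default tree
 * over to new_tree, rolling already-migrated ones back on failure.
 */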
987 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
988 					 struct mlxsw_sp_fib *fib,
989 					 struct mlxsw_sp_lpm_tree *new_tree)
990 {
991 	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
992 	enum mlxsw_sp_l3proto proto = fib->proto;
993 	struct mlxsw_sp_lpm_tree *old_tree;
994 	u8 old_id, new_id = new_tree->id;
995 	struct mlxsw_sp_vr *vr;
996 	int i, err;
997 
998 	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
999 	old_id = old_tree->id;
1000 
1001 	for (i = 0; i < max_vrs; i++) {
1002 		vr = &mlxsw_sp->router->vrs[i];
1003 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
1004 			continue;
1005 		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
1006 						   mlxsw_sp_vr_fib(vr, proto),
1007 						   new_tree);
1008 		if (err)
1009 			goto err_tree_replace;
1010 	}
1011 
1012 	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
1013 	       sizeof(new_tree->prefix_ref_count));
1014 	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
1015 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
1016 
1017 	return 0;
1018 
1019 err_tree_replace:
1020 	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
1021 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
1022 			continue;
1023 		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
1024 					     mlxsw_sp_vr_fib(vr, proto),
1025 					     old_tree);
1026 	}
1027 	return err;
1028 }
1029 
1030 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
1031 {
1032 	struct mlxsw_sp_vr *vr;
1033 	u64 max_vrs;
1034 	int i;
1035 
1036 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
1037 		return -EIO;
1038 
1039 	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
1040 	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
1041 					GFP_KERNEL);
1042 	if (!mlxsw_sp->router->vrs)
1043 		return -ENOMEM;
1044 
1045 	for (i = 0; i < max_vrs; i++) {
1046 		vr = &mlxsw_sp->router->vrs[i];
1047 		vr->id = i;
1048 	}
1049 
1050 	return 0;
1051 }
1052 
1053 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
1054 
1055 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
1056 {
1057 	/* At this stage we're guaranteed not to have new incoming
1058 	 * FIB notifications and the work queue is free from FIBs
1059 	 * sitting on top of mlxsw netdevs. However, we can still
1060 	 * have other FIBs queued. Flush the queue before flushing
1061 	 * the device's tables. No need for locks, as we're the only
1062 	 * writer.
1063 	 */
1064 	mlxsw_core_flush_owq();
1065 	mlxsw_sp_router_fib_flush(mlxsw_sp);
1066 	kfree(mlxsw_sp->router->vrs);
1067 }
1068 
1069 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
1070 {
1071 	struct net_device *d;
1072 	u32 tb_id;
1073 
1074 	rcu_read_lock();
1075 	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1076 	if (d)
1077 		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
1078 	else
1079 		tb_id = RT_TABLE_MAIN;
1080 	rcu_read_unlock();
1081 
1082 	return tb_id;
1083 }
1084 
1085 static void
1086 mlxsw_sp_crif_init(struct mlxsw_sp_crif *crif, struct net_device *dev)
1087 {
1088 	crif->key.dev = dev;
1089 	INIT_LIST_HEAD(&crif->nexthop_list);
1090 }
1091 
1092 static struct mlxsw_sp_crif *
1093 mlxsw_sp_crif_alloc(struct net_device *dev)
1094 {
1095 	struct mlxsw_sp_crif *crif;
1096 
1097 	crif = kzalloc(sizeof(*crif), GFP_KERNEL);
1098 	if (!crif)
1099 		return NULL;
1100 
1101 	mlxsw_sp_crif_init(crif, dev);
1102 	return crif;
1103 }
1104 
1105 static void mlxsw_sp_crif_free(struct mlxsw_sp_crif *crif)
1106 {
1107 	if (WARN_ON(crif->rif))
1108 		return;
1109 
1110 	WARN_ON(!list_empty(&crif->nexthop_list));
1111 	kfree(crif);
1112 }
1113 
1114 static int mlxsw_sp_crif_insert(struct mlxsw_sp_router *router,
1115 				struct mlxsw_sp_crif *crif)
1116 {
1117 	return rhashtable_insert_fast(&router->crif_ht, &crif->ht_node,
1118 				      mlxsw_sp_crif_ht_params);
1119 }
1120 
1121 static void mlxsw_sp_crif_remove(struct mlxsw_sp_router *router,
1122 				 struct mlxsw_sp_crif *crif)
1123 {
1124 	rhashtable_remove_fast(&router->crif_ht, &crif->ht_node,
1125 			       mlxsw_sp_crif_ht_params);
1126 }
1127 
1128 static struct mlxsw_sp_crif *
1129 mlxsw_sp_crif_lookup(struct mlxsw_sp_router *router,
1130 		     const struct net_device *dev)
1131 {
1132 	struct mlxsw_sp_crif_key key = {
1133 		.dev = (struct net_device *)dev,
1134 	};
1135 
1136 	return rhashtable_lookup_fast(&router->crif_ht, &key,
1137 				      mlxsw_sp_crif_ht_params);
1138 }
1139 
1140 static struct mlxsw_sp_rif *
1141 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
1142 		    const struct mlxsw_sp_rif_params *params,
1143 		    struct netlink_ext_ack *extack);
1144 
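/* Create the loopback RIF that represents the overlay device of an IPIP
 * tunnel. The loopback configuration is taken from the tunnel type's
 * ol_loopback_config() callback.
 */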
1145 static struct mlxsw_sp_rif_ipip_lb *
1146 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
1147 				enum mlxsw_sp_ipip_type ipipt,
1148 				struct net_device *ol_dev,
1149 				struct netlink_ext_ack *extack)
1150 {
1151 	struct mlxsw_sp_rif_params_ipip_lb lb_params;
1152 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1153 	struct mlxsw_sp_rif *rif;
1154 
1155 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1156 	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1157 		.common.dev = ol_dev,
1158 		.common.lag = false,
1159 		.common.double_entry = ipip_ops->double_rif_entry,
1160 		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1161 	};
1162 
1163 	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1164 	if (IS_ERR(rif))
1165 		return ERR_CAST(rif);
1166 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1167 }
1168 
1169 static struct mlxsw_sp_ipip_entry *
1170 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1171 			  enum mlxsw_sp_ipip_type ipipt,
1172 			  struct net_device *ol_dev)
1173 {
1174 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1175 	struct mlxsw_sp_ipip_entry *ipip_entry;
1176 	struct mlxsw_sp_ipip_entry *ret = NULL;
1177 	int err;
1178 
1179 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1180 	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1181 	if (!ipip_entry)
1182 		return ERR_PTR(-ENOMEM);
1183 
1184 	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1185 							    ol_dev, NULL);
1186 	if (IS_ERR(ipip_entry->ol_lb)) {
1187 		ret = ERR_CAST(ipip_entry->ol_lb);
1188 		goto err_ol_ipip_lb_create;
1189 	}
1190 
1191 	ipip_entry->ipipt = ipipt;
1192 	ipip_entry->ol_dev = ol_dev;
1193 	ipip_entry->parms = ipip_ops->parms_init(ol_dev);
1194 
1195 	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
1196 	if (err) {
1197 		ret = ERR_PTR(err);
1198 		goto err_rem_ip_addr_set;
1199 	}
1200 
1201 	return ipip_entry;
1202 
1203 err_rem_ip_addr_set:
1204 	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1205 err_ol_ipip_lb_create:
1206 	kfree(ipip_entry);
1207 	return ret;
1208 }
1209 
1210 static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
1211 					struct mlxsw_sp_ipip_entry *ipip_entry)
1212 {
1213 	const struct mlxsw_sp_ipip_ops *ipip_ops =
1214 		mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1215 
1216 	ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
1217 	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1218 	kfree(ipip_entry);
1219 }
1220 
1221 static bool
1222 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1223 				  const enum mlxsw_sp_l3proto ul_proto,
1224 				  union mlxsw_sp_l3addr saddr,
1225 				  u32 ul_tb_id,
1226 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1227 {
1228 	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1229 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1230 	union mlxsw_sp_l3addr tun_saddr;
1231 
1232 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1233 		return false;
1234 
1235 	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1236 	return tun_ul_tb_id == ul_tb_id &&
1237 	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1238 }
1239 
1240 static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
1241 						 enum mlxsw_sp_ipip_type ipipt)
1242 {
1243 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1244 
1245 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1246 
1247 	/* Not all tunnels require increasing the default parsing depth
1248 	 * (96 bytes).
1249 	 */
1250 	if (ipip_ops->inc_parsing_depth)
1251 		return mlxsw_sp_parsing_depth_inc(mlxsw_sp);
1252 
1253 	return 0;
1254 }
1255 
1256 static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
1257 						  enum mlxsw_sp_ipip_type ipipt)
1258 {
1259 	const struct mlxsw_sp_ipip_ops *ipip_ops =
1260 		mlxsw_sp->router->ipip_ops_arr[ipipt];
1261 
1262 	if (ipip_ops->inc_parsing_depth)
1263 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
1264 }
1265 
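/* Initialize the decap state of a FIB entry: allocate an adjacency entry
 * to use as the tunnel index, and increase the parsing depth if the tunnel
 * type requires it.
 */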
1266 static int
1267 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1268 			      struct mlxsw_sp_fib_entry *fib_entry,
1269 			      struct mlxsw_sp_ipip_entry *ipip_entry)
1270 {
1271 	u32 tunnel_index;
1272 	int err;
1273 
1274 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1275 				  1, &tunnel_index);
1276 	if (err)
1277 		return err;
1278 
1279 	err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
1280 						    ipip_entry->ipipt);
1281 	if (err)
1282 		goto err_parsing_depth_inc;
1283 
1284 	ipip_entry->decap_fib_entry = fib_entry;
1285 	fib_entry->decap.ipip_entry = ipip_entry;
1286 	fib_entry->decap.tunnel_index = tunnel_index;
1287 
1288 	return 0;
1289 
1290 err_parsing_depth_inc:
1291 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
1292 			   tunnel_index);
1293 	return err;
1294 }
1295 
1296 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1297 					  struct mlxsw_sp_fib_entry *fib_entry)
1298 {
1299 	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;
1300 
1301 	/* Unlink this node from the IPIP entry for which it is the decap entry. */
1302 	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1303 	fib_entry->decap.ipip_entry = NULL;
1304 	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
1305 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1306 			   1, fib_entry->decap.tunnel_index);
1307 }
1308 
1309 static struct mlxsw_sp_fib_node *
1310 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1311 			 size_t addr_len, unsigned char prefix_len);
1312 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1313 				     struct mlxsw_sp_fib_entry *fib_entry);
1314 
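/* Turn the IPIP entry's decap route back into an ordinary trap entry,
 * severing the link between the two objects.
 */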
1315 static void
1316 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1317 				 struct mlxsw_sp_ipip_entry *ipip_entry)
1318 {
1319 	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1320 
1321 	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1322 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1323 
1324 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1325 }
1326 
1327 static void
1328 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1329 				  struct mlxsw_sp_ipip_entry *ipip_entry,
1330 				  struct mlxsw_sp_fib_entry *decap_fib_entry)
1331 {
1332 	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1333 					  ipip_entry))
1334 		return;
1335 	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1336 
1337 	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1338 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1339 }
1340 
1341 static struct mlxsw_sp_fib_entry *
1342 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1343 				     enum mlxsw_sp_l3proto proto,
1344 				     const union mlxsw_sp_l3addr *addr,
1345 				     enum mlxsw_sp_fib_entry_type type)
1346 {
1347 	struct mlxsw_sp_fib_node *fib_node;
1348 	unsigned char addr_prefix_len;
1349 	struct mlxsw_sp_fib *fib;
1350 	struct mlxsw_sp_vr *vr;
1351 	const void *addrp;
1352 	size_t addr_len;
1353 	u32 addr4;
1354 
1355 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1356 	if (!vr)
1357 		return NULL;
1358 	fib = mlxsw_sp_vr_fib(vr, proto);
1359 
1360 	switch (proto) {
1361 	case MLXSW_SP_L3_PROTO_IPV4:
1362 		addr4 = be32_to_cpu(addr->addr4);
1363 		addrp = &addr4;
1364 		addr_len = 4;
1365 		addr_prefix_len = 32;
1366 		break;
1367 	case MLXSW_SP_L3_PROTO_IPV6:
1368 		addrp = &addr->addr6;
1369 		addr_len = 16;
1370 		addr_prefix_len = 128;
1371 		break;
1372 	default:
1373 		WARN_ON(1);
1374 		return NULL;
1375 	}
1376 
1377 	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1378 					    addr_prefix_len);
1379 	if (!fib_node || fib_node->fib_entry->type != type)
1380 		return NULL;
1381 
1382 	return fib_node->fib_entry;
1383 }
1384 
1385 /* Given an IPIP entry, find the corresponding decap route. */
1386 static struct mlxsw_sp_fib_entry *
1387 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1388 			       struct mlxsw_sp_ipip_entry *ipip_entry)
1389 {
1390 	struct mlxsw_sp_fib_node *fib_node;
1391 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1392 	unsigned char saddr_prefix_len;
1393 	union mlxsw_sp_l3addr saddr;
1394 	struct mlxsw_sp_fib *ul_fib;
1395 	struct mlxsw_sp_vr *ul_vr;
1396 	const void *saddrp;
1397 	size_t saddr_len;
1398 	u32 ul_tb_id;
1399 	u32 saddr4;
1400 
1401 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1402 
1403 	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1404 	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1405 	if (!ul_vr)
1406 		return NULL;
1407 
1408 	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1409 	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1410 					   ipip_entry->ol_dev);
1411 
1412 	switch (ipip_ops->ul_proto) {
1413 	case MLXSW_SP_L3_PROTO_IPV4:
1414 		saddr4 = be32_to_cpu(saddr.addr4);
1415 		saddrp = &saddr4;
1416 		saddr_len = 4;
1417 		saddr_prefix_len = 32;
1418 		break;
1419 	case MLXSW_SP_L3_PROTO_IPV6:
1420 		saddrp = &saddr.addr6;
1421 		saddr_len = 16;
1422 		saddr_prefix_len = 128;
1423 		break;
1424 	default:
1425 		WARN_ON(1);
1426 		return NULL;
1427 	}
1428 
1429 	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1430 					    saddr_prefix_len);
1431 	if (!fib_node ||
1432 	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1433 		return NULL;
1434 
1435 	return fib_node->fib_entry;
1436 }
1437 
1438 static struct mlxsw_sp_ipip_entry *
1439 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1440 			   enum mlxsw_sp_ipip_type ipipt,
1441 			   struct net_device *ol_dev)
1442 {
1443 	struct mlxsw_sp_ipip_entry *ipip_entry;
1444 
1445 	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1446 	if (IS_ERR(ipip_entry))
1447 		return ipip_entry;
1448 
1449 	list_add_tail(&ipip_entry->ipip_list_node,
1450 		      &mlxsw_sp->router->ipip_list);
1451 
1452 	return ipip_entry;
1453 }
1454 
1455 static void
1456 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1457 			    struct mlxsw_sp_ipip_entry *ipip_entry)
1458 {
1459 	list_del(&ipip_entry->ipip_list_node);
1460 	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
1461 }
1462 
1463 static bool
1464 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1465 				  const struct net_device *ul_dev,
1466 				  enum mlxsw_sp_l3proto ul_proto,
1467 				  union mlxsw_sp_l3addr ul_dip,
1468 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1469 {
1470 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1471 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1472 
1473 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1474 		return false;
1475 
1476 	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1477 						 ul_tb_id, ipip_entry);
1478 }
1479 
1480 /* Given decap parameters, find the corresponding IPIP entry. */
1481 static struct mlxsw_sp_ipip_entry *
1482 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
1483 				  enum mlxsw_sp_l3proto ul_proto,
1484 				  union mlxsw_sp_l3addr ul_dip)
1485 {
1486 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1487 	struct net_device *ul_dev;
1488 
1489 	rcu_read_lock();
1490 
1491 	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
1492 	if (!ul_dev)
1493 		goto out_unlock;
1494 
1495 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1496 			    ipip_list_node)
1497 		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1498 						      ul_proto, ul_dip,
1499 						      ipip_entry))
1500 			goto out_unlock;
1501 
1502 	rcu_read_unlock();
1503 
1504 	return NULL;
1505 
1506 out_unlock:
1507 	rcu_read_unlock();
1508 	return ipip_entry;
1509 }
1510 
1511 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1512 				      const struct net_device *dev,
1513 				      enum mlxsw_sp_ipip_type *p_type)
1514 {
1515 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1516 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1517 	enum mlxsw_sp_ipip_type ipipt;
1518 
1519 	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1520 		ipip_ops = router->ipip_ops_arr[ipipt];
1521 		if (dev->type == ipip_ops->dev_type) {
1522 			if (p_type)
1523 				*p_type = ipipt;
1524 			return true;
1525 		}
1526 	}
1527 	return false;
1528 }
1529 
1530 static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1531 				       const struct net_device *dev)
1532 {
1533 	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1534 }
1535 
1536 static struct mlxsw_sp_ipip_entry *
1537 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1538 				   const struct net_device *ol_dev)
1539 {
1540 	struct mlxsw_sp_ipip_entry *ipip_entry;
1541 
1542 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1543 			    ipip_list_node)
1544 		if (ipip_entry->ol_dev == ol_dev)
1545 			return ipip_entry;
1546 
1547 	return NULL;
1548 }
1549 
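/* Find the next IPIP entry whose underlay device is ul_dev. The walk
 * starts after @start, or from the beginning of the list if @start is
 * NULL.
 */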
1550 static struct mlxsw_sp_ipip_entry *
1551 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1552 				   const struct net_device *ul_dev,
1553 				   struct mlxsw_sp_ipip_entry *start)
1554 {
1555 	struct mlxsw_sp_ipip_entry *ipip_entry;
1556 
1557 	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1558 					ipip_list_node);
1559 	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1560 				     ipip_list_node) {
1561 		struct net_device *ol_dev = ipip_entry->ol_dev;
1562 		struct net_device *ipip_ul_dev;
1563 
1564 		rcu_read_lock();
1565 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1566 		rcu_read_unlock();
1567 
1568 		if (ipip_ul_dev == ul_dev)
1569 			return ipip_entry;
1570 	}
1571 
1572 	return NULL;
1573 }
1574 
1575 static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1576 				       const struct net_device *dev)
1577 {
1578 	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1579 }
1580 
1581 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1582 						const struct net_device *ol_dev,
1583 						enum mlxsw_sp_ipip_type ipipt)
1584 {
1585 	const struct mlxsw_sp_ipip_ops *ops
1586 		= mlxsw_sp->router->ipip_ops_arr[ipipt];
1587 
1588 	return ops->can_offload(mlxsw_sp, ol_dev);
1589 }
1590 
1591 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1592 						struct net_device *ol_dev)
1593 {
1594 	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1595 	struct mlxsw_sp_ipip_entry *ipip_entry;
1596 	enum mlxsw_sp_l3proto ul_proto;
1597 	union mlxsw_sp_l3addr saddr;
1598 	u32 ul_tb_id;
1599 
1600 	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1601 	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1602 		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1603 		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1604 		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1605 		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1606 							  saddr, ul_tb_id,
1607 							  NULL)) {
1608 			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1609 								ol_dev);
1610 			if (IS_ERR(ipip_entry))
1611 				return PTR_ERR(ipip_entry);
1612 		}
1613 	}
1614 
1615 	return 0;
1616 }
1617 
1618 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1619 						   struct net_device *ol_dev)
1620 {
1621 	struct mlxsw_sp_ipip_entry *ipip_entry;
1622 
1623 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1624 	if (ipip_entry)
1625 		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1626 }
1627 
1628 static void
1629 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1630 				struct mlxsw_sp_ipip_entry *ipip_entry)
1631 {
1632 	struct mlxsw_sp_fib_entry *decap_fib_entry;
1633 
1634 	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1635 	if (decap_fib_entry)
1636 		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1637 						  decap_fib_entry);
1638 }
1639 
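/* Write the loopback RIF's RITR register, encoding the tunnel's underlay
 * (VR on Spectrum-1, RIF on Spectrum-2 and later) and local address.
 */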
1640 static int
1641 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1642 			u16 ul_rif_id, bool enable)
1643 {
1644 	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1645 	struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
1646 	enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
1647 	struct mlxsw_sp_rif *rif = &lb_rif->common;
1648 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1649 	char ritr_pl[MLXSW_REG_RITR_LEN];
1650 	struct in6_addr *saddr6;
1651 	u32 saddr4;
1652 
1653 	ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
1654 	switch (lb_cf.ul_protocol) {
1655 	case MLXSW_SP_L3_PROTO_IPV4:
1656 		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1657 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1658 				    rif->rif_index, rif->vr_id, dev->mtu);
1659 		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1660 						   ipip_options, ul_vr_id,
1661 						   ul_rif_id, saddr4,
1662 						   lb_cf.okey);
1663 		break;
1664 
1665 	case MLXSW_SP_L3_PROTO_IPV6:
1666 		saddr6 = &lb_cf.saddr.addr6;
1667 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1668 				    rif->rif_index, rif->vr_id, dev->mtu);
1669 		mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
1670 						   ipip_options, ul_vr_id,
1671 						   ul_rif_id, saddr6,
1672 						   lb_cf.okey);
1673 		break;
1674 	}
1675 
1676 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1677 }
1678 
1679 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1680 						 struct net_device *ol_dev)
1681 {
1682 	struct mlxsw_sp_ipip_entry *ipip_entry;
1683 	struct mlxsw_sp_rif_ipip_lb *lb_rif;
1684 	int err = 0;
1685 
1686 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1687 	if (ipip_entry) {
1688 		lb_rif = ipip_entry->ol_lb;
1689 		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1690 					      lb_rif->ul_rif_id, true);
1691 		if (err)
1692 			goto out;
1693 		lb_rif->common.mtu = ol_dev->mtu;
1694 	}
1695 
1696 out:
1697 	return err;
1698 }
1699 
1700 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1701 						struct net_device *ol_dev)
1702 {
1703 	struct mlxsw_sp_ipip_entry *ipip_entry;
1704 
1705 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1706 	if (ipip_entry)
1707 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1708 }
1709 
1710 static void
1711 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1712 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1713 {
1714 	if (ipip_entry->decap_fib_entry)
1715 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1716 }
1717 
1718 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1719 						  struct net_device *ol_dev)
1720 {
1721 	struct mlxsw_sp_ipip_entry *ipip_entry;
1722 
1723 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1724 	if (ipip_entry)
1725 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1726 }
1727 
1728 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1729 					struct mlxsw_sp_rif *rif);
1730 
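/* Destroy old_rif after new_rif has taken over its role. If migrate_nhs,
 * nexthops are first re-pointed at new_rif; either way a mock CRIF is
 * planted so that destroying old_rif does not disturb the real CRIF.
 */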
1731 static void mlxsw_sp_rif_migrate_destroy(struct mlxsw_sp *mlxsw_sp,
1732 					 struct mlxsw_sp_rif *old_rif,
1733 					 struct mlxsw_sp_rif *new_rif,
1734 					 bool migrate_nhs)
1735 {
1736 	struct mlxsw_sp_crif *crif = old_rif->crif;
1737 	struct mlxsw_sp_crif mock_crif = {};
1738 
1739 	if (migrate_nhs)
1740 		mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
1741 
1742 	/* Plant a mock CRIF so that destroying the old RIF doesn't unoffload
1743 	 * our nexthops and IPIP tunnels, and doesn't sever the crif->rif link.
1744 	 */
1745 	mlxsw_sp_crif_init(&mock_crif, crif->key.dev);
1746 	old_rif->crif = &mock_crif;
1747 	mock_crif.rif = old_rif;
1748 	mlxsw_sp_rif_destroy(old_rif);
1749 }
1750 
1751 static int
1752 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1753 				 struct mlxsw_sp_ipip_entry *ipip_entry,
1754 				 bool keep_encap,
1755 				 struct netlink_ext_ack *extack)
1756 {
1757 	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1758 	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1759 
1760 	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1761 						     ipip_entry->ipipt,
1762 						     ipip_entry->ol_dev,
1763 						     extack);
1764 	if (IS_ERR(new_lb_rif))
1765 		return PTR_ERR(new_lb_rif);
1766 	ipip_entry->ol_lb = new_lb_rif;
1767 
1768 	mlxsw_sp_rif_migrate_destroy(mlxsw_sp, &old_lb_rif->common,
1769 				     &new_lb_rif->common, keep_encap);
1770 	return 0;
1771 }
1772 
1773 /**
1774  * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1775  * @mlxsw_sp: mlxsw_sp.
1776  * @ipip_entry: IPIP entry.
1777  * @recreate_loopback: Recreates the associated loopback RIF.
1778  * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1779  *              relevant when recreate_loopback is true.
1780  * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1781  *                   is only relevant when recreate_loopback is false.
1782  * @extack: extack.
1783  *
1784  * Return: Non-zero value on failure.
1785  */
1786 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1787 					struct mlxsw_sp_ipip_entry *ipip_entry,
1788 					bool recreate_loopback,
1789 					bool keep_encap,
1790 					bool update_nexthops,
1791 					struct netlink_ext_ack *extack)
1792 {
1793 	int err;
1794 
1795 	/* RIFs can't be edited, so to update loopback, we need to destroy and
1796 	 * recreate it. That creates a window of opportunity where RALUE and
1797 	 * RATR registers end up referencing a RIF that's already gone. RATRs
1798 	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1799 	 * of RALUE, demote the decap route back.
1800 	 */
1801 	if (ipip_entry->decap_fib_entry)
1802 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1803 
1804 	if (recreate_loopback) {
1805 		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1806 						       keep_encap, extack);
1807 		if (err)
1808 			return err;
1809 	} else if (update_nexthops) {
1810 		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1811 					    &ipip_entry->ol_lb->common);
1812 	}
1813 
1814 	if (ipip_entry->ol_dev->flags & IFF_UP)
1815 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1816 
1817 	return 0;
1818 }
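
/* A usage sketch, not part of the driver: the flag combinations that the
 * callers which follow in this file pass to
 * __mlxsw_sp_ipip_entry_update_tunnel() are:
 *
 *	recreate_loopback  keep_encap  update_nexthops  caller
 *	true               false       false            overlay moved to a VRF
 *	true               true        false            underlay moved to a VRF
 *	false              false       true             underlay went up / down
 *
 * Other combinations are currently unused by these callers.
 */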
1819 
1820 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1821 						struct net_device *ol_dev,
1822 						struct netlink_ext_ack *extack)
1823 {
1824 	struct mlxsw_sp_ipip_entry *ipip_entry =
1825 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1826 
1827 	if (!ipip_entry)
1828 		return 0;
1829 
1830 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1831 						   true, false, false, extack);
1832 }
1833 
1834 static int
1835 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1836 				     struct mlxsw_sp_ipip_entry *ipip_entry,
1837 				     struct net_device *ul_dev,
1838 				     bool *demote_this,
1839 				     struct netlink_ext_ack *extack)
1840 {
1841 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1842 	enum mlxsw_sp_l3proto ul_proto;
1843 	union mlxsw_sp_l3addr saddr;
1844 
1845 	/* Moving underlay to a different VRF might cause local address
1846 	 * conflict, and the conflicting tunnels need to be demoted.
1847 	 */
1848 	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1849 	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1850 	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1851 						 saddr, ul_tb_id,
1852 						 ipip_entry)) {
1853 		*demote_this = true;
1854 		return 0;
1855 	}
1856 
1857 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1858 						   true, true, false, extack);
1859 }
1860 
1861 static int
1862 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1863 				    struct mlxsw_sp_ipip_entry *ipip_entry,
1864 				    struct net_device *ul_dev)
1865 {
1866 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1867 						   false, false, true, NULL);
1868 }
1869 
1870 static int
1871 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1872 				      struct mlxsw_sp_ipip_entry *ipip_entry,
1873 				      struct net_device *ul_dev)
1874 {
1875 	/* When the underlay device goes down, encapsulated packets are no
1876 	 * longer forwarded, but decap still works. So refresh the nexthops
1877 	 * without touching anything else.
1878 	 */
1879 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1880 						   false, false, true, NULL);
1881 }
1882 
1883 static int
1884 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1885 					struct net_device *ol_dev,
1886 					struct netlink_ext_ack *extack)
1887 {
1888 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1889 	struct mlxsw_sp_ipip_entry *ipip_entry;
1890 	int err;
1891 
1892 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1893 	if (!ipip_entry)
1894 		/* A change might make a tunnel eligible for offloading, but
1895 		 * that is currently not implemented. What falls to slow path
1896 		 * stays there.
1897 		 */
1898 		return 0;
1899 
1900 	/* A change might make a tunnel not eligible for offloading. */
1901 	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1902 						 ipip_entry->ipipt)) {
1903 		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1904 		return 0;
1905 	}
1906 
1907 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1908 	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1909 	return err;
1910 }
1911 
1912 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1913 				       struct mlxsw_sp_ipip_entry *ipip_entry)
1914 {
1915 	struct net_device *ol_dev = ipip_entry->ol_dev;
1916 
1917 	if (ol_dev->flags & IFF_UP)
1918 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1919 	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1920 }
1921 
1922 /* The configuration where several tunnels have the same local address in the
1923  * same underlay table needs special treatment in the HW. That is currently not
1924  * implemented in the driver. This function finds and demotes the first tunnel
1925  * with a given source address, except the one passed in the argument
1926  * `except'.
1927  */
1928 bool
1929 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1930 				     enum mlxsw_sp_l3proto ul_proto,
1931 				     union mlxsw_sp_l3addr saddr,
1932 				     u32 ul_tb_id,
1933 				     const struct mlxsw_sp_ipip_entry *except)
1934 {
1935 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1936 
1937 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1938 				 ipip_list_node) {
1939 		if (ipip_entry != except &&
1940 		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1941 						      ul_tb_id, ipip_entry)) {
1942 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1943 			return true;
1944 		}
1945 	}
1946 
1947 	return false;
1948 }
1949 
1950 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1951 						     struct net_device *ul_dev)
1952 {
1953 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1954 
1955 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1956 				 ipip_list_node) {
1957 		struct net_device *ol_dev = ipip_entry->ol_dev;
1958 		struct net_device *ipip_ul_dev;
1959 
1960 		rcu_read_lock();
1961 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1962 		rcu_read_unlock();
1963 		if (ipip_ul_dev == ul_dev)
1964 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1965 	}
1966 }
1967 
1968 static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1969 					    struct net_device *ol_dev,
1970 					    unsigned long event,
1971 					    struct netdev_notifier_info *info)
1972 {
1973 	struct netdev_notifier_changeupper_info *chup;
1974 	struct netlink_ext_ack *extack;
1975 	int err = 0;
1976 
1977 	switch (event) {
1978 	case NETDEV_REGISTER:
1979 		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1980 		break;
1981 	case NETDEV_UNREGISTER:
1982 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1983 		break;
1984 	case NETDEV_UP:
1985 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1986 		break;
1987 	case NETDEV_DOWN:
1988 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1989 		break;
1990 	case NETDEV_CHANGEUPPER:
1991 		chup = container_of(info, typeof(*chup), info);
1992 		extack = info->extack;
1993 		if (netif_is_l3_master(chup->upper_dev))
1994 			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1995 								   ol_dev,
1996 								   extack);
1997 		break;
1998 	case NETDEV_CHANGE:
1999 		extack = info->extack;
2000 		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
2001 							      ol_dev, extack);
2002 		break;
2003 	case NETDEV_CHANGEMTU:
2004 		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
2005 		break;
2006 	}
2007 	return err;
2008 }
2009 
2010 static int
2011 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2012 				   struct mlxsw_sp_ipip_entry *ipip_entry,
2013 				   struct net_device *ul_dev,
2014 				   bool *demote_this,
2015 				   unsigned long event,
2016 				   struct netdev_notifier_info *info)
2017 {
2018 	struct netdev_notifier_changeupper_info *chup;
2019 	struct netlink_ext_ack *extack;
2020 
2021 	switch (event) {
2022 	case NETDEV_CHANGEUPPER:
2023 		chup = container_of(info, typeof(*chup), info);
2024 		extack = info->extack;
2025 		if (netif_is_l3_master(chup->upper_dev))
2026 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
2027 								    ipip_entry,
2028 								    ul_dev,
2029 								    demote_this,
2030 								    extack);
2031 		break;
2032 
2033 	case NETDEV_UP:
2034 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
2035 							   ul_dev);
2036 	case NETDEV_DOWN:
2037 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
2038 							     ipip_entry,
2039 							     ul_dev);
2040 	}
2041 	return 0;
2042 }
2043 
2044 static int
2045 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2046 				 struct net_device *ul_dev,
2047 				 unsigned long event,
2048 				 struct netdev_notifier_info *info)
2049 {
2050 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
2051 	int err;
2052 
2053 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
2054 								ul_dev,
2055 								ipip_entry))) {
2056 		struct mlxsw_sp_ipip_entry *prev;
2057 		bool demote_this = false;
2058 
2059 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
2060 							 ul_dev, &demote_this,
2061 							 event, info);
2062 		if (err) {
2063 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
2064 								 ul_dev);
2065 			return err;
2066 		}
2067 
2068 		if (demote_this) {
2069 			if (list_is_first(&ipip_entry->ipip_list_node,
2070 					  &mlxsw_sp->router->ipip_list))
2071 				prev = NULL;
2072 			else
2073 				/* This can't be cached from the previous
2074 				 * iteration, because that entry could be gone by now.
2075 				 */
2076 				prev = list_prev_entry(ipip_entry,
2077 						       ipip_list_node);
2078 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
2079 			ipip_entry = prev;
2080 		}
2081 	}
2082 
2083 	return 0;
2084 }
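
/* Worked example for the walk above, assuming a hypothetical tunnel list
 * A -> B -> C sharing the underlay device: if B must be demoted, 'prev' is
 * set to A before B is destroyed, and the next call to
 * mlxsw_sp_ipip_entry_find_by_ul_dev() resumes the search after A, so the
 * freed B is never dereferenced and C is still visited.
 */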
2085 
2086 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2087 				      enum mlxsw_sp_l3proto ul_proto,
2088 				      const union mlxsw_sp_l3addr *ul_sip,
2089 				      u32 tunnel_index)
2090 {
2091 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2092 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2093 	struct mlxsw_sp_fib_entry *fib_entry;
2094 	int err = 0;
2095 
2096 	mutex_lock(&mlxsw_sp->router->lock);
2097 
2098 	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
2099 		err = -EINVAL;
2100 		goto out;
2101 	}
2102 
2103 	router->nve_decap_config.ul_tb_id = ul_tb_id;
2104 	router->nve_decap_config.tunnel_index = tunnel_index;
2105 	router->nve_decap_config.ul_proto = ul_proto;
2106 	router->nve_decap_config.ul_sip = *ul_sip;
2107 	router->nve_decap_config.valid = true;
2108 
2109 	/* It is valid to create a tunnel with a local IP and only later
2110 	 * assign this IP address to a local interface.
2111 	 */
2112 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2113 							 ul_proto, ul_sip,
2114 							 type);
2115 	if (!fib_entry)
2116 		goto out;
2117 
2118 	fib_entry->decap.tunnel_index = tunnel_index;
2119 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2120 
2121 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2122 	if (err)
2123 		goto err_fib_entry_update;
2124 
2125 	goto out;
2126 
2127 err_fib_entry_update:
2128 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2129 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2130 out:
2131 	mutex_unlock(&mlxsw_sp->router->lock);
2132 	return err;
2133 }
2134 
2135 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2136 				      enum mlxsw_sp_l3proto ul_proto,
2137 				      const union mlxsw_sp_l3addr *ul_sip)
2138 {
2139 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2140 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2141 	struct mlxsw_sp_fib_entry *fib_entry;
2142 
2143 	mutex_lock(&mlxsw_sp->router->lock);
2144 
2145 	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
2146 		goto out;
2147 
2148 	router->nve_decap_config.valid = false;
2149 
2150 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2151 							 ul_proto, ul_sip,
2152 							 type);
2153 	if (!fib_entry)
2154 		goto out;
2155 
2156 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2157 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2158 out:
2159 	mutex_unlock(&mlxsw_sp->router->lock);
2160 }
2161 
2162 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2163 					 u32 ul_tb_id,
2164 					 enum mlxsw_sp_l3proto ul_proto,
2165 					 const union mlxsw_sp_l3addr *ul_sip)
2166 {
2167 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2168 
2169 	return router->nve_decap_config.valid &&
2170 	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
2171 	       router->nve_decap_config.ul_proto == ul_proto &&
2172 	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2173 		       sizeof(*ul_sip));
2174 }
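
/* A minimal usage sketch, not part of the driver: how an NVE offload path
 * would pair the promote / demote calls above. The address, table and
 * tunnel index are hypothetical.
 */
static int __maybe_unused mlxsw_sp_nve_decap_example(struct mlxsw_sp *mlxsw_sp)
{
	union mlxsw_sp_l3addr ul_sip = {
		.addr4 = htonl(0xc0000201),	/* 192.0.2.1, hypothetical */
	};
	u32 tunnel_index = 8;			/* hypothetical KVDL index */
	int err;

	/* Promote: the IP2ME route for the local IP, if one exists, is
	 * reprogrammed from trapping to the CPU to NVE decapsulation.
	 */
	err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, RT_TABLE_MAIN,
						MLXSW_SP_L3_PROTO_IPV4,
						&ul_sip, tunnel_index);
	if (err)
		return err;

	/* ... tunnel in use ... */

	/* Demote: the route falls back to trapping to the CPU. */
	mlxsw_sp_router_nve_demote_decap(mlxsw_sp, RT_TABLE_MAIN,
					 MLXSW_SP_L3_PROTO_IPV4, &ul_sip);
	return 0;
}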
2175 
2176 struct mlxsw_sp_neigh_key {
2177 	struct neighbour *n;
2178 };
2179 
2180 struct mlxsw_sp_neigh_entry {
2181 	struct list_head rif_list_node;
2182 	struct rhash_head ht_node;
2183 	struct mlxsw_sp_neigh_key key;
2184 	u16 rif;
2185 	bool connected;
2186 	unsigned char ha[ETH_ALEN];
2187 	struct list_head nexthop_list; /* list of nexthops using
2188 					* this neigh entry
2189 					*/
2190 	struct list_head nexthop_neighs_list_node;
2191 	unsigned int counter_index;
2192 	bool counter_valid;
2193 };
2194 
2195 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2196 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2197 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2198 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
2199 };
2200 
2201 struct mlxsw_sp_neigh_entry *
2202 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2203 			struct mlxsw_sp_neigh_entry *neigh_entry)
2204 {
2205 	if (!neigh_entry) {
2206 		if (list_empty(&rif->neigh_list))
2207 			return NULL;
2208 		else
2209 			return list_first_entry(&rif->neigh_list,
2210 						typeof(*neigh_entry),
2211 						rif_list_node);
2212 	}
2213 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2214 		return NULL;
2215 	return list_next_entry(neigh_entry, rif_list_node);
2216 }
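
/* Usage sketch for the iterator above (hypothetical caller, e.g. a dpipe
 * dump):
 *
 *	struct mlxsw_sp_neigh_entry *neigh_entry = NULL;
 *
 *	while ((neigh_entry = mlxsw_sp_rif_neigh_next(rif, neigh_entry)))
 *		visit(neigh_entry);
 *
 * Passing NULL starts the walk; NULL is returned past the last entry.
 */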
2217 
2218 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2219 {
2220 	return neigh_entry->key.n->tbl->family;
2221 }
2222 
2223 unsigned char *
2224 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2225 {
2226 	return neigh_entry->ha;
2227 }
2228 
2229 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2230 {
2231 	struct neighbour *n;
2232 
2233 	n = neigh_entry->key.n;
2234 	return ntohl(*((__be32 *) n->primary_key));
2235 }
2236 
2237 struct in6_addr *
2238 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2239 {
2240 	struct neighbour *n;
2241 
2242 	n = neigh_entry->key.n;
2243 	return (struct in6_addr *) &n->primary_key;
2244 }
2245 
2246 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2247 			       struct mlxsw_sp_neigh_entry *neigh_entry,
2248 			       u64 *p_counter)
2249 {
2250 	if (!neigh_entry->counter_valid)
2251 		return -EINVAL;
2252 
2253 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2254 					 p_counter, NULL);
2255 }
2256 
2257 static struct mlxsw_sp_neigh_entry *
2258 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2259 			   u16 rif)
2260 {
2261 	struct mlxsw_sp_neigh_entry *neigh_entry;
2262 
2263 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2264 	if (!neigh_entry)
2265 		return NULL;
2266 
2267 	neigh_entry->key.n = n;
2268 	neigh_entry->rif = rif;
2269 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2270 
2271 	return neigh_entry;
2272 }
2273 
2274 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2275 {
2276 	kfree(neigh_entry);
2277 }
2278 
2279 static int
2280 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2281 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2282 {
2283 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2284 				      &neigh_entry->ht_node,
2285 				      mlxsw_sp_neigh_ht_params);
2286 }
2287 
2288 static void
2289 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2290 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2291 {
2292 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2293 			       &neigh_entry->ht_node,
2294 			       mlxsw_sp_neigh_ht_params);
2295 }
2296 
2297 static bool
2298 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2299 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2300 {
2301 	struct devlink *devlink;
2302 	const char *table_name;
2303 
2304 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2305 	case AF_INET:
2306 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2307 		break;
2308 	case AF_INET6:
2309 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2310 		break;
2311 	default:
2312 		WARN_ON(1);
2313 		return false;
2314 	}
2315 
2316 	devlink = priv_to_devlink(mlxsw_sp->core);
2317 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2318 }
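
/* Host-side sketch (hypothetical device address): the dpipe counters
 * checked above are enabled from user space via devlink, e.g.
 *
 *	devlink dpipe table set pci/0000:03:00.0 name mlxsw_host4 \
 *		counters_enabled true
 */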
2319 
2320 static void
2321 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2322 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2323 {
2324 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2325 		return;
2326 
2327 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2328 		return;
2329 
2330 	neigh_entry->counter_valid = true;
2331 }
2332 
2333 static void
2334 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2335 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2336 {
2337 	if (!neigh_entry->counter_valid)
2338 		return;
2339 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2340 				   neigh_entry->counter_index);
2341 	neigh_entry->counter_valid = false;
2342 }
2343 
2344 static struct mlxsw_sp_neigh_entry *
2345 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2346 {
2347 	struct mlxsw_sp_neigh_entry *neigh_entry;
2348 	struct mlxsw_sp_rif *rif;
2349 	int err;
2350 
2351 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2352 	if (!rif)
2353 		return ERR_PTR(-EINVAL);
2354 
2355 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2356 	if (!neigh_entry)
2357 		return ERR_PTR(-ENOMEM);
2358 
2359 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2360 	if (err)
2361 		goto err_neigh_entry_insert;
2362 
2363 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2364 	atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
2365 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2366 
2367 	return neigh_entry;
2368 
2369 err_neigh_entry_insert:
2370 	mlxsw_sp_neigh_entry_free(neigh_entry);
2371 	return ERR_PTR(err);
2372 }
2373 
2374 static void
2375 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2376 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2377 {
2378 	list_del(&neigh_entry->rif_list_node);
2379 	atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
2380 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2381 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2382 	mlxsw_sp_neigh_entry_free(neigh_entry);
2383 }
2384 
2385 static struct mlxsw_sp_neigh_entry *
2386 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2387 {
2388 	struct mlxsw_sp_neigh_key key;
2389 
2390 	key.n = n;
2391 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2392 				      &key, mlxsw_sp_neigh_ht_params);
2393 }
2394 
2395 static void
2396 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2397 {
2398 	unsigned long interval;
2399 
2400 #if IS_ENABLED(CONFIG_IPV6)
2401 	interval = min_t(unsigned long,
2402 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2403 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2404 #else
2405 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2406 #endif
2407 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2408 }
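
/* Worked example: with both tables at the kernel's default DELAY_PROBE_TIME
 * of 5 seconds, the computation above yields jiffies_to_msecs(5 * HZ), i.e.
 * a polling interval of 5000 ms.
 */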
2409 
2410 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2411 						   char *rauhtd_pl,
2412 						   int ent_index)
2413 {
2414 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2415 	struct net_device *dev;
2416 	struct neighbour *n;
2417 	__be32 dipn;
2418 	u32 dip;
2419 	u16 rif;
2420 
2421 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2422 
2423 	if (WARN_ON_ONCE(rif >= max_rifs))
2424 		return;
2425 	if (!mlxsw_sp->router->rifs[rif]) {
2426 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2427 		return;
2428 	}
2429 
2430 	dipn = htonl(dip);
2431 	dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2432 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2433 	if (!n)
2434 		return;
2435 
2436 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2437 	neigh_event_send(n, NULL);
2438 	neigh_release(n);
2439 }
2440 
2441 #if IS_ENABLED(CONFIG_IPV6)
2442 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2443 						   char *rauhtd_pl,
2444 						   int rec_index)
2445 {
2446 	struct net_device *dev;
2447 	struct neighbour *n;
2448 	struct in6_addr dip;
2449 	u16 rif;
2450 
2451 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2452 					 (char *) &dip);
2453 
2454 	if (!mlxsw_sp->router->rifs[rif]) {
2455 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2456 		return;
2457 	}
2458 
2459 	dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2460 	n = neigh_lookup(&nd_tbl, &dip, dev);
2461 	if (!n)
2462 		return;
2463 
2464 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2465 	neigh_event_send(n, NULL);
2466 	neigh_release(n);
2467 }
2468 #else
2469 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2470 						   char *rauhtd_pl,
2471 						   int rec_index)
2472 {
2473 }
2474 #endif
2475 
2476 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2477 						   char *rauhtd_pl,
2478 						   int rec_index)
2479 {
2480 	u8 num_entries;
2481 	int i;
2482 
2483 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2484 								rec_index);
2485 	/* Hardware starts counting at 0, so add 1. */
2486 	num_entries++;
2487 
2488 	/* Each record consists of several neighbour entries. */
2489 	for (i = 0; i < num_entries; i++) {
2490 		int ent_index;
2491 
2492 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2493 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2494 						       ent_index);
2495 	}
2496 
2497 }
2498 
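
/* Worked example for the entry indexing above, assuming
 * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC is 4 (see reg.h): the second entry
 * (i = 1) of the third record (rec_index = 2) sits at
 * ent_index = 2 * 4 + 1 = 9.
 */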
2499 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2500 						   char *rauhtd_pl,
2501 						   int rec_index)
2502 {
2503 	/* One record contains one entry. */
2504 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2505 					       rec_index);
2506 }
2507 
2508 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2509 					      char *rauhtd_pl, int rec_index)
2510 {
2511 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2512 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2513 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2514 						       rec_index);
2515 		break;
2516 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2517 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2518 						       rec_index);
2519 		break;
2520 	}
2521 }
2522 
2523 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2524 {
2525 	u8 num_rec, last_rec_index, num_entries;
2526 
2527 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2528 	last_rec_index = num_rec - 1;
2529 
2530 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2531 		return false;
2532 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2533 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2534 		return true;
2535 
2536 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2537 								last_rec_index);
2538 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2539 		return true;
2540 	return false;
2541 }
2542 
2543 static int
2544 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2545 				       char *rauhtd_pl,
2546 				       enum mlxsw_reg_rauhtd_type type)
2547 {
2548 	int i, num_rec;
2549 	int err;
2550 
2551 	/* Ensure the RIF we read from the device does not change mid-dump. */
2552 	mutex_lock(&mlxsw_sp->router->lock);
2553 	do {
2554 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2555 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2556 				      rauhtd_pl);
2557 		if (err) {
2558 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2559 			break;
2560 		}
2561 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2562 		for (i = 0; i < num_rec; i++)
2563 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2564 							  i);
2565 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2566 	mutex_unlock(&mlxsw_sp->router->lock);
2567 
2568 	return err;
2569 }
2570 
2571 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2572 {
2573 	enum mlxsw_reg_rauhtd_type type;
2574 	char *rauhtd_pl;
2575 	int err;
2576 
2577 	if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
2578 		return 0;
2579 
2580 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2581 	if (!rauhtd_pl)
2582 		return -ENOMEM;
2583 
2584 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2585 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2586 	if (err)
2587 		goto out;
2588 
2589 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2590 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2591 out:
2592 	kfree(rauhtd_pl);
2593 	return err;
2594 }
2595 
2596 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2597 {
2598 	struct mlxsw_sp_neigh_entry *neigh_entry;
2599 
2600 	mutex_lock(&mlxsw_sp->router->lock);
2601 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2602 			    nexthop_neighs_list_node)
2603 		/* If this neigh has nexthops, make the kernel think this neigh
2604 		 * is active regardless of the traffic.
2605 		 */
2606 		neigh_event_send(neigh_entry->key.n, NULL);
2607 	mutex_unlock(&mlxsw_sp->router->lock);
2608 }
2609 
2610 static void
2611 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2612 {
2613 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2614 
2615 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2616 			       msecs_to_jiffies(interval));
2617 }
2618 
2619 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2620 {
2621 	struct mlxsw_sp_router *router;
2622 	int err;
2623 
2624 	router = container_of(work, struct mlxsw_sp_router,
2625 			      neighs_update.dw.work);
2626 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2627 	if (err)
2628 		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
2629 
2630 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2631 
2632 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2633 }
2634 
2635 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2636 {
2637 	struct mlxsw_sp_neigh_entry *neigh_entry;
2638 	struct mlxsw_sp_router *router;
2639 
2640 	router = container_of(work, struct mlxsw_sp_router,
2641 			      nexthop_probe_dw.work);
2642 	/* Iterate over nexthop neighbours, find those that are unresolved and
2643 	 * send ARP on them. This solves the chicken-and-egg problem: a nexthop
2644 	 * is not offloaded until its neighbour is resolved, but the neighbour
2645 	 * would never be resolved if traffic only flows in HW via a different
2646 	 * nexthop.
2647 	 */
2648 	mutex_lock(&router->lock);
2649 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2650 			    nexthop_neighs_list_node)
2651 		if (!neigh_entry->connected)
2652 			neigh_event_send(neigh_entry->key.n, NULL);
2653 	mutex_unlock(&router->lock);
2654 
2655 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2656 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2657 }
2658 
2659 static void
2660 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2661 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2662 			      bool removing, bool dead);
2663 
2664 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2665 {
2666 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2667 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2668 }
2669 
2670 static int
2671 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2672 				struct mlxsw_sp_neigh_entry *neigh_entry,
2673 				enum mlxsw_reg_rauht_op op)
2674 {
2675 	struct neighbour *n = neigh_entry->key.n;
2676 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2677 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2678 
2679 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2680 			      dip);
2681 	if (neigh_entry->counter_valid)
2682 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2683 					     neigh_entry->counter_index);
2684 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2685 }
2686 
2687 static int
2688 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2689 				struct mlxsw_sp_neigh_entry *neigh_entry,
2690 				enum mlxsw_reg_rauht_op op)
2691 {
2692 	struct neighbour *n = neigh_entry->key.n;
2693 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2694 	const char *dip = n->primary_key;
2695 
2696 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2697 			      dip);
2698 	if (neigh_entry->counter_valid)
2699 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2700 					     neigh_entry->counter_index);
2701 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2702 }
2703 
2704 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2705 {
2706 	struct neighbour *n = neigh_entry->key.n;
2707 
2708 	/* Packets with a link-local destination address are trapped
2709 	 * after LPM lookup and never reach the neighbour table, so
2710 	 * there is no need to program such neighbours to the device.
2711 	 */
2712 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2713 	    IPV6_ADDR_LINKLOCAL)
2714 		return true;
2715 	return false;
2716 }
2717 
2718 static void
2719 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2720 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2721 			    bool adding)
2722 {
2723 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2724 	int err;
2725 
2726 	if (!adding && !neigh_entry->connected)
2727 		return;
2728 	neigh_entry->connected = adding;
2729 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2730 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2731 						      op);
2732 		if (err)
2733 			return;
2734 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2735 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2736 			return;
2737 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2738 						      op);
2739 		if (err)
2740 			return;
2741 	} else {
2742 		WARN_ON_ONCE(1);
2743 		return;
2744 	}
2745 
2746 	if (adding)
2747 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2748 	else
2749 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2750 }
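
/* User-visible effect (sketch): the NTF_OFFLOADED flag toggled above is
 * what makes "ip neigh show" print the entry with the "offload" keyword.
 */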
2751 
2752 void
2753 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2754 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2755 				    bool adding)
2756 {
2757 	if (adding)
2758 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2759 	else
2760 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2761 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2762 }
2763 
2764 struct mlxsw_sp_netevent_work {
2765 	struct work_struct work;
2766 	struct mlxsw_sp *mlxsw_sp;
2767 	struct neighbour *n;
2768 };
2769 
2770 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2771 {
2772 	struct mlxsw_sp_netevent_work *net_work =
2773 		container_of(work, struct mlxsw_sp_netevent_work, work);
2774 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2775 	struct mlxsw_sp_neigh_entry *neigh_entry;
2776 	struct neighbour *n = net_work->n;
2777 	unsigned char ha[ETH_ALEN];
2778 	bool entry_connected;
2779 	u8 nud_state, dead;
2780 
2781 	/* If these parameters are changed after we release the lock,
2782 	 * then we are guaranteed to receive another event letting us
2783 	 * know about it.
2784 	 */
2785 	read_lock_bh(&n->lock);
2786 	memcpy(ha, n->ha, ETH_ALEN);
2787 	nud_state = n->nud_state;
2788 	dead = n->dead;
2789 	read_unlock_bh(&n->lock);
2790 
2791 	mutex_lock(&mlxsw_sp->router->lock);
2792 	mlxsw_sp_span_respin(mlxsw_sp);
2793 
2794 	entry_connected = nud_state & NUD_VALID && !dead;
2795 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2796 	if (!entry_connected && !neigh_entry)
2797 		goto out;
2798 	if (!neigh_entry) {
2799 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2800 		if (IS_ERR(neigh_entry))
2801 			goto out;
2802 	}
2803 
2804 	if (neigh_entry->connected && entry_connected &&
2805 	    !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2806 		goto out;
2807 
2808 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2809 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2810 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2811 				      dead);
2812 
2813 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2814 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2815 
2816 out:
2817 	mutex_unlock(&mlxsw_sp->router->lock);
2818 	neigh_release(n);
2819 	kfree(net_work);
2820 }
2821 
2822 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2823 
2824 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2825 {
2826 	struct mlxsw_sp_netevent_work *net_work =
2827 		container_of(work, struct mlxsw_sp_netevent_work, work);
2828 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2829 
2830 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2831 	kfree(net_work);
2832 }
2833 
2834 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2835 
2836 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2837 {
2838 	struct mlxsw_sp_netevent_work *net_work =
2839 		container_of(work, struct mlxsw_sp_netevent_work, work);
2840 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2841 
2842 	__mlxsw_sp_router_init(mlxsw_sp);
2843 	kfree(net_work);
2844 }
2845 
2846 static int mlxsw_sp_router_schedule_work(struct net *net,
2847 					 struct mlxsw_sp_router *router,
2848 					 struct neighbour *n,
2849 					 void (*cb)(struct work_struct *))
2850 {
2851 	struct mlxsw_sp_netevent_work *net_work;
2852 
2853 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2854 		return NOTIFY_DONE;
2855 
2856 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2857 	if (!net_work)
2858 		return NOTIFY_BAD;
2859 
2860 	INIT_WORK(&net_work->work, cb);
2861 	net_work->mlxsw_sp = router->mlxsw_sp;
2862 	net_work->n = n;
2863 	mlxsw_core_schedule_work(&net_work->work);
2864 	return NOTIFY_DONE;
2865 }
2866 
2867 static bool mlxsw_sp_dev_lower_is_port(struct net_device *dev)
2868 {
2869 	struct mlxsw_sp_port *mlxsw_sp_port;
2870 
2871 	rcu_read_lock();
2872 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
2873 	rcu_read_unlock();
2874 	return !!mlxsw_sp_port;
2875 }
2876 
2877 static int mlxsw_sp_router_schedule_neigh_work(struct mlxsw_sp_router *router,
2878 					       struct neighbour *n)
2879 {
2880 	struct net *net;
2881 
2882 	net = neigh_parms_net(n->parms);
2883 
2884 	/* Take a reference to ensure the neighbour won't be destroyed until we
2885 	 * drop the reference in the delayed work.
2886 	 */
2887 	neigh_clone(n);
2888 	return mlxsw_sp_router_schedule_work(net, router, n,
2889 					     mlxsw_sp_router_neigh_event_work);
2890 }
2891 
2892 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2893 					  unsigned long event, void *ptr)
2894 {
2895 	struct mlxsw_sp_router *router;
2896 	unsigned long interval;
2897 	struct neigh_parms *p;
2898 	struct neighbour *n;
2899 
2900 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2901 
2902 	switch (event) {
2903 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2904 		p = ptr;
2905 
2906 		/* We don't care about changes in the default table. */
2907 		if (!p->dev || (p->tbl->family != AF_INET &&
2908 				p->tbl->family != AF_INET6))
2909 			return NOTIFY_DONE;
2910 
2911 		/* We are in atomic context and can't take the RTNL mutex,
2912 		 * so use the RCU variant to walk the device chain.
2913 		 */
2914 		if (!mlxsw_sp_dev_lower_is_port(p->dev))
2915 			return NOTIFY_DONE;
2916 
2917 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2918 		router->neighs_update.interval = interval;
2919 		break;
2920 	case NETEVENT_NEIGH_UPDATE:
2921 		n = ptr;
2922 
2923 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2924 			return NOTIFY_DONE;
2925 
2926 		if (!mlxsw_sp_dev_lower_is_port(n->dev))
2927 			return NOTIFY_DONE;
2928 
2929 		return mlxsw_sp_router_schedule_neigh_work(router, n);
2930 
2931 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2932 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2933 		return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2934 				mlxsw_sp_router_mp_hash_event_work);
2935 
2936 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2937 		return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2938 				mlxsw_sp_router_update_priority_work);
2939 	}
2940 
2941 	return NOTIFY_DONE;
2942 }
2943 
2944 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2945 {
2946 	int err;
2947 
2948 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2949 			      &mlxsw_sp_neigh_ht_params);
2950 	if (err)
2951 		return err;
2952 
2953 	/* Initialize the polling interval according to the default
2954 	 * table.
2955 	 */
2956 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2957 
2958 	/* Create the delayed works for neighbour activity update and nexthop probing. */
2959 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2960 			  mlxsw_sp_router_neighs_update_work);
2961 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2962 			  mlxsw_sp_router_probe_unresolved_nexthops);
2963 	atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
2964 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2965 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2966 	return 0;
2967 }
2968 
2969 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2970 {
2971 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2972 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2973 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2974 }
2975 
2976 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2977 					 struct mlxsw_sp_rif *rif)
2978 {
2979 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2980 
2981 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2982 				 rif_list_node) {
2983 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2984 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2985 	}
2986 }
2987 
2988 struct mlxsw_sp_neigh_rif_made_sync {
2989 	struct mlxsw_sp *mlxsw_sp;
2990 	struct mlxsw_sp_rif *rif;
2991 	int err;
2992 };
2993 
2994 static void mlxsw_sp_neigh_rif_made_sync_each(struct neighbour *n, void *data)
2995 {
2996 	struct mlxsw_sp_neigh_rif_made_sync *rms = data;
2997 	int rc;
2998 
2999 	if (rms->err)
3000 		return;
3001 	if (n->dev != mlxsw_sp_rif_dev(rms->rif))
3002 		return;
3003 	rc = mlxsw_sp_router_schedule_neigh_work(rms->mlxsw_sp->router, n);
3004 	if (rc != NOTIFY_DONE)
3005 		rms->err = -ENOMEM;
3006 }
3007 
3008 static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
3009 					struct mlxsw_sp_rif *rif)
3010 {
3011 	struct mlxsw_sp_neigh_rif_made_sync rms = {
3012 		.mlxsw_sp = mlxsw_sp,
3013 		.rif = rif,
3014 	};
3015 
3016 	neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
3017 	if (rms.err)
3018 		goto err_arp;
3019 
3020 #if IS_ENABLED(CONFIG_IPV6)
3021 	neigh_for_each(&nd_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
3022 #endif
3023 	if (rms.err)
3024 		goto err_nd;
3025 
3026 	return 0;
3027 
3028 err_nd:
3029 err_arp:
3030 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
3031 	return rms.err;
3032 }
3033 
3034 enum mlxsw_sp_nexthop_type {
3035 	MLXSW_SP_NEXTHOP_TYPE_ETH,
3036 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
3037 };
3038 
3039 enum mlxsw_sp_nexthop_action {
3040 	/* Nexthop forwards packets to an egress RIF */
3041 	MLXSW_SP_NEXTHOP_ACTION_FORWARD,
3042 	/* Nexthop discards packets */
3043 	MLXSW_SP_NEXTHOP_ACTION_DISCARD,
3044 	/* Nexthop traps packets */
3045 	MLXSW_SP_NEXTHOP_ACTION_TRAP,
3046 };
3047 
3048 struct mlxsw_sp_nexthop_key {
3049 	struct fib_nh *fib_nh;
3050 };
3051 
3052 struct mlxsw_sp_nexthop {
3053 	struct list_head neigh_list_node; /* member of neigh entry list */
3054 	struct list_head crif_list_node;
3055 	struct list_head router_list_node;
3056 	struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
3057 						   * this nexthop belongs to
3058 						   */
3059 	struct rhash_head ht_node;
3060 	struct neigh_table *neigh_tbl;
3061 	struct mlxsw_sp_nexthop_key key;
3062 	unsigned char gw_addr[sizeof(struct in6_addr)];
3063 	int ifindex;
3064 	int nh_weight;
3065 	int norm_nh_weight;
3066 	int num_adj_entries;
3067 	struct mlxsw_sp_crif *crif;
3068 	u8 should_offload:1, /* set indicates this nexthop should be written
3069 			      * to the adjacency table.
3070 			      */
3071 	   offloaded:1, /* set indicates this nexthop was written to the
3072 			 * adjacency table.
3073 			 */
3074 	   update:1; /* set indicates this nexthop should be updated in the
3075 		      * adjacency table (e.g., its MAC changed).
3076 		      */
3077 	enum mlxsw_sp_nexthop_action action;
3078 	enum mlxsw_sp_nexthop_type type;
3079 	union {
3080 		struct mlxsw_sp_neigh_entry *neigh_entry;
3081 		struct mlxsw_sp_ipip_entry *ipip_entry;
3082 	};
3083 	unsigned int counter_index;
3084 	bool counter_valid;
3085 };
3086 
3087 static struct net_device *
3088 mlxsw_sp_nexthop_dev(const struct mlxsw_sp_nexthop *nh)
3089 {
3090 	if (!nh->crif)
3091 		return NULL;
3092 	return nh->crif->key.dev;
3093 }
3094 
3095 enum mlxsw_sp_nexthop_group_type {
3096 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
3097 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
3098 	MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
3099 };
3100 
3101 struct mlxsw_sp_nexthop_group_info {
3102 	struct mlxsw_sp_nexthop_group *nh_grp;
3103 	u32 adj_index;
3104 	u16 ecmp_size;
3105 	u16 count;
3106 	int sum_norm_weight;
3107 	u8 adj_index_valid:1,
3108 	   gateway:1, /* routes using the group use a gateway */
3109 	   is_resilient:1;
3110 	struct list_head list; /* member in nh_res_grp_list */
3111 	struct mlxsw_sp_nexthop nexthops[] __counted_by(count);
3112 };
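
/* Allocation sketch for the flexible array above (a pattern, not the exact
 * call site):
 *
 *	nhgi = kzalloc(struct_size(nhgi, nexthops, nh_count), GFP_KERNEL);
 *	nhgi->count = nh_count;
 *
 * __counted_by(count) then lets FORTIFY/UBSAN bounds-check nexthops[]
 * accesses against the runtime value of 'count'.
 */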
3113 
3114 static struct mlxsw_sp_rif *
3115 mlxsw_sp_nhgi_rif(const struct mlxsw_sp_nexthop_group_info *nhgi)
3116 {
3117 	struct mlxsw_sp_crif *crif = nhgi->nexthops[0].crif;
3118 
3119 	if (!crif)
3120 		return NULL;
3121 	return crif->rif;
3122 }
3123 
3124 struct mlxsw_sp_nexthop_group_vr_key {
3125 	u16 vr_id;
3126 	enum mlxsw_sp_l3proto proto;
3127 };
3128 
3129 struct mlxsw_sp_nexthop_group_vr_entry {
3130 	struct list_head list; /* member in vr_list */
3131 	struct rhash_head ht_node; /* member in vr_ht */
3132 	refcount_t ref_count;
3133 	struct mlxsw_sp_nexthop_group_vr_key key;
3134 };
3135 
3136 struct mlxsw_sp_nexthop_group {
3137 	struct rhash_head ht_node;
3138 	struct list_head fib_list; /* list of fib entries that use this group */
3139 	union {
3140 		struct {
3141 			struct fib_info *fi;
3142 		} ipv4;
3143 		struct {
3144 			u32 id;
3145 		} obj;
3146 	};
3147 	struct mlxsw_sp_nexthop_group_info *nhgi;
3148 	struct list_head vr_list;
3149 	struct rhashtable vr_ht;
3150 	enum mlxsw_sp_nexthop_group_type type;
3151 	bool can_destroy;
3152 };
3153 
3154 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
3155 				    struct mlxsw_sp_nexthop *nh)
3156 {
3157 	struct devlink *devlink;
3158 
3159 	devlink = priv_to_devlink(mlxsw_sp->core);
3160 	if (!devlink_dpipe_table_counter_enabled(devlink,
3161 						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
3162 		return;
3163 
3164 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
3165 		return;
3166 
3167 	nh->counter_valid = true;
3168 }
3169 
3170 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
3171 				   struct mlxsw_sp_nexthop *nh)
3172 {
3173 	if (!nh->counter_valid)
3174 		return;
3175 	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
3176 	nh->counter_valid = false;
3177 }
3178 
3179 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3180 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3181 {
3182 	if (!nh->counter_valid)
3183 		return -EINVAL;
3184 
3185 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
3186 					 p_counter, NULL);
3187 }
3188 
3189 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3190 					       struct mlxsw_sp_nexthop *nh)
3191 {
3192 	if (!nh) {
3193 		if (list_empty(&router->nexthop_list))
3194 			return NULL;
3195 		else
3196 			return list_first_entry(&router->nexthop_list,
3197 						typeof(*nh), router_list_node);
3198 	}
3199 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3200 		return NULL;
3201 	return list_next_entry(nh, router_list_node);
3202 }
3203 
3204 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3205 {
3206 	return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3207 }
3208 
3209 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3210 {
3211 	if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3212 	    !mlxsw_sp_nexthop_is_forward(nh))
3213 		return NULL;
3214 	return nh->neigh_entry->ha;
3215 }
3216 
3217 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3218 			     u32 *p_adj_size, u32 *p_adj_hash_index)
3219 {
3220 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3221 	u32 adj_hash_index = 0;
3222 	int i;
3223 
3224 	if (!nh->offloaded || !nhgi->adj_index_valid)
3225 		return -EINVAL;
3226 
3227 	*p_adj_index = nhgi->adj_index;
3228 	*p_adj_size = nhgi->ecmp_size;
3229 
3230 	for (i = 0; i < nhgi->count; i++) {
3231 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3232 
3233 		if (nh_iter == nh)
3234 			break;
3235 		if (nh_iter->offloaded)
3236 			adj_hash_index += nh_iter->num_adj_entries;
3237 	}
3238 
3239 	*p_adj_hash_index = adj_hash_index;
3240 	return 0;
3241 }
3242 
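
/* Worked example (hypothetical group): a group at adj_index 1000 whose
 * three offloaded nexthops occupy 2, 1 and 2 adjacency entries. For the
 * third nexthop the loop above yields adj_hash_index = 2 + 1 = 3, i.e. its
 * entries live at indexes 1003..1004 of the ecmp_size-long block.
 */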
3243 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3244 {
3245 	if (WARN_ON(!nh->crif))
3246 		return NULL;
3247 	return nh->crif->rif;
3248 }
3249 
3250 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3251 {
3252 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3253 	int i;
3254 
3255 	for (i = 0; i < nhgi->count; i++) {
3256 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3257 
3258 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3259 			return true;
3260 	}
3261 	return false;
3262 }
3263 
3264 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3265 	.key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3266 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3267 	.key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3268 	.automatic_shrinking = true,
3269 };
3270 
3271 static struct mlxsw_sp_nexthop_group_vr_entry *
3272 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3273 				       const struct mlxsw_sp_fib *fib)
3274 {
3275 	struct mlxsw_sp_nexthop_group_vr_key key;
3276 
3277 	memset(&key, 0, sizeof(key));
3278 	key.vr_id = fib->vr->id;
3279 	key.proto = fib->proto;
3280 	return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3281 				      mlxsw_sp_nexthop_group_vr_ht_params);
3282 }
3283 
3284 static int
3285 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3286 				       const struct mlxsw_sp_fib *fib)
3287 {
3288 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3289 	int err;
3290 
3291 	vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3292 	if (!vr_entry)
3293 		return -ENOMEM;
3294 
3295 	vr_entry->key.vr_id = fib->vr->id;
3296 	vr_entry->key.proto = fib->proto;
3297 	refcount_set(&vr_entry->ref_count, 1);
3298 
3299 	err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3300 				     mlxsw_sp_nexthop_group_vr_ht_params);
3301 	if (err)
3302 		goto err_hashtable_insert;
3303 
3304 	list_add(&vr_entry->list, &nh_grp->vr_list);
3305 
3306 	return 0;
3307 
3308 err_hashtable_insert:
3309 	kfree(vr_entry);
3310 	return err;
3311 }
3312 
3313 static void
3314 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3315 					struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3316 {
3317 	list_del(&vr_entry->list);
3318 	rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3319 			       mlxsw_sp_nexthop_group_vr_ht_params);
3320 	kfree(vr_entry);
3321 }
3322 
3323 static int
3324 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3325 			       const struct mlxsw_sp_fib *fib)
3326 {
3327 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3328 
3329 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3330 	if (vr_entry) {
3331 		refcount_inc(&vr_entry->ref_count);
3332 		return 0;
3333 	}
3334 
3335 	return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3336 }
3337 
3338 static void
3339 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3340 				 const struct mlxsw_sp_fib *fib)
3341 {
3342 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3343 
3344 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3345 	if (WARN_ON_ONCE(!vr_entry))
3346 		return;
3347 
3348 	if (!refcount_dec_and_test(&vr_entry->ref_count))
3349 		return;
3350 
3351 	mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3352 }
3353 
3354 struct mlxsw_sp_nexthop_group_cmp_arg {
3355 	enum mlxsw_sp_nexthop_group_type type;
3356 	union {
3357 		struct fib_info *fi;
3358 		struct mlxsw_sp_fib6_entry *fib6_entry;
3359 		u32 id;
3360 	};
3361 };
3362 
3363 static bool
3364 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3365 				    const struct in6_addr *gw, int ifindex,
3366 				    int weight)
3367 {
3368 	int i;
3369 
3370 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3371 		const struct mlxsw_sp_nexthop *nh;
3372 
3373 		nh = &nh_grp->nhgi->nexthops[i];
3374 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3375 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3376 			return true;
3377 	}
3378 
3379 	return false;
3380 }
3381 
3382 static bool
3383 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3384 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
3385 {
3386 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3387 
3388 	if (nh_grp->nhgi->count != fib6_entry->nrt6)
3389 		return false;
3390 
3391 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3392 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3393 		struct in6_addr *gw;
3394 		int ifindex, weight;
3395 
3396 		ifindex = fib6_nh->fib_nh_dev->ifindex;
3397 		weight = fib6_nh->fib_nh_weight;
3398 		gw = &fib6_nh->fib_nh_gw6;
3399 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3400 							 weight))
3401 			return false;
3402 	}
3403 
3404 	return true;
3405 }
3406 
3407 static int
3408 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3409 {
3410 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3411 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3412 
3413 	if (nh_grp->type != cmp_arg->type)
3414 		return 1;
3415 
3416 	switch (cmp_arg->type) {
3417 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3418 		return cmp_arg->fi != nh_grp->ipv4.fi;
3419 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3420 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3421 						    cmp_arg->fib6_entry);
3422 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3423 		return cmp_arg->id != nh_grp->obj.id;
3424 	default:
3425 		WARN_ON(1);
3426 		return 1;
3427 	}
3428 }
3429 
3430 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3431 {
3432 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
3433 	const struct mlxsw_sp_nexthop *nh;
3434 	struct fib_info *fi;
3435 	unsigned int val;
3436 	int i;
3437 
3438 	switch (nh_grp->type) {
3439 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3440 		fi = nh_grp->ipv4.fi;
3441 		return jhash(&fi, sizeof(fi), seed);
3442 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3443 		val = nh_grp->nhgi->count;
3444 		for (i = 0; i < nh_grp->nhgi->count; i++) {
3445 			nh = &nh_grp->nhgi->nexthops[i];
3446 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3447 			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3448 		}
3449 		return jhash(&val, sizeof(val), seed);
3450 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3451 		return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3452 	default:
3453 		WARN_ON(1);
3454 		return 0;
3455 	}
3456 }
3457 
3458 static u32
3459 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3460 {
3461 	unsigned int val = fib6_entry->nrt6;
3462 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3463 
3464 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3465 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3466 		struct net_device *dev = fib6_nh->fib_nh_dev;
3467 		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3468 
3469 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3470 		val ^= jhash(gw, sizeof(*gw), seed);
3471 	}
3472 
3473 	return jhash(&val, sizeof(val), seed);
3474 }
3475 
3476 static u32
3477 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3478 {
3479 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3480 
3481 	switch (cmp_arg->type) {
3482 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3483 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3484 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3485 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3486 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3487 		return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3488 	default:
3489 		WARN_ON(1);
3490 		return 0;
3491 	}
3492 }
3493 
3494 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3495 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3496 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
3497 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
3498 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
3499 };
3500 
3501 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3502 					 struct mlxsw_sp_nexthop_group *nh_grp)
3503 {
3504 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3505 	    !nh_grp->nhgi->gateway)
3506 		return 0;
3507 
3508 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3509 				      &nh_grp->ht_node,
3510 				      mlxsw_sp_nexthop_group_ht_params);
3511 }
3512 
3513 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3514 					  struct mlxsw_sp_nexthop_group *nh_grp)
3515 {
3516 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3517 	    !nh_grp->nhgi->gateway)
3518 		return;
3519 
3520 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3521 			       &nh_grp->ht_node,
3522 			       mlxsw_sp_nexthop_group_ht_params);
3523 }
3524 
3525 static struct mlxsw_sp_nexthop_group *
3526 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3527 			       struct fib_info *fi)
3528 {
3529 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3530 
3531 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3532 	cmp_arg.fi = fi;
3533 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3534 				      &cmp_arg,
3535 				      mlxsw_sp_nexthop_group_ht_params);
3536 }
3537 
3538 static struct mlxsw_sp_nexthop_group *
3539 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3540 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3541 {
3542 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3543 
3544 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3545 	cmp_arg.fib6_entry = fib6_entry;
3546 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3547 				      &cmp_arg,
3548 				      mlxsw_sp_nexthop_group_ht_params);
3549 }
3550 
3551 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3552 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3553 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3554 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3555 };
3556 
3557 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3558 				   struct mlxsw_sp_nexthop *nh)
3559 {
3560 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3561 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3562 }
3563 
3564 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3565 				    struct mlxsw_sp_nexthop *nh)
3566 {
3567 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3568 			       mlxsw_sp_nexthop_ht_params);
3569 }
3570 
3571 static struct mlxsw_sp_nexthop *
3572 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3573 			struct mlxsw_sp_nexthop_key key)
3574 {
3575 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3576 				      mlxsw_sp_nexthop_ht_params);
3577 }
3578 
3579 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3580 					     enum mlxsw_sp_l3proto proto,
3581 					     u16 vr_id,
3582 					     u32 adj_index, u16 ecmp_size,
3583 					     u32 new_adj_index,
3584 					     u16 new_ecmp_size)
3585 {
3586 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3587 
3588 	mlxsw_reg_raleu_pack(raleu_pl,
3589 			     (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3590 			     adj_index, ecmp_size, new_adj_index,
3591 			     new_ecmp_size);
3592 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3593 }
3594 
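/* Re-point all virtual routers that use this nexthop group from the old
 * adjacency index and ECMP size to the new ones. On failure, walk back over
 * the already-updated VRs and restore the old index and size.
 */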
3595 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3596 					  struct mlxsw_sp_nexthop_group *nh_grp,
3597 					  u32 old_adj_index, u16 old_ecmp_size)
3598 {
3599 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3600 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3601 	int err;
3602 
3603 	list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3604 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3605 							vr_entry->key.proto,
3606 							vr_entry->key.vr_id,
3607 							old_adj_index,
3608 							old_ecmp_size,
3609 							nhgi->adj_index,
3610 							nhgi->ecmp_size);
3611 		if (err)
3612 			goto err_mass_update_vr;
3613 	}
3614 	return 0;
3615 
3616 err_mass_update_vr:
3617 	list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3618 		mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3619 						  vr_entry->key.vr_id,
3620 						  nhgi->adj_index,
3621 						  nhgi->ecmp_size,
3622 						  old_adj_index, old_ecmp_size);
3623 	return err;
3624 }
3625 
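/* Write a single Ethernet adjacency entry for the nexthop via the RATR
 * register. Forwarding entries carry the neighbour's MAC address; discard
 * and trap entries are written with the corresponding trap action instead.
 * Nexthops without a RIF fall back to the loopback RIF, since the entry
 * must reference a valid RIF either way.
 */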
3626 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3627 					 u32 adj_index,
3628 					 struct mlxsw_sp_nexthop *nh,
3629 					 bool force, char *ratr_pl)
3630 {
3631 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3632 	struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh);
3633 	enum mlxsw_reg_ratr_op op;
3634 	u16 rif_index;
3635 
3636 	rif_index = rif ? rif->rif_index :
3637 			  mlxsw_sp->router->lb_crif->rif->rif_index;
3638 	op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3639 		     MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3640 	mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3641 			    adj_index, rif_index);
3642 	switch (nh->action) {
3643 	case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3644 		mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3645 		break;
3646 	case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3647 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3648 					       MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3649 		break;
3650 	case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3651 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3652 					       MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3653 		mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3654 		break;
3655 	default:
3656 		WARN_ON_ONCE(1);
3657 		return -EINVAL;
3658 	}
3659 	if (nh->counter_valid)
3660 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3661 	else
3662 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3663 
3664 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3665 }
3666 
3667 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3668 				struct mlxsw_sp_nexthop *nh, bool force,
3669 				char *ratr_pl)
3670 {
3671 	int i;
3672 
3673 	for (i = 0; i < nh->num_adj_entries; i++) {
3674 		int err;
3675 
3676 		err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3677 						    nh, force, ratr_pl);
3678 		if (err)
3679 			return err;
3680 	}
3681 
3682 	return 0;
3683 }
3684 
3685 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3686 					  u32 adj_index,
3687 					  struct mlxsw_sp_nexthop *nh,
3688 					  bool force, char *ratr_pl)
3689 {
3690 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3691 
3692 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3693 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3694 					force, ratr_pl);
3695 }
3696 
3697 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3698 					u32 adj_index,
3699 					struct mlxsw_sp_nexthop *nh, bool force,
3700 					char *ratr_pl)
3701 {
3702 	int i;
3703 
3704 	for (i = 0; i < nh->num_adj_entries; i++) {
3705 		int err;
3706 
3707 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3708 						     nh, force, ratr_pl);
3709 		if (err)
3710 			return err;
3711 	}
3712 
3713 	return 0;
3714 }
3715 
3716 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3717 				   struct mlxsw_sp_nexthop *nh, bool force,
3718 				   char *ratr_pl)
3719 {
3720 	/* When action is discard or trap, the nexthop must be
3721 	 * programmed as an Ethernet nexthop.
3722 	 */
3723 	if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3724 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3725 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3726 		return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3727 						   force, ratr_pl);
3728 	else
3729 		return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3730 						    force, ratr_pl);
3731 }
3732 
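/* Write the adjacency entries of all nexthops in the group that should be
 * offloaded, starting at the group's base adjacency index. Each nexthop
 * occupies num_adj_entries consecutive entries. Entries are only rewritten
 * when the nexthop is marked for update or when the group was reallocated.
 */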
3733 static int
3734 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3735 			      struct mlxsw_sp_nexthop_group_info *nhgi,
3736 			      bool reallocate)
3737 {
3738 	char ratr_pl[MLXSW_REG_RATR_LEN];
3739 	u32 adj_index = nhgi->adj_index; /* base */
3740 	struct mlxsw_sp_nexthop *nh;
3741 	int i;
3742 
3743 	for (i = 0; i < nhgi->count; i++) {
3744 		nh = &nhgi->nexthops[i];
3745 
3746 		if (!nh->should_offload) {
3747 			nh->offloaded = 0;
3748 			continue;
3749 		}
3750 
3751 		if (nh->update || reallocate) {
3752 			int err;
3753 
3754 			err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3755 						      true, ratr_pl);
3756 			if (err)
3757 				return err;
3758 			nh->update = 0;
3759 			nh->offloaded = 1;
3760 		}
3761 		adj_index += nh->num_adj_entries;
3762 	}
3763 	return 0;
3764 }
3765 
3766 static int
3767 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3768 				    struct mlxsw_sp_nexthop_group *nh_grp)
3769 {
3770 	struct mlxsw_sp_fib_entry *fib_entry;
3771 	int err;
3772 
3773 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3774 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3775 		if (err)
3776 			return err;
3777 	}
3778 	return 0;
3779 }
3780 
3781 struct mlxsw_sp_adj_grp_size_range {
3782 	u16 start; /* Inclusive */
3783 	u16 end; /* Inclusive */
3784 };
3785 
3786 /* Ordered by range start value */
3787 static const struct mlxsw_sp_adj_grp_size_range
3788 mlxsw_sp1_adj_grp_size_ranges[] = {
3789 	{ .start = 1, .end = 64 },
3790 	{ .start = 512, .end = 512 },
3791 	{ .start = 1024, .end = 1024 },
3792 	{ .start = 2048, .end = 2048 },
3793 	{ .start = 4096, .end = 4096 },
3794 };
3795 
3796 /* Ordered by range start value */
3797 static const struct mlxsw_sp_adj_grp_size_range
3798 mlxsw_sp2_adj_grp_size_ranges[] = {
3799 	{ .start = 1, .end = 128 },
3800 	{ .start = 256, .end = 256 },
3801 	{ .start = 512, .end = 512 },
3802 	{ .start = 1024, .end = 1024 },
3803 	{ .start = 2048, .end = 2048 },
3804 	{ .start = 4096, .end = 4096 },
3805 };
3806 
3807 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3808 					   u16 *p_adj_grp_size)
3809 {
3810 	int i;
3811 
3812 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3813 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3814 
3815 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3816 
3817 		if (*p_adj_grp_size >= size_range->start &&
3818 		    *p_adj_grp_size <= size_range->end)
3819 			return;
3820 
3821 		if (*p_adj_grp_size <= size_range->end) {
3822 			*p_adj_grp_size = size_range->end;
3823 			return;
3824 		}
3825 	}
3826 }
3827 
3828 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3829 					     u16 *p_adj_grp_size,
3830 					     unsigned int alloc_size)
3831 {
3832 	int i;
3833 
3834 	for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3835 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3836 
3837 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3838 
3839 		if (alloc_size >= size_range->end) {
3840 			*p_adj_grp_size = size_range->end;
3841 			return;
3842 		}
3843 	}
3844 }
3845 
3846 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3847 				     u16 *p_adj_grp_size)
3848 {
3849 	unsigned int alloc_size;
3850 	int err;
3851 
3852 	/* Round up the requested group size to the next size supported
3853 	 * by the device and make sure the request can be satisfied.
3854 	 */
3855 	mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3856 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3857 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3858 					      *p_adj_grp_size, &alloc_size);
3859 	if (err)
3860 		return err;
3861 	/* It is possible the allocation results in more allocated
3862 	 * entries than requested. Try to use as much of them as
3863 	 * possible.
3864 	 */
3865 	mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3866 
3867 	return 0;
3868 }
3869 
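/* Normalize the weights of the offloaded nexthops by dividing them by their
 * greatest common divisor. The sum of the normalized weights is then the
 * smallest ECMP size that preserves the requested weight ratios.
 */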
3870 static void
3871 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3872 {
3873 	int i, g = 0, sum_norm_weight = 0;
3874 	struct mlxsw_sp_nexthop *nh;
3875 
3876 	for (i = 0; i < nhgi->count; i++) {
3877 		nh = &nhgi->nexthops[i];
3878 
3879 		if (!nh->should_offload)
3880 			continue;
3881 		if (g > 0)
3882 			g = gcd(nh->nh_weight, g);
3883 		else
3884 			g = nh->nh_weight;
3885 	}
3886 
3887 	for (i = 0; i < nhgi->count; i++) {
3888 		nh = &nhgi->nexthops[i];
3889 
3890 		if (!nh->should_offload)
3891 			continue;
3892 		nh->norm_nh_weight = nh->nh_weight / g;
3893 		sum_norm_weight += nh->norm_nh_weight;
3894 	}
3895 
3896 	nhgi->sum_norm_weight = sum_norm_weight;
3897 }
3898 
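/* Distribute the group's ecmp_size adjacency entries among the offloaded
 * nexthops in proportion to their normalized weights. Rounding is done on
 * the cumulative weight so that the per-nexthop entry counts add up to
 * exactly ecmp_size.
 */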
3899 static void
3900 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3901 {
3902 	int i, weight = 0, lower_bound = 0;
3903 	int total = nhgi->sum_norm_weight;
3904 	u16 ecmp_size = nhgi->ecmp_size;
3905 
3906 	for (i = 0; i < nhgi->count; i++) {
3907 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3908 		int upper_bound;
3909 
3910 		if (!nh->should_offload)
3911 			continue;
3912 		weight += nh->norm_nh_weight;
3913 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3914 		nh->num_adj_entries = upper_bound - lower_bound;
3915 		lower_bound = upper_bound;
3916 	}
3917 }
3918 
3919 static struct mlxsw_sp_nexthop *
3920 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3921 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3922 
3923 static void
3924 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3925 					struct mlxsw_sp_nexthop_group *nh_grp)
3926 {
3927 	int i;
3928 
3929 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3930 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3931 
3932 		if (nh->offloaded)
3933 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3934 		else
3935 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3936 	}
3937 }
3938 
3939 static void
3940 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3941 					  struct mlxsw_sp_fib6_entry *fib6_entry)
3942 {
3943 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3944 
3945 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3946 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3947 		struct mlxsw_sp_nexthop *nh;
3948 
3949 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3950 		if (nh && nh->offloaded)
3951 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3952 		else
3953 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3954 	}
3955 }
3956 
3957 static void
3958 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3959 					struct mlxsw_sp_nexthop_group *nh_grp)
3960 {
3961 	struct mlxsw_sp_fib6_entry *fib6_entry;
3962 
3963 	/* Unfortunately, in IPv6 the route and the nexthop are described by
3964 	 * the same struct, so we need to iterate over all the routes using the
3965 	 * nexthop group and set / clear the offload indication for them.
3966 	 */
3967 	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3968 			    common.nexthop_group_node)
3969 		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3970 }
3971 
3972 static void
3973 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3974 					const struct mlxsw_sp_nexthop *nh,
3975 					u16 bucket_index)
3976 {
3977 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3978 	bool offload = false, trap = false;
3979 
3980 	if (nh->offloaded) {
3981 		if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3982 			trap = true;
3983 		else
3984 			offload = true;
3985 	}
3986 	nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3987 				    bucket_index, offload, trap);
3988 }
3989 
3990 static void
3991 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3992 					   struct mlxsw_sp_nexthop_group *nh_grp)
3993 {
3994 	int i;
3995 
3996 	/* Do not update the flags if the nexthop group is being destroyed
3997 	 * since:
3998 	 * 1. The nexthop object is being deleted, in which case the flags are
3999 	 * irrelevant.
4000 	 * 2. The nexthop group was replaced by a newer group, in which case
4001 	 * the flags of the nexthop object were already updated based on the
4002 	 * new group.
4003 	 */
4004 	if (nh_grp->can_destroy)
4005 		return;
4006 
4007 	nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4008 			     nh_grp->nhgi->adj_index_valid, false);
4009 
4010 	/* Update flags of individual nexthop buckets in case of a resilient
4011 	 * nexthop group.
4012 	 */
4013 	if (!nh_grp->nhgi->is_resilient)
4014 		return;
4015 
4016 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4017 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
4018 
4019 		mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
4020 	}
4021 }
4022 
4023 static void
4024 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4025 				       struct mlxsw_sp_nexthop_group *nh_grp)
4026 {
4027 	switch (nh_grp->type) {
4028 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
4029 		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
4030 		break;
4031 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
4032 		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
4033 		break;
4034 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
4035 		mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
4036 		break;
4037 	}
4038 }
4039 
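/* Synchronize the group's adjacency entries with its current state. Groups
 * without gateway nexthops only have their FIB entries updated. Otherwise,
 * entries are updated in place when the set of offloaded nexthops did not
 * change; if it did, a new adjacency group is allocated, the nexthops are
 * rebalanced into it and the using routes and virtual routers are
 * re-pointed at it. On any failure, traffic of the group is trapped to the
 * CPU.
 */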
4040 static int
4041 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
4042 			       struct mlxsw_sp_nexthop_group *nh_grp)
4043 {
4044 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4045 	u16 ecmp_size, old_ecmp_size;
4046 	struct mlxsw_sp_nexthop *nh;
4047 	bool offload_change = false;
4048 	u32 adj_index;
4049 	bool old_adj_index_valid;
4050 	u32 old_adj_index;
4051 	int i, err2, err;
4052 
4053 	if (!nhgi->gateway)
4054 		return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4055 
4056 	for (i = 0; i < nhgi->count; i++) {
4057 		nh = &nhgi->nexthops[i];
4058 
4059 		if (nh->should_offload != nh->offloaded) {
4060 			offload_change = true;
4061 			if (nh->should_offload)
4062 				nh->update = 1;
4063 		}
4064 	}
4065 	if (!offload_change) {
4066 		/* Nothing was added or removed, so no need to reallocate. Just
4067 		 * update MAC on existing adjacency indexes.
4068 		 */
4069 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
4070 		if (err) {
4071 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4072 			goto set_trap;
4073 		}
4074 		/* Flags of individual nexthop buckets might need to be
4075 		 * updated.
4076 		 */
4077 		mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4078 		return 0;
4079 	}
4080 	mlxsw_sp_nexthop_group_normalize(nhgi);
4081 	if (!nhgi->sum_norm_weight) {
4082 		/* No neigh of this group is connected, so we just set
4083 		 * the trap and let everything flow through the kernel.
4084 		 */
4085 		err = 0;
4086 		goto set_trap;
4087 	}
4088 
4089 	ecmp_size = nhgi->sum_norm_weight;
4090 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
4091 	if (err)
4092 		/* No valid allocation size available. */
4093 		goto set_trap;
4094 
4095 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4096 				  ecmp_size, &adj_index);
4097 	if (err) {
4098 		/* We ran out of KVD linear space, just set the
4099 		 * trap and let everything flow through the kernel.
4100 		 */
4101 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
4102 		goto set_trap;
4103 	}
4104 	old_adj_index_valid = nhgi->adj_index_valid;
4105 	old_adj_index = nhgi->adj_index;
4106 	old_ecmp_size = nhgi->ecmp_size;
4107 	nhgi->adj_index_valid = 1;
4108 	nhgi->adj_index = adj_index;
4109 	nhgi->ecmp_size = ecmp_size;
4110 	mlxsw_sp_nexthop_group_rebalance(nhgi);
4111 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
4112 	if (err) {
4113 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4114 		goto set_trap;
4115 	}
4116 
4117 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4118 
4119 	if (!old_adj_index_valid) {
4120 		/* The trap was set for fib entries, so we have to call
4121 		 * fib entry update to unset it and use the adjacency index.
4122 		 */
4123 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4124 		if (err) {
4125 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
4126 			goto set_trap;
4127 		}
4128 		return 0;
4129 	}
4130 
4131 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
4132 					     old_adj_index, old_ecmp_size);
4133 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4134 			   old_ecmp_size, old_adj_index);
4135 	if (err) {
4136 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
4137 		goto set_trap;
4138 	}
4139 
4140 	return 0;
4141 
4142 set_trap:
4143 	old_adj_index_valid = nhgi->adj_index_valid;
4144 	nhgi->adj_index_valid = 0;
4145 	for (i = 0; i < nhgi->count; i++) {
4146 		nh = &nhgi->nexthops[i];
4147 		nh->offloaded = 0;
4148 	}
4149 	err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4150 	if (err2)
4151 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
4152 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4153 	if (old_adj_index_valid)
4154 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4155 				   nhgi->ecmp_size, nhgi->adj_index);
4156 	return err;
4157 }
4158 
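/* Decide how the nexthop should be programmed when its neighbour appears or
 * goes away: forward when the neighbour is valid, trap in resilient groups
 * (whose buckets must stay populated), and stop offloading otherwise. The
 * nexthop is marked for update in all cases.
 */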
4159 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4160 					    bool removing)
4161 {
4162 	if (!removing) {
4163 		nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4164 		nh->should_offload = 1;
4165 	} else if (nh->nhgi->is_resilient) {
4166 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4167 		nh->should_offload = 1;
4168 	} else {
4169 		nh->should_offload = 0;
4170 	}
4171 	nh->update = 1;
4172 }
4173 
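/* The kernel declared the neighbour entry dead while nexthops still use it.
 * Look up or create a live neighbour for the same gateway and device, swap
 * it into the neigh entry and re-point all dependent nexthops at it.
 */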
4174 static int
4175 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4176 				    struct mlxsw_sp_neigh_entry *neigh_entry)
4177 {
4178 	struct neighbour *n, *old_n = neigh_entry->key.n;
4179 	struct mlxsw_sp_nexthop *nh;
4180 	struct net_device *dev;
4181 	bool entry_connected;
4182 	u8 nud_state, dead;
4183 	int err;
4184 
4185 	nh = list_first_entry(&neigh_entry->nexthop_list,
4186 			      struct mlxsw_sp_nexthop, neigh_list_node);
4187 	dev = mlxsw_sp_nexthop_dev(nh);
4188 
4189 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4190 	if (!n) {
4191 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4192 		if (IS_ERR(n))
4193 			return PTR_ERR(n);
4194 		neigh_event_send(n, NULL);
4195 	}
4196 
4197 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4198 	neigh_entry->key.n = n;
4199 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4200 	if (err)
4201 		goto err_neigh_entry_insert;
4202 
4203 	read_lock_bh(&n->lock);
4204 	nud_state = n->nud_state;
4205 	dead = n->dead;
4206 	read_unlock_bh(&n->lock);
4207 	entry_connected = nud_state & NUD_VALID && !dead;
4208 
4209 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4210 			    neigh_list_node) {
4211 		neigh_release(old_n);
4212 		neigh_clone(n);
4213 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4214 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4215 	}
4216 
4217 	neigh_release(n);
4218 
4219 	return 0;
4220 
4221 err_neigh_entry_insert:
4222 	neigh_entry->key.n = old_n;
4223 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4224 	neigh_release(n);
4225 	return err;
4226 }
4227 
4228 static void
4229 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4230 			      struct mlxsw_sp_neigh_entry *neigh_entry,
4231 			      bool removing, bool dead)
4232 {
4233 	struct mlxsw_sp_nexthop *nh;
4234 
4235 	if (list_empty(&neigh_entry->nexthop_list))
4236 		return;
4237 
4238 	if (dead) {
4239 		int err;
4240 
4241 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4242 							  neigh_entry);
4243 		if (err)
4244 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4245 		return;
4246 	}
4247 
4248 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4249 			    neigh_list_node) {
4250 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4251 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4252 	}
4253 }
4254 
4255 static void mlxsw_sp_nexthop_crif_init(struct mlxsw_sp_nexthop *nh,
4256 				       struct mlxsw_sp_crif *crif)
4257 {
4258 	if (nh->crif)
4259 		return;
4260 
4261 	nh->crif = crif;
4262 	list_add(&nh->crif_list_node, &crif->nexthop_list);
4263 }
4264 
4265 static void mlxsw_sp_nexthop_crif_fini(struct mlxsw_sp_nexthop *nh)
4266 {
4267 	if (!nh->crif)
4268 		return;
4269 
4270 	list_del(&nh->crif_list_node);
4271 	nh->crif = NULL;
4272 }
4273 
4274 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4275 				       struct mlxsw_sp_nexthop *nh)
4276 {
4277 	struct mlxsw_sp_neigh_entry *neigh_entry;
4278 	struct net_device *dev;
4279 	struct neighbour *n;
4280 	u8 nud_state, dead;
4281 	int err;
4282 
4283 	if (WARN_ON(!nh->crif->rif))
4284 		return 0;
4285 
4286 	if (!nh->nhgi->gateway || nh->neigh_entry)
4287 		return 0;
4288 	dev = mlxsw_sp_nexthop_dev(nh);
4289 
4290 	/* Take a reference on the neighbour to ensure it is not
4291 	 * destroyed before the nexthop entry is finished with it.
4292 	 * The reference is taken either in neigh_lookup() or,
4293 	 * if the neighbour is not found, in neigh_create().
4294 	 */
4295 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4296 	if (!n) {
4297 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4298 		if (IS_ERR(n))
4299 			return PTR_ERR(n);
4300 		neigh_event_send(n, NULL);
4301 	}
4302 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4303 	if (!neigh_entry) {
4304 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4305 		if (IS_ERR(neigh_entry)) {
4306 			err = -EINVAL;
4307 			goto err_neigh_entry_create;
4308 		}
4309 	}
4310 
4311 	/* If that is the first nexthop connected to that neigh, add to
4312 	 * nexthop_neighs_list
4313 	 */
4314 	if (list_empty(&neigh_entry->nexthop_list))
4315 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4316 			      &mlxsw_sp->router->nexthop_neighs_list);
4317 
4318 	nh->neigh_entry = neigh_entry;
4319 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4320 	read_lock_bh(&n->lock);
4321 	nud_state = n->nud_state;
4322 	dead = n->dead;
4323 	read_unlock_bh(&n->lock);
4324 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4325 
4326 	return 0;
4327 
4328 err_neigh_entry_create:
4329 	neigh_release(n);
4330 	return err;
4331 }
4332 
4333 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4334 					struct mlxsw_sp_nexthop *nh)
4335 {
4336 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4337 	struct neighbour *n;
4338 
4339 	if (!neigh_entry)
4340 		return;
4341 	n = neigh_entry->key.n;
4342 
4343 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4344 	list_del(&nh->neigh_list_node);
4345 	nh->neigh_entry = NULL;
4346 
4347 	/* If that is the last nexthop connected to that neigh, remove from
4348 	 * nexthop_neighs_list
4349 	 */
4350 	if (list_empty(&neigh_entry->nexthop_list))
4351 		list_del(&neigh_entry->nexthop_neighs_list_node);
4352 
4353 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4354 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4355 
4356 	neigh_release(n);
4357 }
4358 
4359 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4360 {
4361 	struct net_device *ul_dev;
4362 	bool is_up;
4363 
4364 	rcu_read_lock();
4365 	ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4366 	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4367 	rcu_read_unlock();
4368 
4369 	return is_up;
4370 }
4371 
4372 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4373 				       struct mlxsw_sp_nexthop *nh,
4374 				       struct mlxsw_sp_ipip_entry *ipip_entry)
4375 {
4376 	struct mlxsw_sp_crif *crif;
4377 	bool removing;
4378 
4379 	if (!nh->nhgi->gateway || nh->ipip_entry)
4380 		return;
4381 
4382 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, ipip_entry->ol_dev);
4383 	if (WARN_ON(!crif))
4384 		return;
4385 
4386 	nh->ipip_entry = ipip_entry;
4387 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4388 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
4389 	mlxsw_sp_nexthop_crif_init(nh, crif);
4390 }
4391 
4392 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4393 				       struct mlxsw_sp_nexthop *nh)
4394 {
4395 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4396 
4397 	if (!ipip_entry)
4398 		return;
4399 
4400 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4401 	nh->ipip_entry = NULL;
4402 }
4403 
4404 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4405 					const struct fib_nh *fib_nh,
4406 					enum mlxsw_sp_ipip_type *p_ipipt)
4407 {
4408 	struct net_device *dev = fib_nh->fib_nh_dev;
4409 
4410 	return dev &&
4411 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4412 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4413 }
4414 
4415 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4416 				      struct mlxsw_sp_nexthop *nh,
4417 				      const struct net_device *dev)
4418 {
4419 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4420 	struct mlxsw_sp_ipip_entry *ipip_entry;
4421 	struct mlxsw_sp_crif *crif;
4422 	int err;
4423 
4424 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4425 	if (ipip_entry) {
4426 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4427 		if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4428 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4429 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4430 			return 0;
4431 		}
4432 	}
4433 
4434 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4435 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, dev);
4436 	if (!crif)
4437 		return 0;
4438 
4439 	mlxsw_sp_nexthop_crif_init(nh, crif);
4440 
4441 	if (!crif->rif)
4442 		return 0;
4443 
4444 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4445 	if (err)
4446 		goto err_neigh_init;
4447 
4448 	return 0;
4449 
4450 err_neigh_init:
4451 	mlxsw_sp_nexthop_crif_fini(nh);
4452 	return err;
4453 }
4454 
4455 static int mlxsw_sp_nexthop_type_rif_made(struct mlxsw_sp *mlxsw_sp,
4456 					  struct mlxsw_sp_nexthop *nh)
4457 {
4458 	switch (nh->type) {
4459 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4460 		return mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4461 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4462 		break;
4463 	}
4464 
4465 	return 0;
4466 }
4467 
4468 static void mlxsw_sp_nexthop_type_rif_gone(struct mlxsw_sp *mlxsw_sp,
4469 					   struct mlxsw_sp_nexthop *nh)
4470 {
4471 	switch (nh->type) {
4472 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4473 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4474 		break;
4475 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4476 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4477 		break;
4478 	}
4479 }
4480 
4481 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4482 				       struct mlxsw_sp_nexthop *nh)
4483 {
4484 	mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4485 	mlxsw_sp_nexthop_crif_fini(nh);
4486 }
4487 
4488 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4489 				  struct mlxsw_sp_nexthop_group *nh_grp,
4490 				  struct mlxsw_sp_nexthop *nh,
4491 				  struct fib_nh *fib_nh)
4492 {
4493 	struct net_device *dev = fib_nh->fib_nh_dev;
4494 	struct in_device *in_dev;
4495 	int err;
4496 
4497 	nh->nhgi = nh_grp->nhgi;
4498 	nh->key.fib_nh = fib_nh;
4499 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4500 	nh->nh_weight = fib_nh->fib_nh_weight;
4501 #else
4502 	nh->nh_weight = 1;
4503 #endif
4504 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4505 	nh->neigh_tbl = &arp_tbl;
4506 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4507 	if (err)
4508 		return err;
4509 
4510 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4511 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4512 
4513 	if (!dev)
4514 		return 0;
4515 	nh->ifindex = dev->ifindex;
4516 
4517 	rcu_read_lock();
4518 	in_dev = __in_dev_get_rcu(dev);
4519 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4520 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4521 		rcu_read_unlock();
4522 		return 0;
4523 	}
4524 	rcu_read_unlock();
4525 
4526 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4527 	if (err)
4528 		goto err_nexthop_neigh_init;
4529 
4530 	return 0;
4531 
4532 err_nexthop_neigh_init:
4533 	list_del(&nh->router_list_node);
4534 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4535 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4536 	return err;
4537 }
4538 
4539 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4540 				   struct mlxsw_sp_nexthop *nh)
4541 {
4542 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4543 	list_del(&nh->router_list_node);
4544 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4545 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4546 }
4547 
4548 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4549 				    unsigned long event, struct fib_nh *fib_nh)
4550 {
4551 	struct mlxsw_sp_nexthop_key key;
4552 	struct mlxsw_sp_nexthop *nh;
4553 
4554 	key.fib_nh = fib_nh;
4555 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4556 	if (!nh)
4557 		return;
4558 
4559 	switch (event) {
4560 	case FIB_EVENT_NH_ADD:
4561 		mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4562 		break;
4563 	case FIB_EVENT_NH_DEL:
4564 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4565 		break;
4566 	}
4567 
4568 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4569 }
4570 
4571 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4572 					struct mlxsw_sp_rif *rif)
4573 {
4574 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
4575 	struct mlxsw_sp_nexthop *nh;
4576 	bool removing;
4577 
4578 	list_for_each_entry(nh, &rif->crif->nexthop_list, crif_list_node) {
4579 		switch (nh->type) {
4580 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
4581 			removing = false;
4582 			break;
4583 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4584 			removing = !mlxsw_sp_ipip_netdev_ul_up(dev);
4585 			break;
4586 		default:
4587 			WARN_ON(1);
4588 			continue;
4589 		}
4590 
4591 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4592 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4593 	}
4594 }
4595 
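/* A RIF was created for the CRIF's netdevice. Initialize the neighbour
 * state of every nexthop tied to the CRIF and refresh its group. On
 * failure, roll back the nexthops that were already initialized.
 */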
4596 static int mlxsw_sp_nexthop_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
4597 					  struct mlxsw_sp_rif *rif)
4598 {
4599 	struct mlxsw_sp_nexthop *nh, *tmp;
4600 	unsigned int n = 0;
4601 	int err;
4602 
4603 	list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4604 				 crif_list_node) {
4605 		err = mlxsw_sp_nexthop_type_rif_made(mlxsw_sp, nh);
4606 		if (err)
4607 			goto err_nexthop_type_rif;
4608 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4609 		n++;
4610 	}
4611 
4612 	return 0;
4613 
4614 err_nexthop_type_rif:
4615 	list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4616 				 crif_list_node) {
4617 		if (!n--)
4618 			break;
4619 		mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4620 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4621 	}
4622 	return err;
4623 }
4624 
4625 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4626 					   struct mlxsw_sp_rif *rif)
4627 {
4628 	struct mlxsw_sp_nexthop *nh, *tmp;
4629 
4630 	list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4631 				 crif_list_node) {
4632 		mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4633 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4634 	}
4635 }
4636 
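/* Allocate a single adjacency entry and program it to trap packets to the
 * CPU, using the loopback RIF. The entry is shared and is only set up while
 * at least one nexthop group exists (see mlxsw_sp_nexthop_group_inc()).
 */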
4637 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4638 {
4639 	enum mlxsw_reg_ratr_trap_action trap_action;
4640 	char ratr_pl[MLXSW_REG_RATR_LEN];
4641 	int err;
4642 
4643 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4644 				  &mlxsw_sp->router->adj_trap_index);
4645 	if (err)
4646 		return err;
4647 
4648 	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4649 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4650 			    MLXSW_REG_RATR_TYPE_ETHERNET,
4651 			    mlxsw_sp->router->adj_trap_index,
4652 			    mlxsw_sp->router->lb_crif->rif->rif_index);
4653 	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4654 	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4655 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4656 	if (err)
4657 		goto err_ratr_write;
4658 
4659 	return 0;
4660 
4661 err_ratr_write:
4662 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4663 			   mlxsw_sp->router->adj_trap_index);
4664 	return err;
4665 }
4666 
4667 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4668 {
4669 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4670 			   mlxsw_sp->router->adj_trap_index);
4671 }
4672 
4673 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4674 {
4675 	int err;
4676 
4677 	if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4678 		return 0;
4679 
4680 	err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4681 	if (err)
4682 		return err;
4683 
4684 	refcount_set(&mlxsw_sp->router->num_groups, 1);
4685 
4686 	return 0;
4687 }
4688 
4689 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4690 {
4691 	if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4692 		return;
4693 
4694 	mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
4695 }
4696 
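/* Query the activity vector of the group's adjacency range via the RATRAD
 * register (which also clears the activity) and set the corresponding bits
 * in the caller's bitmap.
 */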
4697 static void
4698 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4699 			     const struct mlxsw_sp_nexthop_group *nh_grp,
4700 			     unsigned long *activity)
4701 {
4702 	char *ratrad_pl;
4703 	int i, err;
4704 
4705 	ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4706 	if (!ratrad_pl)
4707 		return;
4708 
4709 	mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4710 			      nh_grp->nhgi->count);
4711 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4712 	if (err)
4713 		goto out;
4714 
4715 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4716 		if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4717 			continue;
4718 		bitmap_set(activity, i, 1);
4719 	}
4720 
4721 out:
4722 	kfree(ratrad_pl);
4723 }
4724 
4725 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4726 
4727 static void
4728 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4729 				const struct mlxsw_sp_nexthop_group *nh_grp)
4730 {
4731 	unsigned long *activity;
4732 
4733 	activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4734 	if (!activity)
4735 		return;
4736 
4737 	mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4738 	nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4739 					nh_grp->nhgi->count, activity);
4740 
4741 	bitmap_free(activity);
4742 }
4743 
4744 static void
4745 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4746 {
4747 	unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4748 
4749 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4750 			       msecs_to_jiffies(interval));
4751 }
4752 
4753 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4754 {
4755 	struct mlxsw_sp_nexthop_group_info *nhgi;
4756 	struct mlxsw_sp_router *router;
4757 	bool reschedule = false;
4758 
4759 	router = container_of(work, struct mlxsw_sp_router,
4760 			      nh_grp_activity_dw.work);
4761 
4762 	mutex_lock(&router->lock);
4763 
4764 	list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4765 		mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4766 		reschedule = true;
4767 	}
4768 
4769 	mutex_unlock(&router->lock);
4770 
4771 	if (!reschedule)
4772 		return;
4773 	mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4774 }
4775 
4776 static int
4777 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4778 				     const struct nh_notifier_single_info *nh,
4779 				     struct netlink_ext_ack *extack)
4780 {
4781 	int err = -EINVAL;
4782 
4783 	if (nh->is_fdb)
4784 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4785 	else if (nh->has_encap)
4786 		NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4787 	else
4788 		err = 0;
4789 
4790 	return err;
4791 }
4792 
4793 static int
4794 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4795 					  const struct nh_notifier_single_info *nh,
4796 					  struct netlink_ext_ack *extack)
4797 {
4798 	int err;
4799 
4800 	err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4801 	if (err)
4802 		return err;
4803 
4804 	/* Device-only nexthops with an IPIP device are programmed as
4805 	 * encapsulating adjacency entries.
4806 	 */
4807 	if (!nh->gw_family && !nh->is_reject &&
4808 	    !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4809 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4810 		return -EINVAL;
4811 	}
4812 
4813 	return 0;
4814 }
4815 
4816 static int
4817 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4818 				    const struct nh_notifier_grp_info *nh_grp,
4819 				    struct netlink_ext_ack *extack)
4820 {
4821 	int i;
4822 
4823 	if (nh_grp->is_fdb) {
4824 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4825 		return -EINVAL;
4826 	}
4827 
4828 	for (i = 0; i < nh_grp->num_nh; i++) {
4829 		const struct nh_notifier_single_info *nh;
4830 		int err;
4831 
4832 		nh = &nh_grp->nh_entries[i].nh;
4833 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4834 								extack);
4835 		if (err)
4836 			return err;
4837 	}
4838 
4839 	return 0;
4840 }
4841 
4842 static int
4843 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4844 					     const struct nh_notifier_res_table_info *nh_res_table,
4845 					     struct netlink_ext_ack *extack)
4846 {
4847 	unsigned int alloc_size;
4848 	bool valid_size = false;
4849 	int err, i;
4850 
4851 	if (nh_res_table->num_nh_buckets < 32) {
4852 		NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4853 		return -EINVAL;
4854 	}
4855 
4856 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4857 		const struct mlxsw_sp_adj_grp_size_range *size_range;
4858 
4859 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4860 
4861 		if (nh_res_table->num_nh_buckets >= size_range->start &&
4862 		    nh_res_table->num_nh_buckets <= size_range->end) {
4863 			valid_size = true;
4864 			break;
4865 		}
4866 	}
4867 
4868 	if (!valid_size) {
4869 		NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4870 		return -EINVAL;
4871 	}
4872 
4873 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4874 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4875 					      nh_res_table->num_nh_buckets,
4876 					      &alloc_size);
4877 	if (err || nh_res_table->num_nh_buckets != alloc_size) {
4878 		NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
4879 		return -EINVAL;
4880 	}
4881 
4882 	return 0;
4883 }
4884 
4885 static int
4886 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4887 					const struct nh_notifier_res_table_info *nh_res_table,
4888 					struct netlink_ext_ack *extack)
4889 {
4890 	int err;
4891 	u16 i;
4892 
4893 	err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4894 							   nh_res_table,
4895 							   extack);
4896 	if (err)
4897 		return err;
4898 
4899 	for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4900 		const struct nh_notifier_single_info *nh;
4901 		int err;
4902 
4903 		nh = &nh_res_table->nhs[i];
4904 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4905 								extack);
4906 		if (err)
4907 			return err;
4908 	}
4909 
4910 	return 0;
4911 }
4912 
4913 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4914 					 unsigned long event,
4915 					 struct nh_notifier_info *info)
4916 {
4917 	struct nh_notifier_single_info *nh;
4918 
4919 	if (event != NEXTHOP_EVENT_REPLACE &&
4920 	    event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
4921 	    event != NEXTHOP_EVENT_BUCKET_REPLACE)
4922 		return 0;
4923 
4924 	switch (info->type) {
4925 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4926 		return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4927 							    info->extack);
4928 	case NH_NOTIFIER_INFO_TYPE_GRP:
4929 		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4930 							   info->nh_grp,
4931 							   info->extack);
4932 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4933 		return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
4934 							       info->nh_res_table,
4935 							       info->extack);
4936 	case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
4937 		nh = &info->nh_res_bucket->new_nh;
4938 		return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4939 								 info->extack);
4940 	default:
4941 		NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4942 		return -EOPNOTSUPP;
4943 	}
4944 }
4945 
4946 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4947 					    const struct nh_notifier_info *info)
4948 {
4949 	const struct net_device *dev;
4950 
4951 	switch (info->type) {
4952 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4953 		dev = info->nh->dev;
4954 		return info->nh->gw_family || info->nh->is_reject ||
4955 		       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4956 	case NH_NOTIFIER_INFO_TYPE_GRP:
4957 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4958 		/* Already validated earlier. */
4959 		return true;
4960 	default:
4961 		return false;
4962 	}
4963 }
4964 
4965 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4966 						struct mlxsw_sp_nexthop *nh)
4967 {
4968 	nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
4969 	nh->should_offload = 1;
4970 	/* While nexthops that discard packets do not forward packets
4971 	 * via an egress RIF, they still need to be programmed using a
4972 	 * valid RIF, so use the loopback RIF created during init.
4973 	 */
4974 	nh->crif = mlxsw_sp->router->lb_crif;
4975 }
4976 
4977 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4978 						struct mlxsw_sp_nexthop *nh)
4979 {
4980 	nh->crif = NULL;
4981 	nh->should_offload = 0;
4982 }
4983 
4984 static int
4985 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4986 			  struct mlxsw_sp_nexthop_group *nh_grp,
4987 			  struct mlxsw_sp_nexthop *nh,
4988 			  struct nh_notifier_single_info *nh_obj, int weight)
4989 {
4990 	struct net_device *dev = nh_obj->dev;
4991 	int err;
4992 
4993 	nh->nhgi = nh_grp->nhgi;
4994 	nh->nh_weight = weight;
4995 
4996 	switch (nh_obj->gw_family) {
4997 	case AF_INET:
4998 		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
4999 		nh->neigh_tbl = &arp_tbl;
5000 		break;
5001 	case AF_INET6:
5002 		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
5003 #if IS_ENABLED(CONFIG_IPV6)
5004 		nh->neigh_tbl = &nd_tbl;
5005 #endif
5006 		break;
5007 	}
5008 
5009 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
5010 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5011 	nh->ifindex = dev->ifindex;
5012 
5013 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
5014 	if (err)
5015 		goto err_type_init;
5016 
5017 	if (nh_obj->is_reject)
5018 		mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
5019 
5020 	/* In a resilient nexthop group, all the nexthops must be written to
5021 	 * the adjacency table, even if they do not have a valid neighbour or
5022 	 * RIF.
5023 	 */
5024 	if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
5025 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
5026 		nh->should_offload = 1;
5027 	}
5028 
5029 	return 0;
5030 
5031 err_type_init:
5032 	list_del(&nh->router_list_node);
5033 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5034 	return err;
5035 }
5036 
5037 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
5038 				      struct mlxsw_sp_nexthop *nh)
5039 {
5040 	if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
5041 		mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
5042 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
5043 	list_del(&nh->router_list_node);
5044 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5045 	nh->should_offload = 0;
5046 }
5047 
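/* Allocate and populate the group info from the notifier payload: a single
 * nexthop, a multipath group or a resilient table. Resilient groups are
 * also added to the list used for periodic activity updates, and the first
 * one schedules the activity work.
 */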
5048 static int
5049 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
5050 				     struct mlxsw_sp_nexthop_group *nh_grp,
5051 				     struct nh_notifier_info *info)
5052 {
5053 	struct mlxsw_sp_nexthop_group_info *nhgi;
5054 	struct mlxsw_sp_nexthop *nh;
5055 	bool is_resilient = false;
5056 	unsigned int nhs;
5057 	int err, i;
5058 
5059 	switch (info->type) {
5060 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
5061 		nhs = 1;
5062 		break;
5063 	case NH_NOTIFIER_INFO_TYPE_GRP:
5064 		nhs = info->nh_grp->num_nh;
5065 		break;
5066 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
5067 		nhs = info->nh_res_table->num_nh_buckets;
5068 		is_resilient = true;
5069 		break;
5070 	default:
5071 		return -EINVAL;
5072 	}
5073 
5074 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5075 	if (!nhgi)
5076 		return -ENOMEM;
5077 	nh_grp->nhgi = nhgi;
5078 	nhgi->nh_grp = nh_grp;
5079 	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
5080 	nhgi->is_resilient = is_resilient;
5081 	nhgi->count = nhs;
5082 	for (i = 0; i < nhgi->count; i++) {
5083 		struct nh_notifier_single_info *nh_obj;
5084 		int weight;
5085 
5086 		nh = &nhgi->nexthops[i];
5087 		switch (info->type) {
5088 		case NH_NOTIFIER_INFO_TYPE_SINGLE:
5089 			nh_obj = info->nh;
5090 			weight = 1;
5091 			break;
5092 		case NH_NOTIFIER_INFO_TYPE_GRP:
5093 			nh_obj = &info->nh_grp->nh_entries[i].nh;
5094 			weight = info->nh_grp->nh_entries[i].weight;
5095 			break;
5096 		case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
5097 			nh_obj = &info->nh_res_table->nhs[i];
5098 			weight = 1;
5099 			break;
5100 		default:
5101 			err = -EINVAL;
5102 			goto err_nexthop_obj_init;
5103 		}
5104 		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
5105 						weight);
5106 		if (err)
5107 			goto err_nexthop_obj_init;
5108 	}
5109 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5110 	if (err)
5111 		goto err_group_inc;
5112 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5113 	if (err) {
5114 		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
5115 		goto err_group_refresh;
5116 	}
5117 
5118 	/* Add resilient nexthop groups to a list so that the activity of their
5119 	 * nexthop buckets will be periodically queried and cleared.
5120 	 */
5121 	if (nhgi->is_resilient) {
5122 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5123 			mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
5124 		list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
5125 	}
5126 
5127 	return 0;
5128 
5129 err_group_refresh:
5130 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5131 err_group_inc:
5132 	i = nhgi->count;
5133 err_nexthop_obj_init:
5134 	for (i--; i >= 0; i--) {
5135 		nh = &nhgi->nexthops[i];
5136 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5137 	}
5138 	kfree(nhgi);
5139 	return err;
5140 }
5141 
5142 static void
5143 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5144 				     struct mlxsw_sp_nexthop_group *nh_grp)
5145 {
5146 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5147 	struct mlxsw_sp_router *router = mlxsw_sp->router;
5148 	int i;
5149 
5150 	if (nhgi->is_resilient) {
5151 		list_del(&nhgi->list);
5152 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5153 			cancel_delayed_work(&router->nh_grp_activity_dw);
5154 	}
5155 
5156 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5157 	for (i = nhgi->count - 1; i >= 0; i--) {
5158 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5159 
5160 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5161 	}
5162 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5163 	WARN_ON_ONCE(nhgi->adj_index_valid);
5164 	kfree(nhgi);
5165 }
5166 
5167 static struct mlxsw_sp_nexthop_group *
5168 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
5169 				  struct nh_notifier_info *info)
5170 {
5171 	struct mlxsw_sp_nexthop_group *nh_grp;
5172 	int err;
5173 
5174 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5175 	if (!nh_grp)
5176 		return ERR_PTR(-ENOMEM);
5177 	INIT_LIST_HEAD(&nh_grp->vr_list);
5178 	err = rhashtable_init(&nh_grp->vr_ht,
5179 			      &mlxsw_sp_nexthop_group_vr_ht_params);
5180 	if (err)
5181 		goto err_nexthop_group_vr_ht_init;
5182 	INIT_LIST_HEAD(&nh_grp->fib_list);
5183 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5184 	nh_grp->obj.id = info->id;
5185 
5186 	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
5187 	if (err)
5188 		goto err_nexthop_group_info_init;
5189 
5190 	nh_grp->can_destroy = false;
5191 
5192 	return nh_grp;
5193 
5194 err_nexthop_group_info_init:
5195 	rhashtable_destroy(&nh_grp->vr_ht);
5196 err_nexthop_group_vr_ht_init:
5197 	kfree(nh_grp);
5198 	return ERR_PTR(err);
5199 }
5200 
5201 static void
5202 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
5203 				   struct mlxsw_sp_nexthop_group *nh_grp)
5204 {
5205 	if (!nh_grp->can_destroy)
5206 		return;
5207 	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
5208 	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
5209 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5210 	rhashtable_destroy(&nh_grp->vr_ht);
5211 	kfree(nh_grp);
5212 }
5213 
5214 static struct mlxsw_sp_nexthop_group *
5215 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
5216 {
5217 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
5218 
5219 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5220 	cmp_arg.id = id;
5221 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
5222 				      &cmp_arg,
5223 				      mlxsw_sp_nexthop_group_ht_params);
5224 }
5225 
5226 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
5227 					  struct mlxsw_sp_nexthop_group *nh_grp)
5228 {
5229 	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5230 }
5231 
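/* Replace the group info of 'old_nh_grp', which routes are keeping, with
 * the newly created info of 'nh_grp'. The device is then told to switch
 * over, and the temporary 'nh_grp' shell, now holding the old info, is
 * destroyed. On failure the two infos are swapped back.
 */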
5232 static int
5233 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
5234 				   struct mlxsw_sp_nexthop_group *nh_grp,
5235 				   struct mlxsw_sp_nexthop_group *old_nh_grp,
5236 				   struct netlink_ext_ack *extack)
5237 {
5238 	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
5239 	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
5240 	int err;
5241 
5242 	old_nh_grp->nhgi = new_nhgi;
5243 	new_nhgi->nh_grp = old_nh_grp;
5244 	nh_grp->nhgi = old_nhgi;
5245 	old_nhgi->nh_grp = nh_grp;
5246 
5247 	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5248 		/* Both the old adjacency index and the new one are valid.
5249 		 * Routes are currently using the old one. Tell the device to
5250 		 * replace the old adjacency index with the new one.
5251 		 */
5252 		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
5253 						     old_nhgi->adj_index,
5254 						     old_nhgi->ecmp_size);
5255 		if (err) {
5256 			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
5257 			goto err_out;
5258 		}
5259 	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
5260 		/* The old adjacency index is valid, while the new one is not.
5261 		 * Iterate over all the routes using the group and change them
5262 		 * to trap packets to the CPU.
5263 		 */
5264 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5265 		if (err) {
5266 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
5267 			goto err_out;
5268 		}
5269 	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5270 		/* The old adjacency index is invalid, while the new one is.
5271 		 * Iterate over all the routes using the group and change them
5272 		 * to forward packets using the new valid index.
5273 		 */
5274 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5275 		if (err) {
5276 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
5277 			goto err_out;
5278 		}
5279 	}
5280 
5281 	/* Make sure the flags are set / cleared based on the new nexthop group
5282 	 * information.
5283 	 */
5284 	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
5285 
5286 	/* At this point 'nh_grp' is just a shell that is not used by anyone
5287 	 * and its nexthop group info is the old info that was just replaced
5288 	 * with the new one. Remove it.
5289 	 */
5290 	nh_grp->can_destroy = true;
5291 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5292 
5293 	return 0;
5294 
5295 err_out:
5296 	old_nhgi->nh_grp = old_nh_grp;
5297 	nh_grp->nhgi = new_nhgi;
5298 	new_nhgi->nh_grp = nh_grp;
5299 	old_nh_grp->nhgi = old_nhgi;
5300 	return err;
5301 }
5302 
5303 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
5304 				    struct nh_notifier_info *info)
5305 {
5306 	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
5307 	struct netlink_ext_ack *extack = info->extack;
5308 	int err;
5309 
5310 	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
5311 	if (IS_ERR(nh_grp))
5312 		return PTR_ERR(nh_grp);
5313 
5314 	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5315 	if (!old_nh_grp)
5316 		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
5317 	else
5318 		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
5319 							 old_nh_grp, extack);
5320 
5321 	if (err) {
5322 		nh_grp->can_destroy = true;
5323 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5324 	}
5325 
5326 	return err;
5327 }
5328 
5329 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
5330 				     struct nh_notifier_info *info)
5331 {
5332 	struct mlxsw_sp_nexthop_group *nh_grp;
5333 
5334 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5335 	if (!nh_grp)
5336 		return;
5337 
5338 	nh_grp->can_destroy = true;
5339 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5340 
5341 	/* If the group still has routes using it, then defer the delete
5342 	 * operation until the last route using it is deleted.
5343 	 */
5344 	if (!list_empty(&nh_grp->fib_list))
5345 		return;
5346 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5347 }
5348 
5349 static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
5350 					     u32 adj_index, char *ratr_pl)
5351 {
5352 	MLXSW_REG_ZERO(ratr, ratr_pl);
5353 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5354 	mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
5355 	mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
5356 
5357 	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5358 }
5359 
5360 static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
5361 {
5362 	/* Clear the opcode and activity on both the old and new payloads as
5363 	 * they are irrelevant for the comparison.
5364 	 */
5365 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5366 	mlxsw_reg_ratr_a_set(ratr_pl, 0);
5367 	mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
5368 	mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
5369 
5370 	/* If the contents of the adjacency entry are consistent with the
5371 	 * replacement request, then replacement was successful.
5372 	 */
5373 	if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
5374 		return 0;
5375 
5376 	return -EINVAL;
5377 }
5378 
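/* Overwrite the adjacency entry backing a single nexthop bucket. For a
 * non-forced replacement the device only overwrites an idle entry, so the
 * result is verified by reading the entry back and comparing payloads.
 */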
5379 static int
5380 mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
5381 				       struct mlxsw_sp_nexthop *nh,
5382 				       struct nh_notifier_info *info)
5383 {
5384 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5385 	struct netlink_ext_ack *extack = info->extack;
5386 	bool force = info->nh_res_bucket->force;
5387 	char ratr_pl_new[MLXSW_REG_RATR_LEN];
5388 	char ratr_pl[MLXSW_REG_RATR_LEN];
5389 	u32 adj_index;
5390 	int err;
5391 
5392 	/* No point in trying an atomic replacement if the idle timer interval
5393 	 * is smaller than the interval in which we query and clear activity.
5394 	 */
5395 	if (!force && info->nh_res_bucket->idle_timer_ms <
5396 	    MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
5397 		force = true;
5398 
5399 	adj_index = nh->nhgi->adj_index + bucket_index;
5400 	err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
5401 	if (err) {
5402 		NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
5403 		return err;
5404 	}
5405 
5406 	if (!force) {
5407 		err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
5408 							ratr_pl_new);
5409 		if (err) {
5410 			NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
5411 			return err;
5412 		}
5413 
5414 		err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
5415 		if (err) {
5416 			NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
5417 			return err;
5418 		}
5419 	}
5420 
5421 	nh->update = 0;
5422 	nh->offloaded = 1;
5423 	mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
5424 
5425 	return 0;
5426 }
5427 
5428 static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
5429 					       struct nh_notifier_info *info)
5430 {
5431 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5432 	struct netlink_ext_ack *extack = info->extack;
5433 	struct mlxsw_sp_nexthop_group_info *nhgi;
5434 	struct nh_notifier_single_info *nh_obj;
5435 	struct mlxsw_sp_nexthop_group *nh_grp;
5436 	struct mlxsw_sp_nexthop *nh;
5437 	int err;
5438 
5439 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5440 	if (!nh_grp) {
5441 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
5442 		return -EINVAL;
5443 	}
5444 
5445 	nhgi = nh_grp->nhgi;
5446 
5447 	if (bucket_index >= nhgi->count) {
5448 		NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
5449 		return -EINVAL;
5450 	}
5451 
5452 	nh = &nhgi->nexthops[bucket_index];
5453 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5454 
5455 	nh_obj = &info->nh_res_bucket->new_nh;
5456 	err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5457 	if (err) {
5458 		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
5459 		goto err_nexthop_obj_init;
5460 	}
5461 
5462 	err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
5463 	if (err)
5464 		goto err_nexthop_obj_bucket_adj_update;
5465 
5466 	return 0;
5467 
5468 err_nexthop_obj_bucket_adj_update:
5469 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5470 err_nexthop_obj_init:
5471 	nh_obj = &info->nh_res_bucket->old_nh;
5472 	mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5473 	/* The old adjacency entry was not overwritten */
5474 	nh->update = 0;
5475 	nh->offloaded = 1;
5476 	return err;
5477 }
5478 
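/* Notifier for nexthop object events. Validation is performed outside the
 * router lock; the handling of replace, delete and bucket replace events is
 * serialized by it.
 */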
5479 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
5480 				      unsigned long event, void *ptr)
5481 {
5482 	struct nh_notifier_info *info = ptr;
5483 	struct mlxsw_sp_router *router;
5484 	int err = 0;
5485 
5486 	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
5487 	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
5488 	if (err)
5489 		goto out;
5490 
5491 	mutex_lock(&router->lock);
5492 
5493 	switch (event) {
5494 	case NEXTHOP_EVENT_REPLACE:
5495 		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
5496 		break;
5497 	case NEXTHOP_EVENT_DEL:
5498 		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
5499 		break;
5500 	case NEXTHOP_EVENT_BUCKET_REPLACE:
5501 		err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
5502 							  info);
5503 		break;
5504 	default:
5505 		break;
5506 	}
5507 
5508 	mutex_unlock(&router->lock);
5509 
5510 out:
5511 	return notifier_from_errno(err);
5512 }
5513 
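/* A fib_info acts as a gateway if its first nexthop has a gateway address or
 * egresses through an IP-in-IP tunnel device.
 */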
5514 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5515 				   struct fib_info *fi)
5516 {
5517 	const struct fib_nh *nh = fib_info_nh(fi, 0);
5518 
5519 	return nh->fib_nh_gw_family ||
5520 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5521 }
5522 
5523 static int
5524 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
5525 				  struct mlxsw_sp_nexthop_group *nh_grp)
5526 {
5527 	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
5528 	struct mlxsw_sp_nexthop_group_info *nhgi;
5529 	struct mlxsw_sp_nexthop *nh;
5530 	int err, i;
5531 
5532 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5533 	if (!nhgi)
5534 		return -ENOMEM;
5535 	nh_grp->nhgi = nhgi;
5536 	nhgi->nh_grp = nh_grp;
5537 	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
5538 	nhgi->count = nhs;
5539 	for (i = 0; i < nhgi->count; i++) {
5540 		struct fib_nh *fib_nh;
5541 
5542 		nh = &nhgi->nexthops[i];
5543 		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
5544 		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
5545 		if (err)
5546 			goto err_nexthop4_init;
5547 	}
5548 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5549 	if (err)
5550 		goto err_group_inc;
5551 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5552 	if (err)
5553 		goto err_group_refresh;
5554 
5555 	return 0;
5556 
5557 err_group_refresh:
5558 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5559 err_group_inc:
5560 	i = nhgi->count;
5561 err_nexthop4_init:
5562 	for (i--; i >= 0; i--) {
5563 		nh = &nhgi->nexthops[i];
5564 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5565 	}
5566 	kfree(nhgi);
5567 	return err;
5568 }
5569 
5570 static void
5571 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5572 				  struct mlxsw_sp_nexthop_group *nh_grp)
5573 {
5574 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5575 	int i;
5576 
5577 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5578 	for (i = nhgi->count - 1; i >= 0; i--) {
5579 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5580 
5581 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5582 	}
5583 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5584 	WARN_ON_ONCE(nhgi->adj_index_valid);
5585 	kfree(nhgi);
5586 }
5587 
5588 static struct mlxsw_sp_nexthop_group *
5589 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
5590 {
5591 	struct mlxsw_sp_nexthop_group *nh_grp;
5592 	int err;
5593 
5594 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5595 	if (!nh_grp)
5596 		return ERR_PTR(-ENOMEM);
5597 	INIT_LIST_HEAD(&nh_grp->vr_list);
5598 	err = rhashtable_init(&nh_grp->vr_ht,
5599 			      &mlxsw_sp_nexthop_group_vr_ht_params);
5600 	if (err)
5601 		goto err_nexthop_group_vr_ht_init;
5602 	INIT_LIST_HEAD(&nh_grp->fib_list);
5603 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
5604 	nh_grp->ipv4.fi = fi;
5605 	fib_info_hold(fi);
5606 
5607 	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
5608 	if (err)
5609 		goto err_nexthop_group_info_init;
5610 
5611 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5612 	if (err)
5613 		goto err_nexthop_group_insert;
5614 
5615 	nh_grp->can_destroy = true;
5616 
5617 	return nh_grp;
5618 
5619 err_nexthop_group_insert:
5620 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5621 err_nexthop_group_info_init:
5622 	fib_info_put(fi);
5623 	rhashtable_destroy(&nh_grp->vr_ht);
5624 err_nexthop_group_vr_ht_init:
5625 	kfree(nh_grp);
5626 	return ERR_PTR(err);
5627 }
5628 
5629 static void
5630 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
5631 				struct mlxsw_sp_nexthop_group *nh_grp)
5632 {
5633 	if (!nh_grp->can_destroy)
5634 		return;
5635 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5636 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5637 	fib_info_put(nh_grp->ipv4.fi);
5638 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5639 	rhashtable_destroy(&nh_grp->vr_ht);
5640 	kfree(nh_grp);
5641 }
5642 
5643 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
5644 				       struct mlxsw_sp_fib_entry *fib_entry,
5645 				       struct fib_info *fi)
5646 {
5647 	struct mlxsw_sp_nexthop_group *nh_grp;
5648 
5649 	if (fi->nh) {
5650 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
5651 							   fi->nh->id);
5652 		if (WARN_ON_ONCE(!nh_grp))
5653 			return -EINVAL;
5654 		goto out;
5655 	}
5656 
5657 	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
5658 	if (!nh_grp) {
5659 		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
5660 		if (IS_ERR(nh_grp))
5661 			return PTR_ERR(nh_grp);
5662 	}
5663 out:
5664 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
5665 	fib_entry->nh_group = nh_grp;
5666 	return 0;
5667 }
5668 
5669 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
5670 					struct mlxsw_sp_fib_entry *fib_entry)
5671 {
5672 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5673 
5674 	list_del(&fib_entry->nexthop_group_node);
5675 	if (!list_empty(&nh_grp->fib_list))
5676 		return;
5677 
5678 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
5679 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5680 		return;
5681 	}
5682 
5683 	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
5684 }
5685 
5686 static bool
5687 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5688 {
5689 	struct mlxsw_sp_fib4_entry *fib4_entry;
5690 
5691 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5692 				  common);
5693 	return !fib4_entry->dscp;
5694 }
5695 
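/* An entry can forward in hardware if its type allows it: remote entries
 * need a valid adjacency index, local entries need a RIF, while blackhole
 * and decap entries always can. IPv4 entries matching on DSCP are never
 * offloaded.
 */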
5696 static bool
5697 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5698 {
5699 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5700 
5701 	switch (fib_entry->fib_node->fib->proto) {
5702 	case MLXSW_SP_L3_PROTO_IPV4:
5703 		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
5704 			return false;
5705 		break;
5706 	case MLXSW_SP_L3_PROTO_IPV6:
5707 		break;
5708 	}
5709 
5710 	switch (fib_entry->type) {
5711 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5712 		return !!nh_group->nhgi->adj_index_valid;
5713 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5714 		return !!mlxsw_sp_nhgi_rif(nh_group->nhgi);
5715 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5716 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5717 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5718 		return true;
5719 	default:
5720 		return false;
5721 	}
5722 }
5723 
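/* Find the nexthop in the group whose device and gateway address match the
 * given IPv6 route. Returns NULL if there is no such nexthop.
 */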
5724 static struct mlxsw_sp_nexthop *
5725 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
5726 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5727 {
5728 	int i;
5729 
5730 	for (i = 0; i < nh_grp->nhgi->count; i++) {
5731 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
5732 		struct net_device *dev = mlxsw_sp_nexthop_dev(nh);
5733 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
5734 
5735 		if (dev && dev == rt->fib6_nh->fib_nh_dev &&
5736 		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
5737 				    &rt->fib6_nh->fib_nh_gw6))
5738 			return nh;
5739 	}
5740 
5741 	return NULL;
5742 }
5743 
5744 static void
5745 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5746 				      struct fib_entry_notifier_info *fen_info)
5747 {
5748 	u32 *p_dst = (u32 *) &fen_info->dst;
5749 	struct fib_rt_info fri;
5750 
5751 	fri.fi = fen_info->fi;
5752 	fri.tb_id = fen_info->tb_id;
5753 	fri.dst = cpu_to_be32(*p_dst);
5754 	fri.dst_len = fen_info->dst_len;
5755 	fri.dscp = fen_info->dscp;
5756 	fri.type = fen_info->type;
5757 	fri.offload = false;
5758 	fri.trap = false;
5759 	fri.offload_failed = true;
5760 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5761 }
5762 
5763 static void
5764 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5765 				 struct mlxsw_sp_fib_entry *fib_entry)
5766 {
5767 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5768 	int dst_len = fib_entry->fib_node->key.prefix_len;
5769 	struct mlxsw_sp_fib4_entry *fib4_entry;
5770 	struct fib_rt_info fri;
5771 	bool should_offload;
5772 
5773 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5774 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5775 				  common);
5776 	fri.fi = fib4_entry->fi;
5777 	fri.tb_id = fib4_entry->tb_id;
5778 	fri.dst = cpu_to_be32(*p_dst);
5779 	fri.dst_len = dst_len;
5780 	fri.dscp = fib4_entry->dscp;
5781 	fri.type = fib4_entry->type;
5782 	fri.offload = should_offload;
5783 	fri.trap = !should_offload;
5784 	fri.offload_failed = false;
5785 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5786 }
5787 
5788 static void
5789 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5790 				   struct mlxsw_sp_fib_entry *fib_entry)
5791 {
5792 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5793 	int dst_len = fib_entry->fib_node->key.prefix_len;
5794 	struct mlxsw_sp_fib4_entry *fib4_entry;
5795 	struct fib_rt_info fri;
5796 
5797 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5798 				  common);
5799 	fri.fi = fib4_entry->fi;
5800 	fri.tb_id = fib4_entry->tb_id;
5801 	fri.dst = cpu_to_be32(*p_dst);
5802 	fri.dst_len = dst_len;
5803 	fri.dscp = fib4_entry->dscp;
5804 	fri.type = fib4_entry->type;
5805 	fri.offload = false;
5806 	fri.trap = false;
5807 	fri.offload_failed = false;
5808 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5809 }
5810 
5811 #if IS_ENABLED(CONFIG_IPV6)
5812 static void
5813 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5814 				      struct fib6_info **rt_arr,
5815 				      unsigned int nrt6)
5816 {
5817 	int i;
5818 
5819 	/* In IPv6, a multipath route is represented using multiple routes, so
5820 	 * we need to set the flags on all of them.
5821 	 */
5822 	for (i = 0; i < nrt6; i++)
5823 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
5824 				       false, false, true);
5825 }
5826 #else
5827 static void
5828 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5829 				      struct fib6_info **rt_arr,
5830 				      unsigned int nrt6)
5831 {
5832 }
5833 #endif
5834 
5835 #if IS_ENABLED(CONFIG_IPV6)
5836 static void
5837 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5838 				 struct mlxsw_sp_fib_entry *fib_entry)
5839 {
5840 	struct mlxsw_sp_fib6_entry *fib6_entry;
5841 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5842 	bool should_offload;
5843 
5844 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5845 
5846 	/* In IPv6, a multipath route is represented using multiple routes, so
5847 	 * we need to set the flags on all of them.
5848 	 */
5849 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5850 				  common);
5851 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5852 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5853 				       should_offload, !should_offload, false);
5854 }
5855 #else
5856 static void
5857 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5858 				 struct mlxsw_sp_fib_entry *fib_entry)
5859 {
5860 }
5861 #endif
5862 
5863 #if IS_ENABLED(CONFIG_IPV6)
5864 static void
5865 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5866 				   struct mlxsw_sp_fib_entry *fib_entry)
5867 {
5868 	struct mlxsw_sp_fib6_entry *fib6_entry;
5869 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5870 
5871 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5872 				  common);
5873 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5874 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5875 				       false, false, false);
5876 }
5877 #else
5878 static void
5879 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5880 				   struct mlxsw_sp_fib_entry *fib_entry)
5881 {
5882 }
5883 #endif
5884 
5885 static void
5886 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5887 				struct mlxsw_sp_fib_entry *fib_entry)
5888 {
5889 	switch (fib_entry->fib_node->fib->proto) {
5890 	case MLXSW_SP_L3_PROTO_IPV4:
5891 		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5892 		break;
5893 	case MLXSW_SP_L3_PROTO_IPV6:
5894 		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5895 		break;
5896 	}
5897 }
5898 
5899 static void
5900 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5901 				  struct mlxsw_sp_fib_entry *fib_entry)
5902 {
5903 	switch (fib_entry->fib_node->fib->proto) {
5904 	case MLXSW_SP_L3_PROTO_IPV4:
5905 		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5906 		break;
5907 	case MLXSW_SP_L3_PROTO_IPV6:
5908 		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5909 		break;
5910 	}
5911 }
5912 
5913 static void
5914 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5915 				    struct mlxsw_sp_fib_entry *fib_entry,
5916 				    enum mlxsw_reg_ralue_op op)
5917 {
5918 	switch (op) {
5919 	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
5920 		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5921 		break;
5922 	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
5923 		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5924 		break;
5925 	default:
5926 		break;
5927 	}
5928 }
5929 
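/* Pack the common part of the RALUE register payload: protocol, operation,
 * virtual router and prefix. The action-specific part is packed by the
 * per-type helpers below.
 */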
5930 static void
5931 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
5932 			      const struct mlxsw_sp_fib_entry *fib_entry,
5933 			      enum mlxsw_reg_ralue_op op)
5934 {
5935 	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5936 	enum mlxsw_reg_ralxx_protocol proto;
5937 	u32 *p_dip;
5938 
5939 	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
5940 
5941 	switch (fib->proto) {
5942 	case MLXSW_SP_L3_PROTO_IPV4:
5943 		p_dip = (u32 *) fib_entry->fib_node->key.addr;
5944 		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
5945 				      fib_entry->fib_node->key.prefix_len,
5946 				      *p_dip);
5947 		break;
5948 	case MLXSW_SP_L3_PROTO_IPV6:
5949 		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
5950 				      fib_entry->fib_node->key.prefix_len,
5951 				      fib_entry->fib_node->key.addr);
5952 		break;
5953 	}
5954 }
5955 
5956 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
5957 					struct mlxsw_sp_fib_entry *fib_entry,
5958 					enum mlxsw_reg_ralue_op op)
5959 {
5960 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5961 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
5962 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5963 	enum mlxsw_reg_ralue_trap_action trap_action;
5964 	u16 trap_id = 0;
5965 	u32 adjacency_index = 0;
5966 	u16 ecmp_size = 0;
5967 
5968 	/* In case the nexthop group adjacency index is valid, use it
5969 	 * with the provided ECMP size. Otherwise, set up a trap and pass
5970 	 * traffic to the kernel.
5971 	 */
5972 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5973 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5974 		adjacency_index = nhgi->adj_index;
5975 		ecmp_size = nhgi->ecmp_size;
5976 	} else if (!nhgi->adj_index_valid && nhgi->count &&
5977 		   mlxsw_sp_nhgi_rif(nhgi)) {
5978 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5979 		adjacency_index = mlxsw_sp->router->adj_trap_index;
5980 		ecmp_size = 1;
5981 	} else {
5982 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5983 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5984 	}
5985 
5986 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5987 	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
5988 					adjacency_index, ecmp_size);
5989 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5990 }
5991 
5992 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
5993 				       struct mlxsw_sp_fib_entry *fib_entry,
5994 				       enum mlxsw_reg_ralue_op op)
5995 {
5996 	struct mlxsw_sp_rif *rif = mlxsw_sp_nhgi_rif(fib_entry->nh_group->nhgi);
5997 	enum mlxsw_reg_ralue_trap_action trap_action;
5998 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5999 	u16 trap_id = 0;
6000 	u16 rif_index = 0;
6001 
6002 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
6003 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
6004 		rif_index = rif->rif_index;
6005 	} else {
6006 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6007 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
6008 	}
6009 
6010 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6011 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
6012 				       rif_index);
6013 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6014 }
6015 
6016 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
6017 				      struct mlxsw_sp_fib_entry *fib_entry,
6018 				      enum mlxsw_reg_ralue_op op)
6019 {
6020 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6021 
6022 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6023 	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
6024 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6025 }
6026 
6027 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
6028 					   struct mlxsw_sp_fib_entry *fib_entry,
6029 					   enum mlxsw_reg_ralue_op op)
6030 {
6031 	enum mlxsw_reg_ralue_trap_action trap_action;
6032 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6033 
6034 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
6035 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6036 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
6037 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6038 }
6039 
6040 static int
6041 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
6042 				  struct mlxsw_sp_fib_entry *fib_entry,
6043 				  enum mlxsw_reg_ralue_op op)
6044 {
6045 	enum mlxsw_reg_ralue_trap_action trap_action;
6046 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6047 	u16 trap_id;
6048 
6049 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6050 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
6051 
6052 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6053 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
6054 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6055 }
6056 
6057 static int
6058 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
6059 				 struct mlxsw_sp_fib_entry *fib_entry,
6060 				 enum mlxsw_reg_ralue_op op)
6061 {
6062 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
6063 	const struct mlxsw_sp_ipip_ops *ipip_ops;
6064 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6065 	int err;
6066 
6067 	if (WARN_ON(!ipip_entry))
6068 		return -EINVAL;
6069 
6070 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
6071 	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
6072 				     fib_entry->decap.tunnel_index);
6073 	if (err)
6074 		return err;
6075 
6076 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6077 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
6078 					   fib_entry->decap.tunnel_index);
6079 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6080 }
6081 
6082 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
6083 					   struct mlxsw_sp_fib_entry *fib_entry,
6084 					   enum mlxsw_reg_ralue_op op)
6085 {
6086 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6087 
6088 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6089 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
6090 					   fib_entry->decap.tunnel_index);
6091 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6092 }
6093 
6094 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6095 				   struct mlxsw_sp_fib_entry *fib_entry,
6096 				   enum mlxsw_reg_ralue_op op)
6097 {
6098 	switch (fib_entry->type) {
6099 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
6100 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
6101 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
6102 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
6103 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
6104 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
6105 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
6106 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
6107 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
6108 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
6109 							 op);
6110 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6111 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
6112 							fib_entry, op);
6113 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
6114 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
6115 	}
6116 	return -EINVAL;
6117 }
6118 
6119 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6120 				 struct mlxsw_sp_fib_entry *fib_entry,
6121 				 enum mlxsw_reg_ralue_op op)
6122 {
6123 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
6124 
6125 	if (err)
6126 		return err;
6127 
6128 	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
6129 
6130 	return 0;
6131 }
6132 
6133 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6134 				     struct mlxsw_sp_fib_entry *fib_entry)
6135 {
6136 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6137 				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
6138 }
6139 
6140 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
6141 				  struct mlxsw_sp_fib_entry *fib_entry)
6142 {
6143 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6144 				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
6145 }
6146 
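/* Map the kernel route type to a device entry type. Local routes can
 * terminate IP-in-IP or NVE tunnels, in which case they are programmed as
 * decap entries rather than traps.
 */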
6147 static int
6148 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6149 			     const struct fib_entry_notifier_info *fen_info,
6150 			     struct mlxsw_sp_fib_entry *fib_entry)
6151 {
6152 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6153 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
6154 	struct mlxsw_sp_router *router = mlxsw_sp->router;
6155 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
6156 	int ifindex = nhgi->nexthops[0].ifindex;
6157 	struct mlxsw_sp_ipip_entry *ipip_entry;
6158 
6159 	switch (fen_info->type) {
6160 	case RTN_LOCAL:
6161 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6162 							       MLXSW_SP_L3_PROTO_IPV4, dip);
6163 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6164 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6165 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
6166 							     fib_entry,
6167 							     ipip_entry);
6168 		}
6169 		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6170 						 MLXSW_SP_L3_PROTO_IPV4,
6171 						 &dip)) {
6172 			u32 tunnel_index;
6173 
6174 			tunnel_index = router->nve_decap_config.tunnel_index;
6175 			fib_entry->decap.tunnel_index = tunnel_index;
6176 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6177 			return 0;
6178 		}
6179 		fallthrough;
6180 	case RTN_BROADCAST:
6181 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6182 		return 0;
6183 	case RTN_BLACKHOLE:
6184 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6185 		return 0;
6186 	case RTN_UNREACHABLE:
6187 	case RTN_PROHIBIT:
6188 		/* Packets hitting these routes need to be trapped, but
6189 		 * can be trapped with a lower priority than packets directed
6190 		 * at the host, so use action type local instead of trap.
6191 		 */
6192 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6193 		return 0;
6194 	case RTN_UNICAST:
6195 		if (nhgi->gateway)
6196 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6197 		else
6198 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6199 		return 0;
6200 	default:
6201 		return -EINVAL;
6202 	}
6203 }
6204 
6205 static void
6206 mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6207 			      struct mlxsw_sp_fib_entry *fib_entry)
6208 {
6209 	switch (fib_entry->type) {
6210 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6211 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6212 		break;
6213 	default:
6214 		break;
6215 	}
6216 }
6217 
6218 static void
6219 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6220 			       struct mlxsw_sp_fib4_entry *fib4_entry)
6221 {
6222 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6223 }
6224 
6225 static struct mlxsw_sp_fib4_entry *
6226 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6227 			   struct mlxsw_sp_fib_node *fib_node,
6228 			   const struct fib_entry_notifier_info *fen_info)
6229 {
6230 	struct mlxsw_sp_fib4_entry *fib4_entry;
6231 	struct mlxsw_sp_fib_entry *fib_entry;
6232 	int err;
6233 
6234 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6235 	if (!fib4_entry)
6236 		return ERR_PTR(-ENOMEM);
6237 	fib_entry = &fib4_entry->common;
6238 
6239 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6240 	if (err)
6241 		goto err_nexthop4_group_get;
6242 
6243 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6244 					     fib_node->fib);
6245 	if (err)
6246 		goto err_nexthop_group_vr_link;
6247 
6248 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6249 	if (err)
6250 		goto err_fib4_entry_type_set;
6251 
6252 	fib4_entry->fi = fen_info->fi;
6253 	fib_info_hold(fib4_entry->fi);
6254 	fib4_entry->tb_id = fen_info->tb_id;
6255 	fib4_entry->type = fen_info->type;
6256 	fib4_entry->dscp = fen_info->dscp;
6257 
6258 	fib_entry->fib_node = fib_node;
6259 
6260 	return fib4_entry;
6261 
6262 err_fib4_entry_type_set:
6263 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6264 err_nexthop_group_vr_link:
6265 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6266 err_nexthop4_group_get:
6267 	kfree(fib4_entry);
6268 	return ERR_PTR(err);
6269 }
6270 
6271 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6272 					struct mlxsw_sp_fib4_entry *fib4_entry)
6273 {
6274 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6275 
6276 	fib_info_put(fib4_entry->fi);
6277 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
6278 	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6279 					 fib_node->fib);
6280 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6281 	kfree(fib4_entry);
6282 }
6283 
6284 static struct mlxsw_sp_fib4_entry *
6285 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6286 			   const struct fib_entry_notifier_info *fen_info)
6287 {
6288 	struct mlxsw_sp_fib4_entry *fib4_entry;
6289 	struct mlxsw_sp_fib_node *fib_node;
6290 	struct mlxsw_sp_fib *fib;
6291 	struct mlxsw_sp_vr *vr;
6292 
6293 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6294 	if (!vr)
6295 		return NULL;
6296 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6297 
6298 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6299 					    sizeof(fen_info->dst),
6300 					    fen_info->dst_len);
6301 	if (!fib_node)
6302 		return NULL;
6303 
6304 	fib4_entry = container_of(fib_node->fib_entry,
6305 				  struct mlxsw_sp_fib4_entry, common);
6306 	if (fib4_entry->tb_id == fen_info->tb_id &&
6307 	    fib4_entry->dscp == fen_info->dscp &&
6308 	    fib4_entry->type == fen_info->type &&
6309 	    fib4_entry->fi == fen_info->fi)
6310 		return fib4_entry;
6311 
6312 	return NULL;
6313 }
6314 
6315 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6316 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6317 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6318 	.key_len = sizeof(struct mlxsw_sp_fib_key),
6319 	.automatic_shrinking = true,
6320 };
6321 
6322 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6323 				    struct mlxsw_sp_fib_node *fib_node)
6324 {
6325 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6326 				      mlxsw_sp_fib_ht_params);
6327 }
6328 
6329 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6330 				     struct mlxsw_sp_fib_node *fib_node)
6331 {
6332 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6333 			       mlxsw_sp_fib_ht_params);
6334 }
6335 
6336 static struct mlxsw_sp_fib_node *
6337 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6338 			 size_t addr_len, unsigned char prefix_len)
6339 {
6340 	struct mlxsw_sp_fib_key key;
6341 
6342 	memset(&key, 0, sizeof(key));
6343 	memcpy(key.addr, addr, addr_len);
6344 	key.prefix_len = prefix_len;
6345 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6346 }
6347 
6348 static struct mlxsw_sp_fib_node *
6349 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6350 			 size_t addr_len, unsigned char prefix_len)
6351 {
6352 	struct mlxsw_sp_fib_node *fib_node;
6353 
6354 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6355 	if (!fib_node)
6356 		return NULL;
6357 
6358 	list_add(&fib_node->list, &fib->node_list);
6359 	memcpy(fib_node->key.addr, addr, addr_len);
6360 	fib_node->key.prefix_len = prefix_len;
6361 
6362 	return fib_node;
6363 }
6364 
6365 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6366 {
6367 	list_del(&fib_node->list);
6368 	kfree(fib_node);
6369 }
6370 
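/* Account the node's prefix length against the LPM tree bound to its FIB.
 * If the prefix length was not used before, get a tree that also covers it
 * and rebind the virtual routers to the new tree.
 */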
6371 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6372 				      struct mlxsw_sp_fib_node *fib_node)
6373 {
6374 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6375 	struct mlxsw_sp_fib *fib = fib_node->fib;
6376 	struct mlxsw_sp_lpm_tree *lpm_tree;
6377 	int err;
6378 
6379 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
6380 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6381 		goto out;
6382 
6383 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6384 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6385 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6386 					 fib->proto);
6387 	if (IS_ERR(lpm_tree))
6388 		return PTR_ERR(lpm_tree);
6389 
6390 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6391 	if (err)
6392 		goto err_lpm_tree_replace;
6393 
6394 out:
6395 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6396 	return 0;
6397 
6398 err_lpm_tree_replace:
6399 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6400 	return err;
6401 }
6402 
6403 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6404 					 struct mlxsw_sp_fib_node *fib_node)
6405 {
6406 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6407 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6408 	struct mlxsw_sp_fib *fib = fib_node->fib;
6409 	int err;
6410 
6411 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6412 		return;
6413 	/* Try to construct a new LPM tree from the current prefix usage
6414 	 * minus the unused one. If we fail, continue using the old one.
6415 	 */
6416 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6417 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6418 				    fib_node->key.prefix_len);
6419 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6420 					 fib->proto);
6421 	if (IS_ERR(lpm_tree))
6422 		return;
6423 
6424 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6425 	if (err)
6426 		goto err_lpm_tree_replace;
6427 
6428 	return;
6429 
6430 err_lpm_tree_replace:
6431 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6432 }
6433 
6434 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6435 				  struct mlxsw_sp_fib_node *fib_node,
6436 				  struct mlxsw_sp_fib *fib)
6437 {
6438 	int err;
6439 
6440 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
6441 	if (err)
6442 		return err;
6443 	fib_node->fib = fib;
6444 
6445 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6446 	if (err)
6447 		goto err_fib_lpm_tree_link;
6448 
6449 	return 0;
6450 
6451 err_fib_lpm_tree_link:
6452 	fib_node->fib = NULL;
6453 	mlxsw_sp_fib_node_remove(fib, fib_node);
6454 	return err;
6455 }
6456 
6457 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6458 				   struct mlxsw_sp_fib_node *fib_node)
6459 {
6460 	struct mlxsw_sp_fib *fib = fib_node->fib;
6461 
6462 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6463 	fib_node->fib = NULL;
6464 	mlxsw_sp_fib_node_remove(fib, fib_node);
6465 }
6466 
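/* Look up a FIB node by prefix, creating it (and taking a reference on the
 * virtual router) if it does not exist yet.
 */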
6467 static struct mlxsw_sp_fib_node *
6468 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6469 		      size_t addr_len, unsigned char prefix_len,
6470 		      enum mlxsw_sp_l3proto proto)
6471 {
6472 	struct mlxsw_sp_fib_node *fib_node;
6473 	struct mlxsw_sp_fib *fib;
6474 	struct mlxsw_sp_vr *vr;
6475 	int err;
6476 
6477 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6478 	if (IS_ERR(vr))
6479 		return ERR_CAST(vr);
6480 	fib = mlxsw_sp_vr_fib(vr, proto);
6481 
6482 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6483 	if (fib_node)
6484 		return fib_node;
6485 
6486 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6487 	if (!fib_node) {
6488 		err = -ENOMEM;
6489 		goto err_fib_node_create;
6490 	}
6491 
6492 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6493 	if (err)
6494 		goto err_fib_node_init;
6495 
6496 	return fib_node;
6497 
6498 err_fib_node_init:
6499 	mlxsw_sp_fib_node_destroy(fib_node);
6500 err_fib_node_create:
6501 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6502 	return ERR_PTR(err);
6503 }
6504 
6505 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6506 				  struct mlxsw_sp_fib_node *fib_node)
6507 {
6508 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6509 
6510 	if (fib_node->fib_entry)
6511 		return;
6512 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6513 	mlxsw_sp_fib_node_destroy(fib_node);
6514 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6515 }
6516 
6517 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6518 					struct mlxsw_sp_fib_entry *fib_entry)
6519 {
6520 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6521 	int err;
6522 
6523 	fib_node->fib_entry = fib_entry;
6524 
6525 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
6526 	if (err)
6527 		goto err_fib_entry_update;
6528 
6529 	return 0;
6530 
6531 err_fib_entry_update:
6532 	fib_node->fib_entry = NULL;
6533 	return err;
6534 }
6535 
6536 static void
6537 mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6538 			       struct mlxsw_sp_fib_entry *fib_entry)
6539 {
6540 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6541 
6542 	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
6543 	fib_node->fib_entry = NULL;
6544 }
6545 
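/* Do not allow a route from the main table to replace a matching route from
 * the local table, as the kernel consults the local table first by default.
 */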
6546 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6547 {
6548 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6549 	struct mlxsw_sp_fib4_entry *fib4_replaced;
6550 
6551 	if (!fib_node->fib_entry)
6552 		return true;
6553 
6554 	fib4_replaced = container_of(fib_node->fib_entry,
6555 				     struct mlxsw_sp_fib4_entry, common);
6556 	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6557 	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
6558 		return false;
6559 
6560 	return true;
6561 }
6562 
6563 static int
6564 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6565 			     const struct fib_entry_notifier_info *fen_info)
6566 {
6567 	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6568 	struct mlxsw_sp_fib_entry *replaced;
6569 	struct mlxsw_sp_fib_node *fib_node;
6570 	int err;
6571 
6572 	if (fen_info->fi->nh &&
6573 	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6574 		return 0;
6575 
6576 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6577 					 &fen_info->dst, sizeof(fen_info->dst),
6578 					 fen_info->dst_len,
6579 					 MLXSW_SP_L3_PROTO_IPV4);
6580 	if (IS_ERR(fib_node)) {
6581 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6582 		return PTR_ERR(fib_node);
6583 	}
6584 
6585 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6586 	if (IS_ERR(fib4_entry)) {
6587 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6588 		err = PTR_ERR(fib4_entry);
6589 		goto err_fib4_entry_create;
6590 	}
6591 
6592 	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6593 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6594 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6595 		return 0;
6596 	}
6597 
6598 	replaced = fib_node->fib_entry;
6599 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
6600 	if (err) {
6601 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6602 		goto err_fib_node_entry_link;
6603 	}
6604 
6605 	/* Nothing to replace */
6606 	if (!replaced)
6607 		return 0;
6608 
6609 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6610 	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6611 				     common);
6612 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6613 
6614 	return 0;
6615 
6616 err_fib_node_entry_link:
6617 	fib_node->fib_entry = replaced;
6618 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6619 err_fib4_entry_create:
6620 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6621 	return err;
6622 }
6623 
6624 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6625 				     struct fib_entry_notifier_info *fen_info)
6626 {
6627 	struct mlxsw_sp_fib4_entry *fib4_entry;
6628 	struct mlxsw_sp_fib_node *fib_node;
6629 
6630 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6631 	if (!fib4_entry)
6632 		return;
6633 	fib_node = fib4_entry->common.fib_node;
6634 
6635 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
6636 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6637 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6638 }
6639 
6640 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6641 {
6642 	/* Multicast routes aren't supported, so ignore them. Neighbour
6643 	 * Discovery packets are specifically trapped.
6644 	 */
6645 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6646 		return true;
6647 
6648 	/* Cloned routes are irrelevant in the forwarding path. */
6649 	if (rt->fib6_flags & RTF_CACHE)
6650 		return true;
6651 
6652 	return false;
6653 }
6654 
6655 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6656 {
6657 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6658 
6659 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6660 	if (!mlxsw_sp_rt6)
6661 		return ERR_PTR(-ENOMEM);
6662 
6663 	/* In case of route replace, the replaced route is deleted with
6664 	 * no notification. Take a reference to prevent accessing freed
6665 	 * memory.
6666 	 */
6667 	mlxsw_sp_rt6->rt = rt;
6668 	fib6_info_hold(rt);
6669 
6670 	return mlxsw_sp_rt6;
6671 }
6672 
6673 #if IS_ENABLED(CONFIG_IPV6)
6674 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6675 {
6676 	fib6_info_release(rt);
6677 }
6678 #else
6679 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6680 {
6681 }
6682 #endif
6683 
6684 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6685 {
6686 	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6687 
6688 	if (!mlxsw_sp_rt6->rt->nh)
6689 		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6690 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6691 	kfree(mlxsw_sp_rt6);
6692 }
6693 
6694 static struct fib6_info *
6695 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
6696 {
6697 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
6698 				list)->rt;
6699 }
6700 
6701 static struct mlxsw_sp_rt6 *
6702 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6703 			    const struct fib6_info *rt)
6704 {
6705 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6706 
6707 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6708 		if (mlxsw_sp_rt6->rt == rt)
6709 			return mlxsw_sp_rt6;
6710 	}
6711 
6712 	return NULL;
6713 }
6714 
6715 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6716 					const struct fib6_info *rt,
6717 					enum mlxsw_sp_ipip_type *ret)
6718 {
6719 	return rt->fib6_nh->fib_nh_dev &&
6720 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6721 }
6722 
6723 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
6724 				  struct mlxsw_sp_nexthop_group *nh_grp,
6725 				  struct mlxsw_sp_nexthop *nh,
6726 				  const struct fib6_info *rt)
6727 {
6728 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
6729 	int err;
6730 
6731 	nh->nhgi = nh_grp->nhgi;
6732 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
6733 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
6734 #if IS_ENABLED(CONFIG_IPV6)
6735 	nh->neigh_tbl = &nd_tbl;
6736 #endif
6737 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
6738 
6739 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
6740 
6741 	if (!dev)
6742 		return 0;
6743 	nh->ifindex = dev->ifindex;
6744 
6745 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
6746 	if (err)
6747 		goto err_nexthop_type_init;
6748 
6749 	return 0;
6750 
6751 err_nexthop_type_init:
6752 	list_del(&nh->router_list_node);
6753 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6754 	return err;
6755 }
6756 
6757 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
6758 				   struct mlxsw_sp_nexthop *nh)
6759 {
6760 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
6761 	list_del(&nh->router_list_node);
6762 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6763 }
6764 
6765 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
6766 				    const struct fib6_info *rt)
6767 {
6768 	return rt->fib6_nh->fib_nh_gw_family ||
6769 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
6770 }
6771 
6772 static int
6773 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
6774 				  struct mlxsw_sp_nexthop_group *nh_grp,
6775 				  struct mlxsw_sp_fib6_entry *fib6_entry)
6776 {
6777 	struct mlxsw_sp_nexthop_group_info *nhgi;
6778 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6779 	struct mlxsw_sp_nexthop *nh;
6780 	int err, i;
6781 
6782 	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
6783 		       GFP_KERNEL);
6784 	if (!nhgi)
6785 		return -ENOMEM;
6786 	nh_grp->nhgi = nhgi;
6787 	nhgi->nh_grp = nh_grp;
6788 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
6789 					struct mlxsw_sp_rt6, list);
6790 	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
6791 	nhgi->count = fib6_entry->nrt6;
6792 	for (i = 0; i < nhgi->count; i++) {
6793 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
6794 
6795 		nh = &nhgi->nexthops[i];
6796 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
6797 		if (err)
6798 			goto err_nexthop6_init;
6799 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
6800 	}
6802 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
6803 	if (err)
6804 		goto err_group_inc;
6805 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6806 	if (err)
6807 		goto err_group_refresh;
6808 
6809 	return 0;
6810 
6811 err_group_refresh:
6812 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6813 err_group_inc:
6814 	i = nhgi->count;
6815 err_nexthop6_init:
6816 	for (i--; i >= 0; i--) {
6817 		nh = &nhgi->nexthops[i];
6818 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6819 	}
6820 	kfree(nhgi);
6821 	return err;
6822 }
6823 
6824 static void
6825 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
6826 				  struct mlxsw_sp_nexthop_group *nh_grp)
6827 {
6828 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
6829 	int i;
6830 
6831 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6832 	for (i = nhgi->count - 1; i >= 0; i--) {
6833 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
6834 
6835 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6836 	}
6837 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6838 	WARN_ON_ONCE(nhgi->adj_index_valid);
6839 	kfree(nhgi);
6840 }
6841 
6842 static struct mlxsw_sp_nexthop_group *
6843 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
6844 			       struct mlxsw_sp_fib6_entry *fib6_entry)
6845 {
6846 	struct mlxsw_sp_nexthop_group *nh_grp;
6847 	int err;
6848 
6849 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
6850 	if (!nh_grp)
6851 		return ERR_PTR(-ENOMEM);
6852 	INIT_LIST_HEAD(&nh_grp->vr_list);
6853 	err = rhashtable_init(&nh_grp->vr_ht,
6854 			      &mlxsw_sp_nexthop_group_vr_ht_params);
6855 	if (err)
6856 		goto err_nexthop_group_vr_ht_init;
6857 	INIT_LIST_HEAD(&nh_grp->fib_list);
6858 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
6859 
6860 	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
6861 	if (err)
6862 		goto err_nexthop_group_info_init;
6863 
6864 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
6865 	if (err)
6866 		goto err_nexthop_group_insert;
6867 
6868 	nh_grp->can_destroy = true;
6869 
6870 	return nh_grp;
6871 
6872 err_nexthop_group_insert:
6873 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6874 err_nexthop_group_info_init:
6875 	rhashtable_destroy(&nh_grp->vr_ht);
6876 err_nexthop_group_vr_ht_init:
6877 	kfree(nh_grp);
6878 	return ERR_PTR(err);
6879 }
6880 
6881 static void
6882 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
6883 				struct mlxsw_sp_nexthop_group *nh_grp)
6884 {
6885 	if (!nh_grp->can_destroy)
6886 		return;
6887 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
6888 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6889 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
6890 	rhashtable_destroy(&nh_grp->vr_ht);
6891 	kfree(nh_grp);
6892 }
6893 
6894 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
6895 				       struct mlxsw_sp_fib6_entry *fib6_entry)
6896 {
6897 	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
6898 	struct mlxsw_sp_nexthop_group *nh_grp;
6899 
6900 	if (rt->nh) {
6901 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
6902 							   rt->nh->id);
6903 		if (WARN_ON_ONCE(!nh_grp))
6904 			return -EINVAL;
6905 		goto out;
6906 	}
6907 
6908 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
6909 	if (!nh_grp) {
6910 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
6911 		if (IS_ERR(nh_grp))
6912 			return PTR_ERR(nh_grp);
6913 	}
6914 
6915 	/* The route and the nexthop are described by the same struct, so we
6916 	 * need to update the nexthop offload indication for the new route.
6917 	 */
6918 	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
6919 
6920 out:
6921 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6922 		      &nh_grp->fib_list);
6923 	fib6_entry->common.nh_group = nh_grp;
6924 
6925 	return 0;
6926 }
6927 
6928 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
6929 					struct mlxsw_sp_fib_entry *fib_entry)
6930 {
6931 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
6932 
6933 	list_del(&fib_entry->nexthop_group_node);
6934 	if (!list_empty(&nh_grp->fib_list))
6935 		return;
6936 
6937 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
6938 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
6939 		return;
6940 	}
6941 
6942 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
6943 }
6944 
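/* Rebind the IPv6 entry to a nexthop group matching its current route list
 * and update the device. The old group is destroyed if the entry was its
 * last user.
 */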
6945 static int
6946 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
6947 			       struct mlxsw_sp_fib6_entry *fib6_entry)
6948 {
6949 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
6950 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6951 	int err;
6952 
6953 	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
6954 	fib6_entry->common.nh_group = NULL;
6955 	list_del(&fib6_entry->common.nexthop_group_node);
6956 
6957 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6958 	if (err)
6959 		goto err_nexthop6_group_get;
6960 
6961 	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
6962 					     fib_node->fib);
6963 	if (err)
6964 		goto err_nexthop_group_vr_link;
6965 
6966 	/* If this entry is offloaded, the adjacency index
6967 	 * currently associated with it in the device's table is that
6968 	 * of the old group. Start using the new one instead.
6969 	 */
6970 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
6971 	if (err)
6972 		goto err_fib_entry_update;
6973 
6974 	if (list_empty(&old_nh_grp->fib_list))
6975 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
6976 
6977 	return 0;
6978 
6979 err_fib_entry_update:
6980 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6981 					 fib_node->fib);
6982 err_nexthop_group_vr_link:
6983 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6984 err_nexthop6_group_get:
6985 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6986 		      &old_nh_grp->fib_list);
6987 	fib6_entry->common.nh_group = old_nh_grp;
6988 	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
6989 	return err;
6990 }
6991 
6992 static int
6993 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
6994 				struct mlxsw_sp_fib6_entry *fib6_entry,
6995 				struct fib6_info **rt_arr, unsigned int nrt6)
6996 {
6997 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6998 	int err, i;
6999 
7000 	for (i = 0; i < nrt6; i++) {
7001 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7002 		if (IS_ERR(mlxsw_sp_rt6)) {
7003 			err = PTR_ERR(mlxsw_sp_rt6);
7004 			goto err_rt6_unwind;
7005 		}
7006 
7007 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7008 		fib6_entry->nrt6++;
7009 	}
7010 
7011 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
7012 	if (err)
7013 		goto err_rt6_unwind;
7014 
7015 	return 0;
7016 
7017 err_rt6_unwind:
7018 	for (; i > 0; i--) {
7019 		fib6_entry->nrt6--;
7020 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7021 					       struct mlxsw_sp_rt6, list);
7022 		list_del(&mlxsw_sp_rt6->list);
7023 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7024 	}
7025 	return err;
7026 }
7027 
7028 static void
7029 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
7030 				struct mlxsw_sp_fib6_entry *fib6_entry,
7031 				struct fib6_info **rt_arr, unsigned int nrt6)
7032 {
7033 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7034 	int i;
7035 
7036 	for (i = 0; i < nrt6; i++) {
7037 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
7038 							   rt_arr[i]);
7039 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
7040 			continue;
7041 
7042 		fib6_entry->nrt6--;
7043 		list_del(&mlxsw_sp_rt6->list);
7044 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7045 	}
7046 
7047 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
7048 }
7049 
7050 static int
7051 mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
7052 				   struct mlxsw_sp_fib_entry *fib_entry,
7053 				   const struct fib6_info *rt)
7054 {
7055 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
7056 	union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
7057 	u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
7058 	struct mlxsw_sp_router *router = mlxsw_sp->router;
7059 	int ifindex = nhgi->nexthops[0].ifindex;
7060 	struct mlxsw_sp_ipip_entry *ipip_entry;
7061 
7062 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7063 	ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
7064 						       MLXSW_SP_L3_PROTO_IPV6,
7065 						       dip);
7066 
7067 	if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
7068 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
7069 		return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
7070 						     ipip_entry);
7071 	}
7072 	if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
7073 					 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
7074 		u32 tunnel_index;
7075 
7076 		tunnel_index = router->nve_decap_config.tunnel_index;
7077 		fib_entry->decap.tunnel_index = tunnel_index;
7078 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
7079 	}
7080 
7081 	return 0;
7082 }
7083 
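/* Derive the entry type from the route: local and anycast routes trap to the
 * CPU unless they terminate a tunnel, blackhole routes discard, reject
 * routes trap with a lower priority, and unicast routes forward remotely
 * when the nexthop group has a gateway.
 */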
7084 static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
7085 					struct mlxsw_sp_fib_entry *fib_entry,
7086 					const struct fib6_info *rt)
7087 {
7088 	if (rt->fib6_flags & RTF_LOCAL)
7089 		return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
7090 							  rt);
7091 	if (rt->fib6_flags & RTF_ANYCAST)
7092 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7093 	else if (rt->fib6_type == RTN_BLACKHOLE)
7094 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
7095 	else if (rt->fib6_flags & RTF_REJECT)
7096 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
7097 	else if (fib_entry->nh_group->nhgi->gateway)
7098 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
7099 	else
7100 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
7101 
7102 	return 0;
7103 }
7104 
7105 static void
7106 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
7107 {
7108 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
7109 
7110 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
7111 				 list) {
7112 		fib6_entry->nrt6--;
7113 		list_del(&mlxsw_sp_rt6->list);
7114 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7115 	}
7116 }
7117 
7118 static struct mlxsw_sp_fib6_entry *
7119 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
7120 			   struct mlxsw_sp_fib_node *fib_node,
7121 			   struct fib6_info **rt_arr, unsigned int nrt6)
7122 {
7123 	struct mlxsw_sp_fib6_entry *fib6_entry;
7124 	struct mlxsw_sp_fib_entry *fib_entry;
7125 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7126 	int err, i;
7127 
7128 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
7129 	if (!fib6_entry)
7130 		return ERR_PTR(-ENOMEM);
7131 	fib_entry = &fib6_entry->common;
7132 
7133 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
7134 
7135 	for (i = 0; i < nrt6; i++) {
7136 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7137 		if (IS_ERR(mlxsw_sp_rt6)) {
7138 			err = PTR_ERR(mlxsw_sp_rt6);
7139 			goto err_rt6_unwind;
7140 		}
7141 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7142 		fib6_entry->nrt6++;
7143 	}
7144 
7145 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
7146 	if (err)
7147 		goto err_rt6_unwind;
7148 
7149 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
7150 					     fib_node->fib);
7151 	if (err)
7152 		goto err_nexthop_group_vr_link;
7153 
7154 	err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
7155 	if (err)
7156 		goto err_fib6_entry_type_set;
7157 
7158 	fib_entry->fib_node = fib_node;
7159 
7160 	return fib6_entry;
7161 
7162 err_fib6_entry_type_set:
7163 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
7164 err_nexthop_group_vr_link:
7165 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
7166 err_rt6_unwind:
7167 	for (; i > 0; i--) {
7168 		fib6_entry->nrt6--;
7169 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7170 					       struct mlxsw_sp_rt6, list);
7171 		list_del(&mlxsw_sp_rt6->list);
7172 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7173 	}
7174 	kfree(fib6_entry);
7175 	return ERR_PTR(err);
7176 }
7177 
7178 static void
7179 mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
7180 			       struct mlxsw_sp_fib6_entry *fib6_entry)
7181 {
7182 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
7183 }
7184 
7185 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
7186 					struct mlxsw_sp_fib6_entry *fib6_entry)
7187 {
7188 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7189 
7190 	mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
7191 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
7192 					 fib_node->fib);
7193 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
7194 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
7195 	WARN_ON(fib6_entry->nrt6);
7196 	kfree(fib6_entry);
7197 }
7198 
7199 static struct mlxsw_sp_fib6_entry *
7200 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
7201 			   const struct fib6_info *rt)
7202 {
7203 	struct mlxsw_sp_fib6_entry *fib6_entry;
7204 	struct mlxsw_sp_fib_node *fib_node;
7205 	struct mlxsw_sp_fib *fib;
7206 	struct fib6_info *cmp_rt;
7207 	struct mlxsw_sp_vr *vr;
7208 
7209 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
7210 	if (!vr)
7211 		return NULL;
7212 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
7213 
7214 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
7215 					    sizeof(rt->fib6_dst.addr),
7216 					    rt->fib6_dst.plen);
7217 	if (!fib_node)
7218 		return NULL;
7219 
7220 	fib6_entry = container_of(fib_node->fib_entry,
7221 				  struct mlxsw_sp_fib6_entry, common);
7222 	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7223 	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
7224 	    rt->fib6_metric == cmp_rt->fib6_metric &&
7225 	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
7226 		return fib6_entry;
7227 
7228 	return NULL;
7229 }
7230 
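/* A route from the main table must not replace the host route installed for
 * the same prefix by the local table, since the local route is the one that
 * keeps trapping packets to the CPU. Any other replacement is allowed.
 */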
7231 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7232 {
7233 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7234 	struct mlxsw_sp_fib6_entry *fib6_replaced;
7235 	struct fib6_info *rt, *rt_replaced;
7236 
7237 	if (!fib_node->fib_entry)
7238 		return true;
7239 
7240 	fib6_replaced = container_of(fib_node->fib_entry,
7241 				     struct mlxsw_sp_fib6_entry,
7242 				     common);
7243 	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7244 	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7245 	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7246 	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7247 		return false;
7248 
7249 	return true;
7250 }
7251 
7252 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
7253 					struct fib6_info **rt_arr,
7254 					unsigned int nrt6)
7255 {
7256 	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
7257 	struct mlxsw_sp_fib_entry *replaced;
7258 	struct mlxsw_sp_fib_node *fib_node;
7259 	struct fib6_info *rt = rt_arr[0];
7260 	int err;
7261 
7262 	if (rt->fib6_src.plen)
7263 		return -EINVAL;
7264 
7265 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7266 		return 0;
7267 
7268 	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
7269 		return 0;
7270 
7271 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7272 					 &rt->fib6_dst.addr,
7273 					 sizeof(rt->fib6_dst.addr),
7274 					 rt->fib6_dst.plen,
7275 					 MLXSW_SP_L3_PROTO_IPV6);
7276 	if (IS_ERR(fib_node))
7277 		return PTR_ERR(fib_node);
7278 
7279 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
7280 						nrt6);
7281 	if (IS_ERR(fib6_entry)) {
7282 		err = PTR_ERR(fib6_entry);
7283 		goto err_fib6_entry_create;
7284 	}
7285 
7286 	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
7287 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7288 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7289 		return 0;
7290 	}
7291 
7292 	replaced = fib_node->fib_entry;
7293 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
7294 	if (err)
7295 		goto err_fib_node_entry_link;
7296 
7297 	/* Nothing to replace */
7298 	if (!replaced)
7299 		return 0;
7300 
7301 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
7302 	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
7303 				     common);
7304 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
7305 
7306 	return 0;
7307 
7308 err_fib_node_entry_link:
7309 	fib_node->fib_entry = replaced;
7310 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7311 err_fib6_entry_create:
7312 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7313 	return err;
7314 }
7315 
7316 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
7317 				       struct fib6_info **rt_arr,
7318 				       unsigned int nrt6)
7319 {
7320 	struct mlxsw_sp_fib6_entry *fib6_entry;
7321 	struct mlxsw_sp_fib_node *fib_node;
7322 	struct fib6_info *rt = rt_arr[0];
7323 	int err;
7324 
7325 	if (rt->fib6_src.plen)
7326 		return -EINVAL;
7327 
7328 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7329 		return 0;
7330 
7331 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7332 					 &rt->fib6_dst.addr,
7333 					 sizeof(rt->fib6_dst.addr),
7334 					 rt->fib6_dst.plen,
7335 					 MLXSW_SP_L3_PROTO_IPV6);
7336 	if (IS_ERR(fib_node))
7337 		return PTR_ERR(fib_node);
7338 
7339 	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
7340 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7341 		return -EINVAL;
7342 	}
7343 
7344 	fib6_entry = container_of(fib_node->fib_entry,
7345 				  struct mlxsw_sp_fib6_entry, common);
7346 	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
7347 					      nrt6);
7348 	if (err)
7349 		goto err_fib6_entry_nexthop_add;
7350 
7351 	return 0;
7352 
7353 err_fib6_entry_nexthop_add:
7354 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7355 	return err;
7356 }
7357 
7358 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
7359 				     struct fib6_info **rt_arr,
7360 				     unsigned int nrt6)
7361 {
7362 	struct mlxsw_sp_fib6_entry *fib6_entry;
7363 	struct mlxsw_sp_fib_node *fib_node;
7364 	struct fib6_info *rt = rt_arr[0];
7365 
7366 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7367 		return;
7368 
7369 	/* Multipath routes are first added to the FIB trie and only then
7370 	 * notified. If we vetoed the addition, we will get a delete
7371 	 * notification for a route we do not have. Therefore, do not warn if
7372 	 * the route was not found.
7373 	 */
7374 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
7375 	if (!fib6_entry)
7376 		return;
7377 
7378 	/* If not all the nexthops are deleted, then only reduce the nexthop
7379 	 * group.
7380 	 */
7381 	if (nrt6 != fib6_entry->nrt6) {
7382 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
7383 						nrt6);
7384 		return;
7385 	}
7386 
7387 	fib_node = fib6_entry->common.fib_node;
7388 
7389 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
7390 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7391 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7392 }
7393 
7394 static struct mlxsw_sp_mr_table *
7395 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7396 {
7397 	if (family == RTNL_FAMILY_IPMR)
7398 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7399 	else
7400 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7401 }
7402 
7403 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
7404 				     struct mfc_entry_notifier_info *men_info,
7405 				     bool replace)
7406 {
7407 	struct mlxsw_sp_mr_table *mrt;
7408 	struct mlxsw_sp_vr *vr;
7409 
7410 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
7411 	if (IS_ERR(vr))
7412 		return PTR_ERR(vr);
7413 
7414 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7415 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
7416 }
7417 
7418 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
7419 				      struct mfc_entry_notifier_info *men_info)
7420 {
7421 	struct mlxsw_sp_mr_table *mrt;
7422 	struct mlxsw_sp_vr *vr;
7423 
7424 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
7425 	if (WARN_ON(!vr))
7426 		return;
7427 
7428 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7429 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
7430 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7431 }
7432 
7433 static int
7434 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
7435 			      struct vif_entry_notifier_info *ven_info)
7436 {
7437 	struct mlxsw_sp_mr_table *mrt;
7438 	struct mlxsw_sp_rif *rif;
7439 	struct mlxsw_sp_vr *vr;
7440 
7441 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
7442 	if (IS_ERR(vr))
7443 		return PTR_ERR(vr);
7444 
7445 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7446 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
7447 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
7448 				   ven_info->vif_index,
7449 				   ven_info->vif_flags, rif);
7450 }
7451 
7452 static void
7453 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
7454 			      struct vif_entry_notifier_info *ven_info)
7455 {
7456 	struct mlxsw_sp_mr_table *mrt;
7457 	struct mlxsw_sp_vr *vr;
7458 
7459 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
7460 	if (WARN_ON(!vr))
7461 		return;
7462 
7463 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7464 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
7465 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7466 }
7467 
7468 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
7469 				     struct mlxsw_sp_fib_node *fib_node)
7470 {
7471 	struct mlxsw_sp_fib4_entry *fib4_entry;
7472 
7473 	fib4_entry = container_of(fib_node->fib_entry,
7474 				  struct mlxsw_sp_fib4_entry, common);
7475 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7476 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
7477 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7478 }
7479 
7480 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
7481 				     struct mlxsw_sp_fib_node *fib_node)
7482 {
7483 	struct mlxsw_sp_fib6_entry *fib6_entry;
7484 
7485 	fib6_entry = container_of(fib_node->fib_entry,
7486 				  struct mlxsw_sp_fib6_entry, common);
7487 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7488 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7489 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7490 }
7491 
7492 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
7493 				    struct mlxsw_sp_fib_node *fib_node)
7494 {
7495 	switch (fib_node->fib->proto) {
7496 	case MLXSW_SP_L3_PROTO_IPV4:
7497 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
7498 		break;
7499 	case MLXSW_SP_L3_PROTO_IPV6:
7500 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
7501 		break;
7502 	}
7503 }
7504 
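/* Flushing the last node may drop the final reference on the FIB and free it
 * together with the node list head, so detect the last iteration up front and
 * break out instead of letting list_for_each_entry_safe() dereference freed
 * memory.
 */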
7505 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
7506 				  struct mlxsw_sp_vr *vr,
7507 				  enum mlxsw_sp_l3proto proto)
7508 {
7509 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
7510 	struct mlxsw_sp_fib_node *fib_node, *tmp;
7511 
7512 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
7513 		bool do_break = &tmp->list == &fib->node_list;
7514 
7515 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
7516 		if (do_break)
7517 			break;
7518 	}
7519 }
7520 
7521 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
7522 {
7523 	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
7524 	int i, j;
7525 
7526 	for (i = 0; i < max_vrs; i++) {
7527 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7528 
7529 		if (!mlxsw_sp_vr_is_used(vr))
7530 			continue;
7531 
7532 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
7533 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
7534 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
7535 
7536 		/* If the virtual router was only used for IPv4, then after the
7537 		 * flush above it is no longer in use.
7538 		 */
7539 		if (!mlxsw_sp_vr_is_used(vr))
7540 			continue;
7541 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
7542 	}
7543 }
7544 
7545 struct mlxsw_sp_fib6_event_work {
7546 	struct fib6_info **rt_arr;
7547 	unsigned int nrt6;
7548 };
7549 
7550 struct mlxsw_sp_fib_event_work {
7551 	struct work_struct work;
7552 	netdevice_tracker dev_tracker;
7553 	union {
7554 		struct mlxsw_sp_fib6_event_work fib6_work;
7555 		struct fib_entry_notifier_info fen_info;
7556 		struct fib_rule_notifier_info fr_info;
7557 		struct fib_nh_notifier_info fnh_info;
7558 		struct mfc_entry_notifier_info men_info;
7559 		struct vif_entry_notifier_info ven_info;
7560 	};
7561 	struct mlxsw_sp *mlxsw_sp;
7562 	unsigned long event;
7563 };
7564 
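/* An IPv6 multipath route is notified as the first fib6_info plus a count of
 * siblings. Gather the route and all of its siblings into a single array,
 * taking a reference on each entry, so that the deferred work item can
 * process the whole route as one unit.
 */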
7565 static int
7566 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
7567 			       struct fib6_entry_notifier_info *fen6_info)
7568 {
7569 	struct fib6_info *rt = fen6_info->rt;
7570 	struct fib6_info **rt_arr;
7571 	struct fib6_info *iter;
7572 	unsigned int nrt6;
7573 	int i = 0;
7574 
7575 	nrt6 = fen6_info->nsiblings + 1;
7576 
7577 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7578 	if (!rt_arr)
7579 		return -ENOMEM;
7580 
7581 	fib6_work->rt_arr = rt_arr;
7582 	fib6_work->nrt6 = nrt6;
7583 
7584 	rt_arr[0] = rt;
7585 	fib6_info_hold(rt);
7586 
7587 	if (!fen6_info->nsiblings)
7588 		return 0;
7589 
7590 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7591 		if (i == fen6_info->nsiblings)
7592 			break;
7593 
7594 		rt_arr[i + 1] = iter;
7595 		fib6_info_hold(iter);
7596 		i++;
7597 	}
7598 	WARN_ON_ONCE(i != fen6_info->nsiblings);
7599 
7600 	return 0;
7601 }
7602 
7603 static void
7604 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
7605 {
7606 	int i;
7607 
7608 	for (i = 0; i < fib6_work->nrt6; i++)
7609 		mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
7610 	kfree(fib6_work->rt_arr);
7611 }
7612 
7613 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
7614 {
7615 	struct mlxsw_sp_fib_event_work *fib_work =
7616 		container_of(work, struct mlxsw_sp_fib_event_work, work);
7617 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7618 	int err;
7619 
7620 	mutex_lock(&mlxsw_sp->router->lock);
7621 	mlxsw_sp_span_respin(mlxsw_sp);
7622 
7623 	switch (fib_work->event) {
7624 	case FIB_EVENT_ENTRY_REPLACE:
7625 		err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
7626 						   &fib_work->fen_info);
7627 		if (err) {
7628 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7629 			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7630 							      &fib_work->fen_info);
7631 		}
7632 		fib_info_put(fib_work->fen_info.fi);
7633 		break;
7634 	case FIB_EVENT_ENTRY_DEL:
7635 		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
7636 		fib_info_put(fib_work->fen_info.fi);
7637 		break;
7638 	case FIB_EVENT_NH_ADD:
7639 	case FIB_EVENT_NH_DEL:
7640 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
7641 					fib_work->fnh_info.fib_nh);
7642 		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
7643 		break;
7644 	}
7645 	mutex_unlock(&mlxsw_sp->router->lock);
7646 	kfree(fib_work);
7647 }
7648 
7649 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
7650 {
7651 	struct mlxsw_sp_fib_event_work *fib_work =
7652 		    container_of(work, struct mlxsw_sp_fib_event_work, work);
7653 	struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
7654 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7655 	int err;
7656 
7657 	mutex_lock(&mlxsw_sp->router->lock);
7658 	mlxsw_sp_span_respin(mlxsw_sp);
7659 
7660 	switch (fib_work->event) {
7661 	case FIB_EVENT_ENTRY_REPLACE:
7662 		err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
7663 						   fib6_work->rt_arr,
7664 						   fib6_work->nrt6);
7665 		if (err) {
7666 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7667 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7668 							      fib6_work->rt_arr,
7669 							      fib6_work->nrt6);
7670 		}
7671 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7672 		break;
7673 	case FIB_EVENT_ENTRY_APPEND:
7674 		err = mlxsw_sp_router_fib6_append(mlxsw_sp,
7675 						  fib6_work->rt_arr,
7676 						  fib6_work->nrt6);
7677 		if (err) {
7678 			dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
7679 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7680 							      fib6_work->rt_arr,
7681 							      fib6_work->nrt6);
7682 		}
7683 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7684 		break;
7685 	case FIB_EVENT_ENTRY_DEL:
7686 		mlxsw_sp_router_fib6_del(mlxsw_sp,
7687 					 fib6_work->rt_arr,
7688 					 fib6_work->nrt6);
7689 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7690 		break;
7691 	}
7692 	mutex_unlock(&mlxsw_sp->router->lock);
7693 	kfree(fib_work);
7694 }
7695 
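/* Unlike the unicast FIB works above, multicast route and VIF updates are
 * applied under RTNL in addition to the router lock.
 */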
7696 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
7697 {
7698 	struct mlxsw_sp_fib_event_work *fib_work =
7699 		container_of(work, struct mlxsw_sp_fib_event_work, work);
7700 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7701 	bool replace;
7702 	int err;
7703 
7704 	rtnl_lock();
7705 	mutex_lock(&mlxsw_sp->router->lock);
7706 	switch (fib_work->event) {
7707 	case FIB_EVENT_ENTRY_REPLACE:
7708 	case FIB_EVENT_ENTRY_ADD:
7709 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
7710 
7711 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
7712 						replace);
7713 		if (err)
7714 			dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
7715 		mr_cache_put(fib_work->men_info.mfc);
7716 		break;
7717 	case FIB_EVENT_ENTRY_DEL:
7718 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
7719 		mr_cache_put(fib_work->men_info.mfc);
7720 		break;
7721 	case FIB_EVENT_VIF_ADD:
7722 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7723 						    &fib_work->ven_info);
7724 		if (err)
7725 			dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
7726 		netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
7727 		break;
7728 	case FIB_EVENT_VIF_DEL:
7729 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
7730 					      &fib_work->ven_info);
7731 		netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
7732 		break;
7733 	}
7734 	mutex_unlock(&mlxsw_sp->router->lock);
7735 	rtnl_unlock();
7736 	kfree(fib_work);
7737 }
7738 
7739 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
7740 				       struct fib_notifier_info *info)
7741 {
7742 	struct fib_entry_notifier_info *fen_info;
7743 	struct fib_nh_notifier_info *fnh_info;
7744 
7745 	switch (fib_work->event) {
7746 	case FIB_EVENT_ENTRY_REPLACE:
7747 	case FIB_EVENT_ENTRY_DEL:
7748 		fen_info = container_of(info, struct fib_entry_notifier_info,
7749 					info);
7750 		fib_work->fen_info = *fen_info;
7751 		/* Take a reference on the fib_info to prevent it from being
7752 		 * freed while work is queued. Release it afterwards.
7753 		 */
7754 		fib_info_hold(fib_work->fen_info.fi);
7755 		break;
7756 	case FIB_EVENT_NH_ADD:
7757 	case FIB_EVENT_NH_DEL:
7758 		fnh_info = container_of(info, struct fib_nh_notifier_info,
7759 					info);
7760 		fib_work->fnh_info = *fnh_info;
7761 		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
7762 		break;
7763 	}
7764 }
7765 
7766 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
7767 				      struct fib_notifier_info *info)
7768 {
7769 	struct fib6_entry_notifier_info *fen6_info;
7770 	int err;
7771 
7772 	switch (fib_work->event) {
7773 	case FIB_EVENT_ENTRY_REPLACE:
7774 	case FIB_EVENT_ENTRY_APPEND:
7775 	case FIB_EVENT_ENTRY_DEL:
7776 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
7777 					 info);
7778 		err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
7779 						     fen6_info);
7780 		if (err)
7781 			return err;
7782 		break;
7783 	}
7784 
7785 	return 0;
7786 }
7787 
7788 static void
7789 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
7790 			    struct fib_notifier_info *info)
7791 {
7792 	switch (fib_work->event) {
7793 	case FIB_EVENT_ENTRY_REPLACE:
7794 	case FIB_EVENT_ENTRY_ADD:
7795 	case FIB_EVENT_ENTRY_DEL:
7796 		memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
7797 		mr_cache_hold(fib_work->men_info.mfc);
7798 		break;
7799 	case FIB_EVENT_VIF_ADD:
7800 	case FIB_EVENT_VIF_DEL:
7801 		memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
7802 		netdev_hold(fib_work->ven_info.dev, &fib_work->dev_tracker,
7803 			    GFP_ATOMIC);
7804 		break;
7805 	}
7806 }
7807 
7808 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7809 					  struct fib_notifier_info *info,
7810 					  struct mlxsw_sp *mlxsw_sp)
7811 {
7812 	struct netlink_ext_ack *extack = info->extack;
7813 	struct fib_rule_notifier_info *fr_info;
7814 	struct fib_rule *rule;
7815 	int err = 0;
7816 
7817 	/* Nothing to do on rule deletion at the moment. */
7818 	if (event == FIB_EVENT_RULE_DEL)
7819 		return 0;
7820 
7821 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
7822 	rule = fr_info->rule;
7823 
7824 	/* Rule only affects locally generated traffic */
7825 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7826 		return 0;
7827 
7828 	switch (info->family) {
7829 	case AF_INET:
7830 		if (!fib4_rule_default(rule) && !rule->l3mdev)
7831 			err = -EOPNOTSUPP;
7832 		break;
7833 	case AF_INET6:
7834 		if (!fib6_rule_default(rule) && !rule->l3mdev)
7835 			err = -EOPNOTSUPP;
7836 		break;
7837 	case RTNL_FAMILY_IPMR:
7838 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
7839 			err = -EOPNOTSUPP;
7840 		break;
7841 	case RTNL_FAMILY_IP6MR:
7842 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7843 			err = -EOPNOTSUPP;
7844 		break;
7845 	}
7846 
7847 	if (err < 0)
7848 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7849 
7850 	return err;
7851 }
7852 
7853 /* Called with rcu_read_lock() */
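/* The notifier runs in atomic context, so copy the event payload, take the
 * needed references and defer the actual processing to a work item that can
 * take the router mutex.
 */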
7854 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
7855 				     unsigned long event, void *ptr)
7856 {
7857 	struct mlxsw_sp_fib_event_work *fib_work;
7858 	struct fib_notifier_info *info = ptr;
7859 	struct mlxsw_sp_router *router;
7860 	int err;
7861 
7862 	if ((info->family != AF_INET && info->family != AF_INET6 &&
7863 	     info->family != RTNL_FAMILY_IPMR &&
7864 	     info->family != RTNL_FAMILY_IP6MR))
7865 		return NOTIFY_DONE;
7866 
7867 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7868 
7869 	switch (event) {
7870 	case FIB_EVENT_RULE_ADD:
7871 	case FIB_EVENT_RULE_DEL:
7872 		err = mlxsw_sp_router_fib_rule_event(event, info,
7873 						     router->mlxsw_sp);
7874 		return notifier_from_errno(err);
7875 	case FIB_EVENT_ENTRY_ADD:
7876 	case FIB_EVENT_ENTRY_REPLACE:
7877 	case FIB_EVENT_ENTRY_APPEND:
7878 		if (info->family == AF_INET) {
7879 			struct fib_entry_notifier_info *fen_info = ptr;
7880 
7881 			if (fen_info->fi->fib_nh_is_v6) {
7882 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
7883 				return notifier_from_errno(-EINVAL);
7884 			}
7885 		}
7886 		break;
7887 	}
7888 
7889 	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
7890 	if (!fib_work)
7891 		return NOTIFY_BAD;
7892 
7893 	fib_work->mlxsw_sp = router->mlxsw_sp;
7894 	fib_work->event = event;
7895 
7896 	switch (info->family) {
7897 	case AF_INET:
7898 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
7899 		mlxsw_sp_router_fib4_event(fib_work, info);
7900 		break;
7901 	case AF_INET6:
7902 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
7903 		err = mlxsw_sp_router_fib6_event(fib_work, info);
7904 		if (err)
7905 			goto err_fib_event;
7906 		break;
7907 	case RTNL_FAMILY_IP6MR:
7908 	case RTNL_FAMILY_IPMR:
7909 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
7910 		mlxsw_sp_router_fibmr_event(fib_work, info);
7911 		break;
7912 	}
7913 
7914 	mlxsw_core_schedule_work(&fib_work->work);
7915 
7916 	return NOTIFY_DONE;
7917 
7918 err_fib_event:
7919 	kfree(fib_work);
7920 	return NOTIFY_BAD;
7921 }
7922 
7923 static struct mlxsw_sp_rif *
7924 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
7925 			 const struct net_device *dev)
7926 {
7927 	int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
7928 	int i;
7929 
7930 	for (i = 0; i < max_rifs; i++)
7931 		if (mlxsw_sp->router->rifs[i] &&
7932 		    mlxsw_sp_rif_dev_is(mlxsw_sp->router->rifs[i], dev))
7933 			return mlxsw_sp->router->rifs[i];
7934 
7935 	return NULL;
7936 }
7937 
7938 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
7939 			 const struct net_device *dev)
7940 {
7941 	struct mlxsw_sp_rif *rif;
7942 
7943 	mutex_lock(&mlxsw_sp->router->lock);
7944 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7945 	mutex_unlock(&mlxsw_sp->router->lock);
7946 
7947 	return rif;
7948 }
7949 
7950 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
7951 {
7952 	struct mlxsw_sp_rif *rif;
7953 	u16 vid = 0;
7954 
7955 	mutex_lock(&mlxsw_sp->router->lock);
7956 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7957 	if (!rif)
7958 		goto out;
7959 
7960 	/* We only return the VID for VLAN RIFs. Otherwise we return an
7961 	 * invalid value (0).
7962 	 */
7963 	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
7964 		goto out;
7965 
7966 	vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7967 
7968 out:
7969 	mutex_unlock(&mlxsw_sp->router->lock);
7970 	return vid;
7971 }
7972 
7973 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
7974 {
7975 	char ritr_pl[MLXSW_REG_RITR_LEN];
7976 	int err;
7977 
7978 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
7979 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7980 	if (err)
7981 		return err;
7982 
7983 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
7984 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7985 }
7986 
7987 static int mlxsw_sp_router_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
7988 					 struct mlxsw_sp_rif *rif)
7989 {
7990 	int err;
7991 
7992 	err = mlxsw_sp_neigh_rif_made_sync(mlxsw_sp, rif);
7993 	if (err)
7994 		return err;
7995 
7996 	err = mlxsw_sp_nexthop_rif_made_sync(mlxsw_sp, rif);
7997 	if (err)
7998 		goto err_nexthop;
7999 
8000 	return 0;
8001 
8002 err_nexthop:
8003 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
8004 	return err;
8005 }
8006 
8007 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
8008 					  struct mlxsw_sp_rif *rif)
8009 {
8010 	/* Signal to nexthop cleanup that the RIF is going away. */
8011 	rif->crif->rif = NULL;
8012 
8013 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
8014 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
8015 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
8016 }
8017 
8018 static bool __mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
8019 {
8020 	struct inet6_dev *inet6_dev;
8021 	struct in_device *idev;
8022 
8023 	idev = __in_dev_get_rcu(dev);
8024 	if (idev && idev->ifa_list)
8025 		return false;
8026 
8027 	inet6_dev = __in6_dev_get(dev);
8028 	if (inet6_dev && !list_empty(&inet6_dev->addr_list))
8029 		return false;
8030 
8031 	return true;
8032 }
8033 
8034 static bool mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
8035 {
8036 	bool addr_list_empty;
8037 
8038 	rcu_read_lock();
8039 	addr_list_empty = __mlxsw_sp_dev_addr_list_empty(dev);
8040 	rcu_read_unlock();
8041 
8042 	return addr_list_empty;
8043 }
8044 
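/* Decide whether an address event should change the RIF configuration: on
 * NETDEV_UP a RIF is only created if the netdev does not already have one,
 * and on NETDEV_DOWN a RIF is only destroyed once the netdev has no remaining
 * IPv4 / IPv6 addresses.
 */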
8045 static bool
8046 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
8047 			   unsigned long event)
8048 {
8049 	bool addr_list_empty;
8050 
8051 	switch (event) {
8052 	case NETDEV_UP:
8053 		return rif == NULL;
8054 	case NETDEV_DOWN:
8055 		addr_list_empty = mlxsw_sp_dev_addr_list_empty(dev);
8056 
8057 		/* macvlans do not have a RIF, but rather piggyback on the
8058 		 * RIF of their lower device.
8059 		 */
8060 		if (netif_is_macvlan(dev) && addr_list_empty)
8061 			return true;
8062 
8063 		if (rif && addr_list_empty &&
8064 		    !netif_is_l3_slave(mlxsw_sp_rif_dev(rif)))
8065 			return true;
8066 		/* It is possible we already removed the RIF ourselves
8067 		 * if it was assigned to a netdev that is now a bridge
8068 		 * or LAG slave.
8069 		 */
8070 		return false;
8071 	}
8072 
8073 	return false;
8074 }
8075 
8076 static enum mlxsw_sp_rif_type
8077 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
8078 		      const struct net_device *dev)
8079 {
8080 	enum mlxsw_sp_fid_type type;
8081 
8082 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
8083 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
8084 
8085 	/* Otherwise the RIF type is derived from the type of the underlying FID. */
8086 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
8087 		type = MLXSW_SP_FID_TYPE_8021Q;
8088 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
8089 		type = MLXSW_SP_FID_TYPE_8021Q;
8090 	else if (netif_is_bridge_master(dev))
8091 		type = MLXSW_SP_FID_TYPE_8021D;
8092 	else
8093 		type = MLXSW_SP_FID_TYPE_RFID;
8094 
8095 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
8096 }
8097 
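/* RIF indexes are allocated from a gen_pool whose base address is shifted by
 * MLXSW_SP_ROUTER_GENALLOC_OFFSET, so that a return value of 0 from
 * gen_pool_alloc() unambiguously means failure. Subtract the offset to
 * recover the actual index.
 */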
8098 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index,
8099 				    u8 rif_entries)
8100 {
8101 	*p_rif_index = gen_pool_alloc(mlxsw_sp->router->rifs_table,
8102 				      rif_entries);
8103 	if (*p_rif_index == 0)
8104 		return -ENOBUFS;
8105 	*p_rif_index -= MLXSW_SP_ROUTER_GENALLOC_OFFSET;
8106 
8107 	/* RIF indexes must be aligned to the allocation size. */
8108 	WARN_ON_ONCE(*p_rif_index % rif_entries);
8109 
8110 	return 0;
8111 }
8112 
8113 static void mlxsw_sp_rif_index_free(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8114 				    u8 rif_entries)
8115 {
8116 	gen_pool_free(mlxsw_sp->router->rifs_table,
8117 		      MLXSW_SP_ROUTER_GENALLOC_OFFSET + rif_index, rif_entries);
8118 }
8119 
8120 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
8121 					       u16 vr_id,
8122 					       struct mlxsw_sp_crif *crif)
8123 {
8124 	struct net_device *l3_dev = crif ? crif->key.dev : NULL;
8125 	struct mlxsw_sp_rif *rif;
8126 
8127 	rif = kzalloc(rif_size, GFP_KERNEL);
8128 	if (!rif)
8129 		return NULL;
8130 
8131 	INIT_LIST_HEAD(&rif->neigh_list);
8132 	if (l3_dev) {
8133 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
8134 		rif->mtu = l3_dev->mtu;
8135 	}
8136 	rif->vr_id = vr_id;
8137 	rif->rif_index = rif_index;
8138 	if (crif) {
8139 		rif->crif = crif;
8140 		crif->rif = rif;
8141 	}
8142 
8143 	return rif;
8144 }
8145 
8146 static void mlxsw_sp_rif_free(struct mlxsw_sp_rif *rif)
8147 {
8148 	WARN_ON(!list_empty(&rif->neigh_list));
8149 
8150 	if (rif->crif)
8151 		rif->crif->rif = NULL;
8152 	kfree(rif);
8153 }
8154 
8155 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
8156 					   u16 rif_index)
8157 {
8158 	return mlxsw_sp->router->rifs[rif_index];
8159 }
8160 
8161 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
8162 {
8163 	return rif->rif_index;
8164 }
8165 
8166 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8167 {
8168 	return lb_rif->common.rif_index;
8169 }
8170 
8171 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8172 {
8173 	struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
8174 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
8175 	struct mlxsw_sp_vr *ul_vr;
8176 
8177 	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
8178 	if (WARN_ON(IS_ERR(ul_vr)))
8179 		return 0;
8180 
8181 	return ul_vr->id;
8182 }
8183 
8184 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8185 {
8186 	return lb_rif->ul_rif_id;
8187 }
8188 
8189 static bool
8190 mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
8191 {
8192 	return mlxsw_sp_rif_counter_valid_get(rif,
8193 					      MLXSW_SP_RIF_COUNTER_EGRESS) &&
8194 	       mlxsw_sp_rif_counter_valid_get(rif,
8195 					      MLXSW_SP_RIF_COUNTER_INGRESS);
8196 }
8197 
8198 static int
8199 mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
8200 {
8201 	int err;
8202 
8203 	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8204 	if (err)
8205 		return err;
8206 
8207 	/* Clear stale data. */
8208 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8209 					       MLXSW_SP_RIF_COUNTER_INGRESS,
8210 					       NULL);
8211 	if (err)
8212 		goto err_clear_ingress;
8213 
8214 	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8215 	if (err)
8216 		goto err_alloc_egress;
8217 
8218 	/* Clear stale data. */
8219 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8220 					       MLXSW_SP_RIF_COUNTER_EGRESS,
8221 					       NULL);
8222 	if (err)
8223 		goto err_clear_egress;
8224 
8225 	return 0;
8226 
8227 err_clear_egress:
8228 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8229 err_alloc_egress:
8230 err_clear_ingress:
8231 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8232 	return err;
8233 }
8234 
8235 static void
8236 mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
8237 {
8238 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8239 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8240 }
8241 
8242 static void
8243 mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
8244 					  struct netdev_notifier_offload_xstats_info *info)
8245 {
8246 	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8247 		return;
8248 	netdev_offload_xstats_report_used(info->report_used);
8249 }
8250 
8251 static int
8252 mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
8253 				    struct rtnl_hw_stats64 *p_stats)
8254 {
8255 	struct mlxsw_sp_rif_counter_set_basic ingress;
8256 	struct mlxsw_sp_rif_counter_set_basic egress;
8257 	int err;
8258 
8259 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8260 					       MLXSW_SP_RIF_COUNTER_INGRESS,
8261 					       &ingress);
8262 	if (err)
8263 		return err;
8264 
8265 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8266 					       MLXSW_SP_RIF_COUNTER_EGRESS,
8267 					       &egress);
8268 	if (err)
8269 		return err;
8270 
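/* Sum the "good" unicast, multicast and broadcast counters of a set. */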
8271 #define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX)		\
8272 		((SET.good_unicast_ ## SFX) +		\
8273 		 (SET.good_multicast_ ## SFX) +		\
8274 		 (SET.good_broadcast_ ## SFX))
8275 
8276 	p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
8277 	p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
8278 	p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
8279 	p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
8280 	p_stats->rx_errors = ingress.error_packets;
8281 	p_stats->tx_errors = egress.error_packets;
8282 	p_stats->rx_dropped = ingress.discard_packets;
8283 	p_stats->tx_dropped = egress.discard_packets;
8284 	p_stats->multicast = ingress.good_multicast_packets +
8285 			     ingress.good_broadcast_packets;
8286 
8287 #undef MLXSW_SP_ROUTER_ALL_GOOD
8288 
8289 	return 0;
8290 }
8291 
8292 static int
8293 mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
8294 					   struct netdev_notifier_offload_xstats_info *info)
8295 {
8296 	struct rtnl_hw_stats64 stats = {};
8297 	int err;
8298 
8299 	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8300 		return 0;
8301 
8302 	err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
8303 	if (err)
8304 		return err;
8305 
8306 	netdev_offload_xstats_report_delta(info->report_delta, &stats);
8307 	return 0;
8308 }
8309 
8310 struct mlxsw_sp_router_hwstats_notify_work {
8311 	struct work_struct work;
8312 	struct net_device *dev;
8313 	netdevice_tracker dev_tracker;
8314 };
8315 
8316 static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
8317 {
8318 	struct mlxsw_sp_router_hwstats_notify_work *hws_work =
8319 		container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
8320 			     work);
8321 
8322 	rtnl_lock();
8323 	rtnl_offload_xstats_notify(hws_work->dev);
8324 	rtnl_unlock();
8325 	netdev_put(hws_work->dev, &hws_work->dev_tracker);
8326 	kfree(hws_work);
8327 }
8328 
8329 static void
8330 mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
8331 {
8332 	struct mlxsw_sp_router_hwstats_notify_work *hws_work;
8333 
8334 	/* To collect the notification payload, the core ends up sending another
8335 	 * notifier block message, which would deadlock on the attempt to
8336 	 * acquire the router lock again. Just postpone the notification until
8337 	 * later.
8338 	 */
8339 
8340 	hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
8341 	if (!hws_work)
8342 		return;
8343 
8344 	INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
8345 	netdev_hold(dev, &hws_work->dev_tracker, GFP_KERNEL);
8346 	hws_work->dev = dev;
8347 	mlxsw_core_schedule_work(&hws_work->work);
8348 }
8349 
8350 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8351 {
8352 	return mlxsw_sp_rif_dev(rif)->ifindex;
8353 }
8354 
8355 bool mlxsw_sp_rif_has_dev(const struct mlxsw_sp_rif *rif)
8356 {
8357 	return !!mlxsw_sp_rif_dev(rif);
8358 }
8359 
8360 bool mlxsw_sp_rif_dev_is(const struct mlxsw_sp_rif *rif,
8361 			 const struct net_device *dev)
8362 {
8363 	return mlxsw_sp_rif_dev(rif) == dev;
8364 }
8365 
8366 static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
8367 {
8368 	struct rtnl_hw_stats64 stats = {};
8369 
8370 	if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
8371 		netdev_offload_xstats_push_delta(mlxsw_sp_rif_dev(rif),
8372 						 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8373 						 &stats);
8374 }
8375 
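/* Create a RIF for a netdev: bind it to the netdev's virtual router, allocate
 * an aligned RIF index, configure the hardware through the type-specific ops,
 * connect the RIF to the VR's multicast tables and replay the neighbours and
 * nexthops that already reference the netdev.
 */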
8376 static struct mlxsw_sp_rif *
8377 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8378 		    const struct mlxsw_sp_rif_params *params,
8379 		    struct netlink_ext_ack *extack)
8380 {
8381 	u8 rif_entries = params->double_entry ? 2 : 1;
8382 	u32 tb_id = l3mdev_fib_table(params->dev);
8383 	const struct mlxsw_sp_rif_ops *ops;
8384 	struct mlxsw_sp_fid *fid = NULL;
8385 	enum mlxsw_sp_rif_type type;
8386 	struct mlxsw_sp_crif *crif;
8387 	struct mlxsw_sp_rif *rif;
8388 	struct mlxsw_sp_vr *vr;
8389 	u16 rif_index;
8390 	int i, err;
8391 
8392 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8393 	ops = mlxsw_sp->router->rif_ops_arr[type];
8394 
8395 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8396 	if (IS_ERR(vr))
8397 		return ERR_CAST(vr);
8398 	vr->rif_count++;
8399 
8400 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
8401 	if (err) {
8402 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8403 		goto err_rif_index_alloc;
8404 	}
8405 
8406 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, params->dev);
8407 	if (WARN_ON(!crif)) {
8408 		err = -ENOENT;
8409 		goto err_crif_lookup;
8410 	}
8411 
8412 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, crif);
8413 	if (!rif) {
8414 		err = -ENOMEM;
8415 		goto err_rif_alloc;
8416 	}
8417 	netdev_hold(params->dev, &rif->dev_tracker, GFP_KERNEL);
8418 	mlxsw_sp->router->rifs[rif_index] = rif;
8419 	rif->mlxsw_sp = mlxsw_sp;
8420 	rif->ops = ops;
8421 	rif->rif_entries = rif_entries;
8422 
8423 	if (ops->setup)
8424 		ops->setup(rif, params);
8425 
8426 	if (ops->fid_get) {
8427 		fid = ops->fid_get(rif, params, extack);
8428 		if (IS_ERR(fid)) {
8429 			err = PTR_ERR(fid);
8430 			goto err_fid_get;
8431 		}
8432 		rif->fid = fid;
8433 	}
8434 
8435 	err = ops->configure(rif, extack);
8436 	if (err)
8437 		goto err_configure;
8438 
8439 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8440 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8441 		if (err)
8442 			goto err_mr_rif_add;
8443 	}
8444 
8445 	err = mlxsw_sp_router_rif_made_sync(mlxsw_sp, rif);
8446 	if (err)
8447 		goto err_rif_made_sync;
8448 
8449 	if (netdev_offload_xstats_enabled(params->dev,
8450 					  NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8451 		err = mlxsw_sp_router_port_l3_stats_enable(rif);
8452 		if (err)
8453 			goto err_stats_enable;
8454 		mlxsw_sp_router_hwstats_notify_schedule(params->dev);
8455 	} else {
8456 		mlxsw_sp_rif_counters_alloc(rif);
8457 	}
8458 
8459 	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
8460 	return rif;
8461 
8462 err_stats_enable:
8463 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8464 err_rif_made_sync:
8465 err_mr_rif_add:
8466 	for (i--; i >= 0; i--)
8467 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8468 	ops->deconfigure(rif);
8469 err_configure:
8470 	if (fid)
8471 		mlxsw_sp_fid_put(fid);
8472 err_fid_get:
8473 	mlxsw_sp->router->rifs[rif_index] = NULL;
8474 	netdev_put(params->dev, &rif->dev_tracker);
8475 	mlxsw_sp_rif_free(rif);
8476 err_rif_alloc:
8477 err_crif_lookup:
8478 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8479 err_rif_index_alloc:
8480 	vr->rif_count--;
8481 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8482 	return ERR_PTR(err);
8483 }
8484 
8485 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8486 {
8487 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
8488 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
8489 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8490 	struct mlxsw_sp_crif *crif = rif->crif;
8491 	struct mlxsw_sp_fid *fid = rif->fid;
8492 	u8 rif_entries = rif->rif_entries;
8493 	u16 rif_index = rif->rif_index;
8494 	struct mlxsw_sp_vr *vr;
8495 	int i;
8496 
8497 	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
8498 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8499 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
8500 
8501 	if (netdev_offload_xstats_enabled(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8502 		mlxsw_sp_rif_push_l3_stats(rif);
8503 		mlxsw_sp_router_port_l3_stats_disable(rif);
8504 		mlxsw_sp_router_hwstats_notify_schedule(dev);
8505 	} else {
8506 		mlxsw_sp_rif_counters_free(rif);
8507 	}
8508 
8509 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8510 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8511 	ops->deconfigure(rif);
8512 	if (fid)
8513 		/* Loopback RIFs are not associated with a FID. */
8514 		mlxsw_sp_fid_put(fid);
8515 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8516 	netdev_put(dev, &rif->dev_tracker);
8517 	mlxsw_sp_rif_free(rif);
8518 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8519 	vr->rif_count--;
8520 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8521 
8522 	if (crif->can_destroy)
8523 		mlxsw_sp_crif_free(crif);
8524 }
8525 
8526 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8527 				 struct net_device *dev)
8528 {
8529 	struct mlxsw_sp_rif *rif;
8530 
8531 	mutex_lock(&mlxsw_sp->router->lock);
8532 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8533 	if (!rif)
8534 		goto out;
8535 	mlxsw_sp_rif_destroy(rif);
8536 out:
8537 	mutex_unlock(&mlxsw_sp->router->lock);
8538 }
8539 
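/* If the bridge has an 802.1Q upper with the given VID, destroy that upper's
 * RIF, if it has one.
 */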
8540 static void mlxsw_sp_rif_destroy_vlan_upper(struct mlxsw_sp *mlxsw_sp,
8541 					    struct net_device *br_dev,
8542 					    u16 vid)
8543 {
8544 	struct net_device *upper_dev;
8545 	struct mlxsw_sp_crif *crif;
8546 
8547 	rcu_read_lock();
8548 	upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q), vid);
8549 	rcu_read_unlock();
8550 
8551 	if (!upper_dev)
8552 		return;
8553 
8554 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, upper_dev);
8555 	if (!crif || !crif->rif)
8556 		return;
8557 
8558 	mlxsw_sp_rif_destroy(crif->rif);
8559 }
8560 
8561 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8562 					  struct net_device *l3_dev,
8563 					  int lower_pvid,
8564 					  unsigned long event,
8565 					  struct netlink_ext_ack *extack);
8566 
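/* Handle a potential PVID change on a VLAN-aware bridge: create a RIF for the
 * new PVID (migrating the configuration from the old PVID's RIF when one
 * exists), destroy the RIF when the PVID is removed, and replay NETDEV_UP on
 * the VLAN upper of the old PVID so that it can regain a RIF of its own.
 */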
8567 int mlxsw_sp_router_bridge_vlan_add(struct mlxsw_sp *mlxsw_sp,
8568 				    struct net_device *br_dev,
8569 				    u16 new_vid, bool is_pvid,
8570 				    struct netlink_ext_ack *extack)
8571 {
8572 	struct mlxsw_sp_rif *old_rif;
8573 	struct mlxsw_sp_rif *new_rif;
8574 	struct net_device *upper_dev;
8575 	u16 old_pvid = 0;
8576 	u16 new_pvid;
8577 	int err = 0;
8578 
8579 	mutex_lock(&mlxsw_sp->router->lock);
8580 	old_rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
8581 	if (old_rif) {
8582 		/* If the RIF on the bridge is not a VLAN RIF, we shouldn't have
8583 		 * gotten a PVID notification.
8584 		 */
8585 		if (WARN_ON(old_rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN))
8586 			old_rif = NULL;
8587 		else
8588 			old_pvid = mlxsw_sp_fid_8021q_vid(old_rif->fid);
8589 	}
8590 
8591 	if (is_pvid)
8592 		new_pvid = new_vid;
8593 	else if (old_pvid == new_vid)
8594 		new_pvid = 0;
8595 	else
8596 		goto out;
8597 
8598 	if (old_pvid == new_pvid)
8599 		goto out;
8600 
8601 	if (new_pvid) {
8602 		struct mlxsw_sp_rif_params params = {
8603 			.dev = br_dev,
8604 			.vid = new_pvid,
8605 		};
8606 
8607 		/* If there is a VLAN upper with the same VID as the new PVID,
8608 		 * destroy its RIF, if it has one.
8609 		 */
8610 		mlxsw_sp_rif_destroy_vlan_upper(mlxsw_sp, br_dev, new_pvid);
8611 
8612 		if (mlxsw_sp_dev_addr_list_empty(br_dev))
8613 			goto out;
8614 		new_rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8615 		if (IS_ERR(new_rif)) {
8616 			err = PTR_ERR(new_rif);
8617 			goto out;
8618 		}
8619 
8620 		if (old_pvid)
8621 			mlxsw_sp_rif_migrate_destroy(mlxsw_sp, old_rif, new_rif,
8622 						     true);
8623 	} else {
8624 		mlxsw_sp_rif_destroy(old_rif);
8625 	}
8626 
8627 	if (old_pvid) {
8628 		rcu_read_lock();
8629 		upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q),
8630 						     old_pvid);
8631 		rcu_read_unlock();
8632 		if (upper_dev)
8633 			err = mlxsw_sp_inetaddr_bridge_event(mlxsw_sp,
8634 							     upper_dev,
8635 							     new_pvid,
8636 							     NETDEV_UP, extack);
8637 	}
8638 
8639 out:
8640 	mutex_unlock(&mlxsw_sp->router->lock);
8641 	return err;
8642 }
8643 
8644 static void
8645 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8646 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8647 {
8648 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8649 
8650 	params->vid = mlxsw_sp_port_vlan->vid;
8651 	params->lag = mlxsw_sp_port->lagged;
8652 	if (params->lag)
8653 		params->lag_id = mlxsw_sp_port->lag_id;
8654 	else
8655 		params->system_port = mlxsw_sp_port->local_port;
8656 }
8657 
8658 static struct mlxsw_sp_rif_subport *
8659 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8660 {
8661 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
8662 }
8663 
8664 int mlxsw_sp_rif_subport_port(const struct mlxsw_sp_rif *rif,
8665 			      u16 *port, bool *is_lag)
8666 {
8667 	struct mlxsw_sp_rif_subport *rif_subport;
8668 
8669 	if (WARN_ON(rif->ops->type != MLXSW_SP_RIF_TYPE_SUBPORT))
8670 		return -EINVAL;
8671 
8672 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8673 	*is_lag = rif_subport->lag;
8674 	*port = *is_lag ? rif_subport->lag_id : rif_subport->system_port;
8675 	return 0;
8676 }
8677 
8678 static struct mlxsw_sp_rif *
8679 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8680 			 const struct mlxsw_sp_rif_params *params,
8681 			 struct netlink_ext_ack *extack)
8682 {
8683 	struct mlxsw_sp_rif_subport *rif_subport;
8684 	struct mlxsw_sp_rif *rif;
8685 
8686 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8687 	if (!rif)
8688 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8689 
8690 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8691 	refcount_inc(&rif_subport->ref_count);
8692 	return rif;
8693 }
8694 
8695 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8696 {
8697 	struct mlxsw_sp_rif_subport *rif_subport;
8698 
8699 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8700 	if (!refcount_dec_and_test(&rif_subport->ref_count))
8701 		return;
8702 
8703 	mlxsw_sp_rif_destroy(rif);
8704 }
8705 
8706 static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
8707 						struct mlxsw_sp_rif_mac_profile *profile,
8708 						struct netlink_ext_ack *extack)
8709 {
8710 	u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
8711 	struct mlxsw_sp_router *router = mlxsw_sp->router;
8712 	int id;
8713 
8714 	id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
8715 		       max_rif_mac_profiles, GFP_KERNEL);
8716 
8717 	if (id >= 0) {
8718 		profile->id = id;
8719 		return 0;
8720 	}
8721 
8722 	if (id == -ENOSPC)
8723 		NL_SET_ERR_MSG_MOD(extack,
8724 				   "Exceeded number of supported router interface MAC profiles");
8725 
8726 	return id;
8727 }
8728 
8729 static struct mlxsw_sp_rif_mac_profile *
8730 mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
8731 {
8732 	struct mlxsw_sp_rif_mac_profile *profile;
8733 
8734 	profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
8735 			     mac_profile);
8736 	WARN_ON(!profile);
8737 	return profile;
8738 }
8739 
8740 static struct mlxsw_sp_rif_mac_profile *
8741 mlxsw_sp_rif_mac_profile_alloc(const char *mac)
8742 {
8743 	struct mlxsw_sp_rif_mac_profile *profile;
8744 
8745 	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
8746 	if (!profile)
8747 		return NULL;
8748 
8749 	ether_addr_copy(profile->mac_prefix, mac);
8750 	refcount_set(&profile->ref_count, 1);
8751 	return profile;
8752 }
8753 
8754 static struct mlxsw_sp_rif_mac_profile *
8755 mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
8756 {
8757 	struct mlxsw_sp_router *router = mlxsw_sp->router;
8758 	struct mlxsw_sp_rif_mac_profile *profile;
8759 	int id;
8760 
8761 	idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
8762 		if (ether_addr_equal_masked(profile->mac_prefix, mac,
8763 					    mlxsw_sp->mac_mask))
8764 			return profile;
8765 	}
8766 
8767 	return NULL;
8768 }
8769 
8770 static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
8771 {
8772 	const struct mlxsw_sp *mlxsw_sp = priv;
8773 
8774 	return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
8775 }
8776 
8777 static u64 mlxsw_sp_rifs_occ_get(void *priv)
8778 {
8779 	const struct mlxsw_sp *mlxsw_sp = priv;
8780 
8781 	return atomic_read(&mlxsw_sp->router->rifs_count);
8782 }
8783 
8784 static struct mlxsw_sp_rif_mac_profile *
8785 mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
8786 				struct netlink_ext_ack *extack)
8787 {
8788 	struct mlxsw_sp_rif_mac_profile *profile;
8789 	int err;
8790 
8791 	profile = mlxsw_sp_rif_mac_profile_alloc(mac);
8792 	if (!profile)
8793 		return ERR_PTR(-ENOMEM);
8794 
8795 	err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
8796 	if (err)
8797 		goto profile_index_alloc_err;
8798 
8799 	atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
8800 	return profile;
8801 
8802 profile_index_alloc_err:
8803 	kfree(profile);
8804 	return ERR_PTR(err);
8805 }
8806 
8807 static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
8808 					     u8 mac_profile)
8809 {
8810 	struct mlxsw_sp_rif_mac_profile *profile;
8811 
8812 	atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
8813 	profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
8814 	kfree(profile);
8815 }
8816 
8817 static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
8818 					const char *mac, u8 *p_mac_profile,
8819 					struct netlink_ext_ack *extack)
8820 {
8821 	struct mlxsw_sp_rif_mac_profile *profile;
8822 
8823 	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
8824 	if (profile) {
8825 		refcount_inc(&profile->ref_count);
8826 		goto out;
8827 	}
8828 
8829 	profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
8830 	if (IS_ERR(profile))
8831 		return PTR_ERR(profile);
8832 
8833 out:
8834 	*p_mac_profile = profile->id;
8835 	return 0;
8836 }
8837 
8838 static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
8839 					 u8 mac_profile)
8840 {
8841 	struct mlxsw_sp_rif_mac_profile *profile;
8842 
8843 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8844 			   mac_profile);
8845 	if (WARN_ON(!profile))
8846 		return;
8847 
8848 	if (!refcount_dec_and_test(&profile->ref_count))
8849 		return;
8850 
8851 	mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
8852 }
8853 
8854 static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
8855 {
8856 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8857 	struct mlxsw_sp_rif_mac_profile *profile;
8858 
8859 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8860 			   rif->mac_profile_id);
8861 	if (WARN_ON(!profile))
8862 		return false;
8863 
8864 	return refcount_read(&profile->ref_count) > 1;
8865 }
8866 
8867 static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
8868 					 const char *new_mac)
8869 {
8870 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8871 	struct mlxsw_sp_rif_mac_profile *profile;
8872 
8873 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8874 			   rif->mac_profile_id);
8875 	if (WARN_ON(!profile))
8876 		return -EINVAL;
8877 
8878 	ether_addr_copy(profile->mac_prefix, new_mac);
8879 	return 0;
8880 }
8881 
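/* If the RIF's current MAC profile is not shared with other RIFs and no
 * existing profile matches the new MAC, the profile can simply be edited in
 * place. Otherwise, take a reference on a matching (or newly created) profile
 * and release the old one.
 */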
8882 static int
8883 mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
8884 				 struct mlxsw_sp_rif *rif,
8885 				 const char *new_mac,
8886 				 struct netlink_ext_ack *extack)
8887 {
8888 	u8 mac_profile;
8889 	int err;
8890 
8891 	if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
8892 	    !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
8893 		return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
8894 
8895 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
8896 					   &mac_profile, extack);
8897 	if (err)
8898 		return err;
8899 
8900 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
8901 	rif->mac_profile_id = mac_profile;
8902 	return 0;
8903 }
8904 
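/* Join a {port, VID} to the router: get (or create) a subport RIF for the L3
 * netdev, map the RIF's FID to the {port, VID} and, since the VLAN is now
 * routed, disable learning and unconditionally forward on it.
 */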
8905 static int
8906 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8907 				 struct net_device *l3_dev,
8908 				 struct netlink_ext_ack *extack)
8909 {
8910 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8911 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
8912 	struct mlxsw_sp_rif_params params;
8913 	u16 vid = mlxsw_sp_port_vlan->vid;
8914 	struct mlxsw_sp_rif *rif;
8915 	struct mlxsw_sp_fid *fid;
8916 	int err;
8917 
8918 	params = (struct mlxsw_sp_rif_params) {
8919 		.dev = l3_dev,
8920 		.vid = vid,
8921 	};
8922 
8923 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
8924 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
8925 	if (IS_ERR(rif))
8926 		return PTR_ERR(rif);
8927 
8928 	/* The FID was already created; just take a reference. */
8929 	fid = rif->ops->fid_get(rif, &params, extack);
8930 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
8931 	if (err)
8932 		goto err_fid_port_vid_map;
8933 
8934 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
8935 	if (err)
8936 		goto err_port_vid_learning_set;
8937 
8938 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
8939 					BR_STATE_FORWARDING);
8940 	if (err)
8941 		goto err_port_vid_stp_set;
8942 
8943 	mlxsw_sp_port_vlan->fid = fid;
8944 
8945 	return 0;
8946 
8947 err_port_vid_stp_set:
8948 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8949 err_port_vid_learning_set:
8950 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8951 err_fid_port_vid_map:
8952 	mlxsw_sp_fid_put(fid);
8953 	mlxsw_sp_rif_subport_put(rif);
8954 	return err;
8955 }
8956 
8957 static void
8958 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8959 {
8960 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8961 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
8962 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
8963 	u16 vid = mlxsw_sp_port_vlan->vid;
8964 
8965 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
8966 		return;
8967 
8968 	mlxsw_sp_port_vlan->fid = NULL;
8969 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
8970 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8971 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8972 	mlxsw_sp_fid_put(fid);
8973 	mlxsw_sp_rif_subport_put(rif);
8974 }
8975 
8976 static int
8977 mlxsw_sp_port_vlan_router_join_existing(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8978 					struct net_device *l3_dev,
8979 					struct netlink_ext_ack *extack)
8980 {
8981 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8982 
8983 	lockdep_assert_held(&mlxsw_sp->router->lock);
8984 
8985 	if (!mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev))
8986 		return 0;
8987 
8988 	return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
8989 						extack);
8990 }
8991 
8992 void
8993 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8994 {
8995 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8996 
8997 	mutex_lock(&mlxsw_sp->router->lock);
8998 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8999 	mutex_unlock(&mlxsw_sp->router->lock);
9000 }
9001 
9002 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
9003 					     struct net_device *port_dev,
9004 					     unsigned long event, u16 vid,
9005 					     struct netlink_ext_ack *extack)
9006 {
9007 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
9008 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9009 
9010 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
9011 	if (WARN_ON(!mlxsw_sp_port_vlan))
9012 		return -EINVAL;
9013 
9014 	switch (event) {
9015 	case NETDEV_UP:
9016 		return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
9017 							l3_dev, extack);
9018 	case NETDEV_DOWN:
9019 		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
9020 		break;
9021 	}
9022 
9023 	return 0;
9024 }
9025 
9026 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
9027 					unsigned long event, bool nomaster,
9028 					struct netlink_ext_ack *extack)
9029 {
9030 	if (!nomaster && (netif_is_any_bridge_port(port_dev) ||
9031 			  netif_is_lag_port(port_dev)))
9032 		return 0;
9033 
9034 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
9035 						 MLXSW_SP_DEFAULT_VID, extack);
9036 }
9037 
9038 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
9039 					 struct net_device *lag_dev,
9040 					 unsigned long event, u16 vid,
9041 					 struct netlink_ext_ack *extack)
9042 {
9043 	struct net_device *port_dev;
9044 	struct list_head *iter;
9045 	int err;
9046 
9047 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
9048 		if (mlxsw_sp_port_dev_check(port_dev)) {
9049 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
9050 								port_dev,
9051 								event, vid,
9052 								extack);
9053 			if (err)
9054 				return err;
9055 		}
9056 	}
9057 
9058 	return 0;
9059 }
9060 
9061 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
9062 				       unsigned long event, bool nomaster,
9063 				       struct netlink_ext_ack *extack)
9064 {
9065 	if (!nomaster && netif_is_bridge_port(lag_dev))
9066 		return 0;
9067 
9068 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
9069 					     MLXSW_SP_DEFAULT_VID, extack);
9070 }
9071 
9072 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
9073 					  struct net_device *l3_dev,
9074 					  int lower_pvid,
9075 					  unsigned long event,
9076 					  struct netlink_ext_ack *extack)
9077 {
9078 	struct mlxsw_sp_rif_params params = {
9079 		.dev = l3_dev,
9080 	};
9081 	struct mlxsw_sp_rif *rif;
9082 	int err;
9083 
9084 	switch (event) {
9085 	case NETDEV_UP:
9086 		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
9087 			u16 proto;
9088 
9089 			br_vlan_get_proto(l3_dev, &proto);
9090 			if (proto == ETH_P_8021AD) {
9091 				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
9092 				return -EOPNOTSUPP;
9093 			}
9094 			err = br_vlan_get_pvid(l3_dev, &params.vid);
9095 			if (err)
9096 				return err;
9097 			if (!params.vid)
9098 				return 0;
9099 		} else if (is_vlan_dev(l3_dev)) {
9100 			params.vid = vlan_dev_vlan_id(l3_dev);
9101 
9102 			/* If the VID matches PVID of the bridge below, the
9103 			 * bridge owns the RIF for this VLAN. Don't do anything.
9104 			 */
9105 			if ((int)params.vid == lower_pvid)
9106 				return 0;
9107 		}
9108 
9109 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
9110 		if (IS_ERR(rif))
9111 			return PTR_ERR(rif);
9112 		break;
9113 	case NETDEV_DOWN:
9114 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9115 		mlxsw_sp_rif_destroy(rif);
9116 		break;
9117 	}
9118 
9119 	return 0;
9120 }
9121 
9122 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
9123 					struct net_device *vlan_dev,
9124 					unsigned long event, bool nomaster,
9125 					struct netlink_ext_ack *extack)
9126 {
9127 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
9128 	u16 vid = vlan_dev_vlan_id(vlan_dev);
9129 	u16 lower_pvid;
9130 	int err;
9131 
9132 	if (!nomaster && netif_is_bridge_port(vlan_dev))
9133 		return 0;
9134 
9135 	if (mlxsw_sp_port_dev_check(real_dev)) {
9136 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
9137 							 event, vid, extack);
9138 	} else if (netif_is_lag_master(real_dev)) {
9139 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
9140 						     vid, extack);
9141 	} else if (netif_is_bridge_master(real_dev) &&
9142 		   br_vlan_enabled(real_dev)) {
9143 		err = br_vlan_get_pvid(real_dev, &lower_pvid);
9144 		if (err)
9145 			return err;
9146 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev,
9147 						      lower_pvid, event,
9148 						      extack);
9149 	}
9150 
9151 	return 0;
9152 }
9153 
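/* VRRP virtual router MACs have a fixed prefix: 00:00:5e:00:01:xx for
 * IPv4 and 00:00:5e:00:02:xx for IPv6, with the last octet carrying the
 * virtual router ID (VRID).
 */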
9154 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
9155 {
9156 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
9157 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9158 
9159 	return ether_addr_equal_masked(mac, vrrp4, mask);
9160 }
9161 
9162 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
9163 {
9164 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
9165 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9166 
9167 	return ether_addr_equal_masked(mac, vrrp6, mask);
9168 }
9169 
9170 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9171 				const u8 *mac, bool adding)
9172 {
9173 	char ritr_pl[MLXSW_REG_RITR_LEN];
9174 	u8 vrrp_id = adding ? mac[5] : 0;
9175 	int err;
9176 
9177 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
9178 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
9179 		return 0;
9180 
9181 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9182 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9183 	if (err)
9184 		return err;
9185 
9186 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
9187 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
9188 	else
9189 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
9190 
9191 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9192 }
9193 
9194 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
9195 				    const struct net_device *macvlan_dev,
9196 				    struct netlink_ext_ack *extack)
9197 {
9198 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9199 	struct mlxsw_sp_rif *rif;
9200 	int err;
9201 
9202 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9203 	if (!rif)
9204 		return 0;
9205 
9206 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9207 				  mlxsw_sp_fid_index(rif->fid), true);
9208 	if (err)
9209 		return err;
9210 
9211 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
9212 				   macvlan_dev->dev_addr, true);
9213 	if (err)
9214 		goto err_rif_vrrp_add;
9215 
9216 	/* Make sure the bridge driver does not have this MAC pointing at
9217 	 * some other port.
9218 	 */
9219 	if (rif->ops->fdb_del)
9220 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
9221 
9222 	return 0;
9223 
9224 err_rif_vrrp_add:
9225 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9226 			    mlxsw_sp_fid_index(rif->fid), false);
9227 	return err;
9228 }
9229 
9230 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9231 				       const struct net_device *macvlan_dev)
9232 {
9233 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9234 	struct mlxsw_sp_rif *rif;
9235 
9236 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9237 	/* If we do not have a RIF, then we already took care of
9238 	 * removing the macvlan's MAC during RIF deletion.
9239 	 */
9240 	if (!rif)
9241 		return;
9242 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
9243 			     false);
9244 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9245 			    mlxsw_sp_fid_index(rif->fid), false);
9246 }
9247 
9248 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9249 			      const struct net_device *macvlan_dev)
9250 {
9251 	mutex_lock(&mlxsw_sp->router->lock);
9252 	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9253 	mutex_unlock(&mlxsw_sp->router->lock);
9254 }
9255 
9256 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
9257 					   struct net_device *macvlan_dev,
9258 					   unsigned long event,
9259 					   struct netlink_ext_ack *extack)
9260 {
9261 	switch (event) {
9262 	case NETDEV_UP:
9263 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
9264 	case NETDEV_DOWN:
9265 		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9266 		break;
9267 	}
9268 
9269 	return 0;
9270 }
9271 
9272 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
9273 				     struct net_device *dev,
9274 				     unsigned long event, bool nomaster,
9275 				     struct netlink_ext_ack *extack)
9276 {
9277 	if (mlxsw_sp_port_dev_check(dev))
9278 		return mlxsw_sp_inetaddr_port_event(dev, event, nomaster,
9279 						    extack);
9280 	else if (netif_is_lag_master(dev))
9281 		return mlxsw_sp_inetaddr_lag_event(dev, event, nomaster,
9282 						   extack);
9283 	else if (netif_is_bridge_master(dev))
9284 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, -1, event,
9285 						      extack);
9286 	else if (is_vlan_dev(dev))
9287 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
9288 						    nomaster, extack);
9289 	else if (netif_is_macvlan(dev))
9290 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
9291 						       extack);
9292 	else
9293 		return 0;
9294 }
9295 
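/* Handler for the inetaddr notifier chain. Address addition can be
 * vetoed, so NETDEV_UP is handled by the validator notifier below, where
 * an extack is available; only NETDEV_DOWN is processed here.
 */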
9296 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
9297 				   unsigned long event, void *ptr)
9298 {
9299 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
9300 	struct net_device *dev = ifa->ifa_dev->dev;
9301 	struct mlxsw_sp_router *router;
9302 	struct mlxsw_sp_rif *rif;
9303 	int err = 0;
9304 
9305 	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
9306 	if (event == NETDEV_UP)
9307 		return NOTIFY_DONE;
9308 
9309 	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
9310 	mutex_lock(&router->lock);
9311 	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
9312 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9313 		goto out;
9314 
9315 	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, false,
9316 					NULL);
9317 out:
9318 	mutex_unlock(&router->lock);
9319 	return notifier_from_errno(err);
9320 }
9321 
9322 static int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
9323 					 unsigned long event, void *ptr)
9324 {
9325 	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
9326 	struct net_device *dev = ivi->ivi_dev->dev;
9327 	struct mlxsw_sp *mlxsw_sp;
9328 	struct mlxsw_sp_rif *rif;
9329 	int err = 0;
9330 
9331 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9332 	if (!mlxsw_sp)
9333 		return NOTIFY_DONE;
9334 
9335 	mutex_lock(&mlxsw_sp->router->lock);
9336 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9337 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9338 		goto out;
9339 
9340 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
9341 					ivi->extack);
9342 out:
9343 	mutex_unlock(&mlxsw_sp->router->lock);
9344 	return notifier_from_errno(err);
9345 }
9346 
9347 struct mlxsw_sp_inet6addr_event_work {
9348 	struct work_struct work;
9349 	struct mlxsw_sp *mlxsw_sp;
9350 	struct net_device *dev;
9351 	netdevice_tracker dev_tracker;
9352 	unsigned long event;
9353 };
9354 
9355 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
9356 {
9357 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
9358 		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
9359 	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
9360 	struct net_device *dev = inet6addr_work->dev;
9361 	unsigned long event = inet6addr_work->event;
9362 	struct mlxsw_sp_rif *rif;
9363 
9364 	rtnl_lock();
9365 	mutex_lock(&mlxsw_sp->router->lock);
9366 
9367 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9368 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9369 		goto out;
9370 
9371 	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false, NULL);
9372 out:
9373 	mutex_unlock(&mlxsw_sp->router->lock);
9374 	rtnl_unlock();
9375 	netdev_put(dev, &inet6addr_work->dev_tracker);
9376 	kfree(inet6addr_work);
9377 }
9378 
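/* The inet6addr notifier is atomic, so the event cannot take the router
 * lock directly and is instead deferred to a work item.
 */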
9379 /* Called with rcu_read_lock() */
9380 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
9381 				    unsigned long event, void *ptr)
9382 {
9383 	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
9384 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
9385 	struct net_device *dev = if6->idev->dev;
9386 	struct mlxsw_sp_router *router;
9387 
9388 	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
9389 	if (event == NETDEV_UP)
9390 		return NOTIFY_DONE;
9391 
9392 	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
9393 	if (!inet6addr_work)
9394 		return NOTIFY_BAD;
9395 
9396 	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
9397 	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
9398 	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
9399 	inet6addr_work->dev = dev;
9400 	inet6addr_work->event = event;
9401 	netdev_hold(dev, &inet6addr_work->dev_tracker, GFP_ATOMIC);
9402 	mlxsw_core_schedule_work(&inet6addr_work->work);
9403 
9404 	return NOTIFY_DONE;
9405 }
9406 
9407 static int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
9408 					  unsigned long event, void *ptr)
9409 {
9410 	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
9411 	struct net_device *dev = i6vi->i6vi_dev->dev;
9412 	struct mlxsw_sp *mlxsw_sp;
9413 	struct mlxsw_sp_rif *rif;
9414 	int err = 0;
9415 
9416 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9417 	if (!mlxsw_sp)
9418 		return NOTIFY_DONE;
9419 
9420 	mutex_lock(&mlxsw_sp->router->lock);
9421 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9422 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9423 		goto out;
9424 
9425 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
9426 					i6vi->extack);
9427 out:
9428 	mutex_unlock(&mlxsw_sp->router->lock);
9429 	return notifier_from_errno(err);
9430 }
9431 
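/* Edit an existing RIF's MTU, MAC and MAC profile: query the RITR
 * register so that the remaining fields keep their current values, then
 * update the relevant fields and write the register back.
 */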
9432 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9433 			     const char *mac, int mtu, u8 mac_profile)
9434 {
9435 	char ritr_pl[MLXSW_REG_RITR_LEN];
9436 	int err;
9437 
9438 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9439 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9440 	if (err)
9441 		return err;
9442 
9443 	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
9444 	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
9445 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
9446 	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
9447 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9448 }
9449 
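/* React to a MAC or MTU change on a RIF's netdevice: re-point the FDB
 * entry that classifies router-bound traffic to the new MAC, switch the
 * MAC profile, rewrite the RIF and, if the MTU changed, update the
 * multicast routing tables. Each step is unwound if a later one fails.
 */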
9450 static int
9451 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
9452 				  struct mlxsw_sp_rif *rif,
9453 				  struct netlink_ext_ack *extack)
9454 {
9455 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
9456 	u8 old_mac_profile;
9457 	u16 fid_index;
9458 	int err;
9459 
9460 	fid_index = mlxsw_sp_fid_index(rif->fid);
9461 
9462 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
9463 	if (err)
9464 		return err;
9465 
9466 	old_mac_profile = rif->mac_profile_id;
9467 	err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
9468 					       extack);
9469 	if (err)
9470 		goto err_rif_mac_profile_replace;
9471 
9472 	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
9473 				dev->mtu, rif->mac_profile_id);
9474 	if (err)
9475 		goto err_rif_edit;
9476 
9477 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
9478 	if (err)
9479 		goto err_rif_fdb_op;
9480 
9481 	if (rif->mtu != dev->mtu) {
9482 		struct mlxsw_sp_vr *vr;
9483 		int i;
9484 
9485 		/* The RIF is relevant only to its mr_table instance: unlike in
9486 		 * unicast routing, in multicast routing a RIF cannot be shared
9487 		 * between several multicast routing tables.
9488 		 */
9489 		vr = &mlxsw_sp->router->vrs[rif->vr_id];
9490 		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
9491 			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
9492 						   rif, dev->mtu);
9493 	}
9494 
9495 	ether_addr_copy(rif->addr, dev->dev_addr);
9496 	rif->mtu = dev->mtu;
9497 
9498 	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
9499 
9500 	return 0;
9501 
9502 err_rif_fdb_op:
9503 	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
9504 			  old_mac_profile);
9505 err_rif_edit:
9506 	mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
9507 err_rif_mac_profile_replace:
9508 	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
9509 	return err;
9510 }
9511 
9512 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
9513 			    struct netdev_notifier_pre_changeaddr_info *info)
9514 {
9515 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9516 	struct mlxsw_sp_rif_mac_profile *profile;
9517 	struct netlink_ext_ack *extack;
9518 	u8 max_rif_mac_profiles;
9519 	u64 occ;
9520 
9521 	extack = netdev_notifier_info_to_extack(&info->info);
9522 
9523 	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
9524 	if (profile)
9525 		return 0;
9526 
9527 	max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
9528 	occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
9529 	if (occ < max_rif_mac_profiles)
9530 		return 0;
9531 
9532 	if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
9533 		return 0;
9534 
9535 	NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
9536 	return -ENOBUFS;
9537 }
9538 
9539 static bool mlxsw_sp_router_netdevice_interesting(struct mlxsw_sp *mlxsw_sp,
9540 						  struct net_device *dev)
9541 {
9542 	struct vlan_dev_priv *vlan;
9543 
9544 	if (netif_is_lag_master(dev) ||
9545 	    netif_is_bridge_master(dev) ||
9546 	    mlxsw_sp_port_dev_check(dev) ||
9547 	    mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev) ||
9548 	    netif_is_l3_master(dev))
9549 		return true;
9550 
9551 	if (!is_vlan_dev(dev))
9552 		return false;
9553 
9554 	vlan = vlan_dev_priv(dev);
9555 	return netif_is_lag_master(vlan->real_dev) ||
9556 	       netif_is_bridge_master(vlan->real_dev) ||
9557 	       mlxsw_sp_port_dev_check(vlan->real_dev);
9558 }
9559 
9560 static struct mlxsw_sp_crif *
9561 mlxsw_sp_crif_register(struct mlxsw_sp_router *router, struct net_device *dev)
9562 {
9563 	struct mlxsw_sp_crif *crif;
9564 	int err;
9565 
9566 	if (WARN_ON(mlxsw_sp_crif_lookup(router, dev)))
9567 		return NULL;
9568 
9569 	crif = mlxsw_sp_crif_alloc(dev);
9570 	if (!crif)
9571 		return ERR_PTR(-ENOMEM);
9572 
9573 	err = mlxsw_sp_crif_insert(router, crif);
9574 	if (err)
9575 		goto err_netdev_insert;
9576 
9577 	return crif;
9578 
9579 err_netdev_insert:
9580 	mlxsw_sp_crif_free(crif);
9581 	return ERR_PTR(err);
9582 }
9583 
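/* Unregister a CRIF: remove it from the lookup table and detach the
 * nexthops that reference it. If a RIF is still bound to the CRIF, only
 * mark the CRIF for destruction so that it is freed together with the
 * RIF; otherwise free it immediately.
 */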
9584 static void mlxsw_sp_crif_unregister(struct mlxsw_sp_router *router,
9585 				     struct mlxsw_sp_crif *crif)
9586 {
9587 	struct mlxsw_sp_nexthop *nh, *tmp;
9588 
9589 	mlxsw_sp_crif_remove(router, crif);
9590 
9591 	list_for_each_entry_safe(nh, tmp, &crif->nexthop_list, crif_list_node)
9592 		mlxsw_sp_nexthop_type_fini(router->mlxsw_sp, nh);
9593 
9594 	if (crif->rif)
9595 		crif->can_destroy = true;
9596 	else
9597 		mlxsw_sp_crif_free(crif);
9598 }
9599 
9600 static int mlxsw_sp_netdevice_register(struct mlxsw_sp_router *router,
9601 				       struct net_device *dev)
9602 {
9603 	struct mlxsw_sp_crif *crif;
9604 
9605 	if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
9606 		return 0;
9607 
9608 	crif = mlxsw_sp_crif_register(router, dev);
9609 	return PTR_ERR_OR_ZERO(crif);
9610 }
9611 
9612 static void mlxsw_sp_netdevice_unregister(struct mlxsw_sp_router *router,
9613 					  struct net_device *dev)
9614 {
9615 	struct mlxsw_sp_crif *crif;
9616 
9617 	if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
9618 		return;
9619 
9620 	/* netdev_run_todo(), by way of netdev_wait_allrefs_any(), rebroadcasts
9621 	 * the NETDEV_UNREGISTER message, so we can get here twice. If that's
9622 	 * what happened, the netdevice state is NETREG_UNREGISTERED. In that
9623 	 * case, we expect to have collected the CRIF already, and warn if it
9624 	 * still exists. Otherwise we expect the CRIF to exist.
9625 	 */
9626 	crif = mlxsw_sp_crif_lookup(router, dev);
9627 	if (dev->reg_state == NETREG_UNREGISTERED) {
9628 		if (!WARN_ON(crif))
9629 			return;
9630 	}
9631 	if (WARN_ON(!crif))
9632 		return;
9633 
9634 	mlxsw_sp_crif_unregister(router, crif);
9635 }
9636 
9637 static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
9638 {
9639 	switch (event) {
9640 	case NETDEV_OFFLOAD_XSTATS_ENABLE:
9641 	case NETDEV_OFFLOAD_XSTATS_DISABLE:
9642 	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9643 	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9644 		return true;
9645 	}
9646 
9647 	return false;
9648 }
9649 
9650 static int
9651 mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
9652 					unsigned long event,
9653 					struct netdev_notifier_offload_xstats_info *info)
9654 {
9655 	switch (info->type) {
9656 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
9657 		break;
9658 	default:
9659 		return 0;
9660 	}
9661 
9662 	switch (event) {
9663 	case NETDEV_OFFLOAD_XSTATS_ENABLE:
9664 		return mlxsw_sp_router_port_l3_stats_enable(rif);
9665 	case NETDEV_OFFLOAD_XSTATS_DISABLE:
9666 		mlxsw_sp_router_port_l3_stats_disable(rif);
9667 		return 0;
9668 	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9669 		mlxsw_sp_router_port_l3_stats_report_used(rif, info);
9670 		return 0;
9671 	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9672 		return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
9673 	}
9674 
9675 	WARN_ON_ONCE(1);
9676 	return 0;
9677 }
9678 
9679 static int
9680 mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
9681 				      struct net_device *dev,
9682 				      unsigned long event,
9683 				      struct netdev_notifier_offload_xstats_info *info)
9684 {
9685 	struct mlxsw_sp_rif *rif;
9686 
9687 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9688 	if (!rif)
9689 		return 0;
9690 
9691 	return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
9692 }
9693 
9694 static bool mlxsw_sp_is_router_event(unsigned long event)
9695 {
9696 	switch (event) {
9697 	case NETDEV_PRE_CHANGEADDR:
9698 	case NETDEV_CHANGEADDR:
9699 	case NETDEV_CHANGEMTU:
9700 		return true;
9701 	default:
9702 		return false;
9703 	}
9704 }
9705 
9706 static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
9707 						unsigned long event, void *ptr)
9708 {
9709 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
9710 	struct mlxsw_sp *mlxsw_sp;
9711 	struct mlxsw_sp_rif *rif;
9712 
9713 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9714 	if (!mlxsw_sp)
9715 		return 0;
9716 
9717 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9718 	if (!rif)
9719 		return 0;
9720 
9721 	switch (event) {
9722 	case NETDEV_CHANGEMTU:
9723 	case NETDEV_CHANGEADDR:
9724 		return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
9725 	case NETDEV_PRE_CHANGEADDR:
9726 		return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
9727 	default:
9728 		WARN_ON_ONCE(1);
9729 		break;
9730 	}
9731 
9732 	return 0;
9733 }
9734 
9735 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
9736 				  struct net_device *l3_dev,
9737 				  struct netlink_ext_ack *extack)
9738 {
9739 	struct mlxsw_sp_rif *rif;
9740 
9741 	/* If the netdevice is already associated with a RIF, destroy that
9742 	 * RIF and create a new one with the new virtual router ID.
9743 	 */
9744 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9745 	if (rif)
9746 		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false,
9747 					  extack);
9748 
9749 	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, false,
9750 					 extack);
9751 }
9752 
9753 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
9754 				    struct net_device *l3_dev)
9755 {
9756 	struct mlxsw_sp_rif *rif;
9757 
9758 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9759 	if (!rif)
9760 		return;
9761 	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false, NULL);
9762 }
9763 
9764 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
9765 {
9766 	struct netdev_notifier_changeupper_info *info = ptr;
9767 
9768 	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
9769 		return false;
9770 	return netif_is_l3_master(info->upper_dev);
9771 }
9772 
9773 static int
9774 mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
9775 			     struct netdev_notifier_changeupper_info *info)
9776 {
9777 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
9778 	int err = 0;
9779 
9780 	/* We do not create a RIF for a macvlan, but only use it to
9781 	 * direct more MAC addresses to the router.
9782 	 */
9783 	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
9784 		return 0;
9785 
9786 	switch (event) {
9787 	case NETDEV_PRECHANGEUPPER:
9788 		break;
9789 	case NETDEV_CHANGEUPPER:
9790 		if (info->linking) {
9791 			struct netlink_ext_ack *extack;
9792 
9793 			extack = netdev_notifier_info_to_extack(&info->info);
9794 			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
9795 		} else {
9796 			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
9797 		}
9798 		break;
9799 	}
9800 
9801 	return err;
9802 }
9803 
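/* Context for replaying NETDEV_UP on netdevices that already have IP
 * addresses, used when enslavement to or deslavement from an upper device
 * changes which netdevices should have a RIF. "done" counts the
 * netdevices configured so far, so that a failed replay can be unwound.
 */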
9804 struct mlxsw_sp_router_replay_inetaddr_up {
9805 	struct mlxsw_sp *mlxsw_sp;
9806 	struct netlink_ext_ack *extack;
9807 	unsigned int done;
9808 	bool deslavement;
9809 };
9810 
9811 static int mlxsw_sp_router_replay_inetaddr_up(struct net_device *dev,
9812 					      struct netdev_nested_priv *priv)
9813 {
9814 	struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
9815 	bool nomaster = ctx->deslavement;
9816 	struct mlxsw_sp_crif *crif;
9817 	int err;
9818 
9819 	if (mlxsw_sp_dev_addr_list_empty(dev))
9820 		return 0;
9821 
9822 	crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
9823 	if (!crif || crif->rif)
9824 		return 0;
9825 
9826 	if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
9827 		return 0;
9828 
9829 	err = __mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_UP,
9830 					nomaster, ctx->extack);
9831 	if (err)
9832 		return err;
9833 
9834 	ctx->done++;
9835 	return 0;
9836 }
9837 
9838 static int mlxsw_sp_router_unreplay_inetaddr_up(struct net_device *dev,
9839 						struct netdev_nested_priv *priv)
9840 {
9841 	struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
9842 	bool nomaster = ctx->deslavement;
9843 	struct mlxsw_sp_crif *crif;
9844 
9845 	if (!ctx->done)
9846 		return 0;
9847 
9848 	if (mlxsw_sp_dev_addr_list_empty(dev))
9849 		return 0;
9850 
9851 	crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
9852 	if (!crif || !crif->rif)
9853 		return 0;
9854 
9855 	/* We are rolling back a NETDEV_UP replay, so query should_config with NETDEV_UP. */
9856 	if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
9857 		return 0;
9858 
9859 	__mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_DOWN, nomaster,
9860 				  NULL);
9861 
9862 	ctx->done--;
9863 	return 0;
9864 }
9865 
9866 int mlxsw_sp_netdevice_enslavement_replay(struct mlxsw_sp *mlxsw_sp,
9867 					  struct net_device *upper_dev,
9868 					  struct netlink_ext_ack *extack)
9869 {
9870 	struct mlxsw_sp_router_replay_inetaddr_up ctx = {
9871 		.mlxsw_sp = mlxsw_sp,
9872 		.extack = extack,
9873 		.deslavement = false,
9874 	};
9875 	struct netdev_nested_priv priv = {
9876 		.data = &ctx,
9877 	};
9878 	int err;
9879 
9880 	err = mlxsw_sp_router_replay_inetaddr_up(upper_dev, &priv);
9881 	if (err)
9882 		return err;
9883 
9884 	err = netdev_walk_all_upper_dev_rcu(upper_dev,
9885 					    mlxsw_sp_router_replay_inetaddr_up,
9886 					    &priv);
9887 	if (err)
9888 		goto err_replay_up;
9889 
9890 	return 0;
9891 
9892 err_replay_up:
9893 	netdev_walk_all_upper_dev_rcu(upper_dev,
9894 				      mlxsw_sp_router_unreplay_inetaddr_up,
9895 				      &priv);
9896 	mlxsw_sp_router_unreplay_inetaddr_up(upper_dev, &priv);
9897 	return err;
9898 }
9899 
9900 void mlxsw_sp_netdevice_deslavement_replay(struct mlxsw_sp *mlxsw_sp,
9901 					   struct net_device *dev)
9902 {
9903 	struct mlxsw_sp_router_replay_inetaddr_up ctx = {
9904 		.mlxsw_sp = mlxsw_sp,
9905 		.deslavement = true,
9906 	};
9907 	struct netdev_nested_priv priv = {
9908 		.data = &ctx,
9909 	};
9910 
9911 	mlxsw_sp_router_replay_inetaddr_up(dev, &priv);
9912 }
9913 
9914 static int
9915 mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port,
9916 				       u16 vid, struct net_device *dev,
9917 				       struct netlink_ext_ack *extack)
9918 {
9919 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9920 
9921 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
9922 							    vid);
9923 	if (WARN_ON(!mlxsw_sp_port_vlan))
9924 		return -EINVAL;
9925 
9926 	return mlxsw_sp_port_vlan_router_join_existing(mlxsw_sp_port_vlan,
9927 						       dev, extack);
9928 }
9929 
9930 static void
9931 mlxsw_sp_port_vid_router_leave(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
9932 			       struct net_device *dev)
9933 {
9934 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9935 
9936 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
9937 							    vid);
9938 	if (WARN_ON(!mlxsw_sp_port_vlan))
9939 		return;
9940 
9941 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
9942 }
9943 
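/* When a port joins a LAG, replay the router configuration that already
 * exists on the LAG device and its VLAN uppers, so that the new member
 * is mapped to the corresponding rFIDs.
 */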
9944 static int __mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
9945 					   struct net_device *lag_dev,
9946 					   struct netlink_ext_ack *extack)
9947 {
9948 	u16 default_vid = MLXSW_SP_DEFAULT_VID;
9949 	struct net_device *upper_dev;
9950 	struct list_head *iter;
9951 	int done = 0;
9952 	u16 vid;
9953 	int err;
9954 
9955 	err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, default_vid,
9956 						     lag_dev, extack);
9957 	if (err)
9958 		return err;
9959 
9960 	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
9961 		if (!is_vlan_dev(upper_dev))
9962 			continue;
9963 
9964 		vid = vlan_dev_vlan_id(upper_dev);
9965 		err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, vid,
9966 							     upper_dev, extack);
9967 		if (err)
9968 			goto err_router_join_dev;
9969 
9970 		++done;
9971 	}
9972 
9973 	return 0;
9974 
9975 err_router_join_dev:
9976 	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
9977 		if (!is_vlan_dev(upper_dev))
9978 			continue;
9979 		if (!done--)
9980 			break;
9981 
9982 		vid = vlan_dev_vlan_id(upper_dev);
9983 		mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
9984 	}
9985 
9986 	mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
9987 	return err;
9988 }
9989 
9990 static void
9991 __mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
9992 				 struct net_device *lag_dev)
9993 {
9994 	u16 default_vid = MLXSW_SP_DEFAULT_VID;
9995 	struct net_device *upper_dev;
9996 	struct list_head *iter;
9997 	u16 vid;
9998 
9999 	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
10000 		if (!is_vlan_dev(upper_dev))
10001 			continue;
10002 
10003 		vid = vlan_dev_vlan_id(upper_dev);
10004 		mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
10005 	}
10006 
10007 	mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
10008 }
10009 
10010 int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
10011 				  struct net_device *lag_dev,
10012 				  struct netlink_ext_ack *extack)
10013 {
10014 	int err;
10015 
10016 	mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10017 	err = __mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev, extack);
10018 	mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10019 
10020 	return err;
10021 }
10022 
10023 void mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
10024 				    struct net_device *lag_dev)
10025 {
10026 	mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10027 	__mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
10028 	mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10029 }
10030 
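/* Router-side handler for netdevice events. CRIF registration brackets
 * the other handlers: a CRIF is created on NETDEV_REGISTER before any
 * further processing, and collected on NETDEV_UNREGISTER after it.
 */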
10031 static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
10032 					   unsigned long event, void *ptr)
10033 {
10034 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
10035 	struct mlxsw_sp_router *router;
10036 	struct mlxsw_sp *mlxsw_sp;
10037 	int err = 0;
10038 
10039 	router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
10040 	mlxsw_sp = router->mlxsw_sp;
10041 
10042 	mutex_lock(&mlxsw_sp->router->lock);
10043 
10044 	if (event == NETDEV_REGISTER) {
10045 		err = mlxsw_sp_netdevice_register(router, dev);
10046 		if (err)
10047 			/* No need to roll this back, UNREGISTER will collect it
10048 			 * anyhow.
10049 			 */
10050 			goto out;
10051 	}
10052 
10053 	if (mlxsw_sp_is_offload_xstats_event(event))
10054 		err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
10055 							    event, ptr);
10056 	else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
10057 		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
10058 						       event, ptr);
10059 	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
10060 		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
10061 						       event, ptr);
10062 	else if (mlxsw_sp_is_router_event(event))
10063 		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
10064 	else if (mlxsw_sp_is_vrf_event(event, ptr))
10065 		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
10066 
10067 	if (event == NETDEV_UNREGISTER)
10068 		mlxsw_sp_netdevice_unregister(router, dev);
10069 
10070 out:
10071 	mutex_unlock(&mlxsw_sp->router->lock);
10072 
10073 	return notifier_from_errno(err);
10074 }
10075 
10076 struct mlxsw_sp_macvlan_replay {
10077 	struct mlxsw_sp *mlxsw_sp;
10078 	struct netlink_ext_ack *extack;
10079 };
10080 
10081 static int mlxsw_sp_macvlan_replay_upper(struct net_device *dev,
10082 					 struct netdev_nested_priv *priv)
10083 {
10084 	const struct mlxsw_sp_macvlan_replay *rms = priv->data;
10085 	struct netlink_ext_ack *extack = rms->extack;
10086 	struct mlxsw_sp *mlxsw_sp = rms->mlxsw_sp;
10087 
10088 	if (!netif_is_macvlan(dev))
10089 		return 0;
10090 
10091 	return mlxsw_sp_rif_macvlan_add(mlxsw_sp, dev, extack);
10092 }
10093 
10094 static int mlxsw_sp_macvlan_replay(struct mlxsw_sp_rif *rif,
10095 				   struct netlink_ext_ack *extack)
10096 {
10097 	struct mlxsw_sp_macvlan_replay rms = {
10098 		.mlxsw_sp = rif->mlxsw_sp,
10099 		.extack = extack,
10100 	};
10101 	struct netdev_nested_priv priv = {
10102 		.data = &rms,
10103 	};
10104 
10105 	return netdev_walk_all_upper_dev_rcu(mlxsw_sp_rif_dev(rif),
10106 					     mlxsw_sp_macvlan_replay_upper,
10107 					     &priv);
10108 }
10109 
10110 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
10111 					struct netdev_nested_priv *priv)
10112 {
10113 	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
10114 
10115 	if (!netif_is_macvlan(dev))
10116 		return 0;
10117 
10118 	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10119 				   mlxsw_sp_fid_index(rif->fid), false);
10120 }
10121 
10122 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
10123 {
10124 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10125 	struct netdev_nested_priv priv = {
10126 		.data = (void *)rif,
10127 	};
10128 
10129 	if (!netif_is_macvlan_port(dev))
10130 		return 0;
10131 
10132 	return netdev_walk_all_upper_dev_rcu(dev,
10133 					     __mlxsw_sp_rif_macvlan_flush, &priv);
10134 }
10135 
10136 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
10137 				       const struct mlxsw_sp_rif_params *params)
10138 {
10139 	struct mlxsw_sp_rif_subport *rif_subport;
10140 
10141 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
10142 	refcount_set(&rif_subport->ref_count, 1);
10143 	rif_subport->vid = params->vid;
10144 	rif_subport->lag = params->lag;
10145 	if (params->lag)
10146 		rif_subport->lag_id = params->lag_id;
10147 	else
10148 		rif_subport->system_port = params->system_port;
10149 }
10150 
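/* Write the RITR register for a subport RIF, keyed by either the member
 * system port or the LAG ID, with the RIF's rFID as the egress FID.
 */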
10151 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
10152 {
10153 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10154 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10155 	struct mlxsw_sp_rif_subport *rif_subport;
10156 	char ritr_pl[MLXSW_REG_RITR_LEN];
10157 	u16 efid;
10158 
10159 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
10160 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
10161 			    rif->rif_index, rif->vr_id, dev->mtu);
10162 	mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
10163 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
10164 	efid = mlxsw_sp_fid_index(rif->fid);
10165 	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
10166 				  rif_subport->lag ? rif_subport->lag_id :
10167 						     rif_subport->system_port,
10168 				  efid, 0);
10169 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10170 }
10171 
10172 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
10173 					  struct netlink_ext_ack *extack)
10174 {
10175 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10176 	u8 mac_profile;
10177 	int err;
10178 
10179 	err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
10180 					   &mac_profile, extack);
10181 	if (err)
10182 		return err;
10183 	rif->mac_profile_id = mac_profile;
10184 
10185 	err = mlxsw_sp_rif_subport_op(rif, true);
10186 	if (err)
10187 		goto err_rif_subport_op;
10188 
10189 	err = mlxsw_sp_macvlan_replay(rif, extack);
10190 	if (err)
10191 		goto err_macvlan_replay;
10192 
10193 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10194 				  mlxsw_sp_fid_index(rif->fid), true);
10195 	if (err)
10196 		goto err_rif_fdb_op;
10197 
10198 	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
10199 	if (err)
10200 		goto err_fid_rif_set;
10201 
10202 	return 0;
10203 
10204 err_fid_rif_set:
10205 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10206 			    mlxsw_sp_fid_index(rif->fid), false);
10207 err_rif_fdb_op:
10208 	mlxsw_sp_rif_macvlan_flush(rif);
10209 err_macvlan_replay:
10210 	mlxsw_sp_rif_subport_op(rif, false);
10211 err_rif_subport_op:
10212 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
10213 	return err;
10214 }
10215 
10216 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
10217 {
10218 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10219 	struct mlxsw_sp_fid *fid = rif->fid;
10220 
10221 	mlxsw_sp_fid_rif_unset(fid);
10222 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10223 			    mlxsw_sp_fid_index(fid), false);
10224 	mlxsw_sp_rif_macvlan_flush(rif);
10225 	mlxsw_sp_rif_subport_op(rif, false);
10226 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10227 }
10228 
10229 static struct mlxsw_sp_fid *
10230 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
10231 			     const struct mlxsw_sp_rif_params *params,
10232 			     struct netlink_ext_ack *extack)
10233 {
10234 	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
10235 }
10236 
10237 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
10238 	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
10239 	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
10240 	.setup			= mlxsw_sp_rif_subport_setup,
10241 	.configure		= mlxsw_sp_rif_subport_configure,
10242 	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
10243 	.fid_get		= mlxsw_sp_rif_subport_fid_get,
10244 };
10245 
10246 static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
10247 {
10248 	enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
10249 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10250 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10251 	char ritr_pl[MLXSW_REG_RITR_LEN];
10252 
10253 	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
10254 			    dev->mtu);
10255 	mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
10256 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
10257 	mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
10258 
10259 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10260 }
10261 
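/* The "router port" is a virtual port one past the highest switch port.
 * Adding it to a FID's flood tables is what makes broadcast and multicast
 * traffic in that FID reach the router.
 */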
10262 u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
10263 {
10264 	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
10265 }
10266 
10267 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
10268 				      struct netlink_ext_ack *extack)
10269 {
10270 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10271 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10272 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
10273 	u8 mac_profile;
10274 	int err;
10275 
10276 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
10277 					   &mac_profile, extack);
10278 	if (err)
10279 		return err;
10280 	rif->mac_profile_id = mac_profile;
10281 
10282 	err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
10283 	if (err)
10284 		goto err_rif_fid_op;
10285 
10286 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10287 				     mlxsw_sp_router_port(mlxsw_sp), true);
10288 	if (err)
10289 		goto err_fid_mc_flood_set;
10290 
10291 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10292 				     mlxsw_sp_router_port(mlxsw_sp), true);
10293 	if (err)
10294 		goto err_fid_bc_flood_set;
10295 
10296 	err = mlxsw_sp_macvlan_replay(rif, extack);
10297 	if (err)
10298 		goto err_macvlan_replay;
10299 
10300 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10301 				  mlxsw_sp_fid_index(rif->fid), true);
10302 	if (err)
10303 		goto err_rif_fdb_op;
10304 
10305 	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
10306 	if (err)
10307 		goto err_fid_rif_set;
10308 
10309 	return 0;
10310 
10311 err_fid_rif_set:
10312 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10313 			    mlxsw_sp_fid_index(rif->fid), false);
10314 err_rif_fdb_op:
10315 	mlxsw_sp_rif_macvlan_flush(rif);
10316 err_macvlan_replay:
10317 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10318 			       mlxsw_sp_router_port(mlxsw_sp), false);
10319 err_fid_bc_flood_set:
10320 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10321 			       mlxsw_sp_router_port(mlxsw_sp), false);
10322 err_fid_mc_flood_set:
10323 	mlxsw_sp_rif_fid_op(rif, fid_index, false);
10324 err_rif_fid_op:
10325 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
10326 	return err;
10327 }
10328 
10329 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
10330 {
10331 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10332 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
10333 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10334 	struct mlxsw_sp_fid *fid = rif->fid;
10335 
10336 	mlxsw_sp_fid_rif_unset(fid);
10337 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10338 			    mlxsw_sp_fid_index(fid), false);
10339 	mlxsw_sp_rif_macvlan_flush(rif);
10340 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10341 			       mlxsw_sp_router_port(mlxsw_sp), false);
10342 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10343 			       mlxsw_sp_router_port(mlxsw_sp), false);
10344 	mlxsw_sp_rif_fid_op(rif, fid_index, false);
10345 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10346 }
10347 
10348 static struct mlxsw_sp_fid *
10349 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
10350 			 const struct mlxsw_sp_rif_params *params,
10351 			 struct netlink_ext_ack *extack)
10352 {
10353 	int rif_ifindex = mlxsw_sp_rif_dev_ifindex(rif);
10354 
10355 	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif_ifindex);
10356 }
10357 
10358 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
10359 {
10360 	struct switchdev_notifier_fdb_info info = {};
10361 	struct net_device *dev;
10362 
10363 	dev = br_fdb_find_port(mlxsw_sp_rif_dev(rif), mac, 0);
10364 	if (!dev)
10365 		return;
10366 
10367 	info.addr = mac;
10368 	info.vid = 0;
10369 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
10370 				 NULL);
10371 }
10372 
10373 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
10374 	.type			= MLXSW_SP_RIF_TYPE_FID,
10375 	.rif_size		= sizeof(struct mlxsw_sp_rif),
10376 	.configure		= mlxsw_sp_rif_fid_configure,
10377 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
10378 	.fid_get		= mlxsw_sp_rif_fid_fid_get,
10379 	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
10380 };
10381 
10382 static struct mlxsw_sp_fid *
10383 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
10384 			  const struct mlxsw_sp_rif_params *params,
10385 			  struct netlink_ext_ack *extack)
10386 {
10387 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10388 	struct net_device *br_dev;
10389 
10390 	if (WARN_ON(!params->vid))
10391 		return ERR_PTR(-EINVAL);
10392 
10393 	if (is_vlan_dev(dev)) {
10394 		br_dev = vlan_dev_real_dev(dev);
10395 		if (WARN_ON(!netif_is_bridge_master(br_dev)))
10396 			return ERR_PTR(-EINVAL);
10397 	}
10398 
10399 	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, params->vid);
10400 }
10401 
10402 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
10403 {
10404 	struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
10405 	struct switchdev_notifier_fdb_info info = {};
10406 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10407 	struct net_device *br_dev;
10408 	struct net_device *dev;
10409 
10410 	br_dev = is_vlan_dev(rif_dev) ? vlan_dev_real_dev(rif_dev) : rif_dev;
10411 	dev = br_fdb_find_port(br_dev, mac, vid);
10412 	if (!dev)
10413 		return;
10414 
10415 	info.addr = mac;
10416 	info.vid = vid;
10417 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
10418 				 NULL);
10419 }
10420 
10421 static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
10422 				bool enable)
10423 {
10424 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10425 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10426 	char ritr_pl[MLXSW_REG_RITR_LEN];
10427 
10428 	mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
10429 				    dev->mtu, dev->dev_addr,
10430 				    rif->mac_profile_id, vid, efid);
10431 
10432 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10433 }
10434 
10435 static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
10436 				       struct netlink_ext_ack *extack)
10437 {
10438 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10439 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10440 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10441 	u8 mac_profile;
10442 	int err;
10443 
10444 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
10445 					   &mac_profile, extack);
10446 	if (err)
10447 		return err;
10448 	rif->mac_profile_id = mac_profile;
10449 
10450 	err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
10451 	if (err)
10452 		goto err_rif_vlan_fid_op;
10453 
10454 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10455 				     mlxsw_sp_router_port(mlxsw_sp), true);
10456 	if (err)
10457 		goto err_fid_mc_flood_set;
10458 
10459 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10460 				     mlxsw_sp_router_port(mlxsw_sp), true);
10461 	if (err)
10462 		goto err_fid_bc_flood_set;
10463 
10464 	err = mlxsw_sp_macvlan_replay(rif, extack);
10465 	if (err)
10466 		goto err_macvlan_replay;
10467 
10468 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10469 				  mlxsw_sp_fid_index(rif->fid), true);
10470 	if (err)
10471 		goto err_rif_fdb_op;
10472 
10473 	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
10474 	if (err)
10475 		goto err_fid_rif_set;
10476 
10477 	return 0;
10478 
10479 err_fid_rif_set:
10480 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10481 			    mlxsw_sp_fid_index(rif->fid), false);
10482 err_rif_fdb_op:
10483 	mlxsw_sp_rif_macvlan_flush(rif);
10484 err_macvlan_replay:
10485 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10486 			       mlxsw_sp_router_port(mlxsw_sp), false);
10487 err_fid_bc_flood_set:
10488 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10489 			       mlxsw_sp_router_port(mlxsw_sp), false);
10490 err_fid_mc_flood_set:
10491 	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
10492 err_rif_vlan_fid_op:
10493 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
10494 	return err;
10495 }
10496 
10497 static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
10498 {
10499 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10500 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10501 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10502 
10503 	mlxsw_sp_fid_rif_unset(rif->fid);
10504 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10505 			    mlxsw_sp_fid_index(rif->fid), false);
10506 	mlxsw_sp_rif_macvlan_flush(rif);
10507 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10508 			       mlxsw_sp_router_port(mlxsw_sp), false);
10509 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10510 			       mlxsw_sp_router_port(mlxsw_sp), false);
10511 	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
10512 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10513 }
10514 
10515 static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
10516 					struct netlink_ext_ack *extack)
10517 {
10518 	return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
10519 }
10520 
10521 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
10522 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
10523 	.rif_size		= sizeof(struct mlxsw_sp_rif),
10524 	.configure		= mlxsw_sp1_rif_vlan_configure,
10525 	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
10526 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
10527 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
10528 };
10529 
10530 static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
10531 					struct netlink_ext_ack *extack)
10532 {
10533 	u16 efid = mlxsw_sp_fid_index(rif->fid);
10534 
10535 	return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
10536 }
10537 
10538 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
10539 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
10540 	.rif_size		= sizeof(struct mlxsw_sp_rif),
10541 	.configure		= mlxsw_sp2_rif_vlan_configure,
10542 	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
10543 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
10544 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
10545 };
10546 
10547 static struct mlxsw_sp_rif_ipip_lb *
10548 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
10549 {
10550 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
10551 }
10552 
10553 static void
10554 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
10555 			   const struct mlxsw_sp_rif_params *params)
10556 {
10557 	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
10558 	struct mlxsw_sp_rif_ipip_lb *rif_lb;
10559 
10560 	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
10561 				 common);
10562 	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
10563 	rif_lb->lb_config = params_lb->lb_config;
10564 }
10565 
10566 static int
10567 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10568 				struct netlink_ext_ack *extack)
10569 {
10570 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10571 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10572 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
10573 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10574 	struct mlxsw_sp_vr *ul_vr;
10575 	int err;
10576 
10577 	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, extack);
10578 	if (IS_ERR(ul_vr))
10579 		return PTR_ERR(ul_vr);
10580 
10581 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
10582 	if (err)
10583 		goto err_loopback_op;
10584 
10585 	lb_rif->ul_vr_id = ul_vr->id;
10586 	lb_rif->ul_rif_id = 0;
10587 	++ul_vr->rif_count;
10588 	return 0;
10589 
10590 err_loopback_op:
10591 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
10592 	return err;
10593 }
10594 
10595 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
10596 {
10597 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10598 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10599 	struct mlxsw_sp_vr *ul_vr;
10600 
10601 	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
10602 	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
10603 
10604 	--ul_vr->rif_count;
10605 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
10606 }
10607 
10608 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
10609 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
10610 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
10611 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
10612 	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
10613 	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
10614 };
10615 
10616 static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
10617 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
10618 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp1_rif_vlan_ops,
10619 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
10620 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
10621 };
10622 
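/* On Spectrum-2 and later, IP-in-IP underlay lookup goes through a
 * dedicated underlay loopback RIF (UL RIF), allocated per virtual router
 * and reference-counted, rather than through an underlay VR as on
 * Spectrum-1 above.
 */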
10623 static int
10624 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
10625 {
10626 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10627 	char ritr_pl[MLXSW_REG_RITR_LEN];
10628 
10629 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
10630 			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
10631 	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
10632 					     MLXSW_REG_RITR_LOOPBACK_GENERIC);
10633 
10634 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10635 }
10636 
10637 static struct mlxsw_sp_rif *
10638 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
10639 		       struct mlxsw_sp_crif *ul_crif,
10640 		       struct netlink_ext_ack *extack)
10641 {
10642 	struct mlxsw_sp_rif *ul_rif;
10643 	u8 rif_entries = 1;
10644 	u16 rif_index;
10645 	int err;
10646 
10647 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
10648 	if (err) {
10649 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
10650 		return ERR_PTR(err);
10651 	}
10652 
10653 	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id,
10654 				    ul_crif);
10655 	if (!ul_rif) {
10656 		err = -ENOMEM;
10657 		goto err_rif_alloc;
10658 	}
10659 
10660 	mlxsw_sp->router->rifs[rif_index] = ul_rif;
10661 	ul_rif->mlxsw_sp = mlxsw_sp;
10662 	ul_rif->rif_entries = rif_entries;
10663 	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
10664 	if (err)
10665 		goto ul_rif_op_err;
10666 
10667 	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
10668 	return ul_rif;
10669 
10670 ul_rif_op_err:
10671 	mlxsw_sp->router->rifs[rif_index] = NULL;
10672 	mlxsw_sp_rif_free(ul_rif);
10673 err_rif_alloc:
10674 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10675 	return ERR_PTR(err);
10676 }
10677 
10678 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
10679 {
10680 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10681 	u8 rif_entries = ul_rif->rif_entries;
10682 	u16 rif_index = ul_rif->rif_index;
10683 
10684 	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
10685 	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
10686 	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
10687 	mlxsw_sp_rif_free(ul_rif);
10688 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10689 }
10690 
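/* Get a reference on the underlay RIF of the VR backing @tb_id,
 * creating both on first use. A successful call must be balanced with
 * mlxsw_sp_ul_rif_put(), which also drops the VR reference on the last
 * put.
 */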
10691 static struct mlxsw_sp_rif *
10692 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
10693 		    struct mlxsw_sp_crif *ul_crif,
10694 		    struct netlink_ext_ack *extack)
10695 {
10696 	struct mlxsw_sp_vr *vr;
10697 	int err;
10698 
10699 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
10700 	if (IS_ERR(vr))
10701 		return ERR_CAST(vr);
10702 
10703 	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
10704 		return vr->ul_rif;
10705 
10706 	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, ul_crif, extack);
10707 	if (IS_ERR(vr->ul_rif)) {
10708 		err = PTR_ERR(vr->ul_rif);
10709 		goto err_ul_rif_create;
10710 	}
10711 
10712 	vr->rif_count++;
10713 	refcount_set(&vr->ul_rif_refcnt, 1);
10714 
10715 	return vr->ul_rif;
10716 
10717 err_ul_rif_create:
10718 	mlxsw_sp_vr_put(mlxsw_sp, vr);
10719 	return ERR_PTR(err);
10720 }
10721 
10722 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
10723 {
10724 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10725 	struct mlxsw_sp_vr *vr;
10726 
10727 	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
10728 
10729 	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
10730 		return;
10731 
10732 	vr->rif_count--;
10733 	mlxsw_sp_ul_rif_destroy(ul_rif);
10734 	mlxsw_sp_vr_put(mlxsw_sp, vr);
10735 }
10736 
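/* Entry points exported to the rest of the driver: look up or create
 * the underlay RIF for @ul_tb_id and report its index, and release it
 * again by index. Note that both take the router lock.
 */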
10737 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
10738 			       u16 *ul_rif_index)
10739 {
10740 	struct mlxsw_sp_rif *ul_rif;
10741 	int err = 0;
10742 
10743 	mutex_lock(&mlxsw_sp->router->lock);
10744 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, NULL);
10745 	if (IS_ERR(ul_rif)) {
10746 		err = PTR_ERR(ul_rif);
10747 		goto out;
10748 	}
10749 	*ul_rif_index = ul_rif->rif_index;
10750 out:
10751 	mutex_unlock(&mlxsw_sp->router->lock);
10752 	return err;
10753 }
10754 
10755 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
10756 {
10757 	struct mlxsw_sp_rif *ul_rif;
10758 
10759 	mutex_lock(&mlxsw_sp->router->lock);
10760 	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
10761 	if (WARN_ON(!ul_rif))
10762 		goto out;
10763 
10764 	mlxsw_sp_ul_rif_put(ul_rif);
10765 out:
10766 	mutex_unlock(&mlxsw_sp->router->lock);
10767 }
10768 
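/* On Spectrum-2 and later the IPIP loopback is bound to an underlay
 * RIF rather than directly to an underlay VR as on Spectrum-1, so
 * ul_rif_id is set here and ul_vr_id is left at zero.
 */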
10769 static int
10770 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10771 				struct netlink_ext_ack *extack)
10772 {
10773 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10774 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10775 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
10776 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10777 	struct mlxsw_sp_rif *ul_rif;
10778 	int err;
10779 
10780 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, extack);
10781 	if (IS_ERR(ul_rif))
10782 		return PTR_ERR(ul_rif);
10783 
10784 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
10785 	if (err)
10786 		goto err_loopback_op;
10787 
10788 	lb_rif->ul_vr_id = 0;
10789 	lb_rif->ul_rif_id = ul_rif->rif_index;
10790 
10791 	return 0;
10792 
10793 err_loopback_op:
10794 	mlxsw_sp_ul_rif_put(ul_rif);
10795 	return err;
10796 }
10797 
10798 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
10799 {
10800 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10801 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10802 	struct mlxsw_sp_rif *ul_rif;
10803 
10804 	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
10805 	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
10806 	mlxsw_sp_ul_rif_put(ul_rif);
10807 }
10808 
10809 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
10810 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
10811 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
10812 	.setup			= mlxsw_sp_rif_ipip_lb_setup,
10813 	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
10814 	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
10815 };
10816 
10817 static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
10818 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
10819 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp2_rif_vlan_ops,
10820 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
10821 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
10822 };
10823 
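/* RIF indices are managed by a genalloc pool. The order-aligned
 * first-fit algorithm keeps multi-entry allocations (rif_entries > 1)
 * naturally aligned, and the pool is populated at a non-zero offset
 * because gen_pool_alloc() uses an address of 0 to signal failure.
 */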
10824 static int mlxsw_sp_rifs_table_init(struct mlxsw_sp *mlxsw_sp)
10825 {
10826 	struct gen_pool *rifs_table;
10827 	int err;
10828 
10829 	rifs_table = gen_pool_create(0, -1);
10830 	if (!rifs_table)
10831 		return -ENOMEM;
10832 
10833 	gen_pool_set_algo(rifs_table, gen_pool_first_fit_order_align,
10834 			  NULL);
10835 
10836 	err = gen_pool_add(rifs_table, MLXSW_SP_ROUTER_GENALLOC_OFFSET,
10837 			   MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS), -1);
10838 	if (err)
10839 		goto err_gen_pool_add;
10840 
10841 	mlxsw_sp->router->rifs_table = rifs_table;
10842 
10843 	return 0;
10844 
10845 err_gen_pool_add:
10846 	gen_pool_destroy(rifs_table);
10847 	return err;
10848 }
10849 
10850 static void mlxsw_sp_rifs_table_fini(struct mlxsw_sp *mlxsw_sp)
10851 {
10852 	gen_pool_destroy(mlxsw_sp->router->rifs_table);
10853 }
10854 
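/* Allocate the RIF pointer array and the index pool, initialize the
 * RIF MAC profile IDR and register devlink occupancy getters for both
 * the RIFs and the RIF MAC profiles resources.
 */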
10855 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
10856 {
10857 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10858 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10859 	struct mlxsw_core *core = mlxsw_sp->core;
10860 	int err;
10861 
10862 	if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
10863 		return -EIO;
10864 	mlxsw_sp->router->max_rif_mac_profile =
10865 		MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
10866 
10867 	mlxsw_sp->router->rifs = kcalloc(max_rifs,
10868 					 sizeof(struct mlxsw_sp_rif *),
10869 					 GFP_KERNEL);
10870 	if (!mlxsw_sp->router->rifs)
10871 		return -ENOMEM;
10872 
10873 	err = mlxsw_sp_rifs_table_init(mlxsw_sp);
10874 	if (err)
10875 		goto err_rifs_table_init;
10876 
10877 	idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
10878 	atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
10879 	atomic_set(&mlxsw_sp->router->rifs_count, 0);
10880 	devl_resource_occ_get_register(devlink,
10881 				       MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
10882 				       mlxsw_sp_rif_mac_profiles_occ_get,
10883 				       mlxsw_sp);
10884 	devl_resource_occ_get_register(devlink,
10885 				       MLXSW_SP_RESOURCE_RIFS,
10886 				       mlxsw_sp_rifs_occ_get,
10887 				       mlxsw_sp);
10888 
10889 	return 0;
10890 
10891 err_rifs_table_init:
10892 	kfree(mlxsw_sp->router->rifs);
10893 	return err;
10894 }
10895 
10896 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
10897 {
10898 	int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10899 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10900 	int i;
10901 
10902 	WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
10903 	for (i = 0; i < max_rifs; i++)
10904 		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
10905 
10906 	devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
10907 	devl_resource_occ_get_unregister(devlink,
10908 					 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
10909 	WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
10910 	idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
10911 	mlxsw_sp_rifs_table_fini(mlxsw_sp);
10912 	kfree(mlxsw_sp->router->rifs);
10913 }
10914 
10915 static int
10916 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
10917 {
10918 	char tigcr_pl[MLXSW_REG_TIGCR_LEN];
10919 
10920 	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
10921 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
10922 }
10923 
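/* Common IPIP init: set up ECN handling for encapsulation and
 * decapsulation and the global tunneling configuration (TIGCR).
 */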
10924 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
10925 {
10926 	int err;
10927 
10928 	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
10929 
10930 	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
10931 	if (err)
10932 		return err;
10933 	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
10934 	if (err)
10935 		return err;
10936 
10937 	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
10938 }
10939 
10940 static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
10941 {
10942 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
10943 	return mlxsw_sp_ipips_init(mlxsw_sp);
10944 }
10945 
10946 static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
10947 {
10948 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
10949 	return mlxsw_sp_ipips_init(mlxsw_sp);
10950 }
10951 
10952 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
10953 {
10954 	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
10955 }
10956 
10957 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
10958 {
10959 	struct mlxsw_sp_router *router;
10960 
10961 	/* Flush pending FIB notifications and then flush the device's
10962 	 * table before requesting another dump. The FIB notification
10963 	 * block is unregistered, so no need to take RTNL.
10964 	 */
10965 	mlxsw_core_flush_owq();
10966 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
10967 	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
10968 }
10969 
10970 #ifdef CONFIG_IP_ROUTE_MULTIPATH
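/* The multipath hash configuration is assembled as bitmaps of RECR2
 * header and field enables, for both outer and inner packet headers,
 * and then written to HW in one go by mlxsw_sp_mp_hash_init().
 */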
10971 struct mlxsw_sp_mp_hash_config {
10972 	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
10973 	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
10974 	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
10975 	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
10976 	bool inc_parsing_depth;
10977 };
10978 
10979 #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
10980 	bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
10981 
10982 #define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
10983 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
10984 
10985 #define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
10986 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
10987 
10988 static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
10989 {
10990 	unsigned long *inner_headers = config->inner_headers;
10991 	unsigned long *inner_fields = config->inner_fields;
10992 
10993 	/* IPv4 inner */
10994 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
10995 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
10996 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
10997 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
10998 	/* IPv6 inner */
10999 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
11000 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
11001 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
11002 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
11003 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
11004 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
11005 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
11006 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
11007 }
11008 
11009 static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
11010 {
11011 	unsigned long *headers = config->headers;
11012 	unsigned long *fields = config->fields;
11013 
11014 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
11015 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
11016 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
11017 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
11018 }
11019 
11020 static void
11021 mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
11022 			      u32 hash_fields)
11023 {
11024 	unsigned long *inner_headers = config->inner_headers;
11025 	unsigned long *inner_fields = config->inner_fields;
11026 
11027 	/* IPv4 Inner */
11028 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
11029 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
11030 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
11031 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
11032 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
11033 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
11034 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
11035 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
11036 	/* IPv6 inner */
11037 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
11038 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
11039 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
11040 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
11041 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
11042 	}
11043 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
11044 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
11045 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
11046 	}
11047 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
11048 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
11049 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
11050 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
11051 	/* L4 inner */
11052 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
11053 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
11054 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
11055 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
11056 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
11057 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
11058 }
11059 
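/* Translate the net.ipv4.fib_multipath_hash_policy setting to RECR2
 * enables: 0 - outer L3, 1 - outer L3 and L4 ports, 2 - outer L3 plus
 * inner L3, 3 - custom field set taken from
 * net.ipv4.fib_multipath_hash_fields.
 */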
11060 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
11061 				   struct mlxsw_sp_mp_hash_config *config)
11062 {
11063 	struct net *net = mlxsw_sp_net(mlxsw_sp);
11064 	unsigned long *headers = config->headers;
11065 	unsigned long *fields = config->fields;
11066 	u32 hash_fields;
11067 
11068 	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
11069 	case 0:
11070 		mlxsw_sp_mp4_hash_outer_addr(config);
11071 		break;
11072 	case 1:
11073 		mlxsw_sp_mp4_hash_outer_addr(config);
11074 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
11075 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
11076 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11077 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11078 		break;
11079 	case 2:
11080 		/* Outer */
11081 		mlxsw_sp_mp4_hash_outer_addr(config);
11082 		/* Inner */
11083 		mlxsw_sp_mp_hash_inner_l3(config);
11084 		break;
11085 	case 3:
11086 		hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
11087 		/* Outer */
11088 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
11089 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
11090 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
11091 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
11092 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
11093 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
11094 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
11095 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
11096 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
11097 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
11098 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11099 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
11100 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11101 		/* Inner */
11102 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
11103 		break;
11104 	}
11105 }
11106 
11107 static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
11108 {
11109 	unsigned long *headers = config->headers;
11110 	unsigned long *fields = config->fields;
11111 
11112 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
11113 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
11114 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
11115 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
11116 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
11117 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
11118 }
11119 
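/* Same translation for IPv6. Hashing on inner headers (policy 2, and
 * policy 3 when inner fields are selected) also requires a larger
 * parsing depth, which is noted in the config and applied later.
 */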
11120 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
11121 				   struct mlxsw_sp_mp_hash_config *config)
11122 {
11123 	u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
11124 	unsigned long *headers = config->headers;
11125 	unsigned long *fields = config->fields;
11126 
11127 	switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
11128 	case 0:
11129 		mlxsw_sp_mp6_hash_outer_addr(config);
11130 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11131 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
11132 		break;
11133 	case 1:
11134 		mlxsw_sp_mp6_hash_outer_addr(config);
11135 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
11136 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11137 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11138 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11139 		break;
11140 	case 2:
11141 		/* Outer */
11142 		mlxsw_sp_mp6_hash_outer_addr(config);
11143 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11144 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
11145 		/* Inner */
11146 		mlxsw_sp_mp_hash_inner_l3(config);
11147 		config->inc_parsing_depth = true;
11148 		break;
11149 	case 3:
11150 		/* Outer */
11151 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
11152 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
11153 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
11154 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
11155 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
11156 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
11157 		}
11158 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
11159 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
11160 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
11161 		}
11162 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
11163 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11164 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
11165 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
11166 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
11167 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11168 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
11169 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11170 		/* Inner */
11171 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
11172 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
11173 			config->inc_parsing_depth = true;
11174 		break;
11175 	}
11176 }
11177 
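/* Increase or decrease the parsing depth only on an actual transition,
 * so that the reference taken via mlxsw_sp_parsing_depth_inc() stays
 * balanced across repeated reconfigurations.
 */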
11178 static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
11179 						 bool old_inc_parsing_depth,
11180 						 bool new_inc_parsing_depth)
11181 {
11182 	int err;
11183 
11184 	if (!old_inc_parsing_depth && new_inc_parsing_depth) {
11185 		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
11186 		if (err)
11187 			return err;
11188 		mlxsw_sp->router->inc_parsing_depth = true;
11189 	} else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
11190 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
11191 		mlxsw_sp->router->inc_parsing_depth = false;
11192 	}
11193 
11194 	return 0;
11195 }
11196 
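/* Seed the hash from the switch's base MAC so that it is stable for a
 * given system, while different switches in the network are likely to
 * use different seeds, avoiding hash polarization.
 */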
11197 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
11198 {
11199 	bool old_inc_parsing_depth, new_inc_parsing_depth;
11200 	struct mlxsw_sp_mp_hash_config config = {};
11201 	char recr2_pl[MLXSW_REG_RECR2_LEN];
11202 	unsigned long bit;
11203 	u32 seed;
11204 	int err;
11205 
11206 	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
11207 	mlxsw_reg_recr2_pack(recr2_pl, seed);
11208 	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
11209 	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
11210 
11211 	old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
11212 	new_inc_parsing_depth = config.inc_parsing_depth;
11213 	err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
11214 						    old_inc_parsing_depth,
11215 						    new_inc_parsing_depth);
11216 	if (err)
11217 		return err;
11218 
11219 	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
11220 		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
11221 	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
11222 		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
11223 	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
11224 		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
11225 	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
11226 		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
11227 
11228 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
11229 	if (err)
11230 		goto err_reg_write;
11231 
11232 	return 0;
11233 
11234 err_reg_write:
11235 	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
11236 					      old_inc_parsing_depth);
11237 	return err;
11238 }
11239 
11240 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
11241 {
11242 	bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
11243 
11244 	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
11245 					      false);
11246 }
11247 #else
11248 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
11249 {
11250 	return 0;
11251 }
11252 
11253 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
11254 {
11255 }
11256 #endif
11257 
11258 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
11259 {
11260 	char rdpm_pl[MLXSW_REG_RDPM_LEN];
11261 	unsigned int i;
11262 
11263 	MLXSW_REG_ZERO(rdpm, rdpm_pl);
11264 
11265 	/* The HW determines switch priority from the DSCP bits, but the
11266 	 * kernel still derives it from the full ToS byte. Bridge the
11267 	 * mismatch by translating each DSCP to the ToS the kernel would
11268 	 * observe, i.e. shifted past the 2 ECN bits: DSCP 46 -> ToS 184.
11269 	 */
11270 	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
11271 		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
11272 
11273 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
11274 }
11275 
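/* Enable the router in HW via RGCR, cap the number of router
 * interfaces and let HW update switch priority during forwarding
 * according to the ip_fwd_update_priority sysctl.
 */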
11276 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
11277 {
11278 	struct net *net = mlxsw_sp_net(mlxsw_sp);
11279 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
11280 	u64 max_rifs;
11281 	bool usp;
11282 
11283 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
11284 		return -EIO;
11285 	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
11286 	usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
11287 
11288 	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
11289 	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
11290 	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
11291 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
11292 }
11293 
11294 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
11295 {
11296 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
11297 
11298 	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
11299 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
11300 }
11301 
11302 static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp,
11303 				struct netlink_ext_ack *extack)
11304 {
11305 	struct mlxsw_sp_router *router = mlxsw_sp->router;
11306 	struct mlxsw_sp_rif *lb_rif;
11307 	int err;
11308 
11309 	router->lb_crif = mlxsw_sp_crif_alloc(NULL);
11310 	if (!router->lb_crif)
11311 		return -ENOMEM;
11312 
11313 	/* Create a generic loopback RIF associated with the main table
11314 	 * (default VRF). Any table can be used, but the main table exists
11315 	 * anyway, so we do not waste resources. Loopback RIFs are usually
11316 	 * created with a NULL CRIF, but this RIF is used as a fallback RIF
11317 	 * for blackhole nexthops, and nexthops expect to have a valid CRIF.
11318 	 */
11319 	lb_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN, router->lb_crif,
11320 				     extack);
11321 	if (IS_ERR(lb_rif)) {
11322 		err = PTR_ERR(lb_rif);
11323 		goto err_ul_rif_get;
11324 	}
11325 
11326 	return 0;
11327 
11328 err_ul_rif_get:
11329 	mlxsw_sp_crif_free(router->lb_crif);
11330 	return err;
11331 }
11332 
11333 static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
11334 {
11335 	mlxsw_sp_ul_rif_put(mlxsw_sp->router->lb_crif->rif);
11336 	mlxsw_sp_crif_free(mlxsw_sp->router->lb_crif);
11337 }
11338 
11339 static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
11340 {
11341 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
11342 
11343 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
11344 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
11345 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
11346 
11347 	return 0;
11348 }
11349 
11350 const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
11351 	.init = mlxsw_sp1_router_init,
11352 	.ipips_init = mlxsw_sp1_ipips_init,
11353 };
11354 
11355 static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
11356 {
11357 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
11358 
11359 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
11360 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
11361 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
11362 
11363 	return 0;
11364 }
11365 
11366 const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
11367 	.init = mlxsw_sp2_router_init,
11368 	.ipips_init = mlxsw_sp2_ipips_init,
11369 };
11370 
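/* Top-level router init. Ordering matters: data structures and
 * HW-facing configuration are set up first, notifiers are registered
 * last so that no events arrive before the router can handle them, and
 * the error labels unwind everything in reverse order.
 */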
11371 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
11372 			 struct netlink_ext_ack *extack)
11373 {
11374 	struct mlxsw_sp_router *router;
11375 	struct notifier_block *nb;
11376 	int err;
11377 
11378 	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
11379 	if (!router)
11380 		return -ENOMEM;
11381 	mutex_init(&router->lock);
11382 	mlxsw_sp->router = router;
11383 	router->mlxsw_sp = mlxsw_sp;
11384 
11385 	err = mlxsw_sp->router_ops->init(mlxsw_sp);
11386 	if (err)
11387 		goto err_router_ops_init;
11388 
11389 	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
11390 	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
11391 			  mlxsw_sp_nh_grp_activity_work);
11392 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
11393 	err = __mlxsw_sp_router_init(mlxsw_sp);
11394 	if (err)
11395 		goto err_router_init;
11396 
11397 	err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
11398 	if (err)
11399 		goto err_ipips_init;
11400 
11401 	err = rhashtable_init(&mlxsw_sp->router->crif_ht,
11402 			      &mlxsw_sp_crif_ht_params);
11403 	if (err)
11404 		goto err_crif_ht_init;
11405 
11406 	err = mlxsw_sp_rifs_init(mlxsw_sp);
11407 	if (err)
11408 		goto err_rifs_init;
11409 
11410 	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
11411 			      &mlxsw_sp_nexthop_ht_params);
11412 	if (err)
11413 		goto err_nexthop_ht_init;
11414 
11415 	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
11416 			      &mlxsw_sp_nexthop_group_ht_params);
11417 	if (err)
11418 		goto err_nexthop_group_ht_init;
11419 
11420 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
11421 	err = mlxsw_sp_lpm_init(mlxsw_sp);
11422 	if (err)
11423 		goto err_lpm_init;
11424 
11425 	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
11426 	if (err)
11427 		goto err_mr_init;
11428 
11429 	err = mlxsw_sp_vrs_init(mlxsw_sp);
11430 	if (err)
11431 		goto err_vrs_init;
11432 
11433 	err = mlxsw_sp_lb_rif_init(mlxsw_sp, extack);
11434 	if (err)
11435 		goto err_lb_rif_init;
11436 
11437 	err = mlxsw_sp_neigh_init(mlxsw_sp);
11438 	if (err)
11439 		goto err_neigh_init;
11440 
11441 	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
11442 	if (err)
11443 		goto err_mp_hash_init;
11444 
11445 	err = mlxsw_sp_dscp_init(mlxsw_sp);
11446 	if (err)
11447 		goto err_dscp_init;
11448 
11449 	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
11450 	err = register_inetaddr_notifier(&router->inetaddr_nb);
11451 	if (err)
11452 		goto err_register_inetaddr_notifier;
11453 
11454 	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
11455 	err = register_inet6addr_notifier(&router->inet6addr_nb);
11456 	if (err)
11457 		goto err_register_inet6addr_notifier;
11458 
11459 	router->inetaddr_valid_nb.notifier_call = mlxsw_sp_inetaddr_valid_event;
11460 	err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
11461 	if (err)
11462 		goto err_register_inetaddr_valid_notifier;
11463 
11464 	nb = &router->inet6addr_valid_nb;
11465 	nb->notifier_call = mlxsw_sp_inet6addr_valid_event;
11466 	err = register_inet6addr_validator_notifier(nb);
11467 	if (err)
11468 		goto err_register_inet6addr_valid_notifier;
11469 
11470 	mlxsw_sp->router->netevent_nb.notifier_call =
11471 		mlxsw_sp_router_netevent_event;
11472 	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
11473 	if (err)
11474 		goto err_register_netevent_notifier;
11475 
11476 	mlxsw_sp->router->netdevice_nb.notifier_call =
11477 		mlxsw_sp_router_netdevice_event;
11478 	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
11479 					      &mlxsw_sp->router->netdevice_nb);
11480 	if (err)
11481 		goto err_register_netdev_notifier;
11482 
11483 	mlxsw_sp->router->nexthop_nb.notifier_call =
11484 		mlxsw_sp_nexthop_obj_event;
11485 	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
11486 					&mlxsw_sp->router->nexthop_nb,
11487 					extack);
11488 	if (err)
11489 		goto err_register_nexthop_notifier;
11490 
11491 	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
11492 	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
11493 				    &mlxsw_sp->router->fib_nb,
11494 				    mlxsw_sp_router_fib_dump_flush, extack);
11495 	if (err)
11496 		goto err_register_fib_notifier;
11497 
11498 	return 0;
11499 
11500 err_register_fib_notifier:
11501 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
11502 				    &mlxsw_sp->router->nexthop_nb);
11503 err_register_nexthop_notifier:
11504 	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
11505 					  &router->netdevice_nb);
11506 err_register_netdev_notifier:
11507 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
11508 err_register_netevent_notifier:
11509 	unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
11510 err_register_inet6addr_valid_notifier:
11511 	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
11512 err_register_inetaddr_valid_notifier:
11513 	unregister_inet6addr_notifier(&router->inet6addr_nb);
11514 err_register_inet6addr_notifier:
11515 	unregister_inetaddr_notifier(&router->inetaddr_nb);
11516 err_register_inetaddr_notifier:
11517 	mlxsw_core_flush_owq();
11518 err_dscp_init:
11519 	mlxsw_sp_mp_hash_fini(mlxsw_sp);
11520 err_mp_hash_init:
11521 	mlxsw_sp_neigh_fini(mlxsw_sp);
11522 err_neigh_init:
11523 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
11524 err_lb_rif_init:
11525 	mlxsw_sp_vrs_fini(mlxsw_sp);
11526 err_vrs_init:
11527 	mlxsw_sp_mr_fini(mlxsw_sp);
11528 err_mr_init:
11529 	mlxsw_sp_lpm_fini(mlxsw_sp);
11530 err_lpm_init:
11531 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
11532 err_nexthop_group_ht_init:
11533 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
11534 err_nexthop_ht_init:
11535 	mlxsw_sp_rifs_fini(mlxsw_sp);
11536 err_rifs_init:
11537 	rhashtable_destroy(&mlxsw_sp->router->crif_ht);
11538 err_crif_ht_init:
11539 	mlxsw_sp_ipips_fini(mlxsw_sp);
11540 err_ipips_init:
11541 	__mlxsw_sp_router_fini(mlxsw_sp);
11542 err_router_init:
11543 	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
11544 err_router_ops_init:
11545 	mutex_destroy(&mlxsw_sp->router->lock);
11546 	kfree(mlxsw_sp->router);
11547 	return err;
11548 }
11549 
11550 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
11551 {
11552 	struct mlxsw_sp_router *router = mlxsw_sp->router;
11553 
11554 	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &router->fib_nb);
11555 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
11556 				    &router->nexthop_nb);
11557 	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
11558 					  &router->netdevice_nb);
11559 	unregister_netevent_notifier(&router->netevent_nb);
11560 	unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
11561 	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
11562 	unregister_inet6addr_notifier(&router->inet6addr_nb);
11563 	unregister_inetaddr_notifier(&router->inetaddr_nb);
11564 	mlxsw_core_flush_owq();
11565 	mlxsw_sp_mp_hash_fini(mlxsw_sp);
11566 	mlxsw_sp_neigh_fini(mlxsw_sp);
11567 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
11568 	mlxsw_sp_vrs_fini(mlxsw_sp);
11569 	mlxsw_sp_mr_fini(mlxsw_sp);
11570 	mlxsw_sp_lpm_fini(mlxsw_sp);
11571 	rhashtable_destroy(&router->nexthop_group_ht);
11572 	rhashtable_destroy(&router->nexthop_ht);
11573 	mlxsw_sp_rifs_fini(mlxsw_sp);
11574 	rhashtable_destroy(&mlxsw_sp->router->crif_ht);
11575 	mlxsw_sp_ipips_fini(mlxsw_sp);
11576 	__mlxsw_sp_router_fini(mlxsw_sp);
11577 	cancel_delayed_work_sync(&router->nh_grp_activity_dw);
11578 	mutex_destroy(&router->lock);
11579 	kfree(router);
11580 }
11581