xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c (revision 4e73826089ce899357580bbf6e0afe4e6f9900b7)
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <linux/genalloc.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

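/* A CRIF ("candidate RIF") tracks a netdevice that the router may need a RIF
 * for. Nexthops that resolve via the netdevice hang off ->nexthop_list, and
 * ->rif points at the RIF currently backing the netdevice, if any.
 */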
struct mlxsw_sp_crif_key {
	struct net_device *dev;
};

struct mlxsw_sp_crif {
	struct mlxsw_sp_crif_key key;
	struct rhash_head ht_node;
	bool can_destroy;
	struct list_head nexthop_list;
	struct mlxsw_sp_rif *rif;
};

static const struct rhashtable_params mlxsw_sp_crif_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_crif, key),
	.key_len = sizeof_field(struct mlxsw_sp_crif, key),
	.head_offset = offsetof(struct mlxsw_sp_crif, ht_node),
};

struct mlxsw_sp_rif {
	struct mlxsw_sp_crif *crif; /* NULL for underlay RIF */
	netdevice_tracker dev_tracker;
	struct list_head neigh_list;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u8 mac_profile_id;
	u8 rif_entries;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

static struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
{
	if (!rif->crif)
		return NULL;
	return rif->crif->key.dev;
}

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
	bool double_entry;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id;	/* Spectrum-1. */
	u16 ul_rif_id;	/* Spectrum-2+. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 const struct mlxsw_sp_rif_params *params,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

struct mlxsw_sp_rif_mac_profile {
	unsigned char mac_prefix[ETH_ALEN];
	refcount_t ref_count;
	u8 id;
};

struct mlxsw_sp_router_ops {
	int (*init)(struct mlxsw_sp *mlxsw_sp);
	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

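/* RITR is read-modify-written: query the current RIF configuration first,
 * then write it back with only the counter binding changed.
 */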
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

struct mlxsw_sp_rif_counter_set_basic {
	u64 good_unicast_packets;
	u64 good_multicast_packets;
	u64 good_broadcast_packets;
	u64 good_unicast_bytes;
	u64 good_multicast_bytes;
	u64 good_broadcast_bytes;
	u64 error_packets;
	u64 discard_packets;
	u64 error_bytes;
	u64 discard_bytes;
};

static int
mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
				 enum mlxsw_sp_rif_counter_dir dir,
				 struct mlxsw_sp_rif_counter_set_basic *set)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	int err;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;

	if (!set)
		return 0;

#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME)				\
		(set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))

	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);

#undef MLXSW_SP_RIF_COUNTER_EXTRACT

	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;
	int err;

	if (mlxsw_sp_rif_counter_valid_get(rif, dir))
		return 0;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

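/* One bit per possible prefix length, 0 through 128 (IPv6). */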
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

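/* Key of a FIB node: address plus prefix length. The address buffer is sized
 * for IPv6; IPv4 uses only its first four bytes.
 */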
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group_info;
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;

struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	struct fib_info *fi;
	u32 tb_id;
	dscp_t dscp;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

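/* Program the tree layout: the longest used prefix length becomes the root
 * bin, and each other used prefix length (zero excepted) is linked in as the
 * left child of the next longer one.
 */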
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					     sizeof(struct mlxsw_sp_lpm_tree),
					     GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr) {
		err = -ESRCH;
		goto out;
	}
	*vr_id = vr->id;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

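/* Look up the VR serving a kernel table ID, creating it on first use. */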
static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

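/* Make-before-break: bind the VR to the new tree before putting the old one,
 * and roll the binding back if the bind fails.
 */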
static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}

static void
mlxsw_sp_crif_init(struct mlxsw_sp_crif *crif, struct net_device *dev)
{
	crif->key.dev = dev;
	INIT_LIST_HEAD(&crif->nexthop_list);
}

static struct mlxsw_sp_crif *
mlxsw_sp_crif_alloc(struct net_device *dev)
{
	struct mlxsw_sp_crif *crif;

	crif = kzalloc(sizeof(*crif), GFP_KERNEL);
	if (!crif)
		return NULL;

	mlxsw_sp_crif_init(crif, dev);
	return crif;
}

static void mlxsw_sp_crif_free(struct mlxsw_sp_crif *crif)
{
	if (WARN_ON(crif->rif))
		return;

	WARN_ON(!list_empty(&crif->nexthop_list));
	kfree(crif);
}

static int mlxsw_sp_crif_insert(struct mlxsw_sp_router *router,
				struct mlxsw_sp_crif *crif)
{
	return rhashtable_insert_fast(&router->crif_ht, &crif->ht_node,
				      mlxsw_sp_crif_ht_params);
}

static void mlxsw_sp_crif_remove(struct mlxsw_sp_router *router,
				 struct mlxsw_sp_crif *crif)
{
	rhashtable_remove_fast(&router->crif_ht, &crif->ht_node,
			       mlxsw_sp_crif_ht_params);
}

static struct mlxsw_sp_crif *
mlxsw_sp_crif_lookup(struct mlxsw_sp_router *router,
		     const struct net_device *dev)
{
	struct mlxsw_sp_crif_key key = {
		.dev = (struct net_device *)dev,
	};

	return rhashtable_lookup_fast(&router->crif_ht, &key,
				      mlxsw_sp_crif_ht_params);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

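/* Create the loopback RIF that represents the tunnel's overlay netdevice in
 * the device's routing tables.
 */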
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.common.double_entry = ipip_ops->double_rif_entry,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;
	int err;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = ipip_ops->parms_init(ol_dev);

	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
	if (err) {
		ret = ERR_PTR(err);
		goto err_rem_ip_addr_set;
	}

	return ipip_entry;

err_rem_ip_addr_set:
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
						 enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* Not all tunnels require increasing the default parsing depth
	 * (96 bytes).
	 */
	if (ipip_ops->inc_parsing_depth)
		return mlxsw_sp_parsing_depth_inc(mlxsw_sp);

	return 0;
}

static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
						  enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipipt];

	if (ipip_ops->inc_parsing_depth)
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
}

static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
						    ipip_entry->ipipt);
	if (err)
		goto err_parsing_depth_inc;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;

	return 0;

err_parsing_depth_inc:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   tunnel_index);
	return err;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;

	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

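/* Find the FIB entry of the given type for the host address @addr in table
 * @tb_id, if any.
 */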
static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		addrp = &addr->addr6;
		addr_len = 16;
		addr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

	return fib_node->fib_entry;
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	static struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		saddrp = &saddr.addr6;
		saddr_len = 16;
		saddr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node ||
	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_node->fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	struct net_device *ul_dev;

	rcu_read_lock();

	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
	if (!ul_dev)
		goto out_unlock;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			goto out_unlock;

	rcu_read_unlock();

	return NULL;

out_unlock:
	rcu_read_unlock();
	return ipip_entry;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	return ops->can_offload(mlxsw_sp, ol_dev);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}

static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
			u16 ul_rif_id, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
	enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	struct in6_addr *saddr6;
	u32 saddr4;

	ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr4,
						   lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		saddr6 = &lb_cf.saddr.addr6;
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, dev->mtu);
		mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr6,
						   lb_cf.okey);
		break;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;
	int err = 0;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		lb_rif = ipip_entry->ol_lb;
		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
					      lb_rif->ul_rif_id, true);
		if (err)
			goto out;
		lb_rif->common.mtu = ol_dev->mtu;
	}

out:
	return err;
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif);

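/* Destroy @old_rif, optionally migrating its nexthops to @new_rif first. */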
1730 static void mlxsw_sp_rif_migrate_destroy(struct mlxsw_sp *mlxsw_sp,
1731 					 struct mlxsw_sp_rif *old_rif,
1732 					 struct mlxsw_sp_rif *new_rif,
1733 					 bool migrate_nhs)
1734 {
1735 	struct mlxsw_sp_crif *crif = old_rif->crif;
1736 	struct mlxsw_sp_crif mock_crif = {};
1737 
1738 	if (migrate_nhs)
1739 		mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
1740 
1741 	/* Plant a mock CRIF so that destroying the old RIF doesn't unoffload
1742 	 * our nexthops and IPIP tunnels, and doesn't sever the crif->rif link.
1743 	 */
1744 	mlxsw_sp_crif_init(&mock_crif, crif->key.dev);
1745 	old_rif->crif = &mock_crif;
1746 	mock_crif.rif = old_rif;
1747 	mlxsw_sp_rif_destroy(old_rif);
1748 }
1749 
1750 static int
1751 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1752 				 struct mlxsw_sp_ipip_entry *ipip_entry,
1753 				 bool keep_encap,
1754 				 struct netlink_ext_ack *extack)
1755 {
1756 	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1757 	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1758 
1759 	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1760 						     ipip_entry->ipipt,
1761 						     ipip_entry->ol_dev,
1762 						     extack);
1763 	if (IS_ERR(new_lb_rif))
1764 		return PTR_ERR(new_lb_rif);
1765 	ipip_entry->ol_lb = new_lb_rif;
1766 
1767 	mlxsw_sp_rif_migrate_destroy(mlxsw_sp, &old_lb_rif->common,
1768 				     &new_lb_rif->common, keep_encap);
1769 	return 0;
1770 }
1771 
1772 /**
1773  * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1774  * @mlxsw_sp: mlxsw_sp.
1775  * @ipip_entry: IPIP entry.
1776  * @recreate_loopback: Recreates the associated loopback RIF.
1777  * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1778  *              relevant when recreate_loopback is true.
1779  * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1780  *                   is only relevant when recreate_loopback is false.
1781  * @extack: extack.
1782  *
1783  * Return: Non-zero value on failure.
1784  */
1785 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1786 					struct mlxsw_sp_ipip_entry *ipip_entry,
1787 					bool recreate_loopback,
1788 					bool keep_encap,
1789 					bool update_nexthops,
1790 					struct netlink_ext_ack *extack)
1791 {
1792 	int err;
1793 
1794 	/* RIFs can't be edited, so to update loopback, we need to destroy and
1795 	 * recreate it. That creates a window of opportunity where RALUE and
1796 	 * RATR registers end up referencing a RIF that's already gone. RATRs
1797 	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1798 	 * of RALUE, demote the decap route back.
1799 	 */
1800 	if (ipip_entry->decap_fib_entry)
1801 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1802 
1803 	if (recreate_loopback) {
1804 		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1805 						       keep_encap, extack);
1806 		if (err)
1807 			return err;
1808 	} else if (update_nexthops) {
1809 		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1810 					    &ipip_entry->ol_lb->common);
1811 	}
1812 
1813 	if (ipip_entry->ol_dev->flags & IFF_UP)
1814 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1815 
1816 	return 0;
1817 }
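
/* For reference, the callers below use exactly three combinations of
 * the knobs above:
 *
 *   overlay moved to a VRF:   recreate_loopback=true,  keep_encap=false
 *   underlay moved to a VRF:  recreate_loopback=true,  keep_encap=true
 *   underlay went up / down:  update_nexthops=true (loopback is kept)
 */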
1818 
1819 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1820 						struct net_device *ol_dev,
1821 						struct netlink_ext_ack *extack)
1822 {
1823 	struct mlxsw_sp_ipip_entry *ipip_entry =
1824 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1825 
1826 	if (!ipip_entry)
1827 		return 0;
1828 
1829 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1830 						   true, false, false, extack);
1831 }
1832 
1833 static int
1834 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1835 				     struct mlxsw_sp_ipip_entry *ipip_entry,
1836 				     struct net_device *ul_dev,
1837 				     bool *demote_this,
1838 				     struct netlink_ext_ack *extack)
1839 {
1840 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1841 	enum mlxsw_sp_l3proto ul_proto;
1842 	union mlxsw_sp_l3addr saddr;
1843 
1844 	/* Moving the underlay to a different VRF might cause a local address
1845 	 * conflict, in which case the conflicting tunnels need to be demoted.
1846 	 */
1847 	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1848 	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1849 	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1850 						 saddr, ul_tb_id,
1851 						 ipip_entry)) {
1852 		*demote_this = true;
1853 		return 0;
1854 	}
1855 
1856 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1857 						   true, true, false, extack);
1858 }
1859 
1860 static int
1861 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1862 				    struct mlxsw_sp_ipip_entry *ipip_entry,
1863 				    struct net_device *ul_dev)
1864 {
1865 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1866 						   false, false, true, NULL);
1867 }
1868 
1869 static int
1870 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1871 				      struct mlxsw_sp_ipip_entry *ipip_entry,
1872 				      struct net_device *ul_dev)
1873 {
1874 	/* When the underlay device is down, encapsulated packets are not
1875 	 * forwarded, but decap still works. So refresh the nexthops without
1876 	 * touching anything else.
1877 	 */
1878 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1879 						   false, false, true, NULL);
1880 }
1881 
1882 static int
1883 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1884 					struct net_device *ol_dev,
1885 					struct netlink_ext_ack *extack)
1886 {
1887 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1888 	struct mlxsw_sp_ipip_entry *ipip_entry;
1889 	int err;
1890 
1891 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1892 	if (!ipip_entry)
1893 		/* A change might make a tunnel eligible for offloading, but
1894 		 * that is currently not implemented. What falls to the slow
1895 		 * path stays there.
1896 		 */
1897 		return 0;
1898 
1899 	/* A change might make a tunnel not eligible for offloading. */
1900 	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1901 						 ipip_entry->ipipt)) {
1902 		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1903 		return 0;
1904 	}
1905 
1906 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1907 	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1908 	return err;
1909 }
1910 
1911 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1912 				       struct mlxsw_sp_ipip_entry *ipip_entry)
1913 {
1914 	struct net_device *ol_dev = ipip_entry->ol_dev;
1915 
1916 	if (ol_dev->flags & IFF_UP)
1917 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1918 	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1919 }
1920 
1921 /* The configuration where several tunnels have the same local address in the
1922  * same underlay table needs special treatment in the HW. That is currently not
1923  * implemented in the driver. This function finds and demotes the first tunnel
1924  * with a given source address, except the one passed in the argument
1925  * `except'.
1926  */
1927 bool
1928 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1929 				     enum mlxsw_sp_l3proto ul_proto,
1930 				     union mlxsw_sp_l3addr saddr,
1931 				     u32 ul_tb_id,
1932 				     const struct mlxsw_sp_ipip_entry *except)
1933 {
1934 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1935 
1936 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1937 				 ipip_list_node) {
1938 		if (ipip_entry != except &&
1939 		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1940 						      ul_tb_id, ipip_entry)) {
1941 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1942 			return true;
1943 		}
1944 	}
1945 
1946 	return false;
1947 }
1948 
1949 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1950 						     struct net_device *ul_dev)
1951 {
1952 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1953 
1954 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1955 				 ipip_list_node) {
1956 		struct net_device *ol_dev = ipip_entry->ol_dev;
1957 		struct net_device *ipip_ul_dev;
1958 
1959 		rcu_read_lock();
1960 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1961 		rcu_read_unlock();
1962 		if (ipip_ul_dev == ul_dev)
1963 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1964 	}
1965 }
1966 
1967 static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1968 					    struct net_device *ol_dev,
1969 					    unsigned long event,
1970 					    struct netdev_notifier_info *info)
1971 {
1972 	struct netdev_notifier_changeupper_info *chup;
1973 	struct netlink_ext_ack *extack;
1974 	int err = 0;
1975 
1976 	switch (event) {
1977 	case NETDEV_REGISTER:
1978 		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1979 		break;
1980 	case NETDEV_UNREGISTER:
1981 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1982 		break;
1983 	case NETDEV_UP:
1984 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1985 		break;
1986 	case NETDEV_DOWN:
1987 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1988 		break;
1989 	case NETDEV_CHANGEUPPER:
1990 		chup = container_of(info, typeof(*chup), info);
1991 		extack = info->extack;
1992 		if (netif_is_l3_master(chup->upper_dev))
1993 			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1994 								   ol_dev,
1995 								   extack);
1996 		break;
1997 	case NETDEV_CHANGE:
1998 		extack = info->extack;
1999 		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
2000 							      ol_dev, extack);
2001 		break;
2002 	case NETDEV_CHANGEMTU:
2003 		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
2004 		break;
2005 	}
2006 	return err;
2007 }
2008 
2009 static int
2010 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2011 				   struct mlxsw_sp_ipip_entry *ipip_entry,
2012 				   struct net_device *ul_dev,
2013 				   bool *demote_this,
2014 				   unsigned long event,
2015 				   struct netdev_notifier_info *info)
2016 {
2017 	struct netdev_notifier_changeupper_info *chup;
2018 	struct netlink_ext_ack *extack;
2019 
2020 	switch (event) {
2021 	case NETDEV_CHANGEUPPER:
2022 		chup = container_of(info, typeof(*chup), info);
2023 		extack = info->extack;
2024 		if (netif_is_l3_master(chup->upper_dev))
2025 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
2026 								    ipip_entry,
2027 								    ul_dev,
2028 								    demote_this,
2029 								    extack);
2030 		break;
2031 
2032 	case NETDEV_UP:
2033 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
2034 							   ul_dev);
2035 	case NETDEV_DOWN:
2036 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
2037 							     ipip_entry,
2038 							     ul_dev);
2039 	}
2040 	return 0;
2041 }
2042 
2043 static int
2044 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2045 				 struct net_device *ul_dev,
2046 				 unsigned long event,
2047 				 struct netdev_notifier_info *info)
2048 {
2049 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
2050 	int err;
2051 
2052 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
2053 								ul_dev,
2054 								ipip_entry))) {
2055 		struct mlxsw_sp_ipip_entry *prev;
2056 		bool demote_this = false;
2057 
2058 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
2059 							 ul_dev, &demote_this,
2060 							 event, info);
2061 		if (err) {
2062 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
2063 								 ul_dev);
2064 			return err;
2065 		}
2066 
2067 		if (demote_this) {
2068 			if (list_is_first(&ipip_entry->ipip_list_node,
2069 					  &mlxsw_sp->router->ipip_list))
2070 				prev = NULL;
2071 			else
2072 				/* This can't be cached from previous iteration,
2073 				 * because that entry could be gone now.
2074 				 */
2075 				prev = list_prev_entry(ipip_entry,
2076 						       ipip_list_node);
2077 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
2078 			ipip_entry = prev;
2079 		}
2080 	}
2081 
2082 	return 0;
2083 }
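
/* The loop above is restartable: mlxsw_sp_ipip_entry_find_by_ul_dev()
 * resumes the search after the entry it is given, and when the current
 * entry is demoted (and thus unlinked from the list), iteration backs
 * up to its predecessor so that no entry is skipped.
 */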
2084 
2085 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2086 				      enum mlxsw_sp_l3proto ul_proto,
2087 				      const union mlxsw_sp_l3addr *ul_sip,
2088 				      u32 tunnel_index)
2089 {
2090 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2091 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2092 	struct mlxsw_sp_fib_entry *fib_entry;
2093 	int err = 0;
2094 
2095 	mutex_lock(&mlxsw_sp->router->lock);
2096 
2097 	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
2098 		err = -EINVAL;
2099 		goto out;
2100 	}
2101 
2102 	router->nve_decap_config.ul_tb_id = ul_tb_id;
2103 	router->nve_decap_config.tunnel_index = tunnel_index;
2104 	router->nve_decap_config.ul_proto = ul_proto;
2105 	router->nve_decap_config.ul_sip = *ul_sip;
2106 	router->nve_decap_config.valid = true;
2107 
2108 	/* It is valid to create a tunnel with a local IP and only later
2109 	 * assign this IP address to a local interface.
2110 	 */
2111 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2112 							 ul_proto, ul_sip,
2113 							 type);
2114 	if (!fib_entry)
2115 		goto out;
2116 
2117 	fib_entry->decap.tunnel_index = tunnel_index;
2118 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2119 
2120 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2121 	if (err)
2122 		goto err_fib_entry_update;
2123 
2124 	goto out;
2125 
2126 err_fib_entry_update:
2127 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2128 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2129 out:
2130 	mutex_unlock(&mlxsw_sp->router->lock);
2131 	return err;
2132 }
2133 
2134 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2135 				      enum mlxsw_sp_l3proto ul_proto,
2136 				      const union mlxsw_sp_l3addr *ul_sip)
2137 {
2138 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2139 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2140 	struct mlxsw_sp_fib_entry *fib_entry;
2141 
2142 	mutex_lock(&mlxsw_sp->router->lock);
2143 
2144 	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
2145 		goto out;
2146 
2147 	router->nve_decap_config.valid = false;
2148 
2149 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2150 							 ul_proto, ul_sip,
2151 							 type);
2152 	if (!fib_entry)
2153 		goto out;
2154 
2155 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2156 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2157 out:
2158 	mutex_unlock(&mlxsw_sp->router->lock);
2159 }
2160 
2161 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2162 					 u32 ul_tb_id,
2163 					 enum mlxsw_sp_l3proto ul_proto,
2164 					 const union mlxsw_sp_l3addr *ul_sip)
2165 {
2166 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2167 
2168 	return router->nve_decap_config.valid &&
2169 	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
2170 	       router->nve_decap_config.ul_proto == ul_proto &&
2171 	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2172 		       sizeof(*ul_sip));
2173 }
2174 
2175 struct mlxsw_sp_neigh_key {
2176 	struct neighbour *n;
2177 };
2178 
2179 struct mlxsw_sp_neigh_entry {
2180 	struct list_head rif_list_node;
2181 	struct rhash_head ht_node;
2182 	struct mlxsw_sp_neigh_key key;
2183 	u16 rif;
2184 	bool connected;
2185 	unsigned char ha[ETH_ALEN];
2186 	struct list_head nexthop_list; /* list of nexthops using
2187 					* this neigh entry
2188 					*/
2189 	struct list_head nexthop_neighs_list_node;
2190 	unsigned int counter_index;
2191 	bool counter_valid;
2192 };
2193 
2194 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2195 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2196 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2197 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
2198 };
2199 
2200 struct mlxsw_sp_neigh_entry *
2201 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2202 			struct mlxsw_sp_neigh_entry *neigh_entry)
2203 {
2204 	if (!neigh_entry) {
2205 		if (list_empty(&rif->neigh_list))
2206 			return NULL;
2207 		else
2208 			return list_first_entry(&rif->neigh_list,
2209 						typeof(*neigh_entry),
2210 						rif_list_node);
2211 	}
2212 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2213 		return NULL;
2214 	return list_next_entry(neigh_entry, rif_list_node);
2215 }
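
/* A minimal usage sketch (hypothetical caller, not part of the driver):
 * the iterator above is cursor-style -- pass NULL to start, feed the
 * returned entry back in to advance, and stop on NULL.
 */
static inline void example_visit_rif_neighs(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = NULL;

	while ((neigh_entry = mlxsw_sp_rif_neigh_next(rif, neigh_entry))) {
		/* e.g., inspect mlxsw_sp_neigh_entry_ha(neigh_entry) */
	}
}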
2216 
2217 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2218 {
2219 	return neigh_entry->key.n->tbl->family;
2220 }
2221 
2222 unsigned char *
2223 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2224 {
2225 	return neigh_entry->ha;
2226 }
2227 
2228 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2229 {
2230 	struct neighbour *n;
2231 
2232 	n = neigh_entry->key.n;
2233 	return ntohl(*((__be32 *) n->primary_key));
2234 }
2235 
2236 struct in6_addr *
2237 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2238 {
2239 	struct neighbour *n;
2240 
2241 	n = neigh_entry->key.n;
2242 	return (struct in6_addr *) &n->primary_key;
2243 }
2244 
2245 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2246 			       struct mlxsw_sp_neigh_entry *neigh_entry,
2247 			       u64 *p_counter)
2248 {
2249 	if (!neigh_entry->counter_valid)
2250 		return -EINVAL;
2251 
2252 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2253 					 p_counter, NULL);
2254 }
2255 
2256 static struct mlxsw_sp_neigh_entry *
2257 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2258 			   u16 rif)
2259 {
2260 	struct mlxsw_sp_neigh_entry *neigh_entry;
2261 
2262 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2263 	if (!neigh_entry)
2264 		return NULL;
2265 
2266 	neigh_entry->key.n = n;
2267 	neigh_entry->rif = rif;
2268 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2269 
2270 	return neigh_entry;
2271 }
2272 
2273 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2274 {
2275 	kfree(neigh_entry);
2276 }
2277 
2278 static int
2279 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2280 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2281 {
2282 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2283 				      &neigh_entry->ht_node,
2284 				      mlxsw_sp_neigh_ht_params);
2285 }
2286 
2287 static void
2288 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2289 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2290 {
2291 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2292 			       &neigh_entry->ht_node,
2293 			       mlxsw_sp_neigh_ht_params);
2294 }
2295 
2296 static bool
2297 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2298 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2299 {
2300 	struct devlink *devlink;
2301 	const char *table_name;
2302 
2303 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2304 	case AF_INET:
2305 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2306 		break;
2307 	case AF_INET6:
2308 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2309 		break;
2310 	default:
2311 		WARN_ON(1);
2312 		return false;
2313 	}
2314 
2315 	devlink = priv_to_devlink(mlxsw_sp->core);
2316 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2317 }
2318 
2319 static void
2320 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2321 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2322 {
2323 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2324 		return;
2325 
2326 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2327 		return;
2328 
2329 	neigh_entry->counter_valid = true;
2330 }
2331 
2332 static void
2333 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2334 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2335 {
2336 	if (!neigh_entry->counter_valid)
2337 		return;
2338 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2339 				   neigh_entry->counter_index);
2340 	neigh_entry->counter_valid = false;
2341 }
2342 
2343 static struct mlxsw_sp_neigh_entry *
2344 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2345 {
2346 	struct mlxsw_sp_neigh_entry *neigh_entry;
2347 	struct mlxsw_sp_rif *rif;
2348 	int err;
2349 
2350 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2351 	if (!rif)
2352 		return ERR_PTR(-EINVAL);
2353 
2354 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2355 	if (!neigh_entry)
2356 		return ERR_PTR(-ENOMEM);
2357 
2358 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2359 	if (err)
2360 		goto err_neigh_entry_insert;
2361 
2362 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2363 	atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
2364 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2365 
2366 	return neigh_entry;
2367 
2368 err_neigh_entry_insert:
2369 	mlxsw_sp_neigh_entry_free(neigh_entry);
2370 	return ERR_PTR(err);
2371 }
2372 
2373 static void
2374 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2375 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2376 {
2377 	list_del(&neigh_entry->rif_list_node);
2378 	atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
2379 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2380 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2381 	mlxsw_sp_neigh_entry_free(neigh_entry);
2382 }
2383 
2384 static struct mlxsw_sp_neigh_entry *
2385 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2386 {
2387 	struct mlxsw_sp_neigh_key key;
2388 
2389 	key.n = n;
2390 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2391 				      &key, mlxsw_sp_neigh_ht_params);
2392 }
2393 
2394 static void
2395 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2396 {
2397 	unsigned long interval;
2398 
2399 #if IS_ENABLED(CONFIG_IPV6)
2400 	interval = min_t(unsigned long,
2401 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2402 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2403 #else
2404 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2405 #endif
2406 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2407 }
2408 
2409 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2410 						   char *rauhtd_pl,
2411 						   int ent_index)
2412 {
2413 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2414 	struct net_device *dev;
2415 	struct neighbour *n;
2416 	__be32 dipn;
2417 	u32 dip;
2418 	u16 rif;
2419 
2420 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2421 
2422 	if (WARN_ON_ONCE(rif >= max_rifs))
2423 		return;
2424 	if (!mlxsw_sp->router->rifs[rif]) {
2425 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2426 		return;
2427 	}
2428 
2429 	dipn = htonl(dip);
2430 	dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2431 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2432 	if (!n)
2433 		return;
2434 
2435 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2436 	neigh_event_send(n, NULL);
2437 	neigh_release(n);
2438 }
2439 
2440 #if IS_ENABLED(CONFIG_IPV6)
2441 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2442 						   char *rauhtd_pl,
2443 						   int rec_index)
2444 {
2445 	struct net_device *dev;
2446 	struct neighbour *n;
2447 	struct in6_addr dip;
2448 	u16 rif;
2449 
2450 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2451 					 (char *) &dip);
2452 
2453 	if (!mlxsw_sp->router->rifs[rif]) {
2454 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2455 		return;
2456 	}
2457 
2458 	dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2459 	n = neigh_lookup(&nd_tbl, &dip, dev);
2460 	if (!n)
2461 		return;
2462 
2463 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2464 	neigh_event_send(n, NULL);
2465 	neigh_release(n);
2466 }
2467 #else
2468 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2469 						   char *rauhtd_pl,
2470 						   int rec_index)
2471 {
2472 }
2473 #endif
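
/* The empty stub above keeps mlxsw_sp_router_neigh_rec_process() below
 * free of #ifdef CONFIG_IPV6 at its call site.
 */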
2474 
2475 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2476 						   char *rauhtd_pl,
2477 						   int rec_index)
2478 {
2479 	u8 num_entries;
2480 	int i;
2481 
2482 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2483 								rec_index);
2484 	/* Hardware starts counting at 0, so add 1. */
2485 	num_entries++;
2486 
2487 	/* Each record consists of several neighbour entries. */
2488 	for (i = 0; i < num_entries; i++) {
2489 		int ent_index;
2490 
2491 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2492 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2493 						       ent_index);
2494 	}
2495 
2497 }
2498 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2499 						   char *rauhtd_pl,
2500 						   int rec_index)
2501 {
2502 	/* One record contains one entry. */
2503 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2504 					       rec_index);
2505 }
2506 
2507 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2508 					      char *rauhtd_pl, int rec_index)
2509 {
2510 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2511 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2512 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2513 						       rec_index);
2514 		break;
2515 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2516 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2517 						       rec_index);
2518 		break;
2519 	}
2520 }
2521 
2522 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2523 {
2524 	u8 num_rec, last_rec_index, num_entries;
2525 
2526 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2527 	last_rec_index = num_rec - 1;
2528 
2529 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2530 		return false;
2531 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2532 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2533 		return true;
2534 
2535 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2536 								last_rec_index);
2537 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2538 		return true;
2539 	return false;
2540 }
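
/* "Full" means every record slot of the response is used and the last
 * record is itself full (an IPv6 record always is; an IPv4 record is
 * full at MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC entries), in which case the
 * dump loop below issues another query.
 */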
2541 
2542 static int
2543 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2544 				       char *rauhtd_pl,
2545 				       enum mlxsw_reg_rauhtd_type type)
2546 {
2547 	int i, num_rec;
2548 	int err;
2549 
2550 	/* Ensure the RIF we read from the device does not change mid-dump. */
2551 	mutex_lock(&mlxsw_sp->router->lock);
2552 	do {
2553 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2554 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2555 				      rauhtd_pl);
2556 		if (err) {
2557 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2558 			break;
2559 		}
2560 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2561 		for (i = 0; i < num_rec; i++)
2562 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2563 							  i);
2564 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2565 	mutex_unlock(&mlxsw_sp->router->lock);
2566 
2567 	return err;
2568 }
2569 
2570 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2571 {
2572 	enum mlxsw_reg_rauhtd_type type;
2573 	char *rauhtd_pl;
2574 	int err;
2575 
2576 	if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
2577 		return 0;
2578 
2579 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2580 	if (!rauhtd_pl)
2581 		return -ENOMEM;
2582 
2583 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2584 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2585 	if (err)
2586 		goto out;
2587 
2588 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2589 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2590 out:
2591 	kfree(rauhtd_pl);
2592 	return err;
2593 }
2594 
2595 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2596 {
2597 	struct mlxsw_sp_neigh_entry *neigh_entry;
2598 
2599 	mutex_lock(&mlxsw_sp->router->lock);
2600 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2601 			    nexthop_neighs_list_node)
2602 		/* If this neigh has nexthops, make the kernel think it is
2603 		 * active regardless of traffic.
2604 		 */
2605 		neigh_event_send(neigh_entry->key.n, NULL);
2606 	mutex_unlock(&mlxsw_sp->router->lock);
2607 }
2608 
2609 static void
2610 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2611 {
2612 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2613 
2614 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2615 			       msecs_to_jiffies(interval));
2616 }
2617 
2618 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2619 {
2620 	struct mlxsw_sp_router *router;
2621 	int err;
2622 
2623 	router = container_of(work, struct mlxsw_sp_router,
2624 			      neighs_update.dw.work);
2625 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2626 	if (err)
2627 		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
2628 
2629 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2630 
2631 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2632 }
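
/* The work above rearms itself; its period tracks the kernel's
 * DELAY_PROBE_TIME, both at init time and via
 * NETEVENT_DELAY_PROBE_TIME_UPDATE below.
 */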
2633 
2634 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2635 {
2636 	struct mlxsw_sp_neigh_entry *neigh_entry;
2637 	struct mlxsw_sp_router *router;
2638 
2639 	router = container_of(work, struct mlxsw_sp_router,
2640 			      nexthop_probe_dw.work);
2641 	/* Iterate over nexthop neighbours, find the unresolved ones and
2642 	 * send ARP for them. This solves the chicken-and-egg problem where
2643 	 * a nexthop would not get offloaded until its neighbour is resolved,
2644 	 * but the neighbour would never get resolved as long as traffic
2645 	 * flows in HW via a different nexthop.
2646 	 */
2647 	mutex_lock(&router->lock);
2648 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2649 			    nexthop_neighs_list_node)
2650 		if (!neigh_entry->connected)
2651 			neigh_event_send(neigh_entry->key.n, NULL);
2652 	mutex_unlock(&router->lock);
2653 
2654 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2655 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2656 }
2657 
2658 static void
2659 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2660 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2661 			      bool removing, bool dead);
2662 
2663 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2664 {
2665 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2666 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2667 }
2668 
2669 static int
2670 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2671 				struct mlxsw_sp_neigh_entry *neigh_entry,
2672 				enum mlxsw_reg_rauht_op op)
2673 {
2674 	struct neighbour *n = neigh_entry->key.n;
2675 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2676 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2677 
2678 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2679 			      dip);
2680 	if (neigh_entry->counter_valid)
2681 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2682 					     neigh_entry->counter_index);
2683 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2684 }
2685 
2686 static int
2687 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2688 				struct mlxsw_sp_neigh_entry *neigh_entry,
2689 				enum mlxsw_reg_rauht_op op)
2690 {
2691 	struct neighbour *n = neigh_entry->key.n;
2692 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2693 	const char *dip = n->primary_key;
2694 
2695 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2696 			      dip);
2697 	if (neigh_entry->counter_valid)
2698 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2699 					     neigh_entry->counter_index);
2700 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2701 }
2702 
2703 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2704 {
2705 	struct neighbour *n = neigh_entry->key.n;
2706 
2707 	/* Packets with a link-local destination address are trapped
2708 	 * after LPM lookup and never reach the neighbour table, so
2709 	 * there is no need to program such neighbours to the device.
2710 	 */
2711 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2712 	    IPV6_ADDR_LINKLOCAL)
2713 		return true;
2714 	return false;
2715 }
2716 
2717 static void
2718 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2719 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2720 			    bool adding)
2721 {
2722 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2723 	int err;
2724 
2725 	if (!adding && !neigh_entry->connected)
2726 		return;
2727 	neigh_entry->connected = adding;
2728 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2729 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2730 						      op);
2731 		if (err)
2732 			return;
2733 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2734 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2735 			return;
2736 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2737 						      op);
2738 		if (err)
2739 			return;
2740 	} else {
2741 		WARN_ON_ONCE(1);
2742 		return;
2743 	}
2744 
2745 	if (adding)
2746 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2747 	else
2748 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2749 }
2750 
2751 void
2752 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2753 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2754 				    bool adding)
2755 {
2756 	if (adding)
2757 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2758 	else
2759 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2760 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2761 }
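
/* The final update with adding=true above rewrites the RAUHT entry so
 * that the new counter binding (or its removal) is actually programmed
 * to the device.
 */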
2762 
2763 struct mlxsw_sp_netevent_work {
2764 	struct work_struct work;
2765 	struct mlxsw_sp *mlxsw_sp;
2766 	struct neighbour *n;
2767 };
2768 
2769 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2770 {
2771 	struct mlxsw_sp_netevent_work *net_work =
2772 		container_of(work, struct mlxsw_sp_netevent_work, work);
2773 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2774 	struct mlxsw_sp_neigh_entry *neigh_entry;
2775 	struct neighbour *n = net_work->n;
2776 	unsigned char ha[ETH_ALEN];
2777 	bool entry_connected;
2778 	u8 nud_state, dead;
2779 
2780 	/* If these parameters are changed after we release the lock,
2781 	 * then we are guaranteed to receive another event letting us
2782 	 * know about it.
2783 	 */
2784 	read_lock_bh(&n->lock);
2785 	memcpy(ha, n->ha, ETH_ALEN);
2786 	nud_state = n->nud_state;
2787 	dead = n->dead;
2788 	read_unlock_bh(&n->lock);
2789 
2790 	mutex_lock(&mlxsw_sp->router->lock);
2791 	mlxsw_sp_span_respin(mlxsw_sp);
2792 
2793 	entry_connected = nud_state & NUD_VALID && !dead;
2794 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2795 	if (!entry_connected && !neigh_entry)
2796 		goto out;
2797 	if (!neigh_entry) {
2798 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2799 		if (IS_ERR(neigh_entry))
2800 			goto out;
2801 	}
2802 
2803 	if (neigh_entry->connected && entry_connected &&
2804 	    !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2805 		goto out;
2806 
2807 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2808 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2809 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2810 				      dead);
2811 
2812 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2813 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2814 
2815 out:
2816 	mutex_unlock(&mlxsw_sp->router->lock);
2817 	neigh_release(n);
2818 	kfree(net_work);
2819 }
2820 
2821 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2822 
2823 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2824 {
2825 	struct mlxsw_sp_netevent_work *net_work =
2826 		container_of(work, struct mlxsw_sp_netevent_work, work);
2827 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2828 
2829 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2830 	kfree(net_work);
2831 }
2832 
2833 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2834 
2835 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2836 {
2837 	struct mlxsw_sp_netevent_work *net_work =
2838 		container_of(work, struct mlxsw_sp_netevent_work, work);
2839 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2840 
2841 	__mlxsw_sp_router_init(mlxsw_sp);
2842 	kfree(net_work);
2843 }
2844 
2845 static int mlxsw_sp_router_schedule_work(struct net *net,
2846 					 struct mlxsw_sp_router *router,
2847 					 struct neighbour *n,
2848 					 void (*cb)(struct work_struct *))
2849 {
2850 	struct mlxsw_sp_netevent_work *net_work;
2851 
2852 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2853 		return NOTIFY_DONE;
2854 
2855 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2856 	if (!net_work)
2857 		return NOTIFY_BAD;
2858 
2859 	INIT_WORK(&net_work->work, cb);
2860 	net_work->mlxsw_sp = router->mlxsw_sp;
2861 	net_work->n = n;
2862 	mlxsw_core_schedule_work(&net_work->work);
2863 	return NOTIFY_DONE;
2864 }
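
/* GFP_ATOMIC above: this is called from the netevent (atomic) notifier
 * chain, where sleeping allocations are not allowed.
 */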
2865 
2866 static bool mlxsw_sp_dev_lower_is_port(struct net_device *dev)
2867 {
2868 	struct mlxsw_sp_port *mlxsw_sp_port;
2869 
2870 	rcu_read_lock();
2871 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
2872 	rcu_read_unlock();
2873 	return !!mlxsw_sp_port;
2874 }
2875 
2876 static int mlxsw_sp_router_schedule_neigh_work(struct mlxsw_sp_router *router,
2877 					       struct neighbour *n)
2878 {
2879 	struct net *net;
2880 
2881 	net = neigh_parms_net(n->parms);
2882 
2883 	/* Take a reference to ensure the neighbour won't be destroyed until
2884 	 * we drop the reference in the delayed work.
2885 	 */
2886 	neigh_clone(n);
2887 	return mlxsw_sp_router_schedule_work(net, router, n,
2888 					     mlxsw_sp_router_neigh_event_work);
2889 }
2890 
2891 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2892 					  unsigned long event, void *ptr)
2893 {
2894 	struct mlxsw_sp_router *router;
2895 	unsigned long interval;
2896 	struct neigh_parms *p;
2897 	struct neighbour *n;
2898 
2899 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2900 
2901 	switch (event) {
2902 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2903 		p = ptr;
2904 
2905 		/* We don't care about changes in the default table. */
2906 		if (!p->dev || (p->tbl->family != AF_INET &&
2907 				p->tbl->family != AF_INET6))
2908 			return NOTIFY_DONE;
2909 
2910 		/* We are in atomic context and can't take the RTNL mutex,
2911 		 * so use the RCU variant to walk the device chain.
2912 		 */
2913 		if (!mlxsw_sp_dev_lower_is_port(p->dev))
2914 			return NOTIFY_DONE;
2915 
2916 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2917 		router->neighs_update.interval = interval;
2918 		break;
2919 	case NETEVENT_NEIGH_UPDATE:
2920 		n = ptr;
2921 
2922 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2923 			return NOTIFY_DONE;
2924 
2925 		if (!mlxsw_sp_dev_lower_is_port(n->dev))
2926 			return NOTIFY_DONE;
2927 
2928 		return mlxsw_sp_router_schedule_neigh_work(router, n);
2929 
2930 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2931 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2932 		return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2933 				mlxsw_sp_router_mp_hash_event_work);
2934 
2935 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2936 		return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2937 				mlxsw_sp_router_update_priority_work);
2938 	}
2939 
2940 	return NOTIFY_DONE;
2941 }
2942 
2943 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2944 {
2945 	int err;
2946 
2947 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2948 			      &mlxsw_sp_neigh_ht_params);
2949 	if (err)
2950 		return err;
2951 
2952 	/* Initialize the polling interval according to the DELAY_PROBE_TIME
2953 	 * of the default tables.
2954 	 */
2955 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2956 
2957 	/* Create the delayed works for activity update and nexthop probing */
2958 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2959 			  mlxsw_sp_router_neighs_update_work);
2960 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2961 			  mlxsw_sp_router_probe_unresolved_nexthops);
2962 	atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
2963 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2964 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2965 	return 0;
2966 }
2967 
2968 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2969 {
2970 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2971 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2972 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2973 }
2974 
2975 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2976 					 struct mlxsw_sp_rif *rif)
2977 {
2978 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2979 
2980 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2981 				 rif_list_node) {
2982 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2983 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2984 	}
2985 }
2986 
2987 struct mlxsw_sp_neigh_rif_made_sync {
2988 	struct mlxsw_sp *mlxsw_sp;
2989 	struct mlxsw_sp_rif *rif;
2990 	int err;
2991 };
2992 
2993 static void mlxsw_sp_neigh_rif_made_sync_each(struct neighbour *n, void *data)
2994 {
2995 	struct mlxsw_sp_neigh_rif_made_sync *rms = data;
2996 	int rc;
2997 
2998 	if (rms->err)
2999 		return;
3000 	if (n->dev != mlxsw_sp_rif_dev(rms->rif))
3001 		return;
3002 	rc = mlxsw_sp_router_schedule_neigh_work(rms->mlxsw_sp->router, n);
3003 	if (rc != NOTIFY_DONE)
3004 		rms->err = -ENOMEM;
3005 }
3006 
3007 static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
3008 					struct mlxsw_sp_rif *rif)
3009 {
3010 	struct mlxsw_sp_neigh_rif_made_sync rms = {
3011 		.mlxsw_sp = mlxsw_sp,
3012 		.rif = rif,
3013 	};
3014 
3015 	neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
3016 	if (rms.err)
3017 		goto err_arp;
3018 
3019 #if IS_ENABLED(CONFIG_IPV6)
3020 	neigh_for_each(&nd_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
3021 #endif
3022 	if (rms.err)
3023 		goto err_nd;
3024 
3025 	return 0;
3026 
3027 err_nd:
3028 err_arp:
3029 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
3030 	return rms.err;
3031 }
3032 
3033 enum mlxsw_sp_nexthop_type {
3034 	MLXSW_SP_NEXTHOP_TYPE_ETH,
3035 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
3036 };
3037 
3038 enum mlxsw_sp_nexthop_action {
3039 	/* Nexthop forwards packets to an egress RIF */
3040 	MLXSW_SP_NEXTHOP_ACTION_FORWARD,
3041 	/* Nexthop discards packets */
3042 	MLXSW_SP_NEXTHOP_ACTION_DISCARD,
3043 	/* Nexthop traps packets */
3044 	MLXSW_SP_NEXTHOP_ACTION_TRAP,
3045 };
3046 
3047 struct mlxsw_sp_nexthop_key {
3048 	struct fib_nh *fib_nh;
3049 };
3050 
3051 struct mlxsw_sp_nexthop {
3052 	struct list_head neigh_list_node; /* member of neigh entry list */
3053 	struct list_head crif_list_node;
3054 	struct list_head router_list_node;
3055 	struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
3056 						   * this nexthop belongs to
3057 						   */
3058 	struct rhash_head ht_node;
3059 	struct neigh_table *neigh_tbl;
3060 	struct mlxsw_sp_nexthop_key key;
3061 	unsigned char gw_addr[sizeof(struct in6_addr)];
3062 	int ifindex;
3063 	int nh_weight;
3064 	int norm_nh_weight;
3065 	int num_adj_entries;
3066 	struct mlxsw_sp_crif *crif;
3067 	u8 should_offload:1, /* set indicates this nexthop should be written
3068 			      * to the adjacency table.
3069 			      */
3070 	   offloaded:1, /* set indicates this nexthop was written to the
3071 			 * adjacency table.
3072 			 */
3073 	   update:1; /* set indicates this nexthop should be updated in the
3074 		      * adjacency table (e.g., its MAC changed).
3075 		      */
3076 	enum mlxsw_sp_nexthop_action action;
3077 	enum mlxsw_sp_nexthop_type type;
3078 	union {
3079 		struct mlxsw_sp_neigh_entry *neigh_entry;
3080 		struct mlxsw_sp_ipip_entry *ipip_entry;
3081 	};
3082 	unsigned int counter_index;
3083 	bool counter_valid;
3084 };
3085 
3086 static struct net_device *
3087 mlxsw_sp_nexthop_dev(const struct mlxsw_sp_nexthop *nh)
3088 {
3089 	if (!nh->crif)
3090 		return NULL;
3091 	return nh->crif->key.dev;
3092 }
3093 
3094 enum mlxsw_sp_nexthop_group_type {
3095 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
3096 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
3097 	MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
3098 };
3099 
3100 struct mlxsw_sp_nexthop_group_info {
3101 	struct mlxsw_sp_nexthop_group *nh_grp;
3102 	u32 adj_index;
3103 	u16 ecmp_size;
3104 	u16 count;
3105 	int sum_norm_weight;
3106 	u8 adj_index_valid:1,
3107 	   gateway:1, /* routes using the group use a gateway */
3108 	   is_resilient:1;
3109 	struct list_head list; /* member in nh_res_grp_list */
3110 	struct mlxsw_sp_nexthop nexthops[] __counted_by(count);
3111 };
3112 
3113 static struct mlxsw_sp_rif *
3114 mlxsw_sp_nhgi_rif(const struct mlxsw_sp_nexthop_group_info *nhgi)
3115 {
3116 	struct mlxsw_sp_crif *crif = nhgi->nexthops[0].crif;
3117 
3118 	if (!crif)
3119 		return NULL;
3120 	return crif->rif;
3121 }
3122 
3123 struct mlxsw_sp_nexthop_group_vr_key {
3124 	u16 vr_id;
3125 	enum mlxsw_sp_l3proto proto;
3126 };
3127 
3128 struct mlxsw_sp_nexthop_group_vr_entry {
3129 	struct list_head list; /* member in vr_list */
3130 	struct rhash_head ht_node; /* member in vr_ht */
3131 	refcount_t ref_count;
3132 	struct mlxsw_sp_nexthop_group_vr_key key;
3133 };
3134 
3135 struct mlxsw_sp_nexthop_group {
3136 	struct rhash_head ht_node;
3137 	struct list_head fib_list; /* list of fib entries that use this group */
3138 	union {
3139 		struct {
3140 			struct fib_info *fi;
3141 		} ipv4;
3142 		struct {
3143 			u32 id;
3144 		} obj;
3145 	};
3146 	struct mlxsw_sp_nexthop_group_info *nhgi;
3147 	struct list_head vr_list;
3148 	struct rhashtable vr_ht;
3149 	enum mlxsw_sp_nexthop_group_type type;
3150 	bool can_destroy;
3151 };
3152 
3153 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
3154 				    struct mlxsw_sp_nexthop *nh)
3155 {
3156 	struct devlink *devlink;
3157 
3158 	devlink = priv_to_devlink(mlxsw_sp->core);
3159 	if (!devlink_dpipe_table_counter_enabled(devlink,
3160 						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
3161 		return;
3162 
3163 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
3164 		return;
3165 
3166 	nh->counter_valid = true;
3167 }
3168 
3169 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
3170 				   struct mlxsw_sp_nexthop *nh)
3171 {
3172 	if (!nh->counter_valid)
3173 		return;
3174 	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
3175 	nh->counter_valid = false;
3176 }
3177 
3178 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3179 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3180 {
3181 	if (!nh->counter_valid)
3182 		return -EINVAL;
3183 
3184 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
3185 					 p_counter, NULL);
3186 }
3187 
3188 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3189 					       struct mlxsw_sp_nexthop *nh)
3190 {
3191 	if (!nh) {
3192 		if (list_empty(&router->nexthop_list))
3193 			return NULL;
3194 		else
3195 			return list_first_entry(&router->nexthop_list,
3196 						typeof(*nh), router_list_node);
3197 	}
3198 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3199 		return NULL;
3200 	return list_next_entry(nh, router_list_node);
3201 }
3202 
3203 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3204 {
3205 	return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3206 }
3207 
3208 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3209 {
3210 	if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3211 	    !mlxsw_sp_nexthop_is_forward(nh))
3212 		return NULL;
3213 	return nh->neigh_entry->ha;
3214 }
3215 
3216 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3217 			     u32 *p_adj_size, u32 *p_adj_hash_index)
3218 {
3219 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3220 	u32 adj_hash_index = 0;
3221 	int i;
3222 
3223 	if (!nh->offloaded || !nhgi->adj_index_valid)
3224 		return -EINVAL;
3225 
3226 	*p_adj_index = nhgi->adj_index;
3227 	*p_adj_size = nhgi->ecmp_size;
3228 
3229 	for (i = 0; i < nhgi->count; i++) {
3230 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3231 
3232 		if (nh_iter == nh)
3233 			break;
3234 		if (nh_iter->offloaded)
3235 			adj_hash_index += nh_iter->num_adj_entries;
3236 	}
3237 
3238 	*p_adj_hash_index = adj_hash_index;
3239 	return 0;
3240 }
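
/* Worked example (hypothetical numbers): in a group whose offloaded
 * nexthops occupy 2, 1 and 3 adjacency entries respectively, querying
 * the third nexthop yields adj_hash_index 2 + 1 = 3, i.e. its offset
 * into the group's adjacency block.
 */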
3241 
3242 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3243 {
3244 	if (WARN_ON(!nh->crif))
3245 		return NULL;
3246 	return nh->crif->rif;
3247 }
3248 
3249 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3250 {
3251 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3252 	int i;
3253 
3254 	for (i = 0; i < nhgi->count; i++) {
3255 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3256 
3257 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3258 			return true;
3259 	}
3260 	return false;
3261 }
3262 
3263 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3264 	.key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3265 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3266 	.key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3267 	.automatic_shrinking = true,
3268 };
3269 
3270 static struct mlxsw_sp_nexthop_group_vr_entry *
3271 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3272 				       const struct mlxsw_sp_fib *fib)
3273 {
3274 	struct mlxsw_sp_nexthop_group_vr_key key;
3275 
3276 	memset(&key, 0, sizeof(key));
3277 	key.vr_id = fib->vr->id;
3278 	key.proto = fib->proto;
3279 	return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3280 				      mlxsw_sp_nexthop_group_vr_ht_params);
3281 }
3282 
3283 static int
3284 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3285 				       const struct mlxsw_sp_fib *fib)
3286 {
3287 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3288 	int err;
3289 
3290 	vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3291 	if (!vr_entry)
3292 		return -ENOMEM;
3293 
3294 	vr_entry->key.vr_id = fib->vr->id;
3295 	vr_entry->key.proto = fib->proto;
3296 	refcount_set(&vr_entry->ref_count, 1);
3297 
3298 	err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3299 				     mlxsw_sp_nexthop_group_vr_ht_params);
3300 	if (err)
3301 		goto err_hashtable_insert;
3302 
3303 	list_add(&vr_entry->list, &nh_grp->vr_list);
3304 
3305 	return 0;
3306 
3307 err_hashtable_insert:
3308 	kfree(vr_entry);
3309 	return err;
3310 }
3311 
3312 static void
3313 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3314 					struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3315 {
3316 	list_del(&vr_entry->list);
3317 	rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3318 			       mlxsw_sp_nexthop_group_vr_ht_params);
3319 	kfree(vr_entry);
3320 }
3321 
3322 static int
3323 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3324 			       const struct mlxsw_sp_fib *fib)
3325 {
3326 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3327 
3328 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3329 	if (vr_entry) {
3330 		refcount_inc(&vr_entry->ref_count);
3331 		return 0;
3332 	}
3333 
3334 	return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3335 }
3336 
3337 static void
3338 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3339 				 const struct mlxsw_sp_fib *fib)
3340 {
3341 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3342 
3343 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3344 	if (WARN_ON_ONCE(!vr_entry))
3345 		return;
3346 
3347 	if (!refcount_dec_and_test(&vr_entry->ref_count))
3348 		return;
3349 
3350 	mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3351 }
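
/* link/unlink above implement a get-or-create pattern: the first link
 * for a given {vr_id, proto} pair creates the entry with a reference
 * count of one, later links only bump it, and the unlink that drops the
 * count to zero destroys the entry.
 */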
3352 
3353 struct mlxsw_sp_nexthop_group_cmp_arg {
3354 	enum mlxsw_sp_nexthop_group_type type;
3355 	union {
3356 		struct fib_info *fi;
3357 		struct mlxsw_sp_fib6_entry *fib6_entry;
3358 		u32 id;
3359 	};
3360 };
3361 
3362 static bool
3363 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3364 				    const struct in6_addr *gw, int ifindex,
3365 				    int weight)
3366 {
3367 	int i;
3368 
3369 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3370 		const struct mlxsw_sp_nexthop *nh;
3371 
3372 		nh = &nh_grp->nhgi->nexthops[i];
3373 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3374 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3375 			return true;
3376 	}
3377 
3378 	return false;
3379 }
3380 
3381 static bool
3382 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3383 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
3384 {
3385 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3386 
3387 	if (nh_grp->nhgi->count != fib6_entry->nrt6)
3388 		return false;
3389 
3390 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3391 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3392 		struct in6_addr *gw;
3393 		int ifindex, weight;
3394 
3395 		ifindex = fib6_nh->fib_nh_dev->ifindex;
3396 		weight = fib6_nh->fib_nh_weight;
3397 		gw = &fib6_nh->fib_nh_gw6;
3398 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3399 							 weight))
3400 			return false;
3401 	}
3402 
3403 	return true;
3404 }
3405 
3406 static int
3407 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3408 {
3409 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3410 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3411 
3412 	if (nh_grp->type != cmp_arg->type)
3413 		return 1;
3414 
3415 	switch (cmp_arg->type) {
3416 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3417 		return cmp_arg->fi != nh_grp->ipv4.fi;
3418 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3419 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3420 						    cmp_arg->fib6_entry);
3421 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3422 		return cmp_arg->id != nh_grp->obj.id;
3423 	default:
3424 		WARN_ON(1);
3425 		return 1;
3426 	}
3427 }
3428 
3429 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3430 {
3431 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
3432 	const struct mlxsw_sp_nexthop *nh;
3433 	struct fib_info *fi;
3434 	unsigned int val;
3435 	int i;
3436 
3437 	switch (nh_grp->type) {
3438 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3439 		fi = nh_grp->ipv4.fi;
3440 		return jhash(&fi, sizeof(fi), seed);
3441 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3442 		val = nh_grp->nhgi->count;
3443 		for (i = 0; i < nh_grp->nhgi->count; i++) {
3444 			nh = &nh_grp->nhgi->nexthops[i];
3445 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3446 			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3447 		}
3448 		return jhash(&val, sizeof(val), seed);
3449 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3450 		return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3451 	default:
3452 		WARN_ON(1);
3453 		return 0;
3454 	}
3455 }
3456 
3457 static u32
3458 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3459 {
3460 	unsigned int val = fib6_entry->nrt6;
3461 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3462 
3463 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3464 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3465 		struct net_device *dev = fib6_nh->fib_nh_dev;
3466 		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3467 
3468 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3469 		val ^= jhash(gw, sizeof(*gw), seed);
3470 	}
3471 
3472 	return jhash(&val, sizeof(val), seed);
3473 }
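
/* The XOR accumulation above makes the IPv6 group hash insensitive to
 * nexthop order, matching mlxsw_sp_nexthop6_group_cmp(), which also
 * ignores ordering.
 */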
3474 
3475 static u32
3476 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3477 {
3478 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3479 
3480 	switch (cmp_arg->type) {
3481 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3482 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3483 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3484 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3485 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3486 		return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3487 	default:
3488 		WARN_ON(1);
3489 		return 0;
3490 	}
3491 }
3492 
3493 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3494 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3495 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
3496 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
3497 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
3498 };
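
/* In this table the lookup key is a struct mlxsw_sp_nexthop_group_cmp_arg
 * while the stored object is a struct mlxsw_sp_nexthop_group, hence the
 * separate hash functions; hashfn and obj_hashfn must agree (a key and a
 * matching group must land in the same bucket), and obj_cmpfn returns
 * zero on a match.
 */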
3499 
3500 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3501 					 struct mlxsw_sp_nexthop_group *nh_grp)
3502 {
3503 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3504 	    !nh_grp->nhgi->gateway)
3505 		return 0;
3506 
3507 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3508 				      &nh_grp->ht_node,
3509 				      mlxsw_sp_nexthop_group_ht_params);
3510 }
3511 
3512 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3513 					  struct mlxsw_sp_nexthop_group *nh_grp)
3514 {
3515 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3516 	    !nh_grp->nhgi->gateway)
3517 		return;
3518 
3519 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3520 			       &nh_grp->ht_node,
3521 			       mlxsw_sp_nexthop_group_ht_params);
3522 }
3523 
3524 static struct mlxsw_sp_nexthop_group *
3525 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3526 			       struct fib_info *fi)
3527 {
3528 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3529 
3530 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3531 	cmp_arg.fi = fi;
3532 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3533 				      &cmp_arg,
3534 				      mlxsw_sp_nexthop_group_ht_params);
3535 }
3536 
3537 static struct mlxsw_sp_nexthop_group *
3538 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3539 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3540 {
3541 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3542 
3543 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3544 	cmp_arg.fib6_entry = fib6_entry;
3545 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3546 				      &cmp_arg,
3547 				      mlxsw_sp_nexthop_group_ht_params);
3548 }
3549 
3550 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3551 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3552 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3553 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3554 };
3555 
3556 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3557 				   struct mlxsw_sp_nexthop *nh)
3558 {
3559 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3560 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3561 }
3562 
3563 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3564 				    struct mlxsw_sp_nexthop *nh)
3565 {
3566 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3567 			       mlxsw_sp_nexthop_ht_params);
3568 }
3569 
3570 static struct mlxsw_sp_nexthop *
3571 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3572 			struct mlxsw_sp_nexthop_key key)
3573 {
3574 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3575 				      mlxsw_sp_nexthop_ht_params);
3576 }
3577 
3578 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3579 					     enum mlxsw_sp_l3proto proto,
3580 					     u16 vr_id,
3581 					     u32 adj_index, u16 ecmp_size,
3582 					     u32 new_adj_index,
3583 					     u16 new_ecmp_size)
3584 {
3585 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3586 
3587 	mlxsw_reg_raleu_pack(raleu_pl,
3588 			     (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3589 			     adj_index, ecmp_size, new_adj_index,
3590 			     new_ecmp_size);
3591 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3592 }
3593 
3594 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3595 					  struct mlxsw_sp_nexthop_group *nh_grp,
3596 					  u32 old_adj_index, u16 old_ecmp_size)
3597 {
3598 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3599 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3600 	int err;
3601 
3602 	list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3603 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3604 							vr_entry->key.proto,
3605 							vr_entry->key.vr_id,
3606 							old_adj_index,
3607 							old_ecmp_size,
3608 							nhgi->adj_index,
3609 							nhgi->ecmp_size);
3610 		if (err)
3611 			goto err_mass_update_vr;
3612 	}
3613 	return 0;
3614 
3615 err_mass_update_vr:
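	/* Re-point the VRs that were already updated back at the old
	 * adjacency block, restoring the state prior to the failure.
	 */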
3616 	list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3617 		mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3618 						  vr_entry->key.vr_id,
3619 						  nhgi->adj_index,
3620 						  nhgi->ecmp_size,
3621 						  old_adj_index, old_ecmp_size);
3622 	return err;
3623 }
3624 
3625 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3626 					 u32 adj_index,
3627 					 struct mlxsw_sp_nexthop *nh,
3628 					 bool force, char *ratr_pl)
3629 {
3630 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3631 	struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh);
3632 	enum mlxsw_reg_ratr_op op;
3633 	u16 rif_index;
3634 
3635 	rif_index = rif ? rif->rif_index :
3636 			  mlxsw_sp->router->lb_crif->rif->rif_index;
3637 	op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3638 		     MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3639 	mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3640 			    adj_index, rif_index);
3641 	switch (nh->action) {
3642 	case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3643 		mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3644 		break;
3645 	case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3646 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3647 					       MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3648 		break;
3649 	case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3650 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3651 					       MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3652 		mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3653 		break;
3654 	default:
3655 		WARN_ON_ONCE(1);
3656 		return -EINVAL;
3657 	}
3658 	if (nh->counter_valid)
3659 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3660 	else
3661 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3662 
3663 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3664 }
3665 
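/* A weighted nexthop may occupy several consecutive adjacency entries
 * (nh->num_adj_entries); program each of them with the same contents.
 */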
3666 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3667 				struct mlxsw_sp_nexthop *nh, bool force,
3668 				char *ratr_pl)
3669 {
3670 	int i;
3671 
3672 	for (i = 0; i < nh->num_adj_entries; i++) {
3673 		int err;
3674 
3675 		err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3676 						    nh, force, ratr_pl);
3677 		if (err)
3678 			return err;
3679 	}
3680 
3681 	return 0;
3682 }
3683 
3684 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3685 					  u32 adj_index,
3686 					  struct mlxsw_sp_nexthop *nh,
3687 					  bool force, char *ratr_pl)
3688 {
3689 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3690 
3691 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3692 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3693 					force, ratr_pl);
3694 }
3695 
3696 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3697 					u32 adj_index,
3698 					struct mlxsw_sp_nexthop *nh, bool force,
3699 					char *ratr_pl)
3700 {
3701 	int i;
3702 
3703 	for (i = 0; i < nh->num_adj_entries; i++) {
3704 		int err;
3705 
3706 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3707 						     nh, force, ratr_pl);
3708 		if (err)
3709 			return err;
3710 	}
3711 
3712 	return 0;
3713 }
3714 
3715 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3716 				   struct mlxsw_sp_nexthop *nh, bool force,
3717 				   char *ratr_pl)
3718 {
3719 	/* When action is discard or trap, the nexthop must be
3720 	 * programmed as an Ethernet nexthop.
3721 	 */
3722 	if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3723 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3724 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3725 		return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3726 						   force, ratr_pl);
3727 	else
3728 		return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3729 						    force, ratr_pl);
3730 }
3731 
3732 static int
3733 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3734 			      struct mlxsw_sp_nexthop_group_info *nhgi,
3735 			      bool reallocate)
3736 {
3737 	char ratr_pl[MLXSW_REG_RATR_LEN];
3738 	u32 adj_index = nhgi->adj_index; /* base */
3739 	struct mlxsw_sp_nexthop *nh;
3740 	int i;
3741 
3742 	for (i = 0; i < nhgi->count; i++) {
3743 		nh = &nhgi->nexthops[i];
3744 
3745 		if (!nh->should_offload) {
3746 			nh->offloaded = 0;
3747 			continue;
3748 		}
3749 
3750 		if (nh->update || reallocate) {
3751 			int err;
3752 
3753 			err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3754 						      true, ratr_pl);
3755 			if (err)
3756 				return err;
3757 			nh->update = 0;
3758 			nh->offloaded = 1;
3759 		}
3760 		adj_index += nh->num_adj_entries;
3761 	}
3762 	return 0;
3763 }
3764 
3765 static int
3766 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3767 				    struct mlxsw_sp_nexthop_group *nh_grp)
3768 {
3769 	struct mlxsw_sp_fib_entry *fib_entry;
3770 	int err;
3771 
3772 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3773 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3774 		if (err)
3775 			return err;
3776 	}
3777 	return 0;
3778 }
3779 
3780 struct mlxsw_sp_adj_grp_size_range {
3781 	u16 start; /* Inclusive */
3782 	u16 end; /* Inclusive */
3783 };
3784 
3785 /* Ordered by range start value */
3786 static const struct mlxsw_sp_adj_grp_size_range
3787 mlxsw_sp1_adj_grp_size_ranges[] = {
3788 	{ .start = 1, .end = 64 },
3789 	{ .start = 512, .end = 512 },
3790 	{ .start = 1024, .end = 1024 },
3791 	{ .start = 2048, .end = 2048 },
3792 	{ .start = 4096, .end = 4096 },
3793 };
3794 
3795 /* Ordered by range start value */
3796 static const struct mlxsw_sp_adj_grp_size_range
3797 mlxsw_sp2_adj_grp_size_ranges[] = {
3798 	{ .start = 1, .end = 128 },
3799 	{ .start = 256, .end = 256 },
3800 	{ .start = 512, .end = 512 },
3801 	{ .start = 1024, .end = 1024 },
3802 	{ .start = 2048, .end = 2048 },
3803 	{ .start = 4096, .end = 4096 },
3804 };
3805 
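/* Round a requested group size up to the nearest supported size. For
 * example, a request for 70 entries is left as-is on Spectrum-2 (covered
 * by the 1-128 range), but becomes 512 on Spectrum-1, whose ranges skip
 * 65-511.
 */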
3806 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3807 					   u16 *p_adj_grp_size)
3808 {
3809 	int i;
3810 
3811 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3812 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3813 
3814 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3815 
3816 		if (*p_adj_grp_size >= size_range->start &&
3817 		    *p_adj_grp_size <= size_range->end)
3818 			return;
3819 
3820 		if (*p_adj_grp_size <= size_range->end) {
3821 			*p_adj_grp_size = size_range->end;
3822 			return;
3823 		}
3824 	}
3825 }
3826 
3827 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3828 					     u16 *p_adj_grp_size,
3829 					     unsigned int alloc_size)
3830 {
3831 	int i;
3832 
3833 	for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3834 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3835 
3836 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3837 
3838 		if (alloc_size >= size_range->end) {
3839 			*p_adj_grp_size = size_range->end;
3840 			return;
3841 		}
3842 	}
3843 }
3844 
3845 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3846 				     u16 *p_adj_grp_size)
3847 {
3848 	unsigned int alloc_size;
3849 	int err;
3850 
3851 	/* Round up the requested group size to the next size supported
3852 	 * by the device and make sure the request can be satisfied.
3853 	 */
3854 	mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3855 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3856 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3857 					      *p_adj_grp_size, &alloc_size);
3858 	if (err)
3859 		return err;
3860 	/* It is possible the allocation results in more allocated
3861 	 * entries than requested. Try to use as many of them as
3862 	 * possible.
3863 	 */
3864 	mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3865 
3866 	return 0;
3867 }
3868 
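/* Normalize the nexthop weights by their greatest common divisor, e.g.
 * weights {20, 30} become {2, 3} with a sum_norm_weight of 5. Nexthops
 * that should not be offloaded are skipped.
 */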
3869 static void
3870 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3871 {
3872 	int i, g = 0, sum_norm_weight = 0;
3873 	struct mlxsw_sp_nexthop *nh;
3874 
3875 	for (i = 0; i < nhgi->count; i++) {
3876 		nh = &nhgi->nexthops[i];
3877 
3878 		if (!nh->should_offload)
3879 			continue;
3880 		if (g > 0)
3881 			g = gcd(nh->nh_weight, g);
3882 		else
3883 			g = nh->nh_weight;
3884 	}
3885 
3886 	for (i = 0; i < nhgi->count; i++) {
3887 		nh = &nhgi->nexthops[i];
3888 
3889 		if (!nh->should_offload)
3890 			continue;
3891 		nh->norm_nh_weight = nh->nh_weight / g;
3892 		sum_norm_weight += nh->norm_nh_weight;
3893 	}
3894 
3895 	nhgi->sum_norm_weight = sum_norm_weight;
3896 }
3897 
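/* Distribute ecmp_size adjacency entries among the nexthops in proportion
 * to their normalized weights, using cumulative rounding so that the
 * per-nexthop counts sum to exactly ecmp_size. E.g., with normalized
 * weights {2, 3} and ecmp_size 512, the nexthops get 205 and 307 entries.
 */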
3898 static void
3899 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3900 {
3901 	int i, weight = 0, lower_bound = 0;
3902 	int total = nhgi->sum_norm_weight;
3903 	u16 ecmp_size = nhgi->ecmp_size;
3904 
3905 	for (i = 0; i < nhgi->count; i++) {
3906 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3907 		int upper_bound;
3908 
3909 		if (!nh->should_offload)
3910 			continue;
3911 		weight += nh->norm_nh_weight;
3912 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3913 		nh->num_adj_entries = upper_bound - lower_bound;
3914 		lower_bound = upper_bound;
3915 	}
3916 }
3917 
3918 static struct mlxsw_sp_nexthop *
3919 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3920 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3921 
3922 static void
3923 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3924 					struct mlxsw_sp_nexthop_group *nh_grp)
3925 {
3926 	int i;
3927 
3928 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3929 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3930 
3931 		if (nh->offloaded)
3932 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3933 		else
3934 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3935 	}
3936 }
3937 
3938 static void
3939 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3940 					  struct mlxsw_sp_fib6_entry *fib6_entry)
3941 {
3942 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3943 
3944 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3945 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3946 		struct mlxsw_sp_nexthop *nh;
3947 
3948 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3949 		if (nh && nh->offloaded)
3950 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3951 		else
3952 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3953 	}
3954 }
3955 
3956 static void
3957 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3958 					struct mlxsw_sp_nexthop_group *nh_grp)
3959 {
3960 	struct mlxsw_sp_fib6_entry *fib6_entry;
3961 
3962 	/* Unfortunately, in IPv6 the route and the nexthop are described by
3963 	 * the same struct, so we need to iterate over all the routes using the
3964 	 * nexthop group and set / clear the offload indication for them.
3965 	 */
3966 	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3967 			    common.nexthop_group_node)
3968 		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3969 }
3970 
3971 static void
3972 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3973 					const struct mlxsw_sp_nexthop *nh,
3974 					u16 bucket_index)
3975 {
3976 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3977 	bool offload = false, trap = false;
3978 
3979 	if (nh->offloaded) {
3980 		if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3981 			trap = true;
3982 		else
3983 			offload = true;
3984 	}
3985 	nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3986 				    bucket_index, offload, trap);
3987 }
3988 
3989 static void
3990 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3991 					   struct mlxsw_sp_nexthop_group *nh_grp)
3992 {
3993 	int i;
3994 
3995 	/* Do not update the flags if the nexthop group is being destroyed
3996 	 * since:
3997 	 * 1. The nexthop object is being deleted, in which case the flags are
3998 	 * irrelevant.
3999 	 * 2. The nexthop group was replaced by a newer group, in which case
4000 	 * the flags of the nexthop object were already updated based on the
4001 	 * new group.
4002 	 */
4003 	if (nh_grp->can_destroy)
4004 		return;
4005 
4006 	nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4007 			     nh_grp->nhgi->adj_index_valid, false);
4008 
4009 	/* Update flags of individual nexthop buckets in case of a resilient
4010 	 * nexthop group.
4011 	 */
4012 	if (!nh_grp->nhgi->is_resilient)
4013 		return;
4014 
4015 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4016 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
4017 
4018 		mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
4019 	}
4020 }
4021 
4022 static void
4023 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4024 				       struct mlxsw_sp_nexthop_group *nh_grp)
4025 {
4026 	switch (nh_grp->type) {
4027 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
4028 		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
4029 		break;
4030 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
4031 		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
4032 		break;
4033 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
4034 		mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
4035 		break;
4036 	}
4037 }
4038 
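/* Re-program the group in hardware: if the set of offloadable nexthops
 * changed, allocate a new adjacency block, write the nexthops into it,
 * re-point the using routes / VRs at it and free the old block. On any
 * failure, fall back to trapping the group's traffic to the CPU.
 */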
4039 static int
4040 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
4041 			       struct mlxsw_sp_nexthop_group *nh_grp)
4042 {
4043 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4044 	u16 ecmp_size, old_ecmp_size;
4045 	struct mlxsw_sp_nexthop *nh;
4046 	bool offload_change = false;
4047 	u32 adj_index;
4048 	bool old_adj_index_valid;
4049 	u32 old_adj_index;
4050 	int i, err2, err;
4051 
4052 	if (!nhgi->gateway)
4053 		return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4054 
4055 	for (i = 0; i < nhgi->count; i++) {
4056 		nh = &nhgi->nexthops[i];
4057 
4058 		if (nh->should_offload != nh->offloaded) {
4059 			offload_change = true;
4060 			if (nh->should_offload)
4061 				nh->update = 1;
4062 		}
4063 	}
4064 	if (!offload_change) {
4065 		/* Nothing was added or removed, so no need to reallocate. Just
4066 		 * update MAC on existing adjacency indexes.
4067 		 */
4068 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
4069 		if (err) {
4070 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4071 			goto set_trap;
4072 		}
4073 		/* Flags of individual nexthop buckets might need to be
4074 		 * updated.
4075 		 */
4076 		mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4077 		return 0;
4078 	}
4079 	mlxsw_sp_nexthop_group_normalize(nhgi);
4080 	if (!nhgi->sum_norm_weight) {
4081 		/* No neigh of this group is connected, so we just set
4082 		 * the trap and let everything flow through the kernel.
4083 		 */
4084 		err = 0;
4085 		goto set_trap;
4086 	}
4087 
4088 	ecmp_size = nhgi->sum_norm_weight;
4089 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
4090 	if (err)
4091 		/* No valid allocation size available. */
4092 		goto set_trap;
4093 
4094 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4095 				  ecmp_size, &adj_index);
4096 	if (err) {
4097 		/* We ran out of KVD linear space, just set the
4098 		 * trap and let everything flow through the kernel.
4099 		 */
4100 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
4101 		goto set_trap;
4102 	}
4103 	old_adj_index_valid = nhgi->adj_index_valid;
4104 	old_adj_index = nhgi->adj_index;
4105 	old_ecmp_size = nhgi->ecmp_size;
4106 	nhgi->adj_index_valid = 1;
4107 	nhgi->adj_index = adj_index;
4108 	nhgi->ecmp_size = ecmp_size;
4109 	mlxsw_sp_nexthop_group_rebalance(nhgi);
4110 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
4111 	if (err) {
4112 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4113 		goto set_trap;
4114 	}
4115 
4116 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4117 
4118 	if (!old_adj_index_valid) {
4119 		/* The trap was set for the fib entries, so we have to call
4120 		 * fib entry update to unset it and use the adjacency index.
4121 		 */
4122 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4123 		if (err) {
4124 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
4125 			goto set_trap;
4126 		}
4127 		return 0;
4128 	}
4129 
4130 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
4131 					     old_adj_index, old_ecmp_size);
4132 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4133 			   old_ecmp_size, old_adj_index);
4134 	if (err) {
4135 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
4136 		goto set_trap;
4137 	}
4138 
4139 	return 0;
4140 
4141 set_trap:
4142 	old_adj_index_valid = nhgi->adj_index_valid;
4143 	nhgi->adj_index_valid = 0;
4144 	for (i = 0; i < nhgi->count; i++) {
4145 		nh = &nhgi->nexthops[i];
4146 		nh->offloaded = 0;
4147 	}
4148 	err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4149 	if (err2)
4150 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
4151 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4152 	if (old_adj_index_valid)
4153 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4154 				   nhgi->ecmp_size, nhgi->adj_index);
4155 	return err;
4156 }
4157 
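/* An unresolved nexthop is normally not offloaded, but in a resilient
 * group its buckets must stay populated, so program it as a trap entry
 * instead.
 */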
4158 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4159 					    bool removing)
4160 {
4161 	if (!removing) {
4162 		nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4163 		nh->should_offload = 1;
4164 	} else if (nh->nhgi->is_resilient) {
4165 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4166 		nh->should_offload = 1;
4167 	} else {
4168 		nh->should_offload = 0;
4169 	}
4170 	nh->update = 1;
4171 }
4172 
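/* The neighbour was flagged dead. Look up (or create) a live neighbour
 * for the same gateway address and re-bind all the nexthops currently
 * using the dead entry to it.
 */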
4173 static int
4174 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4175 				    struct mlxsw_sp_neigh_entry *neigh_entry)
4176 {
4177 	struct neighbour *n, *old_n = neigh_entry->key.n;
4178 	struct mlxsw_sp_nexthop *nh;
4179 	struct net_device *dev;
4180 	bool entry_connected;
4181 	u8 nud_state, dead;
4182 	int err;
4183 
4184 	nh = list_first_entry(&neigh_entry->nexthop_list,
4185 			      struct mlxsw_sp_nexthop, neigh_list_node);
4186 	dev = mlxsw_sp_nexthop_dev(nh);
4187 
4188 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4189 	if (!n) {
4190 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4191 		if (IS_ERR(n))
4192 			return PTR_ERR(n);
4193 		neigh_event_send(n, NULL);
4194 	}
4195 
4196 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4197 	neigh_entry->key.n = n;
4198 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4199 	if (err)
4200 		goto err_neigh_entry_insert;
4201 
4202 	read_lock_bh(&n->lock);
4203 	nud_state = n->nud_state;
4204 	dead = n->dead;
4205 	read_unlock_bh(&n->lock);
4206 	entry_connected = nud_state & NUD_VALID && !dead;
4207 
4208 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4209 			    neigh_list_node) {
4210 		neigh_release(old_n);
4211 		neigh_clone(n);
4212 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4213 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4214 	}
4215 
4216 	neigh_release(n);
4217 
4218 	return 0;
4219 
4220 err_neigh_entry_insert:
4221 	neigh_entry->key.n = old_n;
4222 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4223 	neigh_release(n);
4224 	return err;
4225 }
4226 
4227 static void
4228 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4229 			      struct mlxsw_sp_neigh_entry *neigh_entry,
4230 			      bool removing, bool dead)
4231 {
4232 	struct mlxsw_sp_nexthop *nh;
4233 
4234 	if (list_empty(&neigh_entry->nexthop_list))
4235 		return;
4236 
4237 	if (dead) {
4238 		int err;
4239 
4240 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4241 							  neigh_entry);
4242 		if (err)
4243 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4244 		return;
4245 	}
4246 
4247 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4248 			    neigh_list_node) {
4249 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4250 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4251 	}
4252 }
4253 
4254 static void mlxsw_sp_nexthop_crif_init(struct mlxsw_sp_nexthop *nh,
4255 				       struct mlxsw_sp_crif *crif)
4256 {
4257 	if (nh->crif)
4258 		return;
4259 
4260 	nh->crif = crif;
4261 	list_add(&nh->crif_list_node, &crif->nexthop_list);
4262 }
4263 
4264 static void mlxsw_sp_nexthop_crif_fini(struct mlxsw_sp_nexthop *nh)
4265 {
4266 	if (!nh->crif)
4267 		return;
4268 
4269 	list_del(&nh->crif_list_node);
4270 	nh->crif = NULL;
4271 }
4272 
4273 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4274 				       struct mlxsw_sp_nexthop *nh)
4275 {
4276 	struct mlxsw_sp_neigh_entry *neigh_entry;
4277 	struct net_device *dev;
4278 	struct neighbour *n;
4279 	u8 nud_state, dead;
4280 	int err;
4281 
4282 	if (WARN_ON(!nh->crif->rif))
4283 		return 0;
4284 
4285 	if (!nh->nhgi->gateway || nh->neigh_entry)
4286 		return 0;
4287 	dev = mlxsw_sp_nexthop_dev(nh);
4288 
4289 	/* Take a reference on the neighbour to ensure it is not
4290 	 * destroyed before the nexthop entry is done with it. The
4291 	 * reference is taken either in neigh_lookup(), or in
4292 	 * neigh_create() in case the neighbour is not found.
4293 	 */
4294 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4295 	if (!n) {
4296 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4297 		if (IS_ERR(n))
4298 			return PTR_ERR(n);
4299 		neigh_event_send(n, NULL);
4300 	}
4301 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4302 	if (!neigh_entry) {
4303 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4304 		if (IS_ERR(neigh_entry)) {
4305 			err = -EINVAL;
4306 			goto err_neigh_entry_create;
4307 		}
4308 	}
4309 
4310 	/* If that is the first nexthop connected to that neigh, add to
4311 	 * nexthop_neighs_list
4312 	 */
4313 	if (list_empty(&neigh_entry->nexthop_list))
4314 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4315 			      &mlxsw_sp->router->nexthop_neighs_list);
4316 
4317 	nh->neigh_entry = neigh_entry;
4318 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4319 	read_lock_bh(&n->lock);
4320 	nud_state = n->nud_state;
4321 	dead = n->dead;
4322 	read_unlock_bh(&n->lock);
4323 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4324 
4325 	return 0;
4326 
4327 err_neigh_entry_create:
4328 	neigh_release(n);
4329 	return err;
4330 }
4331 
4332 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4333 					struct mlxsw_sp_nexthop *nh)
4334 {
4335 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4336 	struct neighbour *n;
4337 
4338 	if (!neigh_entry)
4339 		return;
4340 	n = neigh_entry->key.n;
4341 
4342 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4343 	list_del(&nh->neigh_list_node);
4344 	nh->neigh_entry = NULL;
4345 
4346 	/* If that is the last nexthop connected to that neigh, remove from
4347 	 * nexthop_neighs_list
4348 	 */
4349 	if (list_empty(&neigh_entry->nexthop_list))
4350 		list_del(&neigh_entry->nexthop_neighs_list_node);
4351 
4352 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4353 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4354 
4355 	neigh_release(n);
4356 }
4357 
4358 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4359 {
4360 	struct net_device *ul_dev;
4361 	bool is_up;
4362 
4363 	rcu_read_lock();
4364 	ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4365 	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4366 	rcu_read_unlock();
4367 
4368 	return is_up;
4369 }
4370 
4371 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4372 				       struct mlxsw_sp_nexthop *nh,
4373 				       struct mlxsw_sp_ipip_entry *ipip_entry)
4374 {
4375 	struct mlxsw_sp_crif *crif;
4376 	bool removing;
4377 
4378 	if (!nh->nhgi->gateway || nh->ipip_entry)
4379 		return;
4380 
4381 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, ipip_entry->ol_dev);
4382 	if (WARN_ON(!crif))
4383 		return;
4384 
4385 	nh->ipip_entry = ipip_entry;
4386 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4387 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
4388 	mlxsw_sp_nexthop_crif_init(nh, crif);
4389 }
4390 
4391 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4392 				       struct mlxsw_sp_nexthop *nh)
4393 {
4394 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4395 
4396 	if (!ipip_entry)
4397 		return;
4398 
4399 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4400 	nh->ipip_entry = NULL;
4401 }
4402 
4403 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4404 					const struct fib_nh *fib_nh,
4405 					enum mlxsw_sp_ipip_type *p_ipipt)
4406 {
4407 	struct net_device *dev = fib_nh->fib_nh_dev;
4408 
4409 	return dev &&
4410 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4411 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4412 }
4413 
4414 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4415 				      struct mlxsw_sp_nexthop *nh,
4416 				      const struct net_device *dev)
4417 {
4418 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4419 	struct mlxsw_sp_ipip_entry *ipip_entry;
4420 	struct mlxsw_sp_crif *crif;
4421 	int err;
4422 
4423 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4424 	if (ipip_entry) {
4425 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4426 		if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4427 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4428 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4429 			return 0;
4430 		}
4431 	}
4432 
4433 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4434 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, dev);
4435 	if (!crif)
4436 		return 0;
4437 
4438 	mlxsw_sp_nexthop_crif_init(nh, crif);
4439 
4440 	if (!crif->rif)
4441 		return 0;
4442 
4443 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4444 	if (err)
4445 		goto err_neigh_init;
4446 
4447 	return 0;
4448 
4449 err_neigh_init:
4450 	mlxsw_sp_nexthop_crif_fini(nh);
4451 	return err;
4452 }
4453 
4454 static int mlxsw_sp_nexthop_type_rif_made(struct mlxsw_sp *mlxsw_sp,
4455 					  struct mlxsw_sp_nexthop *nh)
4456 {
4457 	switch (nh->type) {
4458 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4459 		return mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4460 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4461 		break;
4462 	}
4463 
4464 	return 0;
4465 }
4466 
4467 static void mlxsw_sp_nexthop_type_rif_gone(struct mlxsw_sp *mlxsw_sp,
4468 					   struct mlxsw_sp_nexthop *nh)
4469 {
4470 	switch (nh->type) {
4471 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4472 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4473 		break;
4474 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4475 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4476 		break;
4477 	}
4478 }
4479 
4480 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4481 				       struct mlxsw_sp_nexthop *nh)
4482 {
4483 	mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4484 	mlxsw_sp_nexthop_crif_fini(nh);
4485 }
4486 
4487 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4488 				  struct mlxsw_sp_nexthop_group *nh_grp,
4489 				  struct mlxsw_sp_nexthop *nh,
4490 				  struct fib_nh *fib_nh)
4491 {
4492 	struct net_device *dev = fib_nh->fib_nh_dev;
4493 	struct in_device *in_dev;
4494 	int err;
4495 
4496 	nh->nhgi = nh_grp->nhgi;
4497 	nh->key.fib_nh = fib_nh;
4498 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4499 	nh->nh_weight = fib_nh->fib_nh_weight;
4500 #else
4501 	nh->nh_weight = 1;
4502 #endif
4503 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4504 	nh->neigh_tbl = &arp_tbl;
4505 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4506 	if (err)
4507 		return err;
4508 
4509 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4510 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4511 
4512 	if (!dev)
4513 		return 0;
4514 	nh->ifindex = dev->ifindex;
4515 
4516 	rcu_read_lock();
4517 	in_dev = __in_dev_get_rcu(dev);
4518 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4519 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4520 		rcu_read_unlock();
4521 		return 0;
4522 	}
4523 	rcu_read_unlock();
4524 
4525 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4526 	if (err)
4527 		goto err_nexthop_neigh_init;
4528 
4529 	return 0;
4530 
4531 err_nexthop_neigh_init:
4532 	list_del(&nh->router_list_node);
4533 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4534 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4535 	return err;
4536 }
4537 
4538 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4539 				   struct mlxsw_sp_nexthop *nh)
4540 {
4541 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4542 	list_del(&nh->router_list_node);
4543 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4544 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4545 }
4546 
4547 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4548 				    unsigned long event, struct fib_nh *fib_nh)
4549 {
4550 	struct mlxsw_sp_nexthop_key key;
4551 	struct mlxsw_sp_nexthop *nh;
4552 
4553 	key.fib_nh = fib_nh;
4554 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4555 	if (!nh)
4556 		return;
4557 
4558 	switch (event) {
4559 	case FIB_EVENT_NH_ADD:
4560 		mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4561 		break;
4562 	case FIB_EVENT_NH_DEL:
4563 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4564 		break;
4565 	}
4566 
4567 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4568 }
4569 
4570 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4571 					struct mlxsw_sp_rif *rif)
4572 {
4573 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
4574 	struct mlxsw_sp_nexthop *nh;
4575 	bool removing;
4576 
4577 	list_for_each_entry(nh, &rif->crif->nexthop_list, crif_list_node) {
4578 		switch (nh->type) {
4579 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
4580 			removing = false;
4581 			break;
4582 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4583 			removing = !mlxsw_sp_ipip_netdev_ul_up(dev);
4584 			break;
4585 		default:
4586 			WARN_ON(1);
4587 			continue;
4588 		}
4589 
4590 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4591 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4592 	}
4593 }
4594 
4595 static int mlxsw_sp_nexthop_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
4596 					  struct mlxsw_sp_rif *rif)
4597 {
4598 	struct mlxsw_sp_nexthop *nh, *tmp;
4599 	unsigned int n = 0;
4600 	int err;
4601 
4602 	list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4603 				 crif_list_node) {
4604 		err = mlxsw_sp_nexthop_type_rif_made(mlxsw_sp, nh);
4605 		if (err)
4606 			goto err_nexthop_type_rif;
4607 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4608 		n++;
4609 	}
4610 
4611 	return 0;
4612 
4613 err_nexthop_type_rif:
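	/* Roll back only the first 'n' nexthops, which were successfully
	 * initialized before the failure.
	 */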
4614 	list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4615 				 crif_list_node) {
4616 		if (!n--)
4617 			break;
4618 		mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4619 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4620 	}
4621 	return err;
4622 }
4623 
4624 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4625 					   struct mlxsw_sp_rif *rif)
4626 {
4627 	struct mlxsw_sp_nexthop *nh, *tmp;
4628 
4629 	list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4630 				 crif_list_node) {
4631 		mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4632 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4633 	}
4634 }
4635 
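/* Allocate and program a single adjacency entry that traps packets to the
 * CPU. It is created when the first nexthop group is added and freed when
 * the last one is removed (see the num_groups refcount below), and like
 * other trap / discard entries it is bound to the loopback RIF.
 */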
4636 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4637 {
4638 	enum mlxsw_reg_ratr_trap_action trap_action;
4639 	char ratr_pl[MLXSW_REG_RATR_LEN];
4640 	int err;
4641 
4642 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4643 				  &mlxsw_sp->router->adj_trap_index);
4644 	if (err)
4645 		return err;
4646 
4647 	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4648 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4649 			    MLXSW_REG_RATR_TYPE_ETHERNET,
4650 			    mlxsw_sp->router->adj_trap_index,
4651 			    mlxsw_sp->router->lb_crif->rif->rif_index);
4652 	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4653 	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4654 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4655 	if (err)
4656 		goto err_ratr_write;
4657 
4658 	return 0;
4659 
4660 err_ratr_write:
4661 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4662 			   mlxsw_sp->router->adj_trap_index);
4663 	return err;
4664 }
4665 
4666 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4667 {
4668 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4669 			   mlxsw_sp->router->adj_trap_index);
4670 }
4671 
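/* The inc-from-zero / dec-to-zero pair below is not safe on its own and
 * relies on external serialization of the callers.
 */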
4672 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4673 {
4674 	int err;
4675 
4676 	if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4677 		return 0;
4678 
4679 	err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4680 	if (err)
4681 		return err;
4682 
4683 	refcount_set(&mlxsw_sp->router->num_groups, 1);
4684 
4685 	return 0;
4686 }
4687 
4688 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4689 {
4690 	if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4691 		return;
4692 
4693 	mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
4694 }
4695 
4696 static void
4697 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4698 			     const struct mlxsw_sp_nexthop_group *nh_grp,
4699 			     unsigned long *activity)
4700 {
4701 	char *ratrad_pl;
4702 	int i, err;
4703 
4704 	ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4705 	if (!ratrad_pl)
4706 		return;
4707 
4708 	mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4709 			      nh_grp->nhgi->count);
4710 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4711 	if (err)
4712 		goto out;
4713 
4714 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4715 		if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4716 			continue;
4717 		bitmap_set(activity, i, 1);
4718 	}
4719 
4720 out:
4721 	kfree(ratrad_pl);
4722 }
4723 
4724 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4725 
4726 static void
4727 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4728 				const struct mlxsw_sp_nexthop_group *nh_grp)
4729 {
4730 	unsigned long *activity;
4731 
4732 	activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4733 	if (!activity)
4734 		return;
4735 
4736 	mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4737 	nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4738 					nh_grp->nhgi->count, activity);
4739 
4740 	bitmap_free(activity);
4741 }
4742 
4743 static void
4744 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4745 {
4746 	unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4747 
4748 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4749 			       msecs_to_jiffies(interval));
4750 }
4751 
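/* Periodically feed the queried bucket activity to the nexthop core so
 * that the idle timers of resilient groups operate correctly. The work
 * rearms itself for as long as resilient groups exist.
 */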
4752 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4753 {
4754 	struct mlxsw_sp_nexthop_group_info *nhgi;
4755 	struct mlxsw_sp_router *router;
4756 	bool reschedule = false;
4757 
4758 	router = container_of(work, struct mlxsw_sp_router,
4759 			      nh_grp_activity_dw.work);
4760 
4761 	mutex_lock(&router->lock);
4762 
4763 	list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4764 		mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4765 		reschedule = true;
4766 	}
4767 
4768 	mutex_unlock(&router->lock);
4769 
4770 	if (!reschedule)
4771 		return;
4772 	mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4773 }
4774 
4775 static int
4776 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4777 				     const struct nh_notifier_single_info *nh,
4778 				     struct netlink_ext_ack *extack)
4779 {
4780 	int err = -EINVAL;
4781 
4782 	if (nh->is_fdb)
4783 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4784 	else if (nh->has_encap)
4785 		NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4786 	else
4787 		err = 0;
4788 
4789 	return err;
4790 }
4791 
4792 static int
4793 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4794 					  const struct nh_notifier_single_info *nh,
4795 					  struct netlink_ext_ack *extack)
4796 {
4797 	int err;
4798 
4799 	err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4800 	if (err)
4801 		return err;
4802 
4803 	/* Device-only nexthops with an IPIP device are programmed as
4804 	 * encapsulating adjacency entries.
4805 	 */
4806 	if (!nh->gw_family && !nh->is_reject &&
4807 	    !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4808 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4809 		return -EINVAL;
4810 	}
4811 
4812 	return 0;
4813 }
4814 
4815 static int
4816 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4817 				    const struct nh_notifier_grp_info *nh_grp,
4818 				    struct netlink_ext_ack *extack)
4819 {
4820 	int i;
4821 
4822 	if (nh_grp->is_fdb) {
4823 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4824 		return -EINVAL;
4825 	}
4826 
4827 	for (i = 0; i < nh_grp->num_nh; i++) {
4828 		const struct nh_notifier_single_info *nh;
4829 		int err;
4830 
4831 		nh = &nh_grp->nh_entries[i].nh;
4832 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4833 								extack);
4834 		if (err)
4835 			return err;
4836 	}
4837 
4838 	return 0;
4839 }
4840 
4841 static int
4842 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4843 					     const struct nh_notifier_res_table_info *nh_res_table,
4844 					     struct netlink_ext_ack *extack)
4845 {
4846 	unsigned int alloc_size;
4847 	bool valid_size = false;
4848 	int err, i;
4849 
4850 	if (nh_res_table->num_nh_buckets < 32) {
4851 		NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4852 		return -EINVAL;
4853 	}
4854 
4855 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4856 		const struct mlxsw_sp_adj_grp_size_range *size_range;
4857 
4858 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4859 
4860 		if (nh_res_table->num_nh_buckets >= size_range->start &&
4861 		    nh_res_table->num_nh_buckets <= size_range->end) {
4862 			valid_size = true;
4863 			break;
4864 		}
4865 	}
4866 
4867 	if (!valid_size) {
4868 		NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4869 		return -EINVAL;
4870 	}
4871 
4872 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4873 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4874 					      nh_res_table->num_nh_buckets,
4875 					      &alloc_size);
4876 	if (err || nh_res_table->num_nh_buckets != alloc_size) {
4877 		NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
4878 		return -EINVAL;
4879 	}
4880 
4881 	return 0;
4882 }
4883 
4884 static int
4885 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4886 					const struct nh_notifier_res_table_info *nh_res_table,
4887 					struct netlink_ext_ack *extack)
4888 {
4889 	int err;
4890 	u16 i;
4891 
4892 	err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4893 							   nh_res_table,
4894 							   extack);
4895 	if (err)
4896 		return err;
4897 
4898 	for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4899 		const struct nh_notifier_single_info *nh;
4900 		int err;
4901 
4902 		nh = &nh_res_table->nhs[i];
4903 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4904 								extack);
4905 		if (err)
4906 			return err;
4907 	}
4908 
4909 	return 0;
4910 }
4911 
4912 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4913 					 unsigned long event,
4914 					 struct nh_notifier_info *info)
4915 {
4916 	struct nh_notifier_single_info *nh;
4917 
4918 	if (event != NEXTHOP_EVENT_REPLACE &&
4919 	    event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
4920 	    event != NEXTHOP_EVENT_BUCKET_REPLACE)
4921 		return 0;
4922 
4923 	switch (info->type) {
4924 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4925 		return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4926 							    info->extack);
4927 	case NH_NOTIFIER_INFO_TYPE_GRP:
4928 		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4929 							   info->nh_grp,
4930 							   info->extack);
4931 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4932 		return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
4933 							       info->nh_res_table,
4934 							       info->extack);
4935 	case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
4936 		nh = &info->nh_res_bucket->new_nh;
4937 		return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4938 								 info->extack);
4939 	default:
4940 		NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4941 		return -EOPNOTSUPP;
4942 	}
4943 }
4944 
4945 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4946 					    const struct nh_notifier_info *info)
4947 {
4948 	const struct net_device *dev;
4949 
4950 	switch (info->type) {
4951 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4952 		dev = info->nh->dev;
4953 		return info->nh->gw_family || info->nh->is_reject ||
4954 		       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4955 	case NH_NOTIFIER_INFO_TYPE_GRP:
4956 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4957 		/* Already validated earlier. */
4958 		return true;
4959 	default:
4960 		return false;
4961 	}
4962 }
4963 
4964 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4965 						struct mlxsw_sp_nexthop *nh)
4966 {
4967 	nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
4968 	nh->should_offload = 1;
4969 	/* While nexthops that discard packets do not forward packets
4970 	 * via an egress RIF, they still need to be programmed using a
4971 	 * valid RIF, so use the loopback RIF created during init.
4972 	 */
4973 	nh->crif = mlxsw_sp->router->lb_crif;
4974 }
4975 
4976 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4977 						struct mlxsw_sp_nexthop *nh)
4978 {
4979 	nh->crif = NULL;
4980 	nh->should_offload = 0;
4981 }
4982 
4983 static int
4984 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4985 			  struct mlxsw_sp_nexthop_group *nh_grp,
4986 			  struct mlxsw_sp_nexthop *nh,
4987 			  struct nh_notifier_single_info *nh_obj, int weight)
4988 {
4989 	struct net_device *dev = nh_obj->dev;
4990 	int err;
4991 
4992 	nh->nhgi = nh_grp->nhgi;
4993 	nh->nh_weight = weight;
4994 
4995 	switch (nh_obj->gw_family) {
4996 	case AF_INET:
4997 		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
4998 		nh->neigh_tbl = &arp_tbl;
4999 		break;
5000 	case AF_INET6:
5001 		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
5002 #if IS_ENABLED(CONFIG_IPV6)
5003 		nh->neigh_tbl = &nd_tbl;
5004 #endif
5005 		break;
5006 	}
5007 
5008 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
5009 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5010 	nh->ifindex = dev->ifindex;
5011 
5012 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
5013 	if (err)
5014 		goto err_type_init;
5015 
5016 	if (nh_obj->is_reject)
5017 		mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
5018 
5019 	/* In a resilient nexthop group, all the nexthops must be written to
5020 	 * the adjacency table, even if they do not have a valid neighbour or
5021 	 * RIF.
5022 	 */
5023 	if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
5024 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
5025 		nh->should_offload = 1;
5026 	}
5027 
5028 	return 0;
5029 
5030 err_type_init:
5031 	list_del(&nh->router_list_node);
5032 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5033 	return err;
5034 }
5035 
5036 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
5037 				      struct mlxsw_sp_nexthop *nh)
5038 {
5039 	if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
5040 		mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
5041 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
5042 	list_del(&nh->router_list_node);
5043 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5044 	nh->should_offload = 0;
5045 }
5046 
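/* A single nexthop object is modeled as a group of one, and a resilient
 * group allocates one nexthop per bucket.
 */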
5047 static int
5048 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
5049 				     struct mlxsw_sp_nexthop_group *nh_grp,
5050 				     struct nh_notifier_info *info)
5051 {
5052 	struct mlxsw_sp_nexthop_group_info *nhgi;
5053 	struct mlxsw_sp_nexthop *nh;
5054 	bool is_resilient = false;
5055 	unsigned int nhs;
5056 	int err, i;
5057 
5058 	switch (info->type) {
5059 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
5060 		nhs = 1;
5061 		break;
5062 	case NH_NOTIFIER_INFO_TYPE_GRP:
5063 		nhs = info->nh_grp->num_nh;
5064 		break;
5065 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
5066 		nhs = info->nh_res_table->num_nh_buckets;
5067 		is_resilient = true;
5068 		break;
5069 	default:
5070 		return -EINVAL;
5071 	}
5072 
5073 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5074 	if (!nhgi)
5075 		return -ENOMEM;
5076 	nh_grp->nhgi = nhgi;
5077 	nhgi->nh_grp = nh_grp;
5078 	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
5079 	nhgi->is_resilient = is_resilient;
5080 	nhgi->count = nhs;
5081 	for (i = 0; i < nhgi->count; i++) {
5082 		struct nh_notifier_single_info *nh_obj;
5083 		int weight;
5084 
5085 		nh = &nhgi->nexthops[i];
5086 		switch (info->type) {
5087 		case NH_NOTIFIER_INFO_TYPE_SINGLE:
5088 			nh_obj = info->nh;
5089 			weight = 1;
5090 			break;
5091 		case NH_NOTIFIER_INFO_TYPE_GRP:
5092 			nh_obj = &info->nh_grp->nh_entries[i].nh;
5093 			weight = info->nh_grp->nh_entries[i].weight;
5094 			break;
5095 		case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
5096 			nh_obj = &info->nh_res_table->nhs[i];
5097 			weight = 1;
5098 			break;
5099 		default:
5100 			err = -EINVAL;
5101 			goto err_nexthop_obj_init;
5102 		}
5103 		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
5104 						weight);
5105 		if (err)
5106 			goto err_nexthop_obj_init;
5107 	}
5108 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5109 	if (err)
5110 		goto err_group_inc;
5111 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5112 	if (err) {
5113 		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
5114 		goto err_group_refresh;
5115 	}
5116 
5117 	/* Add resilient nexthop groups to a list so that the activity of their
5118 	 * nexthop buckets will be periodically queried and cleared.
5119 	 */
5120 	if (nhgi->is_resilient) {
5121 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5122 			mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
5123 		list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
5124 	}
5125 
5126 	return 0;
5127 
5128 err_group_refresh:
5129 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5130 err_group_inc:
5131 	i = nhgi->count;
5132 err_nexthop_obj_init:
5133 	for (i--; i >= 0; i--) {
5134 		nh = &nhgi->nexthops[i];
5135 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5136 	}
5137 	kfree(nhgi);
5138 	return err;
5139 }
5140 
5141 static void
5142 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5143 				     struct mlxsw_sp_nexthop_group *nh_grp)
5144 {
5145 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5146 	struct mlxsw_sp_router *router = mlxsw_sp->router;
5147 	int i;
5148 
5149 	if (nhgi->is_resilient) {
5150 		list_del(&nhgi->list);
5151 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5152 			cancel_delayed_work(&router->nh_grp_activity_dw);
5153 	}
5154 
5155 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5156 	for (i = nhgi->count - 1; i >= 0; i--) {
5157 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5158 
5159 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5160 	}
5161 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5162 	WARN_ON_ONCE(nhgi->adj_index_valid);
5163 	kfree(nhgi);
5164 }
5165 
5166 static struct mlxsw_sp_nexthop_group *
5167 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
5168 				  struct nh_notifier_info *info)
5169 {
5170 	struct mlxsw_sp_nexthop_group *nh_grp;
5171 	int err;
5172 
5173 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5174 	if (!nh_grp)
5175 		return ERR_PTR(-ENOMEM);
5176 	INIT_LIST_HEAD(&nh_grp->vr_list);
5177 	err = rhashtable_init(&nh_grp->vr_ht,
5178 			      &mlxsw_sp_nexthop_group_vr_ht_params);
5179 	if (err)
5180 		goto err_nexthop_group_vr_ht_init;
5181 	INIT_LIST_HEAD(&nh_grp->fib_list);
5182 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5183 	nh_grp->obj.id = info->id;
5184 
5185 	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
5186 	if (err)
5187 		goto err_nexthop_group_info_init;
5188 
5189 	nh_grp->can_destroy = false;
5190 
5191 	return nh_grp;
5192 
5193 err_nexthop_group_info_init:
5194 	rhashtable_destroy(&nh_grp->vr_ht);
5195 err_nexthop_group_vr_ht_init:
5196 	kfree(nh_grp);
5197 	return ERR_PTR(err);
5198 }
5199 
5200 static void
5201 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
5202 				   struct mlxsw_sp_nexthop_group *nh_grp)
5203 {
5204 	if (!nh_grp->can_destroy)
5205 		return;
5206 	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
5207 	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
5208 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5209 	rhashtable_destroy(&nh_grp->vr_ht);
5210 	kfree(nh_grp);
5211 }
5212 
5213 static struct mlxsw_sp_nexthop_group *
5214 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
5215 {
5216 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
5217 
5218 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5219 	cmp_arg.id = id;
5220 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
5221 				      &cmp_arg,
5222 				      mlxsw_sp_nexthop_group_ht_params);
5223 }
5224 
5225 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
5226 					  struct mlxsw_sp_nexthop_group *nh_grp)
5227 {
5228 	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5229 }
5230 
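/* Replace works by swapping the group-info pointers: the routes keep
 * using 'old_nh_grp', which now carries the new info, while the new
 * shell group inherits the old info and is destroyed at the end.
 */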
5231 static int
5232 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
5233 				   struct mlxsw_sp_nexthop_group *nh_grp,
5234 				   struct mlxsw_sp_nexthop_group *old_nh_grp,
5235 				   struct netlink_ext_ack *extack)
5236 {
5237 	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
5238 	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
5239 	int err;
5240 
5241 	old_nh_grp->nhgi = new_nhgi;
5242 	new_nhgi->nh_grp = old_nh_grp;
5243 	nh_grp->nhgi = old_nhgi;
5244 	old_nhgi->nh_grp = nh_grp;
5245 
5246 	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5247 		/* Both the old adjacency index and the new one are valid.
5248 		 * Routes are currently using the old one. Tell the device to
5249 		 * replace the old adjacency index with the new one.
5250 		 */
5251 		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
5252 						     old_nhgi->adj_index,
5253 						     old_nhgi->ecmp_size);
5254 		if (err) {
5255 			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
5256 			goto err_out;
5257 		}
5258 	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
5259 		/* The old adjacency index is valid, while the new one is not.
5260 		 * Iterate over all the routes using the group and change them
5261 		 * to trap packets to the CPU.
5262 		 */
5263 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5264 		if (err) {
5265 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
5266 			goto err_out;
5267 		}
5268 	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5269 		/* The old adjacency index is invalid, while the new one is.
5270 		 * Iterate over all the routes using the group and change them
5271 		 * to forward packets using the new valid index.
5272 		 */
5273 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5274 		if (err) {
5275 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
5276 			goto err_out;
5277 		}
5278 	}
5279 
5280 	/* Make sure the flags are set / cleared based on the new nexthop group
5281 	 * information.
5282 	 */
5283 	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
5284 
5285 	/* At this point 'nh_grp' is just a shell that is not used by anyone
5286 	 * and its nexthop group info is the old info that was just replaced
5287 	 * with the new one. Remove it.
5288 	 */
5289 	nh_grp->can_destroy = true;
5290 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5291 
5292 	return 0;
5293 
5294 err_out:
5295 	old_nhgi->nh_grp = old_nh_grp;
5296 	nh_grp->nhgi = new_nhgi;
5297 	new_nhgi->nh_grp = nh_grp;
5298 	old_nh_grp->nhgi = old_nhgi;
5299 	return err;
5300 }
5301 
5302 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
5303 				    struct nh_notifier_info *info)
5304 {
5305 	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
5306 	struct netlink_ext_ack *extack = info->extack;
5307 	int err;
5308 
5309 	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
5310 	if (IS_ERR(nh_grp))
5311 		return PTR_ERR(nh_grp);
5312 
5313 	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5314 	if (!old_nh_grp)
5315 		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
5316 	else
5317 		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
5318 							 old_nh_grp, extack);
5319 
5320 	if (err) {
5321 		nh_grp->can_destroy = true;
5322 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5323 	}
5324 
5325 	return err;
5326 }
5327 
5328 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
5329 				     struct nh_notifier_info *info)
5330 {
5331 	struct mlxsw_sp_nexthop_group *nh_grp;
5332 
5333 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5334 	if (!nh_grp)
5335 		return;
5336 
5337 	nh_grp->can_destroy = true;
5338 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5339 
5340 	/* If the group still has routes using it, then defer the delete
5341 	 * operation until the last route using it is deleted.
5342 	 */
5343 	if (!list_empty(&nh_grp->fib_list))
5344 		return;
5345 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5346 }
5347 
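/* Read back an adjacency entry so that a non-forced (on-activity)
 * bucket replacement can be verified to have actually taken effect.
 */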
5348 static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
5349 					     u32 adj_index, char *ratr_pl)
5350 {
5351 	MLXSW_REG_ZERO(ratr, ratr_pl);
5352 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5353 	mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
5354 	mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
5355 
5356 	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5357 }
5358 
5359 static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
5360 {
5361 	/* Clear the opcode and activity on both the old and new payloads as
5362 	 * they are irrelevant for the comparison.
5363 	 */
5364 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5365 	mlxsw_reg_ratr_a_set(ratr_pl, 0);
5366 	mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
5367 	mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
5368 
5369 	/* If the contents of the adjacency entry are consistent with the
5370 	 * replacement request, then replacement was successful.
5371 	 */
5372 	if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
5373 		return 0;
5374 
5375 	return -EINVAL;
5376 }
5377 
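/* Write the new nexthop configuration to the bucket's adjacency entry. For a
 * non-forced replacement, read the entry back and compare it against the
 * requested configuration; a mismatch means the entry was active and was
 * therefore not overwritten.
 */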
5378 static int
5379 mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
5380 				       struct mlxsw_sp_nexthop *nh,
5381 				       struct nh_notifier_info *info)
5382 {
5383 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5384 	struct netlink_ext_ack *extack = info->extack;
5385 	bool force = info->nh_res_bucket->force;
5386 	char ratr_pl_new[MLXSW_REG_RATR_LEN];
5387 	char ratr_pl[MLXSW_REG_RATR_LEN];
5388 	u32 adj_index;
5389 	int err;
5390 
5391 	/* No point in trying an atomic replacement if the idle timer interval
5392 	 * is smaller than the interval in which we query and clear activity.
5393 	 */
5394 	if (!force && info->nh_res_bucket->idle_timer_ms <
5395 	    MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
5396 		force = true;
5397 
5398 	adj_index = nh->nhgi->adj_index + bucket_index;
5399 	err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
5400 	if (err) {
5401 		NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
5402 		return err;
5403 	}
5404 
5405 	if (!force) {
5406 		err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
5407 							ratr_pl_new);
5408 		if (err) {
5409 			NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
5410 			return err;
5411 		}
5412 
5413 		err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
5414 		if (err) {
5415 			NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
5416 			return err;
5417 		}
5418 	}
5419 
5420 	nh->update = 0;
5421 	nh->offloaded = 1;
5422 	mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
5423 
5424 	return 0;
5425 }
5426 
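/* Replace a single bucket of a resilient nexthop group: release the nexthop
 * currently backing the bucket, initialize it from the new nexthop object and
 * update the matching adjacency entry. On failure, the nexthop is restored
 * from the old nexthop object.
 */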
5427 static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
5428 					       struct nh_notifier_info *info)
5429 {
5430 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5431 	struct netlink_ext_ack *extack = info->extack;
5432 	struct mlxsw_sp_nexthop_group_info *nhgi;
5433 	struct nh_notifier_single_info *nh_obj;
5434 	struct mlxsw_sp_nexthop_group *nh_grp;
5435 	struct mlxsw_sp_nexthop *nh;
5436 	int err;
5437 
5438 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5439 	if (!nh_grp) {
5440 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
5441 		return -EINVAL;
5442 	}
5443 
5444 	nhgi = nh_grp->nhgi;
5445 
5446 	if (bucket_index >= nhgi->count) {
5447 		NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
5448 		return -EINVAL;
5449 	}
5450 
5451 	nh = &nhgi->nexthops[bucket_index];
5452 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5453 
5454 	nh_obj = &info->nh_res_bucket->new_nh;
5455 	err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5456 	if (err) {
5457 		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
5458 		goto err_nexthop_obj_init;
5459 	}
5460 
5461 	err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
5462 	if (err)
5463 		goto err_nexthop_obj_bucket_adj_update;
5464 
5465 	return 0;
5466 
5467 err_nexthop_obj_bucket_adj_update:
5468 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5469 err_nexthop_obj_init:
5470 	nh_obj = &info->nh_res_bucket->old_nh;
5471 	mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5472 	/* The old adjacency entry was not overwritten */
5473 	nh->update = 0;
5474 	nh->offloaded = 1;
5475 	return err;
5476 }
5477 
5478 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
5479 				      unsigned long event, void *ptr)
5480 {
5481 	struct nh_notifier_info *info = ptr;
5482 	struct mlxsw_sp_router *router;
5483 	int err = 0;
5484 
5485 	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
5486 	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
5487 	if (err)
5488 		goto out;
5489 
5490 	mutex_lock(&router->lock);
5491 
5492 	switch (event) {
5493 	case NEXTHOP_EVENT_REPLACE:
5494 		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
5495 		break;
5496 	case NEXTHOP_EVENT_DEL:
5497 		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
5498 		break;
5499 	case NEXTHOP_EVENT_BUCKET_REPLACE:
5500 		err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
5501 							  info);
5502 		break;
5503 	default:
5504 		break;
5505 	}
5506 
5507 	mutex_unlock(&router->lock);
5508 
5509 out:
5510 	return notifier_from_errno(err);
5511 }
5512 
5513 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5514 				   struct fib_info *fi)
5515 {
5516 	const struct fib_nh *nh = fib_info_nh(fi, 0);
5517 
5518 	return nh->fib_nh_gw_family ||
5519 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5520 }
5521 
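/* Allocate the nexthop group info for an IPv4 group, with one nexthop per
 * path of the backing fib_info, and let the group refresh allocate and write
 * the adjacency entries when the group has gateway nexthops.
 */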
5522 static int
5523 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
5524 				  struct mlxsw_sp_nexthop_group *nh_grp)
5525 {
5526 	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
5527 	struct mlxsw_sp_nexthop_group_info *nhgi;
5528 	struct mlxsw_sp_nexthop *nh;
5529 	int err, i;
5530 
5531 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5532 	if (!nhgi)
5533 		return -ENOMEM;
5534 	nh_grp->nhgi = nhgi;
5535 	nhgi->nh_grp = nh_grp;
5536 	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
5537 	nhgi->count = nhs;
5538 	for (i = 0; i < nhgi->count; i++) {
5539 		struct fib_nh *fib_nh;
5540 
5541 		nh = &nhgi->nexthops[i];
5542 		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
5543 		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
5544 		if (err)
5545 			goto err_nexthop4_init;
5546 	}
5547 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5548 	if (err)
5549 		goto err_group_inc;
5550 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5551 	if (err)
5552 		goto err_group_refresh;
5553 
5554 	return 0;
5555 
5556 err_group_refresh:
5557 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5558 err_group_inc:
5559 	i = nhgi->count;
5560 err_nexthop4_init:
5561 	for (i--; i >= 0; i--) {
5562 		nh = &nhgi->nexthops[i];
5563 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5564 	}
5565 	kfree(nhgi);
5566 	return err;
5567 }
5568 
5569 static void
5570 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5571 				  struct mlxsw_sp_nexthop_group *nh_grp)
5572 {
5573 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5574 	int i;
5575 
5576 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5577 	for (i = nhgi->count - 1; i >= 0; i--) {
5578 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5579 
5580 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5581 	}
5582 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5583 	WARN_ON_ONCE(nhgi->adj_index_valid);
5584 	kfree(nhgi);
5585 }
5586 
5587 static struct mlxsw_sp_nexthop_group *
5588 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
5589 {
5590 	struct mlxsw_sp_nexthop_group *nh_grp;
5591 	int err;
5592 
5593 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5594 	if (!nh_grp)
5595 		return ERR_PTR(-ENOMEM);
5596 	INIT_LIST_HEAD(&nh_grp->vr_list);
5597 	err = rhashtable_init(&nh_grp->vr_ht,
5598 			      &mlxsw_sp_nexthop_group_vr_ht_params);
5599 	if (err)
5600 		goto err_nexthop_group_vr_ht_init;
5601 	INIT_LIST_HEAD(&nh_grp->fib_list);
5602 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
5603 	nh_grp->ipv4.fi = fi;
5604 	fib_info_hold(fi);
5605 
5606 	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
5607 	if (err)
5608 		goto err_nexthop_group_info_init;
5609 
5610 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5611 	if (err)
5612 		goto err_nexthop_group_insert;
5613 
5614 	nh_grp->can_destroy = true;
5615 
5616 	return nh_grp;
5617 
5618 err_nexthop_group_insert:
5619 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5620 err_nexthop_group_info_init:
5621 	fib_info_put(fi);
5622 	rhashtable_destroy(&nh_grp->vr_ht);
5623 err_nexthop_group_vr_ht_init:
5624 	kfree(nh_grp);
5625 	return ERR_PTR(err);
5626 }
5627 
5628 static void
5629 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
5630 				struct mlxsw_sp_nexthop_group *nh_grp)
5631 {
5632 	if (!nh_grp->can_destroy)
5633 		return;
5634 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5635 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5636 	fib_info_put(nh_grp->ipv4.fi);
5637 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5638 	rhashtable_destroy(&nh_grp->vr_ht);
5639 	kfree(nh_grp);
5640 }
5641 
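/* Bind the FIB entry to a nexthop group. Routes using a nexthop object
 * (fi->nh) are bound to the group previously created from that object;
 * otherwise an IPv4 group keyed by the fib_info is looked up or created.
 */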
5642 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
5643 				       struct mlxsw_sp_fib_entry *fib_entry,
5644 				       struct fib_info *fi)
5645 {
5646 	struct mlxsw_sp_nexthop_group *nh_grp;
5647 
5648 	if (fi->nh) {
5649 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
5650 							   fi->nh->id);
5651 		if (WARN_ON_ONCE(!nh_grp))
5652 			return -EINVAL;
5653 		goto out;
5654 	}
5655 
5656 	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
5657 	if (!nh_grp) {
5658 		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
5659 		if (IS_ERR(nh_grp))
5660 			return PTR_ERR(nh_grp);
5661 	}
5662 out:
5663 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
5664 	fib_entry->nh_group = nh_grp;
5665 	return 0;
5666 }
5667 
5668 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
5669 					struct mlxsw_sp_fib_entry *fib_entry)
5670 {
5671 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5672 
5673 	list_del(&fib_entry->nexthop_group_node);
5674 	if (!list_empty(&nh_grp->fib_list))
5675 		return;
5676 
5677 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
5678 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5679 		return;
5680 	}
5681 
5682 	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
5683 }
5684 
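/* The device's LPM lookup does not match on the DSCP field, so IPv4 routes
 * that are keyed on a specific DSCP value are not offloaded and are instead
 * installed with a trap action.
 */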
5685 static bool
5686 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5687 {
5688 	struct mlxsw_sp_fib4_entry *fib4_entry;
5689 
5690 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5691 				  common);
5692 	return !fib4_entry->dscp;
5693 }
5694 
5695 static bool
5696 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5697 {
5698 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5699 
5700 	switch (fib_entry->fib_node->fib->proto) {
5701 	case MLXSW_SP_L3_PROTO_IPV4:
5702 		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
5703 			return false;
5704 		break;
5705 	case MLXSW_SP_L3_PROTO_IPV6:
5706 		break;
5707 	}
5708 
5709 	switch (fib_entry->type) {
5710 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5711 		return !!nh_group->nhgi->adj_index_valid;
5712 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5713 		return !!mlxsw_sp_nhgi_rif(nh_group->nhgi);
5714 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5715 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5716 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5717 		return true;
5718 	default:
5719 		return false;
5720 	}
5721 }
5722 
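/* Find the group nexthop corresponding to the given IPv6 route, by matching
 * on the nexthop device and gateway address.
 */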
5723 static struct mlxsw_sp_nexthop *
5724 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
5725 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5726 {
5727 	int i;
5728 
5729 	for (i = 0; i < nh_grp->nhgi->count; i++) {
5730 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
5731 		struct net_device *dev = mlxsw_sp_nexthop_dev(nh);
5732 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
5733 
5734 		if (dev && dev == rt->fib6_nh->fib_nh_dev &&
5735 		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
5736 				    &rt->fib6_nh->fib_nh_gw6))
5737 			return nh;
5738 	}
5739 
5740 	return NULL;
5741 }
5742 
5743 static void
5744 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5745 				      struct fib_entry_notifier_info *fen_info)
5746 {
5747 	u32 *p_dst = (u32 *) &fen_info->dst;
5748 	struct fib_rt_info fri;
5749 
5750 	fri.fi = fen_info->fi;
5751 	fri.tb_id = fen_info->tb_id;
5752 	fri.dst = cpu_to_be32(*p_dst);
5753 	fri.dst_len = fen_info->dst_len;
5754 	fri.dscp = fen_info->dscp;
5755 	fri.type = fen_info->type;
5756 	fri.offload = false;
5757 	fri.trap = false;
5758 	fri.offload_failed = true;
5759 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5760 }
5761 
5762 static void
5763 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5764 				 struct mlxsw_sp_fib_entry *fib_entry)
5765 {
5766 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5767 	int dst_len = fib_entry->fib_node->key.prefix_len;
5768 	struct mlxsw_sp_fib4_entry *fib4_entry;
5769 	struct fib_rt_info fri;
5770 	bool should_offload;
5771 
5772 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5773 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5774 				  common);
5775 	fri.fi = fib4_entry->fi;
5776 	fri.tb_id = fib4_entry->tb_id;
5777 	fri.dst = cpu_to_be32(*p_dst);
5778 	fri.dst_len = dst_len;
5779 	fri.dscp = fib4_entry->dscp;
5780 	fri.type = fib4_entry->type;
5781 	fri.offload = should_offload;
5782 	fri.trap = !should_offload;
5783 	fri.offload_failed = false;
5784 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5785 }
5786 
5787 static void
5788 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5789 				   struct mlxsw_sp_fib_entry *fib_entry)
5790 {
5791 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5792 	int dst_len = fib_entry->fib_node->key.prefix_len;
5793 	struct mlxsw_sp_fib4_entry *fib4_entry;
5794 	struct fib_rt_info fri;
5795 
5796 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5797 				  common);
5798 	fri.fi = fib4_entry->fi;
5799 	fri.tb_id = fib4_entry->tb_id;
5800 	fri.dst = cpu_to_be32(*p_dst);
5801 	fri.dst_len = dst_len;
5802 	fri.dscp = fib4_entry->dscp;
5803 	fri.type = fib4_entry->type;
5804 	fri.offload = false;
5805 	fri.trap = false;
5806 	fri.offload_failed = false;
5807 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5808 }
5809 
5810 #if IS_ENABLED(CONFIG_IPV6)
5811 static void
5812 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5813 				      struct fib6_info **rt_arr,
5814 				      unsigned int nrt6)
5815 {
5816 	int i;
5817 
5818 	/* In IPv6 a multipath route is represented using multiple routes, so
5819 	 * we need to set the flags on all of them.
5820 	 */
5821 	for (i = 0; i < nrt6; i++)
5822 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
5823 				       false, false, true);
5824 }
5825 #else
5826 static void
5827 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5828 				      struct fib6_info **rt_arr,
5829 				      unsigned int nrt6)
5830 {
5831 }
5832 #endif
5833 
5834 #if IS_ENABLED(CONFIG_IPV6)
5835 static void
5836 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5837 				 struct mlxsw_sp_fib_entry *fib_entry)
5838 {
5839 	struct mlxsw_sp_fib6_entry *fib6_entry;
5840 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5841 	bool should_offload;
5842 
5843 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5844 
5845 	/* In IPv6 a multipath route is represented using multiple routes, so
5846 	 * we need to set the flags on all of them.
5847 	 */
5848 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5849 				  common);
5850 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5851 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5852 				       should_offload, !should_offload, false);
5853 }
5854 #else
5855 static void
5856 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5857 				 struct mlxsw_sp_fib_entry *fib_entry)
5858 {
5859 }
5860 #endif
5861 
5862 #if IS_ENABLED(CONFIG_IPV6)
5863 static void
5864 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5865 				   struct mlxsw_sp_fib_entry *fib_entry)
5866 {
5867 	struct mlxsw_sp_fib6_entry *fib6_entry;
5868 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5869 
5870 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5871 				  common);
5872 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5873 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5874 				       false, false, false);
5875 }
5876 #else
5877 static void
5878 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5879 				   struct mlxsw_sp_fib_entry *fib_entry)
5880 {
5881 }
5882 #endif
5883 
5884 static void
5885 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5886 				struct mlxsw_sp_fib_entry *fib_entry)
5887 {
5888 	switch (fib_entry->fib_node->fib->proto) {
5889 	case MLXSW_SP_L3_PROTO_IPV4:
5890 		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5891 		break;
5892 	case MLXSW_SP_L3_PROTO_IPV6:
5893 		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5894 		break;
5895 	}
5896 }
5897 
5898 static void
5899 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5900 				  struct mlxsw_sp_fib_entry *fib_entry)
5901 {
5902 	switch (fib_entry->fib_node->fib->proto) {
5903 	case MLXSW_SP_L3_PROTO_IPV4:
5904 		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5905 		break;
5906 	case MLXSW_SP_L3_PROTO_IPV6:
5907 		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5908 		break;
5909 	}
5910 }
5911 
5912 static void
5913 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5914 				    struct mlxsw_sp_fib_entry *fib_entry,
5915 				    enum mlxsw_reg_ralue_op op)
5916 {
5917 	switch (op) {
5918 	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
5919 		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5920 		break;
5921 	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
5922 		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5923 		break;
5924 	default:
5925 		break;
5926 	}
5927 }
5928 
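/* Pack the part of the RALUE register that is common to all operations:
 * protocol, operation, virtual router and route prefix. The action-specific
 * part is packed by the callers below.
 */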
5929 static void
5930 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
5931 			      const struct mlxsw_sp_fib_entry *fib_entry,
5932 			      enum mlxsw_reg_ralue_op op)
5933 {
5934 	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5935 	enum mlxsw_reg_ralxx_protocol proto;
5936 	u32 *p_dip;
5937 
5938 	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
5939 
5940 	switch (fib->proto) {
5941 	case MLXSW_SP_L3_PROTO_IPV4:
5942 		p_dip = (u32 *) fib_entry->fib_node->key.addr;
5943 		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
5944 				      fib_entry->fib_node->key.prefix_len,
5945 				      *p_dip);
5946 		break;
5947 	case MLXSW_SP_L3_PROTO_IPV6:
5948 		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
5949 				      fib_entry->fib_node->key.prefix_len,
5950 				      fib_entry->fib_node->key.addr);
5951 		break;
5952 	}
5953 }
5954 
5955 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
5956 					struct mlxsw_sp_fib_entry *fib_entry,
5957 					enum mlxsw_reg_ralue_op op)
5958 {
5959 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5960 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
5961 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5962 	enum mlxsw_reg_ralue_trap_action trap_action;
5963 	u16 trap_id = 0;
5964 	u32 adjacency_index = 0;
5965 	u16 ecmp_size = 0;
5966 
5967 	/* In case the nexthop group adjacency index is valid, use it
5968 	 * with the provided ECMP size. Otherwise, set up a trap and pass
5969 	 * traffic to the kernel.
5970 	 */
5971 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5972 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5973 		adjacency_index = nhgi->adj_index;
5974 		ecmp_size = nhgi->ecmp_size;
5975 	} else if (!nhgi->adj_index_valid && nhgi->count &&
5976 		   mlxsw_sp_nhgi_rif(nhgi)) {
5977 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5978 		adjacency_index = mlxsw_sp->router->adj_trap_index;
5979 		ecmp_size = 1;
5980 	} else {
5981 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5982 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5983 	}
5984 
5985 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5986 	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
5987 					adjacency_index, ecmp_size);
5988 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5989 }
5990 
5991 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
5992 				       struct mlxsw_sp_fib_entry *fib_entry,
5993 				       enum mlxsw_reg_ralue_op op)
5994 {
5995 	struct mlxsw_sp_rif *rif = mlxsw_sp_nhgi_rif(fib_entry->nh_group->nhgi);
5996 	enum mlxsw_reg_ralue_trap_action trap_action;
5997 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5998 	u16 trap_id = 0;
5999 	u16 rif_index = 0;
6000 
6001 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
6002 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
6003 		rif_index = rif->rif_index;
6004 	} else {
6005 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6006 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
6007 	}
6008 
6009 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6010 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
6011 				       rif_index);
6012 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6013 }
6014 
6015 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
6016 				      struct mlxsw_sp_fib_entry *fib_entry,
6017 				      enum mlxsw_reg_ralue_op op)
6018 {
6019 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6020 
6021 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6022 	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
6023 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6024 }
6025 
6026 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
6027 					   struct mlxsw_sp_fib_entry *fib_entry,
6028 					   enum mlxsw_reg_ralue_op op)
6029 {
6030 	enum mlxsw_reg_ralue_trap_action trap_action;
6031 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6032 
6033 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
6034 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6035 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
6036 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6037 }
6038 
6039 static int
6040 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
6041 				  struct mlxsw_sp_fib_entry *fib_entry,
6042 				  enum mlxsw_reg_ralue_op op)
6043 {
6044 	enum mlxsw_reg_ralue_trap_action trap_action;
6045 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6046 	u16 trap_id;
6047 
6048 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6049 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
6050 
6051 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6052 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
6053 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6054 }
6055 
6056 static int
6057 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
6058 				 struct mlxsw_sp_fib_entry *fib_entry,
6059 				 enum mlxsw_reg_ralue_op op)
6060 {
6061 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
6062 	const struct mlxsw_sp_ipip_ops *ipip_ops;
6063 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6064 	int err;
6065 
6066 	if (WARN_ON(!ipip_entry))
6067 		return -EINVAL;
6068 
6069 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
6070 	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
6071 				     fib_entry->decap.tunnel_index);
6072 	if (err)
6073 		return err;
6074 
6075 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6076 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
6077 					   fib_entry->decap.tunnel_index);
6078 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6079 }
6080 
6081 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
6082 					   struct mlxsw_sp_fib_entry *fib_entry,
6083 					   enum mlxsw_reg_ralue_op op)
6084 {
6085 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6086 
6087 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6088 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
6089 					   fib_entry->decap.tunnel_index);
6090 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6091 }
6092 
6093 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6094 				   struct mlxsw_sp_fib_entry *fib_entry,
6095 				   enum mlxsw_reg_ralue_op op)
6096 {
6097 	switch (fib_entry->type) {
6098 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
6099 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
6100 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
6101 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
6102 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
6103 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
6104 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
6105 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
6106 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
6107 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
6108 							 op);
6109 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6110 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
6111 							fib_entry, op);
6112 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
6113 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
6114 	}
6115 	return -EINVAL;
6116 }
6117 
6118 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6119 				 struct mlxsw_sp_fib_entry *fib_entry,
6120 				 enum mlxsw_reg_ralue_op op)
6121 {
6122 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
6123 
6124 	if (err)
6125 		return err;
6126 
6127 	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
6128 
6129 	return 0;
6130 }
6131 
6132 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6133 				     struct mlxsw_sp_fib_entry *fib_entry)
6134 {
6135 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6136 				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
6137 }
6138 
6139 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
6140 				  struct mlxsw_sp_fib_entry *fib_entry)
6141 {
6142 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6143 				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
6144 }
6145 
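/* Derive the FIB entry type from the kernel route type. Local routes whose
 * destination matches the local address of an IP-in-IP tunnel or an NVE
 * tunnel become decap entries instead of traps.
 */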
6146 static int
6147 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6148 			     const struct fib_entry_notifier_info *fen_info,
6149 			     struct mlxsw_sp_fib_entry *fib_entry)
6150 {
6151 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6152 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
6153 	struct mlxsw_sp_router *router = mlxsw_sp->router;
6154 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
6155 	int ifindex = nhgi->nexthops[0].ifindex;
6156 	struct mlxsw_sp_ipip_entry *ipip_entry;
6157 
6158 	switch (fen_info->type) {
6159 	case RTN_LOCAL:
6160 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6161 							       MLXSW_SP_L3_PROTO_IPV4, dip);
6162 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6163 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6164 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
6165 							     fib_entry,
6166 							     ipip_entry);
6167 		}
6168 		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6169 						 MLXSW_SP_L3_PROTO_IPV4,
6170 						 &dip)) {
6171 			u32 tunnel_index;
6172 
6173 			tunnel_index = router->nve_decap_config.tunnel_index;
6174 			fib_entry->decap.tunnel_index = tunnel_index;
6175 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6176 			return 0;
6177 		}
6178 		fallthrough;
6179 	case RTN_BROADCAST:
6180 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6181 		return 0;
6182 	case RTN_BLACKHOLE:
6183 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6184 		return 0;
6185 	case RTN_UNREACHABLE:
6186 	case RTN_PROHIBIT:
6187 		/* Packets hitting these routes need to be trapped, but at
6188 		 * a lower priority than packets directed at the host, so
6189 		 * use action type local instead of trap.
6190 		 */
6191 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6192 		return 0;
6193 	case RTN_UNICAST:
6194 		if (nhgi->gateway)
6195 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6196 		else
6197 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6198 		return 0;
6199 	default:
6200 		return -EINVAL;
6201 	}
6202 }
6203 
6204 static void
6205 mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6206 			      struct mlxsw_sp_fib_entry *fib_entry)
6207 {
6208 	switch (fib_entry->type) {
6209 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6210 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6211 		break;
6212 	default:
6213 		break;
6214 	}
6215 }
6216 
6217 static void
6218 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6219 			       struct mlxsw_sp_fib4_entry *fib4_entry)
6220 {
6221 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6222 }
6223 
6224 static struct mlxsw_sp_fib4_entry *
6225 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6226 			   struct mlxsw_sp_fib_node *fib_node,
6227 			   const struct fib_entry_notifier_info *fen_info)
6228 {
6229 	struct mlxsw_sp_fib4_entry *fib4_entry;
6230 	struct mlxsw_sp_fib_entry *fib_entry;
6231 	int err;
6232 
6233 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6234 	if (!fib4_entry)
6235 		return ERR_PTR(-ENOMEM);
6236 	fib_entry = &fib4_entry->common;
6237 
6238 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6239 	if (err)
6240 		goto err_nexthop4_group_get;
6241 
6242 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6243 					     fib_node->fib);
6244 	if (err)
6245 		goto err_nexthop_group_vr_link;
6246 
6247 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6248 	if (err)
6249 		goto err_fib4_entry_type_set;
6250 
6251 	fib4_entry->fi = fen_info->fi;
6252 	fib_info_hold(fib4_entry->fi);
6253 	fib4_entry->tb_id = fen_info->tb_id;
6254 	fib4_entry->type = fen_info->type;
6255 	fib4_entry->dscp = fen_info->dscp;
6256 
6257 	fib_entry->fib_node = fib_node;
6258 
6259 	return fib4_entry;
6260 
6261 err_fib4_entry_type_set:
6262 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6263 err_nexthop_group_vr_link:
6264 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6265 err_nexthop4_group_get:
6266 	kfree(fib4_entry);
6267 	return ERR_PTR(err);
6268 }
6269 
6270 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6271 					struct mlxsw_sp_fib4_entry *fib4_entry)
6272 {
6273 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6274 
6275 	fib_info_put(fib4_entry->fi);
6276 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
6277 	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6278 					 fib_node->fib);
6279 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6280 	kfree(fib4_entry);
6281 }
6282 
6283 static struct mlxsw_sp_fib4_entry *
6284 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6285 			   const struct fib_entry_notifier_info *fen_info)
6286 {
6287 	struct mlxsw_sp_fib4_entry *fib4_entry;
6288 	struct mlxsw_sp_fib_node *fib_node;
6289 	struct mlxsw_sp_fib *fib;
6290 	struct mlxsw_sp_vr *vr;
6291 
6292 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6293 	if (!vr)
6294 		return NULL;
6295 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6296 
6297 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6298 					    sizeof(fen_info->dst),
6299 					    fen_info->dst_len);
6300 	if (!fib_node)
6301 		return NULL;
6302 
6303 	fib4_entry = container_of(fib_node->fib_entry,
6304 				  struct mlxsw_sp_fib4_entry, common);
6305 	if (fib4_entry->tb_id == fen_info->tb_id &&
6306 	    fib4_entry->dscp == fen_info->dscp &&
6307 	    fib4_entry->type == fen_info->type &&
6308 	    fib4_entry->fi == fen_info->fi)
6309 		return fib4_entry;
6310 
6311 	return NULL;
6312 }
6313 
6314 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6315 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6316 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6317 	.key_len = sizeof(struct mlxsw_sp_fib_key),
6318 	.automatic_shrinking = true,
6319 };
6320 
6321 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6322 				    struct mlxsw_sp_fib_node *fib_node)
6323 {
6324 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6325 				      mlxsw_sp_fib_ht_params);
6326 }
6327 
6328 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6329 				     struct mlxsw_sp_fib_node *fib_node)
6330 {
6331 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6332 			       mlxsw_sp_fib_ht_params);
6333 }
6334 
6335 static struct mlxsw_sp_fib_node *
6336 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6337 			 size_t addr_len, unsigned char prefix_len)
6338 {
6339 	struct mlxsw_sp_fib_key key;
6340 
6341 	memset(&key, 0, sizeof(key));
6342 	memcpy(key.addr, addr, addr_len);
6343 	key.prefix_len = prefix_len;
6344 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6345 }
6346 
6347 static struct mlxsw_sp_fib_node *
6348 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6349 			 size_t addr_len, unsigned char prefix_len)
6350 {
6351 	struct mlxsw_sp_fib_node *fib_node;
6352 
6353 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6354 	if (!fib_node)
6355 		return NULL;
6356 
6357 	list_add(&fib_node->list, &fib->node_list);
6358 	memcpy(fib_node->key.addr, addr, addr_len);
6359 	fib_node->key.prefix_len = prefix_len;
6360 
6361 	return fib_node;
6362 }
6363 
6364 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6365 {
6366 	list_del(&fib_node->list);
6367 	kfree(fib_node);
6368 }
6369 
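/* Make sure the LPM tree used for this protocol covers the node's prefix
 * length. If it does not, get a tree whose prefix usage also includes this
 * length and rebind all virtual routers to it.
 */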
6370 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6371 				      struct mlxsw_sp_fib_node *fib_node)
6372 {
6373 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6374 	struct mlxsw_sp_fib *fib = fib_node->fib;
6375 	struct mlxsw_sp_lpm_tree *lpm_tree;
6376 	int err;
6377 
6378 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
6379 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6380 		goto out;
6381 
6382 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6383 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6384 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6385 					 fib->proto);
6386 	if (IS_ERR(lpm_tree))
6387 		return PTR_ERR(lpm_tree);
6388 
6389 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6390 	if (err)
6391 		goto err_lpm_tree_replace;
6392 
6393 out:
6394 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6395 	return 0;
6396 
6397 err_lpm_tree_replace:
6398 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6399 	return err;
6400 }
6401 
6402 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6403 					 struct mlxsw_sp_fib_node *fib_node)
6404 {
6405 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6406 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6407 	struct mlxsw_sp_fib *fib = fib_node->fib;
6408 	int err;
6409 
6410 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6411 		return;
6412 	/* Try to construct a new LPM tree from the current prefix usage
6413 	 * minus the now-unused prefix length. If we fail, keep the old tree.
6414 	 */
6415 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6416 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6417 				    fib_node->key.prefix_len);
6418 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6419 					 fib->proto);
6420 	if (IS_ERR(lpm_tree))
6421 		return;
6422 
6423 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6424 	if (err)
6425 		goto err_lpm_tree_replace;
6426 
6427 	return;
6428 
6429 err_lpm_tree_replace:
6430 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6431 }
6432 
6433 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6434 				  struct mlxsw_sp_fib_node *fib_node,
6435 				  struct mlxsw_sp_fib *fib)
6436 {
6437 	int err;
6438 
6439 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
6440 	if (err)
6441 		return err;
6442 	fib_node->fib = fib;
6443 
6444 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6445 	if (err)
6446 		goto err_fib_lpm_tree_link;
6447 
6448 	return 0;
6449 
6450 err_fib_lpm_tree_link:
6451 	fib_node->fib = NULL;
6452 	mlxsw_sp_fib_node_remove(fib, fib_node);
6453 	return err;
6454 }
6455 
6456 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6457 				   struct mlxsw_sp_fib_node *fib_node)
6458 {
6459 	struct mlxsw_sp_fib *fib = fib_node->fib;
6460 
6461 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6462 	fib_node->fib = NULL;
6463 	mlxsw_sp_fib_node_remove(fib, fib_node);
6464 }
6465 
6466 static struct mlxsw_sp_fib_node *
6467 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6468 		      size_t addr_len, unsigned char prefix_len,
6469 		      enum mlxsw_sp_l3proto proto)
6470 {
6471 	struct mlxsw_sp_fib_node *fib_node;
6472 	struct mlxsw_sp_fib *fib;
6473 	struct mlxsw_sp_vr *vr;
6474 	int err;
6475 
6476 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6477 	if (IS_ERR(vr))
6478 		return ERR_CAST(vr);
6479 	fib = mlxsw_sp_vr_fib(vr, proto);
6480 
6481 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6482 	if (fib_node)
6483 		return fib_node;
6484 
6485 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6486 	if (!fib_node) {
6487 		err = -ENOMEM;
6488 		goto err_fib_node_create;
6489 	}
6490 
6491 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6492 	if (err)
6493 		goto err_fib_node_init;
6494 
6495 	return fib_node;
6496 
6497 err_fib_node_init:
6498 	mlxsw_sp_fib_node_destroy(fib_node);
6499 err_fib_node_create:
6500 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6501 	return ERR_PTR(err);
6502 }
6503 
6504 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6505 				  struct mlxsw_sp_fib_node *fib_node)
6506 {
6507 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6508 
6509 	if (fib_node->fib_entry)
6510 		return;
6511 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6512 	mlxsw_sp_fib_node_destroy(fib_node);
6513 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6514 }
6515 
6516 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6517 					struct mlxsw_sp_fib_entry *fib_entry)
6518 {
6519 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6520 	int err;
6521 
6522 	fib_node->fib_entry = fib_entry;
6523 
6524 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
6525 	if (err)
6526 		goto err_fib_entry_update;
6527 
6528 	return 0;
6529 
6530 err_fib_entry_update:
6531 	fib_node->fib_entry = NULL;
6532 	return err;
6533 }
6534 
6535 static void
6536 mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6537 			       struct mlxsw_sp_fib_entry *fib_entry)
6538 {
6539 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6540 
6541 	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
6542 	fib_node->fib_entry = NULL;
6543 }
6544 
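/* The local table is consulted before the main table and both are squashed
 * into one virtual router in the device, so a main table route must not
 * replace an existing local table route for the same prefix.
 */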
6545 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6546 {
6547 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6548 	struct mlxsw_sp_fib4_entry *fib4_replaced;
6549 
6550 	if (!fib_node->fib_entry)
6551 		return true;
6552 
6553 	fib4_replaced = container_of(fib_node->fib_entry,
6554 				     struct mlxsw_sp_fib4_entry, common);
6555 	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6556 	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
6557 		return false;
6558 
6559 	return true;
6560 }
6561 
6562 static int
6563 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6564 			     const struct fib_entry_notifier_info *fen_info)
6565 {
6566 	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6567 	struct mlxsw_sp_fib_entry *replaced;
6568 	struct mlxsw_sp_fib_node *fib_node;
6569 	int err;
6570 
6571 	if (fen_info->fi->nh &&
6572 	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6573 		return 0;
6574 
6575 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6576 					 &fen_info->dst, sizeof(fen_info->dst),
6577 					 fen_info->dst_len,
6578 					 MLXSW_SP_L3_PROTO_IPV4);
6579 	if (IS_ERR(fib_node)) {
6580 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6581 		return PTR_ERR(fib_node);
6582 	}
6583 
6584 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6585 	if (IS_ERR(fib4_entry)) {
6586 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6587 		err = PTR_ERR(fib4_entry);
6588 		goto err_fib4_entry_create;
6589 	}
6590 
6591 	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6592 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6593 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6594 		return 0;
6595 	}
6596 
6597 	replaced = fib_node->fib_entry;
6598 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
6599 	if (err) {
6600 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6601 		goto err_fib_node_entry_link;
6602 	}
6603 
6604 	/* Nothing to replace */
6605 	if (!replaced)
6606 		return 0;
6607 
6608 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6609 	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6610 				     common);
6611 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6612 
6613 	return 0;
6614 
6615 err_fib_node_entry_link:
6616 	fib_node->fib_entry = replaced;
6617 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6618 err_fib4_entry_create:
6619 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6620 	return err;
6621 }
6622 
6623 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6624 				     struct fib_entry_notifier_info *fen_info)
6625 {
6626 	struct mlxsw_sp_fib4_entry *fib4_entry;
6627 	struct mlxsw_sp_fib_node *fib_node;
6628 
6629 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6630 	if (!fib4_entry)
6631 		return;
6632 	fib_node = fib4_entry->common.fib_node;
6633 
6634 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
6635 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6636 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6637 }
6638 
6639 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6640 {
6641 	/* Multicast routes aren't supported, so ignore them. Neighbour
6642 	 * Discovery packets are specifically trapped.
6643 	 */
6644 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6645 		return true;
6646 
6647 	/* Cloned routes are irrelevant in the forwarding path. */
6648 	if (rt->fib6_flags & RTF_CACHE)
6649 		return true;
6650 
6651 	return false;
6652 }
6653 
6654 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6655 {
6656 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6657 
6658 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6659 	if (!mlxsw_sp_rt6)
6660 		return ERR_PTR(-ENOMEM);
6661 
6662 	/* In case of route replace, the replaced route is deleted with
6663 	 * no notification. Take a reference to prevent accessing freed
6664 	 * memory.
6665 	 */
6666 	mlxsw_sp_rt6->rt = rt;
6667 	fib6_info_hold(rt);
6668 
6669 	return mlxsw_sp_rt6;
6670 }
6671 
6672 #if IS_ENABLED(CONFIG_IPV6)
6673 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6674 {
6675 	fib6_info_release(rt);
6676 }
6677 #else
6678 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6679 {
6680 }
6681 #endif
6682 
6683 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6684 {
6685 	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6686 
6687 	if (!mlxsw_sp_rt6->rt->nh)
6688 		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6689 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6690 	kfree(mlxsw_sp_rt6);
6691 }
6692 
6693 static struct fib6_info *
6694 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
6695 {
6696 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
6697 				list)->rt;
6698 }
6699 
6700 static struct mlxsw_sp_rt6 *
6701 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6702 			    const struct fib6_info *rt)
6703 {
6704 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6705 
6706 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6707 		if (mlxsw_sp_rt6->rt == rt)
6708 			return mlxsw_sp_rt6;
6709 	}
6710 
6711 	return NULL;
6712 }
6713 
6714 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6715 					const struct fib6_info *rt,
6716 					enum mlxsw_sp_ipip_type *ret)
6717 {
6718 	return rt->fib6_nh->fib_nh_dev &&
6719 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6720 }
6721 
6722 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
6723 				  struct mlxsw_sp_nexthop_group *nh_grp,
6724 				  struct mlxsw_sp_nexthop *nh,
6725 				  const struct fib6_info *rt)
6726 {
6727 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
6728 	int err;
6729 
6730 	nh->nhgi = nh_grp->nhgi;
6731 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
6732 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
6733 #if IS_ENABLED(CONFIG_IPV6)
6734 	nh->neigh_tbl = &nd_tbl;
6735 #endif
6736 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
6737 
6738 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
6739 
6740 	if (!dev)
6741 		return 0;
6742 	nh->ifindex = dev->ifindex;
6743 
6744 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
6745 	if (err)
6746 		goto err_nexthop_type_init;
6747 
6748 	return 0;
6749 
6750 err_nexthop_type_init:
6751 	list_del(&nh->router_list_node);
6752 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6753 	return err;
6754 }
6755 
6756 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
6757 				   struct mlxsw_sp_nexthop *nh)
6758 {
6759 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
6760 	list_del(&nh->router_list_node);
6761 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6762 }
6763 
6764 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
6765 				    const struct fib6_info *rt)
6766 {
6767 	return rt->fib6_nh->fib_nh_gw_family ||
6768 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
6769 }
6770 
6771 static int
6772 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
6773 				  struct mlxsw_sp_nexthop_group *nh_grp,
6774 				  struct mlxsw_sp_fib6_entry *fib6_entry)
6775 {
6776 	struct mlxsw_sp_nexthop_group_info *nhgi;
6777 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6778 	struct mlxsw_sp_nexthop *nh;
6779 	int err, i;
6780 
6781 	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
6782 		       GFP_KERNEL);
6783 	if (!nhgi)
6784 		return -ENOMEM;
6785 	nh_grp->nhgi = nhgi;
6786 	nhgi->nh_grp = nh_grp;
6787 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
6788 					struct mlxsw_sp_rt6, list);
6789 	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
6790 	nhgi->count = fib6_entry->nrt6;
6791 	for (i = 0; i < nhgi->count; i++) {
6792 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
6793 
6794 		nh = &nhgi->nexthops[i];
6795 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
6796 		if (err)
6797 			goto err_nexthop6_init;
6798 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
6799 	}
6801 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
6802 	if (err)
6803 		goto err_group_inc;
6804 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6805 	if (err)
6806 		goto err_group_refresh;
6807 
6808 	return 0;
6809 
6810 err_group_refresh:
6811 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6812 err_group_inc:
6813 	i = nhgi->count;
6814 err_nexthop6_init:
6815 	for (i--; i >= 0; i--) {
6816 		nh = &nhgi->nexthops[i];
6817 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6818 	}
6819 	kfree(nhgi);
6820 	return err;
6821 }
6822 
6823 static void
6824 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
6825 				  struct mlxsw_sp_nexthop_group *nh_grp)
6826 {
6827 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
6828 	int i;
6829 
6830 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6831 	for (i = nhgi->count - 1; i >= 0; i--) {
6832 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
6833 
6834 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6835 	}
6836 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6837 	WARN_ON_ONCE(nhgi->adj_index_valid);
6838 	kfree(nhgi);
6839 }
6840 
6841 static struct mlxsw_sp_nexthop_group *
6842 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
6843 			       struct mlxsw_sp_fib6_entry *fib6_entry)
6844 {
6845 	struct mlxsw_sp_nexthop_group *nh_grp;
6846 	int err;
6847 
6848 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
6849 	if (!nh_grp)
6850 		return ERR_PTR(-ENOMEM);
6851 	INIT_LIST_HEAD(&nh_grp->vr_list);
6852 	err = rhashtable_init(&nh_grp->vr_ht,
6853 			      &mlxsw_sp_nexthop_group_vr_ht_params);
6854 	if (err)
6855 		goto err_nexthop_group_vr_ht_init;
6856 	INIT_LIST_HEAD(&nh_grp->fib_list);
6857 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
6858 
6859 	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
6860 	if (err)
6861 		goto err_nexthop_group_info_init;
6862 
6863 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
6864 	if (err)
6865 		goto err_nexthop_group_insert;
6866 
6867 	nh_grp->can_destroy = true;
6868 
6869 	return nh_grp;
6870 
6871 err_nexthop_group_insert:
6872 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6873 err_nexthop_group_info_init:
6874 	rhashtable_destroy(&nh_grp->vr_ht);
6875 err_nexthop_group_vr_ht_init:
6876 	kfree(nh_grp);
6877 	return ERR_PTR(err);
6878 }
6879 
6880 static void
6881 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
6882 				struct mlxsw_sp_nexthop_group *nh_grp)
6883 {
6884 	if (!nh_grp->can_destroy)
6885 		return;
6886 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
6887 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6888 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
6889 	rhashtable_destroy(&nh_grp->vr_ht);
6890 	kfree(nh_grp);
6891 }
6892 
6893 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
6894 				       struct mlxsw_sp_fib6_entry *fib6_entry)
6895 {
6896 	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
6897 	struct mlxsw_sp_nexthop_group *nh_grp;
6898 
6899 	if (rt->nh) {
6900 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
6901 							   rt->nh->id);
6902 		if (WARN_ON_ONCE(!nh_grp))
6903 			return -EINVAL;
6904 		goto out;
6905 	}
6906 
6907 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
6908 	if (!nh_grp) {
6909 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
6910 		if (IS_ERR(nh_grp))
6911 			return PTR_ERR(nh_grp);
6912 	}
6913 
6914 	/* The route and the nexthop are described by the same struct, so we
6915 	 * need to update the nexthop offload indication for the new route.
6916 	 */
6917 	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
6918 
6919 out:
6920 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6921 		      &nh_grp->fib_list);
6922 	fib6_entry->common.nh_group = nh_grp;
6923 
6924 	return 0;
6925 }
6926 
6927 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
6928 					struct mlxsw_sp_fib_entry *fib_entry)
6929 {
6930 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
6931 
6932 	list_del(&fib_entry->nexthop_group_node);
6933 	if (!list_empty(&nh_grp->fib_list))
6934 		return;
6935 
6936 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
6937 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
6938 		return;
6939 	}
6940 
6941 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
6942 }
6943 
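/* An IPv6 nexthop group is shared by entries with the same set of nexthops,
 * so when routes are appended to or deleted from a multipath entry, the entry
 * must be moved to a group matching the new set. The old group is destroyed
 * if the entry was its last user.
 */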
6944 static int
6945 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
6946 			       struct mlxsw_sp_fib6_entry *fib6_entry)
6947 {
6948 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
6949 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6950 	int err;
6951 
6952 	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
6953 	fib6_entry->common.nh_group = NULL;
6954 	list_del(&fib6_entry->common.nexthop_group_node);
6955 
6956 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6957 	if (err)
6958 		goto err_nexthop6_group_get;
6959 
6960 	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
6961 					     fib_node->fib);
6962 	if (err)
6963 		goto err_nexthop_group_vr_link;
6964 
6965 	/* If this entry is offloaded, the adjacency index currently
6966 	 * associated with it in the device's table is that of the old
6967 	 * group. Start using the new one instead.
6968 	 */
6969 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
6970 	if (err)
6971 		goto err_fib_entry_update;
6972 
6973 	if (list_empty(&old_nh_grp->fib_list))
6974 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
6975 
6976 	return 0;
6977 
6978 err_fib_entry_update:
6979 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6980 					 fib_node->fib);
6981 err_nexthop_group_vr_link:
6982 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6983 err_nexthop6_group_get:
6984 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6985 		      &old_nh_grp->fib_list);
6986 	fib6_entry->common.nh_group = old_nh_grp;
6987 	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
6988 	return err;
6989 }
6990 
6991 static int
6992 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
6993 				struct mlxsw_sp_fib6_entry *fib6_entry,
6994 				struct fib6_info **rt_arr, unsigned int nrt6)
6995 {
6996 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6997 	int err, i;
6998 
6999 	for (i = 0; i < nrt6; i++) {
7000 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7001 		if (IS_ERR(mlxsw_sp_rt6)) {
7002 			err = PTR_ERR(mlxsw_sp_rt6);
7003 			goto err_rt6_unwind;
7004 		}
7005 
7006 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7007 		fib6_entry->nrt6++;
7008 	}
7009 
7010 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
7011 	if (err)
7012 		goto err_rt6_unwind;
7013 
7014 	return 0;
7015 
7016 err_rt6_unwind:
7017 	for (; i > 0; i--) {
7018 		fib6_entry->nrt6--;
7019 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7020 					       struct mlxsw_sp_rt6, list);
7021 		list_del(&mlxsw_sp_rt6->list);
7022 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7023 	}
7024 	return err;
7025 }
7026 
7027 static void
7028 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
7029 				struct mlxsw_sp_fib6_entry *fib6_entry,
7030 				struct fib6_info **rt_arr, unsigned int nrt6)
7031 {
7032 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7033 	int i;
7034 
7035 	for (i = 0; i < nrt6; i++) {
7036 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
7037 							   rt_arr[i]);
7038 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
7039 			continue;
7040 
7041 		fib6_entry->nrt6--;
7042 		list_del(&mlxsw_sp_rt6->list);
7043 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7044 	}
7045 
7046 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
7047 }
7048 
7049 static int
7050 mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
7051 				   struct mlxsw_sp_fib_entry *fib_entry,
7052 				   const struct fib6_info *rt)
7053 {
7054 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
7055 	union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
7056 	u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
7057 	struct mlxsw_sp_router *router = mlxsw_sp->router;
7058 	int ifindex = nhgi->nexthops[0].ifindex;
7059 	struct mlxsw_sp_ipip_entry *ipip_entry;
7060 
7061 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7062 	ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
7063 						       MLXSW_SP_L3_PROTO_IPV6,
7064 						       dip);
7065 
7066 	if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
7067 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
7068 		return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
7069 						     ipip_entry);
7070 	}
7071 	if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
7072 					 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
7073 		u32 tunnel_index;
7074 
7075 		tunnel_index = router->nve_decap_config.tunnel_index;
7076 		fib_entry->decap.tunnel_index = tunnel_index;
7077 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
7078 	}
7079 
7080 	return 0;
7081 }
7082 
7083 static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
7084 					struct mlxsw_sp_fib_entry *fib_entry,
7085 					const struct fib6_info *rt)
7086 {
7087 	if (rt->fib6_flags & RTF_LOCAL)
7088 		return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
7089 							  rt);
7090 	if (rt->fib6_flags & RTF_ANYCAST)
7091 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7092 	else if (rt->fib6_type == RTN_BLACKHOLE)
7093 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
7094 	else if (rt->fib6_flags & RTF_REJECT)
7095 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
7096 	else if (fib_entry->nh_group->nhgi->gateway)
7097 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
7098 	else
7099 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
7100 
7101 	return 0;
7102 }
7103 
7104 static void
7105 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
7106 {
7107 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
7108 
7109 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
7110 				 list) {
7111 		fib6_entry->nrt6--;
7112 		list_del(&mlxsw_sp_rt6->list);
7113 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7114 	}
7115 }
7116 
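/* Create a FIB entry for an IPv6 route and its siblings: link one
 * mlxsw_sp_rt6 per fib6_info, resolve the nexthop group, bind the group
 * to the virtual router of the FIB node and derive the entry type from
 * the first route.
 */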
7117 static struct mlxsw_sp_fib6_entry *
7118 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
7119 			   struct mlxsw_sp_fib_node *fib_node,
7120 			   struct fib6_info **rt_arr, unsigned int nrt6)
7121 {
7122 	struct mlxsw_sp_fib6_entry *fib6_entry;
7123 	struct mlxsw_sp_fib_entry *fib_entry;
7124 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7125 	int err, i;
7126 
7127 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
7128 	if (!fib6_entry)
7129 		return ERR_PTR(-ENOMEM);
7130 	fib_entry = &fib6_entry->common;
7131 
7132 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
7133 
7134 	for (i = 0; i < nrt6; i++) {
7135 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7136 		if (IS_ERR(mlxsw_sp_rt6)) {
7137 			err = PTR_ERR(mlxsw_sp_rt6);
7138 			goto err_rt6_unwind;
7139 		}
7140 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7141 		fib6_entry->nrt6++;
7142 	}
7143 
7144 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
7145 	if (err)
7146 		goto err_rt6_unwind;
7147 
7148 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
7149 					     fib_node->fib);
7150 	if (err)
7151 		goto err_nexthop_group_vr_link;
7152 
7153 	err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
7154 	if (err)
7155 		goto err_fib6_entry_type_set;
7156 
7157 	fib_entry->fib_node = fib_node;
7158 
7159 	return fib6_entry;
7160 
7161 err_fib6_entry_type_set:
7162 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
7163 err_nexthop_group_vr_link:
7164 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
7165 err_rt6_unwind:
7166 	for (; i > 0; i--) {
7167 		fib6_entry->nrt6--;
7168 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7169 					       struct mlxsw_sp_rt6, list);
7170 		list_del(&mlxsw_sp_rt6->list);
7171 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7172 	}
7173 	kfree(fib6_entry);
7174 	return ERR_PTR(err);
7175 }
7176 
7177 static void
7178 mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
7179 			       struct mlxsw_sp_fib6_entry *fib6_entry)
7180 {
7181 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
7182 }
7183 
7184 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
7185 					struct mlxsw_sp_fib6_entry *fib6_entry)
7186 {
7187 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7188 
7189 	mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
7190 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
7191 					 fib_node->fib);
7192 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
7193 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
7194 	WARN_ON(fib6_entry->nrt6);
7195 	kfree(fib6_entry);
7196 }
7197 
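/* Find the FIB entry that an IPv6 route was programmed to. The entry must
 * belong to the same table, carry the same metric and actually list the
 * route in question.
 */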
7198 static struct mlxsw_sp_fib6_entry *
7199 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
7200 			   const struct fib6_info *rt)
7201 {
7202 	struct mlxsw_sp_fib6_entry *fib6_entry;
7203 	struct mlxsw_sp_fib_node *fib_node;
7204 	struct mlxsw_sp_fib *fib;
7205 	struct fib6_info *cmp_rt;
7206 	struct mlxsw_sp_vr *vr;
7207 
7208 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
7209 	if (!vr)
7210 		return NULL;
7211 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
7212 
7213 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
7214 					    sizeof(rt->fib6_dst.addr),
7215 					    rt->fib6_dst.plen);
7216 	if (!fib_node)
7217 		return NULL;
7218 
7219 	fib6_entry = container_of(fib_node->fib_entry,
7220 				  struct mlxsw_sp_fib6_entry, common);
7221 	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7222 	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
7223 	    rt->fib6_metric == cmp_rt->fib6_metric &&
7224 	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
7225 		return fib6_entry;
7226 
7227 	return NULL;
7228 }
7229 
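/* The local table is consulted before the main table, so a route from the
 * main table must not replace an entry that was created from the local
 * table for the same prefix.
 */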
7230 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7231 {
7232 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7233 	struct mlxsw_sp_fib6_entry *fib6_replaced;
7234 	struct fib6_info *rt, *rt_replaced;
7235 
7236 	if (!fib_node->fib_entry)
7237 		return true;
7238 
7239 	fib6_replaced = container_of(fib_node->fib_entry,
7240 				     struct mlxsw_sp_fib6_entry,
7241 				     common);
7242 	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7243 	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7244 	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7245 	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7246 		return false;
7247 
7248 	return true;
7249 }
7250 
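/* Replace the FIB entry of a prefix with one created from the notified
 * route and its siblings. Source-specific routes are vetoed and routes
 * that should not be offloaded are ignored.
 */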
7251 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
7252 					struct fib6_info **rt_arr,
7253 					unsigned int nrt6)
7254 {
7255 	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
7256 	struct mlxsw_sp_fib_entry *replaced;
7257 	struct mlxsw_sp_fib_node *fib_node;
7258 	struct fib6_info *rt = rt_arr[0];
7259 	int err;
7260 
7261 	if (rt->fib6_src.plen)
7262 		return -EINVAL;
7263 
7264 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7265 		return 0;
7266 
7267 	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
7268 		return 0;
7269 
7270 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7271 					 &rt->fib6_dst.addr,
7272 					 sizeof(rt->fib6_dst.addr),
7273 					 rt->fib6_dst.plen,
7274 					 MLXSW_SP_L3_PROTO_IPV6);
7275 	if (IS_ERR(fib_node))
7276 		return PTR_ERR(fib_node);
7277 
7278 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
7279 						nrt6);
7280 	if (IS_ERR(fib6_entry)) {
7281 		err = PTR_ERR(fib6_entry);
7282 		goto err_fib6_entry_create;
7283 	}
7284 
7285 	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
7286 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7287 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7288 		return 0;
7289 	}
7290 
7291 	replaced = fib_node->fib_entry;
7292 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
7293 	if (err)
7294 		goto err_fib_node_entry_link;
7295 
7296 	/* Nothing to replace */
7297 	if (!replaced)
7298 		return 0;
7299 
7300 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
7301 	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
7302 				     common);
7303 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
7304 
7305 	return 0;
7306 
7307 err_fib_node_entry_link:
7308 	fib_node->fib_entry = replaced;
7309 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7310 err_fib6_entry_create:
7311 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7312 	return err;
7313 }
7314 
7315 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
7316 				       struct fib6_info **rt_arr,
7317 				       unsigned int nrt6)
7318 {
7319 	struct mlxsw_sp_fib6_entry *fib6_entry;
7320 	struct mlxsw_sp_fib_node *fib_node;
7321 	struct fib6_info *rt = rt_arr[0];
7322 	int err;
7323 
7324 	if (rt->fib6_src.plen)
7325 		return -EINVAL;
7326 
7327 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7328 		return 0;
7329 
7330 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7331 					 &rt->fib6_dst.addr,
7332 					 sizeof(rt->fib6_dst.addr),
7333 					 rt->fib6_dst.plen,
7334 					 MLXSW_SP_L3_PROTO_IPV6);
7335 	if (IS_ERR(fib_node))
7336 		return PTR_ERR(fib_node);
7337 
7338 	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
7339 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7340 		return -EINVAL;
7341 	}
7342 
7343 	fib6_entry = container_of(fib_node->fib_entry,
7344 				  struct mlxsw_sp_fib6_entry, common);
7345 	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
7346 					      nrt6);
7347 	if (err)
7348 		goto err_fib6_entry_nexthop_add;
7349 
7350 	return 0;
7351 
7352 err_fib6_entry_nexthop_add:
7353 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7354 	return err;
7355 }
7356 
7357 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
7358 				     struct fib6_info **rt_arr,
7359 				     unsigned int nrt6)
7360 {
7361 	struct mlxsw_sp_fib6_entry *fib6_entry;
7362 	struct mlxsw_sp_fib_node *fib_node;
7363 	struct fib6_info *rt = rt_arr[0];
7364 
7365 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7366 		return;
7367 
7368 	/* Multipath routes are first added to the FIB trie and only then
7369 	 * notified. If we vetoed the addition, we will get a delete
7370 	 * notification for a route we do not have. Therefore, do not warn
7371 	 * if the route was not found.
7372 	 */
7373 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
7374 	if (!fib6_entry)
7375 		return;
7376 
7377 	/* If not all the nexthops are deleted, then only reduce the nexthop
7378 	 * group.
7379 	 */
7380 	if (nrt6 != fib6_entry->nrt6) {
7381 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
7382 						nrt6);
7383 		return;
7384 	}
7385 
7386 	fib_node = fib6_entry->common.fib_node;
7387 
7388 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
7389 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7390 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7391 }
7392 
7393 static struct mlxsw_sp_mr_table *
7394 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7395 {
7396 	if (family == RTNL_FAMILY_IPMR)
7397 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7398 	else
7399 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7400 }
7401 
7402 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
7403 				     struct mfc_entry_notifier_info *men_info,
7404 				     bool replace)
7405 {
7406 	struct mlxsw_sp_mr_table *mrt;
7407 	struct mlxsw_sp_vr *vr;
7408 
7409 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
7410 	if (IS_ERR(vr))
7411 		return PTR_ERR(vr);
7412 
7413 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7414 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
7415 }
7416 
7417 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
7418 				      struct mfc_entry_notifier_info *men_info)
7419 {
7420 	struct mlxsw_sp_mr_table *mrt;
7421 	struct mlxsw_sp_vr *vr;
7422 
7423 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
7424 	if (WARN_ON(!vr))
7425 		return;
7426 
7427 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7428 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
7429 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7430 }
7431 
7432 static int
7433 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
7434 			      struct vif_entry_notifier_info *ven_info)
7435 {
7436 	struct mlxsw_sp_mr_table *mrt;
7437 	struct mlxsw_sp_rif *rif;
7438 	struct mlxsw_sp_vr *vr;
7439 
7440 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
7441 	if (IS_ERR(vr))
7442 		return PTR_ERR(vr);
7443 
7444 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7445 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
7446 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
7447 				   ven_info->vif_index,
7448 				   ven_info->vif_flags, rif);
7449 }
7450 
7451 static void
7452 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
7453 			      struct vif_entry_notifier_info *ven_info)
7454 {
7455 	struct mlxsw_sp_mr_table *mrt;
7456 	struct mlxsw_sp_vr *vr;
7457 
7458 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
7459 	if (WARN_ON(!vr))
7460 		return;
7461 
7462 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7463 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
7464 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7465 }
7466 
7467 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
7468 				     struct mlxsw_sp_fib_node *fib_node)
7469 {
7470 	struct mlxsw_sp_fib4_entry *fib4_entry;
7471 
7472 	fib4_entry = container_of(fib_node->fib_entry,
7473 				  struct mlxsw_sp_fib4_entry, common);
7474 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7475 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
7476 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7477 }
7478 
7479 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
7480 				     struct mlxsw_sp_fib_node *fib_node)
7481 {
7482 	struct mlxsw_sp_fib6_entry *fib6_entry;
7483 
7484 	fib6_entry = container_of(fib_node->fib_entry,
7485 				  struct mlxsw_sp_fib6_entry, common);
7486 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7487 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7488 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7489 }
7490 
7491 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
7492 				    struct mlxsw_sp_fib_node *fib_node)
7493 {
7494 	switch (fib_node->fib->proto) {
7495 	case MLXSW_SP_L3_PROTO_IPV4:
7496 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
7497 		break;
7498 	case MLXSW_SP_L3_PROTO_IPV6:
7499 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
7500 		break;
7501 	}
7502 }
7503 
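/* Flushing the last FIB node may drop the last reference on the FIB and
 * free its node list, so test for the end of the list before the node is
 * flushed.
 */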
7504 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
7505 				  struct mlxsw_sp_vr *vr,
7506 				  enum mlxsw_sp_l3proto proto)
7507 {
7508 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
7509 	struct mlxsw_sp_fib_node *fib_node, *tmp;
7510 
7511 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
7512 		bool do_break = &tmp->list == &fib->node_list;
7513 
7514 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
7515 		if (do_break)
7516 			break;
7517 	}
7518 }
7519 
7520 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
7521 {
7522 	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
7523 	int i, j;
7524 
7525 	for (i = 0; i < max_vrs; i++) {
7526 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7527 
7528 		if (!mlxsw_sp_vr_is_used(vr))
7529 			continue;
7530 
7531 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
7532 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
7533 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
7534 
7535 		/* If the virtual router was only used for IPv4, then it is
7536 		 * no longer used.
7537 		 */
7538 		if (!mlxsw_sp_vr_is_used(vr))
7539 			continue;
7540 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
7541 	}
7542 }
7543 
7544 struct mlxsw_sp_fib6_event_work {
7545 	struct fib6_info **rt_arr;
7546 	unsigned int nrt6;
7547 };
7548 
7549 struct mlxsw_sp_fib_event_work {
7550 	struct work_struct work;
7551 	netdevice_tracker dev_tracker;
7552 	union {
7553 		struct mlxsw_sp_fib6_event_work fib6_work;
7554 		struct fib_entry_notifier_info fen_info;
7555 		struct fib_rule_notifier_info fr_info;
7556 		struct fib_nh_notifier_info fnh_info;
7557 		struct mfc_entry_notifier_info men_info;
7558 		struct vif_entry_notifier_info ven_info;
7559 	};
7560 	struct mlxsw_sp *mlxsw_sp;
7561 	unsigned long event;
7562 };
7563 
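/* An IPv6 multipath route is notified as a single fib6_info plus a list
 * of siblings. Snapshot all of them into an array and take a reference on
 * each, so that they remain valid until the deferred work item processes
 * the event.
 */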
7564 static int
7565 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
7566 			       struct fib6_entry_notifier_info *fen6_info)
7567 {
7568 	struct fib6_info *rt = fen6_info->rt;
7569 	struct fib6_info **rt_arr;
7570 	struct fib6_info *iter;
7571 	unsigned int nrt6;
7572 	int i = 0;
7573 
7574 	nrt6 = fen6_info->nsiblings + 1;
7575 
7576 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7577 	if (!rt_arr)
7578 		return -ENOMEM;
7579 
7580 	fib6_work->rt_arr = rt_arr;
7581 	fib6_work->nrt6 = nrt6;
7582 
7583 	rt_arr[0] = rt;
7584 	fib6_info_hold(rt);
7585 
7586 	if (!fen6_info->nsiblings)
7587 		return 0;
7588 
7589 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7590 		if (i == fen6_info->nsiblings)
7591 			break;
7592 
7593 		rt_arr[i + 1] = iter;
7594 		fib6_info_hold(iter);
7595 		i++;
7596 	}
7597 	WARN_ON_ONCE(i != fen6_info->nsiblings);
7598 
7599 	return 0;
7600 }
7601 
7602 static void
7603 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
7604 {
7605 	int i;
7606 
7607 	for (i = 0; i < fib6_work->nrt6; i++)
7608 		mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
7609 	kfree(fib6_work->rt_arr);
7610 }
7611 
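/* FIB notifications are emitted in an atomic context, so the actual
 * programming of the device is deferred to a work item, which performs it
 * under the router lock.
 */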
7612 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
7613 {
7614 	struct mlxsw_sp_fib_event_work *fib_work =
7615 		container_of(work, struct mlxsw_sp_fib_event_work, work);
7616 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7617 	int err;
7618 
7619 	mutex_lock(&mlxsw_sp->router->lock);
7620 	mlxsw_sp_span_respin(mlxsw_sp);
7621 
7622 	switch (fib_work->event) {
7623 	case FIB_EVENT_ENTRY_REPLACE:
7624 		err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
7625 						   &fib_work->fen_info);
7626 		if (err) {
7627 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7628 			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7629 							      &fib_work->fen_info);
7630 		}
7631 		fib_info_put(fib_work->fen_info.fi);
7632 		break;
7633 	case FIB_EVENT_ENTRY_DEL:
7634 		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
7635 		fib_info_put(fib_work->fen_info.fi);
7636 		break;
7637 	case FIB_EVENT_NH_ADD:
7638 	case FIB_EVENT_NH_DEL:
7639 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
7640 					fib_work->fnh_info.fib_nh);
7641 		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
7642 		break;
7643 	}
7644 	mutex_unlock(&mlxsw_sp->router->lock);
7645 	kfree(fib_work);
7646 }
7647 
7648 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
7649 {
7650 	struct mlxsw_sp_fib_event_work *fib_work =
7651 		    container_of(work, struct mlxsw_sp_fib_event_work, work);
7652 	struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
7653 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7654 	int err;
7655 
7656 	mutex_lock(&mlxsw_sp->router->lock);
7657 	mlxsw_sp_span_respin(mlxsw_sp);
7658 
7659 	switch (fib_work->event) {
7660 	case FIB_EVENT_ENTRY_REPLACE:
7661 		err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
7662 						   fib6_work->rt_arr,
7663 						   fib6_work->nrt6);
7664 		if (err) {
7665 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7666 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7667 							      fib6_work->rt_arr,
7668 							      fib6_work->nrt6);
7669 		}
7670 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7671 		break;
7672 	case FIB_EVENT_ENTRY_APPEND:
7673 		err = mlxsw_sp_router_fib6_append(mlxsw_sp,
7674 						  fib6_work->rt_arr,
7675 						  fib6_work->nrt6);
7676 		if (err) {
7677 			dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
7678 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7679 							      fib6_work->rt_arr,
7680 							      fib6_work->nrt6);
7681 		}
7682 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7683 		break;
7684 	case FIB_EVENT_ENTRY_DEL:
7685 		mlxsw_sp_router_fib6_del(mlxsw_sp,
7686 					 fib6_work->rt_arr,
7687 					 fib6_work->nrt6);
7688 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7689 		break;
7690 	}
7691 	mutex_unlock(&mlxsw_sp->router->lock);
7692 	kfree(fib_work);
7693 }
7694 
7695 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
7696 {
7697 	struct mlxsw_sp_fib_event_work *fib_work =
7698 		container_of(work, struct mlxsw_sp_fib_event_work, work);
7699 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7700 	bool replace;
7701 	int err;
7702 
7703 	rtnl_lock();
7704 	mutex_lock(&mlxsw_sp->router->lock);
7705 	switch (fib_work->event) {
7706 	case FIB_EVENT_ENTRY_REPLACE:
7707 	case FIB_EVENT_ENTRY_ADD:
7708 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
7709 
7710 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
7711 						replace);
7712 		if (err)
7713 			dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
7714 		mr_cache_put(fib_work->men_info.mfc);
7715 		break;
7716 	case FIB_EVENT_ENTRY_DEL:
7717 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
7718 		mr_cache_put(fib_work->men_info.mfc);
7719 		break;
7720 	case FIB_EVENT_VIF_ADD:
7721 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7722 						    &fib_work->ven_info);
7723 		if (err)
7724 			dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
7725 		netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
7726 		break;
7727 	case FIB_EVENT_VIF_DEL:
7728 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
7729 					      &fib_work->ven_info);
7730 		netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
7731 		break;
7732 	}
7733 	mutex_unlock(&mlxsw_sp->router->lock);
7734 	rtnl_unlock();
7735 	kfree(fib_work);
7736 }
7737 
7738 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
7739 				       struct fib_notifier_info *info)
7740 {
7741 	struct fib_entry_notifier_info *fen_info;
7742 	struct fib_nh_notifier_info *fnh_info;
7743 
7744 	switch (fib_work->event) {
7745 	case FIB_EVENT_ENTRY_REPLACE:
7746 	case FIB_EVENT_ENTRY_DEL:
7747 		fen_info = container_of(info, struct fib_entry_notifier_info,
7748 					info);
7749 		fib_work->fen_info = *fen_info;
7750 		/* Take reference on fib_info to prevent it from being
7751 		 * freed while work is queued. Release it afterwards.
7752 		 */
7753 		fib_info_hold(fib_work->fen_info.fi);
7754 		break;
7755 	case FIB_EVENT_NH_ADD:
7756 	case FIB_EVENT_NH_DEL:
7757 		fnh_info = container_of(info, struct fib_nh_notifier_info,
7758 					info);
7759 		fib_work->fnh_info = *fnh_info;
7760 		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
7761 		break;
7762 	}
7763 }
7764 
7765 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
7766 				      struct fib_notifier_info *info)
7767 {
7768 	struct fib6_entry_notifier_info *fen6_info;
7769 	int err;
7770 
7771 	switch (fib_work->event) {
7772 	case FIB_EVENT_ENTRY_REPLACE:
7773 	case FIB_EVENT_ENTRY_APPEND:
7774 	case FIB_EVENT_ENTRY_DEL:
7775 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
7776 					 info);
7777 		err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
7778 						     fen6_info);
7779 		if (err)
7780 			return err;
7781 		break;
7782 	}
7783 
7784 	return 0;
7785 }
7786 
7787 static void
7788 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
7789 			    struct fib_notifier_info *info)
7790 {
7791 	switch (fib_work->event) {
7792 	case FIB_EVENT_ENTRY_REPLACE:
7793 	case FIB_EVENT_ENTRY_ADD:
7794 	case FIB_EVENT_ENTRY_DEL:
7795 		memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
7796 		mr_cache_hold(fib_work->men_info.mfc);
7797 		break;
7798 	case FIB_EVENT_VIF_ADD:
7799 	case FIB_EVENT_VIF_DEL:
7800 		memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
7801 		netdev_hold(fib_work->ven_info.dev, &fib_work->dev_tracker,
7802 			    GFP_ATOMIC);
7803 		break;
7804 	}
7805 }
7806 
7807 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7808 					  struct fib_notifier_info *info,
7809 					  struct mlxsw_sp *mlxsw_sp)
7810 {
7811 	struct netlink_ext_ack *extack = info->extack;
7812 	struct fib_rule_notifier_info *fr_info;
7813 	struct fib_rule *rule;
7814 	int err = 0;
7815 
7816 	/* nothing to do at the moment */
7817 	if (event == FIB_EVENT_RULE_DEL)
7818 		return 0;
7819 
7820 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
7821 	rule = fr_info->rule;
7822 
7823 	/* Rule only affects locally generated traffic */
7824 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7825 		return 0;
7826 
7827 	switch (info->family) {
7828 	case AF_INET:
7829 		if (!fib4_rule_default(rule) && !rule->l3mdev)
7830 			err = -EOPNOTSUPP;
7831 		break;
7832 	case AF_INET6:
7833 		if (!fib6_rule_default(rule) && !rule->l3mdev)
7834 			err = -EOPNOTSUPP;
7835 		break;
7836 	case RTNL_FAMILY_IPMR:
7837 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
7838 			err = -EOPNOTSUPP;
7839 		break;
7840 	case RTNL_FAMILY_IP6MR:
7841 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7842 			err = -EOPNOTSUPP;
7843 		break;
7844 	}
7845 
7846 	if (err < 0)
7847 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7848 
7849 	return err;
7850 }
7851 
7852 /* Called with rcu_read_lock() */
7853 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
7854 				     unsigned long event, void *ptr)
7855 {
7856 	struct mlxsw_sp_fib_event_work *fib_work;
7857 	struct fib_notifier_info *info = ptr;
7858 	struct mlxsw_sp_router *router;
7859 	int err;
7860 
7861 	if (info->family != AF_INET && info->family != AF_INET6 &&
7862 	    info->family != RTNL_FAMILY_IPMR &&
7863 	    info->family != RTNL_FAMILY_IP6MR)
7864 		return NOTIFY_DONE;
7865 
7866 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7867 
7868 	switch (event) {
7869 	case FIB_EVENT_RULE_ADD:
7870 	case FIB_EVENT_RULE_DEL:
7871 		err = mlxsw_sp_router_fib_rule_event(event, info,
7872 						     router->mlxsw_sp);
7873 		return notifier_from_errno(err);
7874 	case FIB_EVENT_ENTRY_ADD:
7875 	case FIB_EVENT_ENTRY_REPLACE:
7876 	case FIB_EVENT_ENTRY_APPEND:
7877 		if (info->family == AF_INET) {
7878 			struct fib_entry_notifier_info *fen_info = ptr;
7879 
7880 			if (fen_info->fi->fib_nh_is_v6) {
7881 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
7882 				return notifier_from_errno(-EINVAL);
7883 			}
7884 		}
7885 		break;
7886 	}
7887 
7888 	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
7889 	if (!fib_work)
7890 		return NOTIFY_BAD;
7891 
7892 	fib_work->mlxsw_sp = router->mlxsw_sp;
7893 	fib_work->event = event;
7894 
7895 	switch (info->family) {
7896 	case AF_INET:
7897 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
7898 		mlxsw_sp_router_fib4_event(fib_work, info);
7899 		break;
7900 	case AF_INET6:
7901 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
7902 		err = mlxsw_sp_router_fib6_event(fib_work, info);
7903 		if (err)
7904 			goto err_fib_event;
7905 		break;
7906 	case RTNL_FAMILY_IP6MR:
7907 	case RTNL_FAMILY_IPMR:
7908 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
7909 		mlxsw_sp_router_fibmr_event(fib_work, info);
7910 		break;
7911 	}
7912 
7913 	mlxsw_core_schedule_work(&fib_work->work);
7914 
7915 	return NOTIFY_DONE;
7916 
7917 err_fib_event:
7918 	kfree(fib_work);
7919 	return NOTIFY_BAD;
7920 }
7921 
7922 static struct mlxsw_sp_rif *
7923 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
7924 			 const struct net_device *dev)
7925 {
7926 	int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
7927 	int i;
7928 
7929 	for (i = 0; i < max_rifs; i++)
7930 		if (mlxsw_sp->router->rifs[i] &&
7931 		    mlxsw_sp_rif_dev_is(mlxsw_sp->router->rifs[i], dev))
7932 			return mlxsw_sp->router->rifs[i];
7933 
7934 	return NULL;
7935 }
7936 
7937 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
7938 			 const struct net_device *dev)
7939 {
7940 	struct mlxsw_sp_rif *rif;
7941 
7942 	mutex_lock(&mlxsw_sp->router->lock);
7943 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7944 	mutex_unlock(&mlxsw_sp->router->lock);
7945 
7946 	return rif != NULL;
7947 }
7948 
7949 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
7950 {
7951 	struct mlxsw_sp_rif *rif;
7952 	u16 vid = 0;
7953 
7954 	mutex_lock(&mlxsw_sp->router->lock);
7955 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7956 	if (!rif)
7957 		goto out;
7958 
7959 	/* We only return the VID for VLAN RIFs. Otherwise we return an
7960 	 * invalid value (0).
7961 	 */
7962 	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
7963 		goto out;
7964 
7965 	vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7966 
7967 out:
7968 	mutex_unlock(&mlxsw_sp->router->lock);
7969 	return vid;
7970 }
7971 
7972 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
7973 {
7974 	char ritr_pl[MLXSW_REG_RITR_LEN];
7975 	int err;
7976 
7977 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
7978 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7979 	if (err)
7980 		return err;
7981 
7982 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
7983 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7984 }
7985 
7986 static int mlxsw_sp_router_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
7987 					 struct mlxsw_sp_rif *rif)
7988 {
7989 	int err;
7990 
7991 	err = mlxsw_sp_neigh_rif_made_sync(mlxsw_sp, rif);
7992 	if (err)
7993 		return err;
7994 
7995 	err = mlxsw_sp_nexthop_rif_made_sync(mlxsw_sp, rif);
7996 	if (err)
7997 		goto err_nexthop;
7998 
7999 	return 0;
8000 
8001 err_nexthop:
8002 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
8003 	return err;
8004 }
8005 
8006 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
8007 					  struct mlxsw_sp_rif *rif)
8008 {
8009 	/* Signal to nexthop cleanup that the RIF is going away. */
8010 	rif->crif->rif = NULL;
8011 
8012 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
8013 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
8014 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
8015 }
8016 
8017 static bool __mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
8018 {
8019 	struct inet6_dev *inet6_dev;
8020 	struct in_device *idev;
8021 
8022 	idev = __in_dev_get_rcu(dev);
8023 	if (idev && idev->ifa_list)
8024 		return false;
8025 
8026 	inet6_dev = __in6_dev_get(dev);
8027 	if (inet6_dev && !list_empty(&inet6_dev->addr_list))
8028 		return false;
8029 
8030 	return true;
8031 }
8032 
8033 static bool mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
8034 {
8035 	bool addr_list_empty;
8036 
8037 	rcu_read_lock();
8038 	addr_list_empty = __mlxsw_sp_dev_addr_list_empty(dev);
8039 	rcu_read_unlock();
8040 
8041 	return addr_list_empty;
8042 }
8043 
8044 static bool
8045 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
8046 			   unsigned long event)
8047 {
8048 	bool addr_list_empty;
8049 
8050 	switch (event) {
8051 	case NETDEV_UP:
8052 		return rif == NULL;
8053 	case NETDEV_DOWN:
8054 		addr_list_empty = mlxsw_sp_dev_addr_list_empty(dev);
8055 
8056 		/* macvlans do not have a RIF, but rather piggyback on the
8057 		 * RIF of their lower device.
8058 		 */
8059 		if (netif_is_macvlan(dev) && addr_list_empty)
8060 			return true;
8061 
8062 		if (rif && addr_list_empty &&
8063 		    !netif_is_l3_slave(mlxsw_sp_rif_dev(rif)))
8064 			return true;
8065 		/* It is possible we already removed the RIF ourselves
8066 		 * if it was assigned to a netdev that is now a bridge
8067 		 * or LAG slave.
8068 		 */
8069 		return false;
8070 	}
8071 
8072 	return false;
8073 }
8074 
8075 static enum mlxsw_sp_rif_type
8076 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
8077 		      const struct net_device *dev)
8078 {
8079 	enum mlxsw_sp_fid_type type;
8080 
8081 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
8082 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
8083 
8084 	/* Otherwise RIF type is derived from the type of the underlying FID. */
8085 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
8086 		type = MLXSW_SP_FID_TYPE_8021Q;
8087 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
8088 		type = MLXSW_SP_FID_TYPE_8021Q;
8089 	else if (netif_is_bridge_master(dev))
8090 		type = MLXSW_SP_FID_TYPE_8021D;
8091 	else
8092 		type = MLXSW_SP_FID_TYPE_RFID;
8093 
8094 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
8095 }
8096 
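/* gen_pool_alloc() returns 0 on failure, so the RIF index pool is offset
 * by MLXSW_SP_ROUTER_GENALLOC_OFFSET in order to keep index 0 usable. The
 * offset is subtracted again before the index is handed out.
 */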
8097 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index,
8098 				    u8 rif_entries)
8099 {
8100 	*p_rif_index = gen_pool_alloc(mlxsw_sp->router->rifs_table,
8101 				      rif_entries);
8102 	if (*p_rif_index == 0)
8103 		return -ENOBUFS;
8104 	*p_rif_index -= MLXSW_SP_ROUTER_GENALLOC_OFFSET;
8105 
8106 	/* RIF indexes must be aligned to the allocation size. */
8107 	WARN_ON_ONCE(*p_rif_index % rif_entries);
8108 
8109 	return 0;
8110 }
8111 
8112 static void mlxsw_sp_rif_index_free(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8113 				    u8 rif_entries)
8114 {
8115 	gen_pool_free(mlxsw_sp->router->rifs_table,
8116 		      MLXSW_SP_ROUTER_GENALLOC_OFFSET + rif_index, rif_entries);
8117 }
8118 
8119 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
8120 					       u16 vr_id,
8121 					       struct mlxsw_sp_crif *crif)
8122 {
8123 	struct net_device *l3_dev = crif ? crif->key.dev : NULL;
8124 	struct mlxsw_sp_rif *rif;
8125 
8126 	rif = kzalloc(rif_size, GFP_KERNEL);
8127 	if (!rif)
8128 		return NULL;
8129 
8130 	INIT_LIST_HEAD(&rif->neigh_list);
8131 	if (l3_dev) {
8132 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
8133 		rif->mtu = l3_dev->mtu;
8134 	}
8135 	rif->vr_id = vr_id;
8136 	rif->rif_index = rif_index;
8137 	if (crif) {
8138 		rif->crif = crif;
8139 		crif->rif = rif;
8140 	}
8141 
8142 	return rif;
8143 }
8144 
8145 static void mlxsw_sp_rif_free(struct mlxsw_sp_rif *rif)
8146 {
8147 	WARN_ON(!list_empty(&rif->neigh_list));
8148 
8149 	if (rif->crif)
8150 		rif->crif->rif = NULL;
8151 	kfree(rif);
8152 }
8153 
8154 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
8155 					   u16 rif_index)
8156 {
8157 	return mlxsw_sp->router->rifs[rif_index];
8158 }
8159 
8160 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
8161 {
8162 	return rif->rif_index;
8163 }
8164 
8165 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8166 {
8167 	return lb_rif->common.rif_index;
8168 }
8169 
8170 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8171 {
8172 	struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
8173 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
8174 	struct mlxsw_sp_vr *ul_vr;
8175 
8176 	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
8177 	if (WARN_ON(IS_ERR(ul_vr)))
8178 		return 0;
8179 
8180 	return ul_vr->id;
8181 }
8182 
8183 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8184 {
8185 	return lb_rif->ul_rif_id;
8186 }
8187 
8188 static bool
8189 mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
8190 {
8191 	return mlxsw_sp_rif_counter_valid_get(rif,
8192 					      MLXSW_SP_RIF_COUNTER_EGRESS) &&
8193 	       mlxsw_sp_rif_counter_valid_get(rif,
8194 					      MLXSW_SP_RIF_COUNTER_INGRESS);
8195 }
8196 
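/* Enable L3 HW stats on a RIF: allocate ingress and egress counters and
 * flush any stale values they may hold, unwinding on failure.
 */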
8197 static int
8198 mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
8199 {
8200 	int err;
8201 
8202 	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8203 	if (err)
8204 		return err;
8205 
8206 	/* Clear stale data. */
8207 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8208 					       MLXSW_SP_RIF_COUNTER_INGRESS,
8209 					       NULL);
8210 	if (err)
8211 		goto err_clear_ingress;
8212 
8213 	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8214 	if (err)
8215 		goto err_alloc_egress;
8216 
8217 	/* Clear stale data. */
8218 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8219 					       MLXSW_SP_RIF_COUNTER_EGRESS,
8220 					       NULL);
8221 	if (err)
8222 		goto err_clear_egress;
8223 
8224 	return 0;
8225 
8226 err_clear_egress:
8227 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8228 err_alloc_egress:
8229 err_clear_ingress:
8230 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8231 	return err;
8232 }
8233 
8234 static void
8235 mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
8236 {
8237 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8238 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8239 }
8240 
8241 static void
8242 mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
8243 					  struct netdev_notifier_offload_xstats_info *info)
8244 {
8245 	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8246 		return;
8247 	netdev_offload_xstats_report_used(info->report_used);
8248 }
8249 
8250 static int
8251 mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
8252 				    struct rtnl_hw_stats64 *p_stats)
8253 {
8254 	struct mlxsw_sp_rif_counter_set_basic ingress;
8255 	struct mlxsw_sp_rif_counter_set_basic egress;
8256 	int err;
8257 
8258 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8259 					       MLXSW_SP_RIF_COUNTER_INGRESS,
8260 					       &ingress);
8261 	if (err)
8262 		return err;
8263 
8264 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8265 					       MLXSW_SP_RIF_COUNTER_EGRESS,
8266 					       &egress);
8267 	if (err)
8268 		return err;
8269 
8270 #define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX)		\
8271 		((SET.good_unicast_ ## SFX) +		\
8272 		 (SET.good_multicast_ ## SFX) +		\
8273 		 (SET.good_broadcast_ ## SFX))
8274 
8275 	p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
8276 	p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
8277 	p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
8278 	p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
8279 	p_stats->rx_errors = ingress.error_packets;
8280 	p_stats->tx_errors = egress.error_packets;
8281 	p_stats->rx_dropped = ingress.discard_packets;
8282 	p_stats->tx_dropped = egress.discard_packets;
8283 	p_stats->multicast = ingress.good_multicast_packets +
8284 			     ingress.good_broadcast_packets;
8285 
8286 #undef MLXSW_SP_ROUTER_ALL_GOOD
8287 
8288 	return 0;
8289 }
8290 
8291 static int
8292 mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
8293 					   struct netdev_notifier_offload_xstats_info *info)
8294 {
8295 	struct rtnl_hw_stats64 stats = {};
8296 	int err;
8297 
8298 	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8299 		return 0;
8300 
8301 	err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
8302 	if (err)
8303 		return err;
8304 
8305 	netdev_offload_xstats_report_delta(info->report_delta, &stats);
8306 	return 0;
8307 }
8308 
8309 struct mlxsw_sp_router_hwstats_notify_work {
8310 	struct work_struct work;
8311 	struct net_device *dev;
8312 	netdevice_tracker dev_tracker;
8313 };
8314 
8315 static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
8316 {
8317 	struct mlxsw_sp_router_hwstats_notify_work *hws_work =
8318 		container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
8319 			     work);
8320 
8321 	rtnl_lock();
8322 	rtnl_offload_xstats_notify(hws_work->dev);
8323 	rtnl_unlock();
8324 	netdev_put(hws_work->dev, &hws_work->dev_tracker);
8325 	kfree(hws_work);
8326 }
8327 
8328 static void
8329 mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
8330 {
8331 	struct mlxsw_sp_router_hwstats_notify_work *hws_work;
8332 
8333 	/* To collect the notification payload, the core ends up sending another
8334 	 * notifier block message, which would deadlock on the attempt to
8335 	 * acquire the router lock again. Just postpone the notification until
8336 	 * later.
8337 	 */
8338 
8339 	hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
8340 	if (!hws_work)
8341 		return;
8342 
8343 	INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
8344 	netdev_hold(dev, &hws_work->dev_tracker, GFP_KERNEL);
8345 	hws_work->dev = dev;
8346 	mlxsw_core_schedule_work(&hws_work->work);
8347 }
8348 
8349 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8350 {
8351 	return mlxsw_sp_rif_dev(rif)->ifindex;
8352 }
8353 
8354 bool mlxsw_sp_rif_has_dev(const struct mlxsw_sp_rif *rif)
8355 {
8356 	return !!mlxsw_sp_rif_dev(rif);
8357 }
8358 
8359 bool mlxsw_sp_rif_dev_is(const struct mlxsw_sp_rif *rif,
8360 			 const struct net_device *dev)
8361 {
8362 	return mlxsw_sp_rif_dev(rif) == dev;
8363 }
8364 
8365 static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
8366 {
8367 	struct rtnl_hw_stats64 stats = {};
8368 
8369 	if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
8370 		netdev_offload_xstats_push_delta(mlxsw_sp_rif_dev(rif),
8371 						 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8372 						 &stats);
8373 }
8374 
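/* Create a RIF for a netdevice: bind it to the virtual router of the
 * netdevice's FIB table, allocate a RIF index and (for non-loopback RIFs)
 * a FID, configure the hardware, connect the RIF to the multicast routing
 * tables and replay neighbours and nexthops that already use the
 * netdevice.
 */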
8375 static struct mlxsw_sp_rif *
8376 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8377 		    const struct mlxsw_sp_rif_params *params,
8378 		    struct netlink_ext_ack *extack)
8379 {
8380 	u8 rif_entries = params->double_entry ? 2 : 1;
8381 	u32 tb_id = l3mdev_fib_table(params->dev);
8382 	const struct mlxsw_sp_rif_ops *ops;
8383 	struct mlxsw_sp_fid *fid = NULL;
8384 	enum mlxsw_sp_rif_type type;
8385 	struct mlxsw_sp_crif *crif;
8386 	struct mlxsw_sp_rif *rif;
8387 	struct mlxsw_sp_vr *vr;
8388 	u16 rif_index;
8389 	int i, err;
8390 
8391 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8392 	ops = mlxsw_sp->router->rif_ops_arr[type];
8393 
8394 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8395 	if (IS_ERR(vr))
8396 		return ERR_CAST(vr);
8397 	vr->rif_count++;
8398 
8399 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
8400 	if (err) {
8401 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8402 		goto err_rif_index_alloc;
8403 	}
8404 
8405 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, params->dev);
8406 	if (WARN_ON(!crif)) {
8407 		err = -ENOENT;
8408 		goto err_crif_lookup;
8409 	}
8410 
8411 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, crif);
8412 	if (!rif) {
8413 		err = -ENOMEM;
8414 		goto err_rif_alloc;
8415 	}
8416 	netdev_hold(params->dev, &rif->dev_tracker, GFP_KERNEL);
8417 	mlxsw_sp->router->rifs[rif_index] = rif;
8418 	rif->mlxsw_sp = mlxsw_sp;
8419 	rif->ops = ops;
8420 	rif->rif_entries = rif_entries;
8421 
8422 	if (ops->setup)
8423 		ops->setup(rif, params);
8424 
8425 	if (ops->fid_get) {
8426 		fid = ops->fid_get(rif, params, extack);
8427 		if (IS_ERR(fid)) {
8428 			err = PTR_ERR(fid);
8429 			goto err_fid_get;
8430 		}
8431 		rif->fid = fid;
8432 	}
8433 
8434 	err = ops->configure(rif, extack);
8435 	if (err)
8436 		goto err_configure;
8437 
8438 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8439 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8440 		if (err)
8441 			goto err_mr_rif_add;
8442 	}
8443 
8444 	err = mlxsw_sp_router_rif_made_sync(mlxsw_sp, rif);
8445 	if (err)
8446 		goto err_rif_made_sync;
8447 
8448 	if (netdev_offload_xstats_enabled(params->dev,
8449 					  NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8450 		err = mlxsw_sp_router_port_l3_stats_enable(rif);
8451 		if (err)
8452 			goto err_stats_enable;
8453 		mlxsw_sp_router_hwstats_notify_schedule(params->dev);
8454 	} else {
8455 		mlxsw_sp_rif_counters_alloc(rif);
8456 	}
8457 
8458 	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
8459 	return rif;
8460 
8461 err_stats_enable:
8462 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8463 err_rif_made_sync:
8464 err_mr_rif_add:
8465 	for (i--; i >= 0; i--)
8466 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8467 	ops->deconfigure(rif);
8468 err_configure:
8469 	if (fid)
8470 		mlxsw_sp_fid_put(fid);
8471 err_fid_get:
8472 	mlxsw_sp->router->rifs[rif_index] = NULL;
8473 	netdev_put(params->dev, &rif->dev_tracker);
8474 	mlxsw_sp_rif_free(rif);
8475 err_rif_alloc:
8476 err_crif_lookup:
8477 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8478 err_rif_index_alloc:
8479 	vr->rif_count--;
8480 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8481 	return ERR_PTR(err);
8482 }
8483 
8484 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8485 {
8486 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
8487 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
8488 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8489 	struct mlxsw_sp_crif *crif = rif->crif;
8490 	struct mlxsw_sp_fid *fid = rif->fid;
8491 	u8 rif_entries = rif->rif_entries;
8492 	u16 rif_index = rif->rif_index;
8493 	struct mlxsw_sp_vr *vr;
8494 	int i;
8495 
8496 	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
8497 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8498 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
8499 
8500 	if (netdev_offload_xstats_enabled(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8501 		mlxsw_sp_rif_push_l3_stats(rif);
8502 		mlxsw_sp_router_port_l3_stats_disable(rif);
8503 		mlxsw_sp_router_hwstats_notify_schedule(dev);
8504 	} else {
8505 		mlxsw_sp_rif_counters_free(rif);
8506 	}
8507 
8508 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8509 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8510 	ops->deconfigure(rif);
8511 	if (fid)
8512 		/* Loopback RIFs are not associated with a FID. */
8513 		mlxsw_sp_fid_put(fid);
8514 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8515 	netdev_put(dev, &rif->dev_tracker);
8516 	mlxsw_sp_rif_free(rif);
8517 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8518 	vr->rif_count--;
8519 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8520 
8521 	if (crif->can_destroy)
8522 		mlxsw_sp_crif_free(crif);
8523 }
8524 
8525 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8526 				 struct net_device *dev)
8527 {
8528 	struct mlxsw_sp_rif *rif;
8529 
8530 	mutex_lock(&mlxsw_sp->router->lock);
8531 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8532 	if (!rif)
8533 		goto out;
8534 	mlxsw_sp_rif_destroy(rif);
8535 out:
8536 	mutex_unlock(&mlxsw_sp->router->lock);
8537 }
8538 
8539 static void mlxsw_sp_rif_destroy_vlan_upper(struct mlxsw_sp *mlxsw_sp,
8540 					    struct net_device *br_dev,
8541 					    u16 vid)
8542 {
8543 	struct net_device *upper_dev;
8544 	struct mlxsw_sp_crif *crif;
8545 
8546 	rcu_read_lock();
8547 	upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q), vid);
8548 	rcu_read_unlock();
8549 
8550 	if (!upper_dev)
8551 		return;
8552 
8553 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, upper_dev);
8554 	if (!crif || !crif->rif)
8555 		return;
8556 
8557 	mlxsw_sp_rif_destroy(crif->rif);
8558 }
8559 
8560 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8561 					  struct net_device *l3_dev,
8562 					  int lower_pvid,
8563 					  unsigned long event,
8564 					  struct netlink_ext_ack *extack);
8565 
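/* Handle a PVID change on a VLAN-aware bridge. The VLAN RIF of the bridge
 * is keyed by the PVID, so an existing RIF may need to be migrated to one
 * that uses the new PVID, or be destroyed when the PVID is removed.
 */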
8566 int mlxsw_sp_router_bridge_vlan_add(struct mlxsw_sp *mlxsw_sp,
8567 				    struct net_device *br_dev,
8568 				    u16 new_vid, bool is_pvid,
8569 				    struct netlink_ext_ack *extack)
8570 {
8571 	struct mlxsw_sp_rif *old_rif;
8572 	struct mlxsw_sp_rif *new_rif;
8573 	struct net_device *upper_dev;
8574 	u16 old_pvid = 0;
8575 	u16 new_pvid;
8576 	int err = 0;
8577 
8578 	mutex_lock(&mlxsw_sp->router->lock);
8579 	old_rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
8580 	if (old_rif) {
8581 		/* If the RIF on the bridge is not a VLAN RIF, we shouldn't have
8582 		 * gotten a PVID notification.
8583 		 */
8584 		if (WARN_ON(old_rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN))
8585 			old_rif = NULL;
8586 		else
8587 			old_pvid = mlxsw_sp_fid_8021q_vid(old_rif->fid);
8588 	}
8589 
8590 	if (is_pvid)
8591 		new_pvid = new_vid;
8592 	else if (old_pvid == new_vid)
8593 		new_pvid = 0;
8594 	else
8595 		goto out;
8596 
8597 	if (old_pvid == new_pvid)
8598 		goto out;
8599 
8600 	if (new_pvid) {
8601 		struct mlxsw_sp_rif_params params = {
8602 			.dev = br_dev,
8603 			.vid = new_pvid,
8604 		};
8605 
8606 		/* If there is a VLAN upper with the same VID as the new PVID,
8607 		 * kill its RIF, if there is one.
8608 		 */
8609 		mlxsw_sp_rif_destroy_vlan_upper(mlxsw_sp, br_dev, new_pvid);
8610 
8611 		if (mlxsw_sp_dev_addr_list_empty(br_dev))
8612 			goto out;
8613 		new_rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8614 		if (IS_ERR(new_rif)) {
8615 			err = PTR_ERR(new_rif);
8616 			goto out;
8617 		}
8618 
8619 		if (old_pvid)
8620 			mlxsw_sp_rif_migrate_destroy(mlxsw_sp, old_rif, new_rif,
8621 						     true);
8622 	} else {
8623 		mlxsw_sp_rif_destroy(old_rif);
8624 	}
8625 
8626 	if (old_pvid) {
8627 		rcu_read_lock();
8628 		upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q),
8629 						     old_pvid);
8630 		rcu_read_unlock();
8631 		if (upper_dev)
8632 			err = mlxsw_sp_inetaddr_bridge_event(mlxsw_sp,
8633 							     upper_dev,
8634 							     new_pvid,
8635 							     NETDEV_UP, extack);
8636 	}
8637 
8638 out:
8639 	mutex_unlock(&mlxsw_sp->router->lock);
8640 	return err;
8641 }
8642 
8643 static void
8644 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8645 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8646 {
8647 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8648 
8649 	params->vid = mlxsw_sp_port_vlan->vid;
8650 	params->lag = mlxsw_sp_port->lagged;
8651 	if (params->lag)
8652 		params->lag_id = mlxsw_sp_port->lag_id;
8653 	else
8654 		params->system_port = mlxsw_sp_port->local_port;
8655 }
8656 
8657 static struct mlxsw_sp_rif_subport *
8658 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8659 {
8660 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
8661 }
8662 
8663 int mlxsw_sp_rif_subport_port(const struct mlxsw_sp_rif *rif,
8664 			      u16 *port, bool *is_lag)
8665 {
8666 	struct mlxsw_sp_rif_subport *rif_subport;
8667 
8668 	if (WARN_ON(rif->ops->type != MLXSW_SP_RIF_TYPE_SUBPORT))
8669 		return -EINVAL;
8670 
8671 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8672 	*is_lag = rif_subport->lag;
8673 	*port = *is_lag ? rif_subport->lag_id : rif_subport->system_port;
8674 	return 0;
8675 }
8676 
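/* {Port, VID} RIFs are shared by all the port VLANs that use them and are
 * therefore reference-counted: the first user creates the RIF, subsequent
 * users only take a reference.
 */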
8677 static struct mlxsw_sp_rif *
8678 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8679 			 const struct mlxsw_sp_rif_params *params,
8680 			 struct netlink_ext_ack *extack)
8681 {
8682 	struct mlxsw_sp_rif_subport *rif_subport;
8683 	struct mlxsw_sp_rif *rif;
8684 
8685 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8686 	if (!rif)
8687 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8688 
8689 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8690 	refcount_inc(&rif_subport->ref_count);
8691 	return rif;
8692 }
8693 
8694 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8695 {
8696 	struct mlxsw_sp_rif_subport *rif_subport;
8697 
8698 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8699 	if (!refcount_dec_and_test(&rif_subport->ref_count))
8700 		return;
8701 
8702 	mlxsw_sp_rif_destroy(rif);
8703 }
8704 
8705 static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
8706 						struct mlxsw_sp_rif_mac_profile *profile,
8707 						struct netlink_ext_ack *extack)
8708 {
8709 	u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
8710 	struct mlxsw_sp_router *router = mlxsw_sp->router;
8711 	int id;
8712 
8713 	id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
8714 		       max_rif_mac_profiles, GFP_KERNEL);
8715 
8716 	if (id >= 0) {
8717 		profile->id = id;
8718 		return 0;
8719 	}
8720 
8721 	if (id == -ENOSPC)
8722 		NL_SET_ERR_MSG_MOD(extack,
8723 				   "Exceeded number of supported router interface MAC profiles");
8724 
8725 	return id;
8726 }
8727 
8728 static struct mlxsw_sp_rif_mac_profile *
8729 mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
8730 {
8731 	struct mlxsw_sp_rif_mac_profile *profile;
8732 
8733 	profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
8734 			     mac_profile);
8735 	WARN_ON(!profile);
8736 	return profile;
8737 }
8738 
8739 static struct mlxsw_sp_rif_mac_profile *
8740 mlxsw_sp_rif_mac_profile_alloc(const char *mac)
8741 {
8742 	struct mlxsw_sp_rif_mac_profile *profile;
8743 
8744 	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
8745 	if (!profile)
8746 		return NULL;
8747 
8748 	ether_addr_copy(profile->mac_prefix, mac);
8749 	refcount_set(&profile->ref_count, 1);
8750 	return profile;
8751 }
8752 
8753 static struct mlxsw_sp_rif_mac_profile *
8754 mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
8755 {
8756 	struct mlxsw_sp_router *router = mlxsw_sp->router;
8757 	struct mlxsw_sp_rif_mac_profile *profile;
8758 	int id;
8759 
8760 	idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
8761 		if (ether_addr_equal_masked(profile->mac_prefix, mac,
8762 					    mlxsw_sp->mac_mask))
8763 			return profile;
8764 	}
8765 
8766 	return NULL;
8767 }
8768 
8769 static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
8770 {
8771 	const struct mlxsw_sp *mlxsw_sp = priv;
8772 
8773 	return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
8774 }
8775 
8776 static u64 mlxsw_sp_rifs_occ_get(void *priv)
8777 {
8778 	const struct mlxsw_sp *mlxsw_sp = priv;
8779 
8780 	return atomic_read(&mlxsw_sp->router->rifs_count);
8781 }
8782 
8783 static struct mlxsw_sp_rif_mac_profile *
8784 mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
8785 				struct netlink_ext_ack *extack)
8786 {
8787 	struct mlxsw_sp_rif_mac_profile *profile;
8788 	int err;
8789 
8790 	profile = mlxsw_sp_rif_mac_profile_alloc(mac);
8791 	if (!profile)
8792 		return ERR_PTR(-ENOMEM);
8793 
8794 	err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
8795 	if (err)
8796 		goto profile_index_alloc_err;
8797 
8798 	atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
8799 	return profile;
8800 
8801 profile_index_alloc_err:
8802 	kfree(profile);
8803 	return ERR_PTR(err);
8804 }
8805 
8806 static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
8807 					     u8 mac_profile)
8808 {
8809 	struct mlxsw_sp_rif_mac_profile *profile;
8810 
8811 	atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
8812 	profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
8813 	kfree(profile);
8814 }
8815 
8816 static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
8817 					const char *mac, u8 *p_mac_profile,
8818 					struct netlink_ext_ack *extack)
8819 {
8820 	struct mlxsw_sp_rif_mac_profile *profile;
8821 
8822 	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
8823 	if (profile) {
8824 		refcount_inc(&profile->ref_count);
8825 		goto out;
8826 	}
8827 
8828 	profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
8829 	if (IS_ERR(profile))
8830 		return PTR_ERR(profile);
8831 
8832 out:
8833 	*p_mac_profile = profile->id;
8834 	return 0;
8835 }
8836 
8837 static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
8838 					 u8 mac_profile)
8839 {
8840 	struct mlxsw_sp_rif_mac_profile *profile;
8841 
8842 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8843 			   mac_profile);
8844 	if (WARN_ON(!profile))
8845 		return;
8846 
8847 	if (!refcount_dec_and_test(&profile->ref_count))
8848 		return;
8849 
8850 	mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
8851 }
8852 
8853 static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
8854 {
8855 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8856 	struct mlxsw_sp_rif_mac_profile *profile;
8857 
8858 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8859 			   rif->mac_profile_id);
8860 	if (WARN_ON(!profile))
8861 		return false;
8862 
8863 	return refcount_read(&profile->ref_count) > 1;
8864 }
8865 
8866 static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
8867 					 const char *new_mac)
8868 {
8869 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8870 	struct mlxsw_sp_rif_mac_profile *profile;
8871 
8872 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8873 			   rif->mac_profile_id);
8874 	if (WARN_ON(!profile))
8875 		return -EINVAL;
8876 
8877 	ether_addr_copy(profile->mac_prefix, new_mac);
8878 	return 0;
8879 }
8880 
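/* When the MAC of a RIF changes, edit its MAC profile in place if this
 * RIF is the profile's only user and no existing profile matches the new
 * MAC. Otherwise, take a reference on a matching or newly created profile
 * and release the old one.
 */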
8881 static int
8882 mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
8883 				 struct mlxsw_sp_rif *rif,
8884 				 const char *new_mac,
8885 				 struct netlink_ext_ack *extack)
8886 {
8887 	u8 mac_profile;
8888 	int err;
8889 
8890 	if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
8891 	    !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
8892 		return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
8893 
8894 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
8895 					   &mac_profile, extack);
8896 	if (err)
8897 		return err;
8898 
8899 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
8900 	rif->mac_profile_id = mac_profile;
8901 	return 0;
8902 }
8903 
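/* Join a port VLAN to the router: get (or create) the {Port, VID} RIF,
 * map the {Port, VID} to the RIF's FID and put the VID in a forwarding,
 * non-learning state.
 */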
8904 static int
8905 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8906 				 struct net_device *l3_dev,
8907 				 struct netlink_ext_ack *extack)
8908 {
8909 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8910 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
8911 	struct mlxsw_sp_rif_params params;
8912 	u16 vid = mlxsw_sp_port_vlan->vid;
8913 	struct mlxsw_sp_rif *rif;
8914 	struct mlxsw_sp_fid *fid;
8915 	int err;
8916 
8917 	params = (struct mlxsw_sp_rif_params) {
8918 		.dev = l3_dev,
8919 		.vid = vid,
8920 	};
8921 
8922 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
8923 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
8924 	if (IS_ERR(rif))
8925 		return PTR_ERR(rif);
8926 
8927 	/* The FID was already created; just take a reference. */
8928 	fid = rif->ops->fid_get(rif, &params, extack);
8929 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
8930 	if (err)
8931 		goto err_fid_port_vid_map;
8932 
8933 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
8934 	if (err)
8935 		goto err_port_vid_learning_set;
8936 
8937 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
8938 					BR_STATE_FORWARDING);
8939 	if (err)
8940 		goto err_port_vid_stp_set;
8941 
8942 	mlxsw_sp_port_vlan->fid = fid;
8943 
8944 	return 0;
8945 
8946 err_port_vid_stp_set:
8947 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8948 err_port_vid_learning_set:
8949 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8950 err_fid_port_vid_map:
8951 	mlxsw_sp_fid_put(fid);
8952 	mlxsw_sp_rif_subport_put(rif);
8953 	return err;
8954 }
8955 
8956 static void
8957 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8958 {
8959 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8960 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
8961 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
8962 	u16 vid = mlxsw_sp_port_vlan->vid;
8963 
8964 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
8965 		return;
8966 
8967 	mlxsw_sp_port_vlan->fid = NULL;
8968 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
8969 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8970 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8971 	mlxsw_sp_fid_put(fid);
8972 	mlxsw_sp_rif_subport_put(rif);
8973 }
8974 
8975 static int
8976 mlxsw_sp_port_vlan_router_join_existing(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8977 					struct net_device *l3_dev,
8978 					struct netlink_ext_ack *extack)
8979 {
8980 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8981 
8982 	lockdep_assert_held(&mlxsw_sp->router->lock);
8983 
8984 	if (!mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev))
8985 		return 0;
8986 
8987 	return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
8988 						extack);
8989 }
8990 
8991 void
8992 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8993 {
8994 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8995 
8996 	mutex_lock(&mlxsw_sp->router->lock);
8997 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8998 	mutex_unlock(&mlxsw_sp->router->lock);
8999 }
9000 
9001 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
9002 					     struct net_device *port_dev,
9003 					     unsigned long event, u16 vid,
9004 					     struct netlink_ext_ack *extack)
9005 {
9006 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
9007 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9008 
9009 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
9010 	if (WARN_ON(!mlxsw_sp_port_vlan))
9011 		return -EINVAL;
9012 
9013 	switch (event) {
9014 	case NETDEV_UP:
9015 		return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
9016 							l3_dev, extack);
9017 	case NETDEV_DOWN:
9018 		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
9019 		break;
9020 	}
9021 
9022 	return 0;
9023 }
9024 
9025 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
9026 					unsigned long event, bool nomaster,
9027 					struct netlink_ext_ack *extack)
9028 {
9029 	if (!nomaster && (netif_is_any_bridge_port(port_dev) ||
9030 			  netif_is_lag_port(port_dev)))
9031 		return 0;
9032 
9033 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
9034 						 MLXSW_SP_DEFAULT_VID, extack);
9035 }
9036 
9037 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
9038 					 struct net_device *lag_dev,
9039 					 unsigned long event, u16 vid,
9040 					 struct netlink_ext_ack *extack)
9041 {
9042 	struct net_device *port_dev;
9043 	struct list_head *iter;
9044 	int err;
9045 
9046 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
9047 		if (mlxsw_sp_port_dev_check(port_dev)) {
9048 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
9049 								port_dev,
9050 								event, vid,
9051 								extack);
9052 			if (err)
9053 				return err;
9054 		}
9055 	}
9056 
9057 	return 0;
9058 }
9059 
9060 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
9061 				       unsigned long event, bool nomaster,
9062 				       struct netlink_ext_ack *extack)
9063 {
9064 	if (!nomaster && netif_is_bridge_port(lag_dev))
9065 		return 0;
9066 
9067 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
9068 					     MLXSW_SP_DEFAULT_VID, extack);
9069 }
9070 
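/* Handle an address event on a bridge, or on a VLAN device whose real
 * device is a bridge. For a VLAN-aware bridge the RIF is created for the
 * bridge PVID; adding an address to an 802.1ad bridge is rejected, and a
 * missing PVID means there is nothing to do. lower_pvid is the PVID of
 * the bridge below a VLAN upper, or -1 when the event is for the bridge
 * device itself.
 */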
9071 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
9072 					  struct net_device *l3_dev,
9073 					  int lower_pvid,
9074 					  unsigned long event,
9075 					  struct netlink_ext_ack *extack)
9076 {
9077 	struct mlxsw_sp_rif_params params = {
9078 		.dev = l3_dev,
9079 	};
9080 	struct mlxsw_sp_rif *rif;
9081 	int err;
9082 
9083 	switch (event) {
9084 	case NETDEV_UP:
9085 		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
9086 			u16 proto;
9087 
9088 			br_vlan_get_proto(l3_dev, &proto);
9089 			if (proto == ETH_P_8021AD) {
9090 				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
9091 				return -EOPNOTSUPP;
9092 			}
9093 			err = br_vlan_get_pvid(l3_dev, &params.vid);
9094 			if (err)
9095 				return err;
9096 			if (!params.vid)
9097 				return 0;
9098 		} else if (is_vlan_dev(l3_dev)) {
9099 			params.vid = vlan_dev_vlan_id(l3_dev);
9100 
9101 			/* If the VID matches the PVID of the bridge below, the
9102 			 * bridge owns the RIF for this VLAN. Don't do anything.
9103 			 */
9104 			if ((int)params.vid == lower_pvid)
9105 				return 0;
9106 		}
9107 
9108 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
9109 		if (IS_ERR(rif))
9110 			return PTR_ERR(rif);
9111 		break;
9112 	case NETDEV_DOWN:
9113 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9114 		mlxsw_sp_rif_destroy(rif);
9115 		break;
9116 	}
9117 
9118 	return 0;
9119 }
9120 
9121 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
9122 					struct net_device *vlan_dev,
9123 					unsigned long event, bool nomaster,
9124 					struct netlink_ext_ack *extack)
9125 {
9126 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
9127 	u16 vid = vlan_dev_vlan_id(vlan_dev);
9128 	u16 lower_pvid;
9129 	int err;
9130 
9131 	if (!nomaster && netif_is_bridge_port(vlan_dev))
9132 		return 0;
9133 
9134 	if (mlxsw_sp_port_dev_check(real_dev)) {
9135 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
9136 							 event, vid, extack);
9137 	} else if (netif_is_lag_master(real_dev)) {
9138 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
9139 						     vid, extack);
9140 	} else if (netif_is_bridge_master(real_dev) &&
9141 		   br_vlan_enabled(real_dev)) {
9142 		err = br_vlan_get_pvid(real_dev, &lower_pvid);
9143 		if (err)
9144 			return err;
9145 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev,
9146 						      lower_pvid, event,
9147 						      extack);
9148 	}
9149 
9150 	return 0;
9151 }
9152 
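/* VRRP virtual router MACs have the fixed form 00:00:5e:00:01:{VRID} for
 * IPv4 and 00:00:5e:00:02:{VRID} for IPv6 (RFC 5798), so the helpers
 * below match on the first five bytes and the last byte carries the
 * VRID. For example, VRID 7 over IPv4 yields the virtual MAC
 * 00:00:5e:00:01:07, and mlxsw_sp_rif_vrrp_op() programs 7 as the RIF's
 * IPv4 VRRP ID.
 */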
9153 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
9154 {
9155 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
9156 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9157 
9158 	return ether_addr_equal_masked(mac, vrrp4, mask);
9159 }
9160 
9161 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
9162 {
9163 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
9164 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9165 
9166 	return ether_addr_equal_masked(mac, vrrp6, mask);
9167 }
9168 
9169 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9170 				const u8 *mac, bool adding)
9171 {
9172 	char ritr_pl[MLXSW_REG_RITR_LEN];
9173 	u8 vrrp_id = adding ? mac[5] : 0;
9174 	int err;
9175 
9176 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
9177 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
9178 		return 0;
9179 
9180 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9181 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9182 	if (err)
9183 		return err;
9184 
9185 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
9186 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
9187 	else
9188 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
9189 
9190 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9191 }
9192 
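/* Direct a macvlan's MAC to the router: install an FDB entry for the MAC
 * in the FID of the RIF backing the macvlan's lower device and, for VRRP
 * virtual MACs, also program the VRRP ID on that RIF. Without a RIF on
 * the lower device there is nothing to offload.
 */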
9193 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
9194 				    const struct net_device *macvlan_dev,
9195 				    struct netlink_ext_ack *extack)
9196 {
9197 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9198 	struct mlxsw_sp_rif *rif;
9199 	int err;
9200 
9201 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9202 	if (!rif)
9203 		return 0;
9204 
9205 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9206 				  mlxsw_sp_fid_index(rif->fid), true);
9207 	if (err)
9208 		return err;
9209 
9210 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
9211 				   macvlan_dev->dev_addr, true);
9212 	if (err)
9213 		goto err_rif_vrrp_add;
9214 
9215 	/* Make sure the bridge driver does not have this MAC pointing at
9216 	 * some other port.
9217 	 */
9218 	if (rif->ops->fdb_del)
9219 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
9220 
9221 	return 0;
9222 
9223 err_rif_vrrp_add:
9224 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9225 			    mlxsw_sp_fid_index(rif->fid), false);
9226 	return err;
9227 }
9228 
9229 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9230 				       const struct net_device *macvlan_dev)
9231 {
9232 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9233 	struct mlxsw_sp_rif *rif;
9234 
9235 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9236 	/* If we do not have a RIF, then we already took care of
9237 	 * removing the macvlan's MAC during RIF deletion.
9238 	 */
9239 	if (!rif)
9240 		return;
9241 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
9242 			     false);
9243 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9244 			    mlxsw_sp_fid_index(rif->fid), false);
9245 }
9246 
9247 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9248 			      const struct net_device *macvlan_dev)
9249 {
9250 	mutex_lock(&mlxsw_sp->router->lock);
9251 	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9252 	mutex_unlock(&mlxsw_sp->router->lock);
9253 }
9254 
9255 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
9256 					   struct net_device *macvlan_dev,
9257 					   unsigned long event,
9258 					   struct netlink_ext_ack *extack)
9259 {
9260 	switch (event) {
9261 	case NETDEV_UP:
9262 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
9263 	case NETDEV_DOWN:
9264 		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9265 		break;
9266 	}
9267 
9268 	return 0;
9269 }
9270 
9271 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
9272 				     struct net_device *dev,
9273 				     unsigned long event, bool nomaster,
9274 				     struct netlink_ext_ack *extack)
9275 {
9276 	if (mlxsw_sp_port_dev_check(dev))
9277 		return mlxsw_sp_inetaddr_port_event(dev, event, nomaster,
9278 						    extack);
9279 	else if (netif_is_lag_master(dev))
9280 		return mlxsw_sp_inetaddr_lag_event(dev, event, nomaster,
9281 						   extack);
9282 	else if (netif_is_bridge_master(dev))
9283 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, -1, event,
9284 						      extack);
9285 	else if (is_vlan_dev(dev))
9286 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
9287 						    nomaster, extack);
9288 	else if (netif_is_macvlan(dev))
9289 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
9290 						       extack);
9291 	else
9292 		return 0;
9293 }
9294 
9295 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
9296 				   unsigned long event, void *ptr)
9297 {
9298 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
9299 	struct net_device *dev = ifa->ifa_dev->dev;
9300 	struct mlxsw_sp_router *router;
9301 	struct mlxsw_sp_rif *rif;
9302 	int err = 0;
9303 
9304 	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
9305 	if (event == NETDEV_UP)
9306 		return NOTIFY_DONE;
9307 
9308 	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
9309 	mutex_lock(&router->lock);
9310 	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
9311 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9312 		goto out;
9313 
9314 	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, false,
9315 					NULL);
9316 out:
9317 	mutex_unlock(&router->lock);
9318 	return notifier_from_errno(err);
9319 }
9320 
9321 static int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
9322 					 unsigned long event, void *ptr)
9323 {
9324 	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
9325 	struct net_device *dev = ivi->ivi_dev->dev;
9326 	struct mlxsw_sp *mlxsw_sp;
9327 	struct mlxsw_sp_rif *rif;
9328 	int err = 0;
9329 
9330 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9331 	if (!mlxsw_sp)
9332 		return NOTIFY_DONE;
9333 
9334 	mutex_lock(&mlxsw_sp->router->lock);
9335 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9336 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9337 		goto out;
9338 
9339 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
9340 					ivi->extack);
9341 out:
9342 	mutex_unlock(&mlxsw_sp->router->lock);
9343 	return notifier_from_errno(err);
9344 }
9345 
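/* Unlike its IPv4 counterpart, the inet6addr notifier chain is atomic,
 * so the router lock (a mutex) cannot be taken directly from the
 * notifier. The event is therefore deferred to a work item, which holds
 * a reference on the netdevice until it has run.
 */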
9346 struct mlxsw_sp_inet6addr_event_work {
9347 	struct work_struct work;
9348 	struct mlxsw_sp *mlxsw_sp;
9349 	struct net_device *dev;
9350 	netdevice_tracker dev_tracker;
9351 	unsigned long event;
9352 };
9353 
9354 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
9355 {
9356 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
9357 		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
9358 	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
9359 	struct net_device *dev = inet6addr_work->dev;
9360 	unsigned long event = inet6addr_work->event;
9361 	struct mlxsw_sp_rif *rif;
9362 
9363 	rtnl_lock();
9364 	mutex_lock(&mlxsw_sp->router->lock);
9365 
9366 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9367 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9368 		goto out;
9369 
9370 	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false, NULL);
9371 out:
9372 	mutex_unlock(&mlxsw_sp->router->lock);
9373 	rtnl_unlock();
9374 	netdev_put(dev, &inet6addr_work->dev_tracker);
9375 	kfree(inet6addr_work);
9376 }
9377 
9378 /* Called with rcu_read_lock() */
9379 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
9380 				    unsigned long event, void *ptr)
9381 {
9382 	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
9383 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
9384 	struct net_device *dev = if6->idev->dev;
9385 	struct mlxsw_sp_router *router;
9386 
9387 	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
9388 	if (event == NETDEV_UP)
9389 		return NOTIFY_DONE;
9390 
9391 	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
9392 	if (!inet6addr_work)
9393 		return NOTIFY_BAD;
9394 
9395 	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
9396 	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
9397 	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
9398 	inet6addr_work->dev = dev;
9399 	inet6addr_work->event = event;
9400 	netdev_hold(dev, &inet6addr_work->dev_tracker, GFP_ATOMIC);
9401 	mlxsw_core_schedule_work(&inet6addr_work->work);
9402 
9403 	return NOTIFY_DONE;
9404 }
9405 
9406 static int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
9407 					  unsigned long event, void *ptr)
9408 {
9409 	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
9410 	struct net_device *dev = i6vi->i6vi_dev->dev;
9411 	struct mlxsw_sp *mlxsw_sp;
9412 	struct mlxsw_sp_rif *rif;
9413 	int err = 0;
9414 
9415 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9416 	if (!mlxsw_sp)
9417 		return NOTIFY_DONE;
9418 
9419 	mutex_lock(&mlxsw_sp->router->lock);
9420 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9421 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9422 		goto out;
9423 
9424 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
9425 					i6vi->extack);
9426 out:
9427 	mutex_unlock(&mlxsw_sp->router->lock);
9428 	return notifier_from_errno(err);
9429 }
9430 
9431 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9432 			     const char *mac, int mtu, u8 mac_profile)
9433 {
9434 	char ritr_pl[MLXSW_REG_RITR_LEN];
9435 	int err;
9436 
9437 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9438 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9439 	if (err)
9440 		return err;
9441 
9442 	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
9443 	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
9444 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
9445 	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
9446 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9447 }
9448 
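/* React to a MAC or MTU change on a RIF's netdevice: remove the old MAC
 * from the FDB, update the MAC profile and the RITR entry, and install
 * the new MAC in the FDB; on failure, each step is undone in reverse
 * using the old values. An MTU change is also propagated to the
 * multicast routing tables of the RIF's VR.
 */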
9449 static int
9450 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
9451 				  struct mlxsw_sp_rif *rif,
9452 				  struct netlink_ext_ack *extack)
9453 {
9454 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
9455 	u8 old_mac_profile;
9456 	u16 fid_index;
9457 	int err;
9458 
9459 	fid_index = mlxsw_sp_fid_index(rif->fid);
9460 
9461 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
9462 	if (err)
9463 		return err;
9464 
9465 	old_mac_profile = rif->mac_profile_id;
9466 	err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
9467 					       extack);
9468 	if (err)
9469 		goto err_rif_mac_profile_replace;
9470 
9471 	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
9472 				dev->mtu, rif->mac_profile_id);
9473 	if (err)
9474 		goto err_rif_edit;
9475 
9476 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
9477 	if (err)
9478 		goto err_rif_fdb_op;
9479 
9480 	if (rif->mtu != dev->mtu) {
9481 		struct mlxsw_sp_vr *vr;
9482 		int i;
9483 
9484 		/* The RIF is relevant only to the mr_table instances of its
9485 		 * own VR: unlike in unicast routing, a RIF cannot be shared
9486 		 * between several multicast routing tables.
9487 		 */
9488 		vr = &mlxsw_sp->router->vrs[rif->vr_id];
9489 		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
9490 			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
9491 						   rif, dev->mtu);
9492 	}
9493 
9494 	ether_addr_copy(rif->addr, dev->dev_addr);
9495 	rif->mtu = dev->mtu;
9496 
9497 	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
9498 
9499 	return 0;
9500 
9501 err_rif_fdb_op:
9502 	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
9503 			  old_mac_profile);
9504 err_rif_edit:
9505 	mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
9506 err_rif_mac_profile_replace:
9507 	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
9508 	return err;
9509 }
9510 
9511 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
9512 			    struct netdev_notifier_pre_changeaddr_info *info)
9513 {
9514 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9515 	struct mlxsw_sp_rif_mac_profile *profile;
9516 	struct netlink_ext_ack *extack;
9517 	u8 max_rif_mac_profiles;
9518 	u64 occ;
9519 
9520 	extack = netdev_notifier_info_to_extack(&info->info);
9521 
9522 	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
9523 	if (profile)
9524 		return 0;
9525 
9526 	max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
9527 	occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
9528 	if (occ < max_rif_mac_profiles)
9529 		return 0;
9530 
9531 	if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
9532 		return 0;
9533 
9534 	NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
9535 	return -ENOBUFS;
9536 }
9537 
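/* Decide whether a netdevice should be tracked by a CRIF. Interesting
 * devices are those that may come to back a RIF or serve as a nexthop
 * device: front-panel ports, LAGs, bridges, VRFs, IP-in-IP overlay
 * devices, and VLAN uppers of ports, LAGs and bridges.
 */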
9538 static bool mlxsw_sp_router_netdevice_interesting(struct mlxsw_sp *mlxsw_sp,
9539 						  struct net_device *dev)
9540 {
9541 	struct vlan_dev_priv *vlan;
9542 
9543 	if (netif_is_lag_master(dev) ||
9544 	    netif_is_bridge_master(dev) ||
9545 	    mlxsw_sp_port_dev_check(dev) ||
9546 	    mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev) ||
9547 	    netif_is_l3_master(dev))
9548 		return true;
9549 
9550 	if (!is_vlan_dev(dev))
9551 		return false;
9552 
9553 	vlan = vlan_dev_priv(dev);
9554 	return netif_is_lag_master(vlan->real_dev) ||
9555 	       netif_is_bridge_master(vlan->real_dev) ||
9556 	       mlxsw_sp_port_dev_check(vlan->real_dev);
9557 }
9558 
9559 static struct mlxsw_sp_crif *
9560 mlxsw_sp_crif_register(struct mlxsw_sp_router *router, struct net_device *dev)
9561 {
9562 	struct mlxsw_sp_crif *crif;
9563 	int err;
9564 
9565 	if (WARN_ON(mlxsw_sp_crif_lookup(router, dev)))
9566 		return NULL;
9567 
9568 	crif = mlxsw_sp_crif_alloc(dev);
9569 	if (!crif)
9570 		return ERR_PTR(-ENOMEM);
9571 
9572 	err = mlxsw_sp_crif_insert(router, crif);
9573 	if (err)
9574 		goto err_netdev_insert;
9575 
9576 	return crif;
9577 
9578 err_netdev_insert:
9579 	mlxsw_sp_crif_free(crif);
9580 	return ERR_PTR(err);
9581 }
9582 
9583 static void mlxsw_sp_crif_unregister(struct mlxsw_sp_router *router,
9584 				     struct mlxsw_sp_crif *crif)
9585 {
9586 	struct mlxsw_sp_nexthop *nh, *tmp;
9587 
9588 	mlxsw_sp_crif_remove(router, crif);
9589 
9590 	list_for_each_entry_safe(nh, tmp, &crif->nexthop_list, crif_list_node)
9591 		mlxsw_sp_nexthop_type_fini(router->mlxsw_sp, nh);
9592 
9593 	if (crif->rif)
9594 		crif->can_destroy = true;
9595 	else
9596 		mlxsw_sp_crif_free(crif);
9597 }
9598 
9599 static int mlxsw_sp_netdevice_register(struct mlxsw_sp_router *router,
9600 				       struct net_device *dev)
9601 {
9602 	struct mlxsw_sp_crif *crif;
9603 
9604 	if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
9605 		return 0;
9606 
9607 	crif = mlxsw_sp_crif_register(router, dev);
9608 	return PTR_ERR_OR_ZERO(crif);
9609 }
9610 
9611 static void mlxsw_sp_netdevice_unregister(struct mlxsw_sp_router *router,
9612 					  struct net_device *dev)
9613 {
9614 	struct mlxsw_sp_crif *crif;
9615 
9616 	if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
9617 		return;
9618 
9619 	/* netdev_run_todo(), by way of netdev_wait_allrefs_any(), rebroadcasts
9620 	 * the NETDEV_UNREGISTER message, so we can get here twice. If that's
9621 	 * what happened, the netdevice state is NETREG_UNREGISTERED. In that
9622 	 * case, we expect to have collected the CRIF already, and warn if it
9623 	 * still exists. Otherwise we expect the CRIF to exist.
9624 	 */
9625 	crif = mlxsw_sp_crif_lookup(router, dev);
9626 	if (dev->reg_state == NETREG_UNREGISTERED) {
9627 		if (!WARN_ON(crif))
9628 			return;
9629 	}
9630 	if (WARN_ON(!crif))
9631 		return;
9632 
9633 	mlxsw_sp_crif_unregister(router, crif);
9634 }
9635 
9636 static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
9637 {
9638 	switch (event) {
9639 	case NETDEV_OFFLOAD_XSTATS_ENABLE:
9640 	case NETDEV_OFFLOAD_XSTATS_DISABLE:
9641 	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9642 	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9643 		return true;
9644 	}
9645 
9646 	return false;
9647 }
9648 
9649 static int
9650 mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
9651 					unsigned long event,
9652 					struct netdev_notifier_offload_xstats_info *info)
9653 {
9654 	switch (info->type) {
9655 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
9656 		break;
9657 	default:
9658 		return 0;
9659 	}
9660 
9661 	switch (event) {
9662 	case NETDEV_OFFLOAD_XSTATS_ENABLE:
9663 		return mlxsw_sp_router_port_l3_stats_enable(rif);
9664 	case NETDEV_OFFLOAD_XSTATS_DISABLE:
9665 		mlxsw_sp_router_port_l3_stats_disable(rif);
9666 		return 0;
9667 	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9668 		mlxsw_sp_router_port_l3_stats_report_used(rif, info);
9669 		return 0;
9670 	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9671 		return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
9672 	}
9673 
9674 	WARN_ON_ONCE(1);
9675 	return 0;
9676 }
9677 
9678 static int
9679 mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
9680 				      struct net_device *dev,
9681 				      unsigned long event,
9682 				      struct netdev_notifier_offload_xstats_info *info)
9683 {
9684 	struct mlxsw_sp_rif *rif;
9685 
9686 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9687 	if (!rif)
9688 		return 0;
9689 
9690 	return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
9691 }
9692 
9693 static bool mlxsw_sp_is_router_event(unsigned long event)
9694 {
9695 	switch (event) {
9696 	case NETDEV_PRE_CHANGEADDR:
9697 	case NETDEV_CHANGEADDR:
9698 	case NETDEV_CHANGEMTU:
9699 		return true;
9700 	default:
9701 		return false;
9702 	}
9703 }
9704 
9705 static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
9706 						unsigned long event, void *ptr)
9707 {
9708 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
9709 	struct mlxsw_sp *mlxsw_sp;
9710 	struct mlxsw_sp_rif *rif;
9711 
9712 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9713 	if (!mlxsw_sp)
9714 		return 0;
9715 
9716 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9717 	if (!rif)
9718 		return 0;
9719 
9720 	switch (event) {
9721 	case NETDEV_CHANGEMTU:
9722 	case NETDEV_CHANGEADDR:
9723 		return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
9724 	case NETDEV_PRE_CHANGEADDR:
9725 		return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
9726 	default:
9727 		WARN_ON_ONCE(1);
9728 		break;
9729 	}
9730 
9731 	return 0;
9732 }
9733 
9734 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
9735 				  struct net_device *l3_dev,
9736 				  struct netlink_ext_ack *extack)
9737 {
9738 	struct mlxsw_sp_rif *rif;
9739 
9740 	/* If netdev is already associated with a RIF, then we need to
9741 	 * destroy it and create a new one with the new virtual router ID.
9742 	 */
9743 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9744 	if (rif)
9745 		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false,
9746 					  extack);
9747 
9748 	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, false,
9749 					 extack);
9750 }
9751 
9752 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
9753 				    struct net_device *l3_dev)
9754 {
9755 	struct mlxsw_sp_rif *rif;
9756 
9757 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9758 	if (!rif)
9759 		return;
9760 	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false, NULL);
9761 }
9762 
9763 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
9764 {
9765 	struct netdev_notifier_changeupper_info *info = ptr;
9766 
9767 	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
9768 		return false;
9769 	return netif_is_l3_master(info->upper_dev);
9770 }
9771 
9772 static int
9773 mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
9774 			     struct netdev_notifier_changeupper_info *info)
9775 {
9776 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
9777 	int err = 0;
9778 
9779 	/* We do not create a RIF for a macvlan, but only use it to
9780 	 * direct more MAC addresses to the router.
9781 	 */
9782 	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
9783 		return 0;
9784 
9785 	switch (event) {
9786 	case NETDEV_PRECHANGEUPPER:
9787 		break;
9788 	case NETDEV_CHANGEUPPER:
9789 		if (info->linking) {
9790 			struct netlink_ext_ack *extack;
9791 
9792 			extack = netdev_notifier_info_to_extack(&info->info);
9793 			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
9794 		} else {
9795 			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
9796 		}
9797 		break;
9798 	}
9799 
9800 	return err;
9801 }
9802 
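/* When the topology around a netdevice changes (a device is enslaved to,
 * or released from, a master), uppers that already carry IP addresses may
 * suddenly need a RIF. The context below drives a replay of NETDEV_UP for
 * such devices, counting successes so that a later failure can be
 * unwound symmetrically by replaying NETDEV_DOWN.
 */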
9803 struct mlxsw_sp_router_replay_inetaddr_up {
9804 	struct mlxsw_sp *mlxsw_sp;
9805 	struct netlink_ext_ack *extack;
9806 	unsigned int done;
9807 	bool deslavement;
9808 };
9809 
9810 static int mlxsw_sp_router_replay_inetaddr_up(struct net_device *dev,
9811 					      struct netdev_nested_priv *priv)
9812 {
9813 	struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
9814 	bool nomaster = ctx->deslavement;
9815 	struct mlxsw_sp_crif *crif;
9816 	int err;
9817 
9818 	if (mlxsw_sp_dev_addr_list_empty(dev))
9819 		return 0;
9820 
9821 	crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
9822 	if (!crif || crif->rif)
9823 		return 0;
9824 
9825 	if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
9826 		return 0;
9827 
9828 	err = __mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_UP,
9829 					nomaster, ctx->extack);
9830 	if (err)
9831 		return err;
9832 
9833 	ctx->done++;
9834 	return 0;
9835 }
9836 
9837 static int mlxsw_sp_router_unreplay_inetaddr_up(struct net_device *dev,
9838 						struct netdev_nested_priv *priv)
9839 {
9840 	struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
9841 	bool nomaster = ctx->deslavement;
9842 	struct mlxsw_sp_crif *crif;
9843 
9844 	if (!ctx->done)
9845 		return 0;
9846 
9847 	if (mlxsw_sp_dev_addr_list_empty(dev))
9848 		return 0;
9849 
9850 	crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
9851 	if (!crif || !crif->rif)
9852 		return 0;
9853 
9854 	/* We are rolling back NETDEV_UP, so ask for that. */
9855 	if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
9856 		return 0;
9857 
9858 	__mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_DOWN, nomaster,
9859 				  NULL);
9860 
9861 	ctx->done--;
9862 	return 0;
9863 }
9864 
9865 int mlxsw_sp_netdevice_enslavement_replay(struct mlxsw_sp *mlxsw_sp,
9866 					  struct net_device *upper_dev,
9867 					  struct netlink_ext_ack *extack)
9868 {
9869 	struct mlxsw_sp_router_replay_inetaddr_up ctx = {
9870 		.mlxsw_sp = mlxsw_sp,
9871 		.extack = extack,
9872 		.deslavement = false,
9873 	};
9874 	struct netdev_nested_priv priv = {
9875 		.data = &ctx,
9876 	};
9877 	int err;
9878 
9879 	err = mlxsw_sp_router_replay_inetaddr_up(upper_dev, &priv);
9880 	if (err)
9881 		return err;
9882 
9883 	err = netdev_walk_all_upper_dev_rcu(upper_dev,
9884 					    mlxsw_sp_router_replay_inetaddr_up,
9885 					    &priv);
9886 	if (err)
9887 		goto err_replay_up;
9888 
9889 	return 0;
9890 
9891 err_replay_up:
9892 	netdev_walk_all_upper_dev_rcu(upper_dev,
9893 				      mlxsw_sp_router_unreplay_inetaddr_up,
9894 				      &priv);
9895 	mlxsw_sp_router_unreplay_inetaddr_up(upper_dev, &priv);
9896 	return err;
9897 }
9898 
9899 void mlxsw_sp_netdevice_deslavement_replay(struct mlxsw_sp *mlxsw_sp,
9900 					   struct net_device *dev)
9901 {
9902 	struct mlxsw_sp_router_replay_inetaddr_up ctx = {
9903 		.mlxsw_sp = mlxsw_sp,
9904 		.deslavement = true,
9905 	};
9906 	struct netdev_nested_priv priv = {
9907 		.data = &ctx,
9908 	};
9909 
9910 	mlxsw_sp_router_replay_inetaddr_up(dev, &priv);
9911 }
9912 
9913 static int
9914 mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port,
9915 				       u16 vid, struct net_device *dev,
9916 				       struct netlink_ext_ack *extack)
9917 {
9918 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9919 
9920 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
9921 							    vid);
9922 	if (WARN_ON(!mlxsw_sp_port_vlan))
9923 		return -EINVAL;
9924 
9925 	return mlxsw_sp_port_vlan_router_join_existing(mlxsw_sp_port_vlan,
9926 						       dev, extack);
9927 }
9928 
9929 static void
9930 mlxsw_sp_port_vid_router_leave(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
9931 			       struct net_device *dev)
9932 {
9933 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9934 
9935 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
9936 							    vid);
9937 	if (WARN_ON(!mlxsw_sp_port_vlan))
9938 		return;
9939 
9940 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
9941 }
9942 
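/* When a port joins a LAG that is itself part of the router, the default
 * VID and each VLAN upper of the LAG must join the router on this port
 * as well. The "done" counter records how many VLAN uppers joined, so
 * that the error path leaves exactly those, plus the default VID.
 */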
9943 static int __mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
9944 					   struct net_device *lag_dev,
9945 					   struct netlink_ext_ack *extack)
9946 {
9947 	u16 default_vid = MLXSW_SP_DEFAULT_VID;
9948 	struct net_device *upper_dev;
9949 	struct list_head *iter;
9950 	int done = 0;
9951 	u16 vid;
9952 	int err;
9953 
9954 	err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, default_vid,
9955 						     lag_dev, extack);
9956 	if (err)
9957 		return err;
9958 
9959 	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
9960 		if (!is_vlan_dev(upper_dev))
9961 			continue;
9962 
9963 		vid = vlan_dev_vlan_id(upper_dev);
9964 		err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, vid,
9965 							     upper_dev, extack);
9966 		if (err)
9967 			goto err_router_join_dev;
9968 
9969 		++done;
9970 	}
9971 
9972 	return 0;
9973 
9974 err_router_join_dev:
9975 	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
9976 		if (!is_vlan_dev(upper_dev))
9977 			continue;
9978 		if (!done--)
9979 			break;
9980 
9981 		vid = vlan_dev_vlan_id(upper_dev);
9982 		mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
9983 	}
9984 
9985 	mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
9986 	return err;
9987 }
9988 
9989 static void
9990 __mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
9991 				 struct net_device *lag_dev)
9992 {
9993 	u16 default_vid = MLXSW_SP_DEFAULT_VID;
9994 	struct net_device *upper_dev;
9995 	struct list_head *iter;
9996 	u16 vid;
9997 
9998 	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
9999 		if (!is_vlan_dev(upper_dev))
10000 			continue;
10001 
10002 		vid = vlan_dev_vlan_id(upper_dev);
10003 		mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
10004 	}
10005 
10006 	mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
10007 }
10008 
10009 int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
10010 				  struct net_device *lag_dev,
10011 				  struct netlink_ext_ack *extack)
10012 {
10013 	int err;
10014 
10015 	mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10016 	err = __mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev, extack);
10017 	mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10018 
10019 	return err;
10020 }
10021 
10022 void mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
10023 				    struct net_device *lag_dev)
10024 {
10025 	mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10026 	__mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
10027 	mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10028 }
10029 
10030 static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
10031 					   unsigned long event, void *ptr)
10032 {
10033 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
10034 	struct mlxsw_sp_router *router;
10035 	struct mlxsw_sp *mlxsw_sp;
10036 	int err = 0;
10037 
10038 	router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
10039 	mlxsw_sp = router->mlxsw_sp;
10040 
10041 	mutex_lock(&mlxsw_sp->router->lock);
10042 
10043 	if (event == NETDEV_REGISTER) {
10044 		err = mlxsw_sp_netdevice_register(router, dev);
10045 		if (err)
10046 			/* No need to roll this back; NETDEV_UNREGISTER will
10047 			 * collect the CRIF anyhow.
10048 			 */
10049 			goto out;
10050 	}
10051 
10052 	if (mlxsw_sp_is_offload_xstats_event(event))
10053 		err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
10054 							    event, ptr);
10055 	else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
10056 		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
10057 						       event, ptr);
10058 	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
10059 		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
10060 						       event, ptr);
10061 	else if (mlxsw_sp_is_router_event(event))
10062 		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
10063 	else if (mlxsw_sp_is_vrf_event(event, ptr))
10064 		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
10065 
10066 	if (event == NETDEV_UNREGISTER)
10067 		mlxsw_sp_netdevice_unregister(router, dev);
10068 
10069 out:
10070 	mutex_unlock(&mlxsw_sp->router->lock);
10071 
10072 	return notifier_from_errno(err);
10073 }
10074 
10075 struct mlxsw_sp_macvlan_replay {
10076 	struct mlxsw_sp *mlxsw_sp;
10077 	struct netlink_ext_ack *extack;
10078 };
10079 
10080 static int mlxsw_sp_macvlan_replay_upper(struct net_device *dev,
10081 					 struct netdev_nested_priv *priv)
10082 {
10083 	const struct mlxsw_sp_macvlan_replay *rms = priv->data;
10084 	struct netlink_ext_ack *extack = rms->extack;
10085 	struct mlxsw_sp *mlxsw_sp = rms->mlxsw_sp;
10086 
10087 	if (!netif_is_macvlan(dev))
10088 		return 0;
10089 
10090 	return mlxsw_sp_rif_macvlan_add(mlxsw_sp, dev, extack);
10091 }
10092 
10093 static int mlxsw_sp_macvlan_replay(struct mlxsw_sp_rif *rif,
10094 				   struct netlink_ext_ack *extack)
10095 {
10096 	struct mlxsw_sp_macvlan_replay rms = {
10097 		.mlxsw_sp = rif->mlxsw_sp,
10098 		.extack = extack,
10099 	};
10100 	struct netdev_nested_priv priv = {
10101 		.data = &rms,
10102 	};
10103 
10104 	return netdev_walk_all_upper_dev_rcu(mlxsw_sp_rif_dev(rif),
10105 					     mlxsw_sp_macvlan_replay_upper,
10106 					     &priv);
10107 }
10108 
10109 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
10110 					struct netdev_nested_priv *priv)
10111 {
10112 	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
10113 
10114 	if (!netif_is_macvlan(dev))
10115 		return 0;
10116 
10117 	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10118 				   mlxsw_sp_fid_index(rif->fid), false);
10119 }
10120 
10121 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
10122 {
10123 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10124 	struct netdev_nested_priv priv = {
10125 		.data = (void *)rif,
10126 	};
10127 
10128 	if (!netif_is_macvlan_port(dev))
10129 		return 0;
10130 
10131 	return netdev_walk_all_upper_dev_rcu(dev,
10132 					     __mlxsw_sp_rif_macvlan_flush, &priv);
10133 }
10134 
10135 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
10136 				       const struct mlxsw_sp_rif_params *params)
10137 {
10138 	struct mlxsw_sp_rif_subport *rif_subport;
10139 
10140 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
10141 	refcount_set(&rif_subport->ref_count, 1);
10142 	rif_subport->vid = params->vid;
10143 	rif_subport->lag = params->lag;
10144 	if (params->lag)
10145 		rif_subport->lag_id = params->lag_id;
10146 	else
10147 		rif_subport->system_port = params->system_port;
10148 }
10149 
10150 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
10151 {
10152 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10153 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10154 	struct mlxsw_sp_rif_subport *rif_subport;
10155 	char ritr_pl[MLXSW_REG_RITR_LEN];
10156 	u16 efid;
10157 
10158 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
10159 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
10160 			    rif->rif_index, rif->vr_id, dev->mtu);
10161 	mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
10162 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
10163 	efid = mlxsw_sp_fid_index(rif->fid);
10164 	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
10165 				  rif_subport->lag ? rif_subport->lag_id :
10166 						     rif_subport->system_port,
10167 				  efid, 0);
10168 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10169 }
10170 
10171 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
10172 					  struct netlink_ext_ack *extack)
10173 {
10174 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10175 	u8 mac_profile;
10176 	int err;
10177 
10178 	err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
10179 					   &mac_profile, extack);
10180 	if (err)
10181 		return err;
10182 	rif->mac_profile_id = mac_profile;
10183 
10184 	err = mlxsw_sp_rif_subport_op(rif, true);
10185 	if (err)
10186 		goto err_rif_subport_op;
10187 
10188 	err = mlxsw_sp_macvlan_replay(rif, extack);
10189 	if (err)
10190 		goto err_macvlan_replay;
10191 
10192 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10193 				  mlxsw_sp_fid_index(rif->fid), true);
10194 	if (err)
10195 		goto err_rif_fdb_op;
10196 
10197 	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
10198 	if (err)
10199 		goto err_fid_rif_set;
10200 
10201 	return 0;
10202 
10203 err_fid_rif_set:
10204 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10205 			    mlxsw_sp_fid_index(rif->fid), false);
10206 err_rif_fdb_op:
10207 	mlxsw_sp_rif_macvlan_flush(rif);
10208 err_macvlan_replay:
10209 	mlxsw_sp_rif_subport_op(rif, false);
10210 err_rif_subport_op:
10211 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
10212 	return err;
10213 }
10214 
10215 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
10216 {
10217 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10218 	struct mlxsw_sp_fid *fid = rif->fid;
10219 
10220 	mlxsw_sp_fid_rif_unset(fid);
10221 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10222 			    mlxsw_sp_fid_index(fid), false);
10223 	mlxsw_sp_rif_macvlan_flush(rif);
10224 	mlxsw_sp_rif_subport_op(rif, false);
10225 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10226 }
10227 
10228 static struct mlxsw_sp_fid *
10229 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
10230 			     const struct mlxsw_sp_rif_params *params,
10231 			     struct netlink_ext_ack *extack)
10232 {
10233 	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
10234 }
10235 
10236 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
10237 	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
10238 	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
10239 	.setup			= mlxsw_sp_rif_subport_setup,
10240 	.configure		= mlxsw_sp_rif_subport_configure,
10241 	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
10242 	.fid_get		= mlxsw_sp_rif_subport_fid_get,
10243 };
10244 
10245 static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
10246 {
10247 	enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
10248 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10249 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10250 	char ritr_pl[MLXSW_REG_RITR_LEN];
10251 
10252 	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
10253 			    dev->mtu);
10254 	mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
10255 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
10256 	mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
10257 
10258 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10259 }
10260 
10261 u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
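/* The "router port" is a virtual port one past the highest possible
 * front-panel port; for example, with at most 64 ports it is port 65. It
 * is used below as the flood-table member through which broadcast and
 * multicast traffic in a FID is delivered to the router.
 */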
10262 {
10263 	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
10264 }
10265 
10266 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
10267 				      struct netlink_ext_ack *extack)
10268 {
10269 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10270 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10271 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
10272 	u8 mac_profile;
10273 	int err;
10274 
10275 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
10276 					   &mac_profile, extack);
10277 	if (err)
10278 		return err;
10279 	rif->mac_profile_id = mac_profile;
10280 
10281 	err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
10282 	if (err)
10283 		goto err_rif_fid_op;
10284 
10285 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10286 				     mlxsw_sp_router_port(mlxsw_sp), true);
10287 	if (err)
10288 		goto err_fid_mc_flood_set;
10289 
10290 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10291 				     mlxsw_sp_router_port(mlxsw_sp), true);
10292 	if (err)
10293 		goto err_fid_bc_flood_set;
10294 
10295 	err = mlxsw_sp_macvlan_replay(rif, extack);
10296 	if (err)
10297 		goto err_macvlan_replay;
10298 
10299 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10300 				  mlxsw_sp_fid_index(rif->fid), true);
10301 	if (err)
10302 		goto err_rif_fdb_op;
10303 
10304 	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
10305 	if (err)
10306 		goto err_fid_rif_set;
10307 
10308 	return 0;
10309 
10310 err_fid_rif_set:
10311 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10312 			    mlxsw_sp_fid_index(rif->fid), false);
10313 err_rif_fdb_op:
10314 	mlxsw_sp_rif_macvlan_flush(rif);
10315 err_macvlan_replay:
10316 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10317 			       mlxsw_sp_router_port(mlxsw_sp), false);
10318 err_fid_bc_flood_set:
10319 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10320 			       mlxsw_sp_router_port(mlxsw_sp), false);
10321 err_fid_mc_flood_set:
10322 	mlxsw_sp_rif_fid_op(rif, fid_index, false);
10323 err_rif_fid_op:
10324 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
10325 	return err;
10326 }
10327 
10328 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
10329 {
10330 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10331 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
10332 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10333 	struct mlxsw_sp_fid *fid = rif->fid;
10334 
10335 	mlxsw_sp_fid_rif_unset(fid);
10336 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10337 			    mlxsw_sp_fid_index(fid), false);
10338 	mlxsw_sp_rif_macvlan_flush(rif);
10339 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10340 			       mlxsw_sp_router_port(mlxsw_sp), false);
10341 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10342 			       mlxsw_sp_router_port(mlxsw_sp), false);
10343 	mlxsw_sp_rif_fid_op(rif, fid_index, false);
10344 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10345 }
10346 
10347 static struct mlxsw_sp_fid *
10348 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
10349 			 const struct mlxsw_sp_rif_params *params,
10350 			 struct netlink_ext_ack *extack)
10351 {
10352 	int rif_ifindex = mlxsw_sp_rif_dev_ifindex(rif);
10353 
10354 	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif_ifindex);
10355 }
10356 
10357 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
10358 {
10359 	struct switchdev_notifier_fdb_info info = {};
10360 	struct net_device *dev;
10361 
10362 	dev = br_fdb_find_port(mlxsw_sp_rif_dev(rif), mac, 0);
10363 	if (!dev)
10364 		return;
10365 
10366 	info.addr = mac;
10367 	info.vid = 0;
10368 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
10369 				 NULL);
10370 }
10371 
10372 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
10373 	.type			= MLXSW_SP_RIF_TYPE_FID,
10374 	.rif_size		= sizeof(struct mlxsw_sp_rif),
10375 	.configure		= mlxsw_sp_rif_fid_configure,
10376 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
10377 	.fid_get		= mlxsw_sp_rif_fid_fid_get,
10378 	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
10379 };
10380 
10381 static struct mlxsw_sp_fid *
10382 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
10383 			  const struct mlxsw_sp_rif_params *params,
10384 			  struct netlink_ext_ack *extack)
10385 {
10386 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10387 	struct net_device *br_dev;
10388 
10389 	if (WARN_ON(!params->vid))
10390 		return ERR_PTR(-EINVAL);
10391 
10392 	if (is_vlan_dev(dev)) {
10393 		br_dev = vlan_dev_real_dev(dev);
10394 		if (WARN_ON(!netif_is_bridge_master(br_dev)))
10395 			return ERR_PTR(-EINVAL);
10396 	}
10397 
10398 	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, params->vid);
10399 }
10400 
10401 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
10402 {
10403 	struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
10404 	struct switchdev_notifier_fdb_info info = {};
10405 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10406 	struct net_device *br_dev;
10407 	struct net_device *dev;
10408 
10409 	br_dev = is_vlan_dev(rif_dev) ? vlan_dev_real_dev(rif_dev) : rif_dev;
10410 	dev = br_fdb_find_port(br_dev, mac, vid);
10411 	if (!dev)
10412 		return;
10413 
10414 	info.addr = mac;
10415 	info.vid = vid;
10416 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
10417 				 NULL);
10418 }
10419 
10420 static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
10421 				bool enable)
10422 {
10423 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10424 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10425 	char ritr_pl[MLXSW_REG_RITR_LEN];
10426 
10427 	mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
10428 				    dev->mtu, dev->dev_addr,
10429 				    rif->mac_profile_id, vid, efid);
10430 
10431 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10432 }
10433 
10434 static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
10435 				       struct netlink_ext_ack *extack)
10436 {
10437 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10438 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10439 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10440 	u8 mac_profile;
10441 	int err;
10442 
10443 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
10444 					   &mac_profile, extack);
10445 	if (err)
10446 		return err;
10447 	rif->mac_profile_id = mac_profile;
10448 
10449 	err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
10450 	if (err)
10451 		goto err_rif_vlan_fid_op;
10452 
10453 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10454 				     mlxsw_sp_router_port(mlxsw_sp), true);
10455 	if (err)
10456 		goto err_fid_mc_flood_set;
10457 
10458 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10459 				     mlxsw_sp_router_port(mlxsw_sp), true);
10460 	if (err)
10461 		goto err_fid_bc_flood_set;
10462 
10463 	err = mlxsw_sp_macvlan_replay(rif, extack);
10464 	if (err)
10465 		goto err_macvlan_replay;
10466 
10467 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10468 				  mlxsw_sp_fid_index(rif->fid), true);
10469 	if (err)
10470 		goto err_rif_fdb_op;
10471 
10472 	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
10473 	if (err)
10474 		goto err_fid_rif_set;
10475 
10476 	return 0;
10477 
10478 err_fid_rif_set:
10479 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10480 			    mlxsw_sp_fid_index(rif->fid), false);
10481 err_rif_fdb_op:
10482 	mlxsw_sp_rif_macvlan_flush(rif);
10483 err_macvlan_replay:
10484 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10485 			       mlxsw_sp_router_port(mlxsw_sp), false);
10486 err_fid_bc_flood_set:
10487 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10488 			       mlxsw_sp_router_port(mlxsw_sp), false);
10489 err_fid_mc_flood_set:
10490 	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
10491 err_rif_vlan_fid_op:
10492 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
10493 	return err;
10494 }
10495 
10496 static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
10497 {
10498 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10499 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10500 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10501 
10502 	mlxsw_sp_fid_rif_unset(rif->fid);
10503 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10504 			    mlxsw_sp_fid_index(rif->fid), false);
10505 	mlxsw_sp_rif_macvlan_flush(rif);
10506 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10507 			       mlxsw_sp_router_port(mlxsw_sp), false);
10508 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10509 			       mlxsw_sp_router_port(mlxsw_sp), false);
10510 	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
10511 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10512 }
10513 
10514 static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
10515 					struct netlink_ext_ack *extack)
10516 {
10517 	return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
10518 }
10519 
10520 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
10521 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
10522 	.rif_size		= sizeof(struct mlxsw_sp_rif),
10523 	.configure		= mlxsw_sp1_rif_vlan_configure,
10524 	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
10525 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
10526 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
10527 };
10528 
10529 static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
10530 					struct netlink_ext_ack *extack)
10531 {
10532 	u16 efid = mlxsw_sp_fid_index(rif->fid);
10533 
10534 	return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
10535 }
10536 
10537 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
10538 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
10539 	.rif_size		= sizeof(struct mlxsw_sp_rif),
10540 	.configure		= mlxsw_sp2_rif_vlan_configure,
10541 	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
10542 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
10543 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
10544 };
10545 
10546 static struct mlxsw_sp_rif_ipip_lb *
10547 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
10548 {
10549 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
10550 }
10551 
10552 static void
10553 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
10554 			   const struct mlxsw_sp_rif_params *params)
10555 {
10556 	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
10557 	struct mlxsw_sp_rif_ipip_lb *rif_lb;
10558 
10559 	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
10560 				 common);
10561 	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
10562 	rif_lb->lb_config = params_lb->lb_config;
10563 }
10564 
10565 static int
10566 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10567 				struct netlink_ext_ack *extack)
10568 {
10569 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10570 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10571 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
10572 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10573 	struct mlxsw_sp_vr *ul_vr;
10574 	int err;
10575 
10576 	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, extack);
10577 	if (IS_ERR(ul_vr))
10578 		return PTR_ERR(ul_vr);
10579 
10580 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
10581 	if (err)
10582 		goto err_loopback_op;
10583 
10584 	lb_rif->ul_vr_id = ul_vr->id;
10585 	lb_rif->ul_rif_id = 0;
10586 	++ul_vr->rif_count;
10587 	return 0;
10588 
10589 err_loopback_op:
10590 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
10591 	return err;
10592 }
10593 
10594 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
10595 {
10596 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10597 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10598 	struct mlxsw_sp_vr *ul_vr;
10599 
10600 	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
10601 	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
10602 
10603 	--ul_vr->rif_count;
10604 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
10605 }
10606 
10607 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
10608 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
10609 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
10610 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
10611 	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
10612 	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
10613 };
10614 
10615 static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
10616 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
10617 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp1_rif_vlan_ops,
10618 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
10619 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
10620 };
10621 
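/* On Spectrum-2 and later, IP-in-IP loopback RIFs point at a generic
 * loopback "underlay RIF" rather than directly at an underlay VR as on
 * Spectrum-1. The helpers below create that underlay RIF and share it,
 * reference-counted, between all loopbacks of the same VR.
 */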
10622 static int
10623 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
10624 {
10625 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10626 	char ritr_pl[MLXSW_REG_RITR_LEN];
10627 
10628 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
10629 			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
10630 	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
10631 					     MLXSW_REG_RITR_LOOPBACK_GENERIC);
10632 
10633 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10634 }
10635 
10636 static struct mlxsw_sp_rif *
10637 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
10638 		       struct mlxsw_sp_crif *ul_crif,
10639 		       struct netlink_ext_ack *extack)
10640 {
10641 	struct mlxsw_sp_rif *ul_rif;
10642 	u8 rif_entries = 1;
10643 	u16 rif_index;
10644 	int err;
10645 
10646 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
10647 	if (err) {
10648 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
10649 		return ERR_PTR(err);
10650 	}
10651 
10652 	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id,
10653 				    ul_crif);
10654 	if (!ul_rif) {
10655 		err = -ENOMEM;
10656 		goto err_rif_alloc;
10657 	}
10658 
10659 	mlxsw_sp->router->rifs[rif_index] = ul_rif;
10660 	ul_rif->mlxsw_sp = mlxsw_sp;
10661 	ul_rif->rif_entries = rif_entries;
10662 	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
10663 	if (err)
10664 		goto ul_rif_op_err;
10665 
10666 	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
10667 	return ul_rif;
10668 
10669 ul_rif_op_err:
10670 	mlxsw_sp->router->rifs[rif_index] = NULL;
10671 	mlxsw_sp_rif_free(ul_rif);
10672 err_rif_alloc:
10673 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10674 	return ERR_PTR(err);
10675 }
10676 
10677 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
10678 {
10679 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10680 	u8 rif_entries = ul_rif->rif_entries;
10681 	u16 rif_index = ul_rif->rif_index;
10682 
10683 	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
10684 	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
10685 	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
10686 	mlxsw_sp_rif_free(ul_rif);
10687 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10688 }
10689 
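/* A virtual router has at most one UL RIF, shared by all users of its
 * underlay table: the first getter creates it and initializes the
 * refcount to 1, later getters only bump the refcount, and the last
 * mlxsw_sp_ul_rif_put() destroys it and releases the VR.
 */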
10690 static struct mlxsw_sp_rif *
10691 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
10692 		    struct mlxsw_sp_crif *ul_crif,
10693 		    struct netlink_ext_ack *extack)
10694 {
10695 	struct mlxsw_sp_vr *vr;
10696 	int err;
10697 
10698 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
10699 	if (IS_ERR(vr))
10700 		return ERR_CAST(vr);
10701 
10702 	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
10703 		return vr->ul_rif;
10704 
10705 	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, ul_crif, extack);
10706 	if (IS_ERR(vr->ul_rif)) {
10707 		err = PTR_ERR(vr->ul_rif);
10708 		goto err_ul_rif_create;
10709 	}
10710 
10711 	vr->rif_count++;
10712 	refcount_set(&vr->ul_rif_refcnt, 1);
10713 
10714 	return vr->ul_rif;
10715 
10716 err_ul_rif_create:
10717 	mlxsw_sp_vr_put(mlxsw_sp, vr);
10718 	return ERR_PTR(err);
10719 }
10720 
10721 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
10722 {
10723 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10724 	struct mlxsw_sp_vr *vr;
10725 
10726 	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
10727 
10728 	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
10729 		return;
10730 
10731 	vr->rif_count--;
10732 	mlxsw_sp_ul_rif_destroy(ul_rif);
10733 	mlxsw_sp_vr_put(mlxsw_sp, vr);
10734 }
10735 
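/* Entry point for code outside this file that only needs the UL RIF
 * index. It takes the router lock around the lookup. A hypothetical
 * caller (names below are illustrative) would pair it with
 * mlxsw_sp_router_ul_rif_put():
 *
 *	u16 ul_rif_index;
 *	int err;
 *
 *	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, ul_tb_id, &ul_rif_index);
 *	if (err)
 *		return err;
 *	...
 *	mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
 */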
10736 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
10737 			       u16 *ul_rif_index)
10738 {
10739 	struct mlxsw_sp_rif *ul_rif;
10740 	int err = 0;
10741 
10742 	mutex_lock(&mlxsw_sp->router->lock);
10743 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, NULL);
10744 	if (IS_ERR(ul_rif)) {
10745 		err = PTR_ERR(ul_rif);
10746 		goto out;
10747 	}
10748 	*ul_rif_index = ul_rif->rif_index;
10749 out:
10750 	mutex_unlock(&mlxsw_sp->router->lock);
10751 	return err;
10752 }
10753 
10754 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
10755 {
10756 	struct mlxsw_sp_rif *ul_rif;
10757 
10758 	mutex_lock(&mlxsw_sp->router->lock);
10759 	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
10760 	if (WARN_ON(!ul_rif))
10761 		goto out;
10762 
10763 	mlxsw_sp_ul_rif_put(ul_rif);
10764 out:
10765 	mutex_unlock(&mlxsw_sp->router->lock);
10766 }
10767 
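/* On Spectrum-2 and later, a loopback RIF is bound to the shared UL RIF
 * of the underlay table instead of to a dedicated UL virtual router as
 * on Spectrum-1, which is why ul_vr_id is left at zero here.
 */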
10768 static int
10769 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10770 				struct netlink_ext_ack *extack)
10771 {
10772 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10773 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10774 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
10775 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10776 	struct mlxsw_sp_rif *ul_rif;
10777 	int err;
10778 
10779 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, extack);
10780 	if (IS_ERR(ul_rif))
10781 		return PTR_ERR(ul_rif);
10782 
10783 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
10784 	if (err)
10785 		goto err_loopback_op;
10786 
10787 	lb_rif->ul_vr_id = 0;
10788 	lb_rif->ul_rif_id = ul_rif->rif_index;
10789 
10790 	return 0;
10791 
10792 err_loopback_op:
10793 	mlxsw_sp_ul_rif_put(ul_rif);
10794 	return err;
10795 }
10796 
10797 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
10798 {
10799 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10800 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10801 	struct mlxsw_sp_rif *ul_rif;
10802 
10803 	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
10804 	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
10805 	mlxsw_sp_ul_rif_put(ul_rif);
10806 }
10807 
10808 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
10809 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
10810 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
10811 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
10812 	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
10813 	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
10814 };
10815 
10816 static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
10817 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
10818 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp2_rif_vlan_ops,
10819 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
10820 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
10821 };
10822 
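/* RIF indexes are handed out by a genalloc pool. gen_pool_alloc() uses 0
 * as its failure value, so the pool is based at
 * MLXSW_SP_ROUTER_GENALLOC_OFFSET to keep RIF index 0 allocatable, and
 * the offset is subtracted again when an allocation is translated back
 * into a RIF index. The first-fit-order-align algorithm keeps
 * multi-entry RIFs naturally aligned.
 */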
10823 static int mlxsw_sp_rifs_table_init(struct mlxsw_sp *mlxsw_sp)
10824 {
10825 	struct gen_pool *rifs_table;
10826 	int err;
10827 
10828 	rifs_table = gen_pool_create(0, -1);
10829 	if (!rifs_table)
10830 		return -ENOMEM;
10831 
10832 	gen_pool_set_algo(rifs_table, gen_pool_first_fit_order_align,
10833 			  NULL);
10834 
10835 	err = gen_pool_add(rifs_table, MLXSW_SP_ROUTER_GENALLOC_OFFSET,
10836 			   MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS), -1);
10837 	if (err)
10838 		goto err_gen_pool_add;
10839 
10840 	mlxsw_sp->router->rifs_table = rifs_table;
10841 
10842 	return 0;
10843 
10844 err_gen_pool_add:
10845 	gen_pool_destroy(rifs_table);
10846 	return err;
10847 }
10848 
10849 static void mlxsw_sp_rifs_table_fini(struct mlxsw_sp *mlxsw_sp)
10850 {
10851 	gen_pool_destroy(mlxsw_sp->router->rifs_table);
10852 }
10853 
10854 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
10855 {
10856 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10857 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10858 	struct mlxsw_core *core = mlxsw_sp->core;
10859 	int err;
10860 
10861 	if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
10862 		return -EIO;
10863 	mlxsw_sp->router->max_rif_mac_profile =
10864 		MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
10865 
10866 	mlxsw_sp->router->rifs = kcalloc(max_rifs,
10867 					 sizeof(struct mlxsw_sp_rif *),
10868 					 GFP_KERNEL);
10869 	if (!mlxsw_sp->router->rifs)
10870 		return -ENOMEM;
10871 
10872 	err = mlxsw_sp_rifs_table_init(mlxsw_sp);
10873 	if (err)
10874 		goto err_rifs_table_init;
10875 
10876 	idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
10877 	atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
10878 	atomic_set(&mlxsw_sp->router->rifs_count, 0);
10879 	devl_resource_occ_get_register(devlink,
10880 				       MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
10881 				       mlxsw_sp_rif_mac_profiles_occ_get,
10882 				       mlxsw_sp);
10883 	devl_resource_occ_get_register(devlink,
10884 				       MLXSW_SP_RESOURCE_RIFS,
10885 				       mlxsw_sp_rifs_occ_get,
10886 				       mlxsw_sp);
10887 
10888 	return 0;
10889 
10890 err_rifs_table_init:
10891 	kfree(mlxsw_sp->router->rifs);
10892 	return err;
10893 }
10894 
10895 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
10896 {
10897 	int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10898 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10899 	int i;
10900 
10901 	WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
10902 	for (i = 0; i < max_rifs; i++)
10903 		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
10904 
10905 	devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
10906 	devl_resource_occ_get_unregister(devlink,
10907 					 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
10908 	WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
10909 	idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
10910 	mlxsw_sp_rifs_table_fini(mlxsw_sp);
10911 	kfree(mlxsw_sp->router->rifs);
10912 }
10913 
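/* TIGCR is the tunneling IP-in-IP general configuration register.
 * Packing it with (true, 0) sets the ttlc ("TTL copy") bit, so the TTL
 * of the encapsulating header is copied from the packet instead of being
 * taken from the fixed ttl_uc value.
 */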
10914 static int
10915 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
10916 {
10917 	char tigcr_pl[MLXSW_REG_TIGCR_LEN];
10918 
10919 	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
10920 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
10921 }
10922 
10923 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
10924 {
10925 	int err;
10926 
10927 	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
10928 
10929 	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
10930 	if (err)
10931 		return err;
10932 	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
10933 	if (err)
10934 		return err;
10935 
10936 	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
10937 }
10938 
10939 static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
10940 {
10941 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
10942 	return mlxsw_sp_ipips_init(mlxsw_sp);
10943 }
10944 
10945 static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
10946 {
10947 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
10948 	return mlxsw_sp_ipips_init(mlxsw_sp);
10949 }
10950 
10951 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
10952 {
10953 	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
10954 }
10955 
10956 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
10957 {
10958 	struct mlxsw_sp_router *router;
10959 
10960 	/* Flush pending FIB notifications and then flush the device's
10961 	 * table before requesting another dump. The FIB notification
10962 	 * block is unregistered, so no need to take RTNL.
10963 	 */
10964 	mlxsw_core_flush_owq();
10965 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
10966 	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
10967 }
10968 
10969 #ifdef CONFIG_IP_ROUTE_MULTIPATH
10970 struct mlxsw_sp_mp_hash_config {
10971 	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
10972 	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
10973 	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
10974 	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
10975 	bool inc_parsing_depth;
10976 };
10977 
10978 #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
10979 	bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
10980 
10981 #define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
10982 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
10983 
10984 #define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
10985 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
10986 
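/* Multipath hash configuration is first collected into the bitmaps of
 * struct mlxsw_sp_mp_hash_config and only written to HW once, in
 * mlxsw_sp_mp_hash_init(). The helpers below merely set the RECR2
 * header/field enable bits matching the kernel's multipath hash policy.
 */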
10987 static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
10988 {
10989 	unsigned long *inner_headers = config->inner_headers;
10990 	unsigned long *inner_fields = config->inner_fields;
10991 
10992 	/* IPv4 inner */
10993 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
10994 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
10995 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
10996 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
10997 	/* IPv6 inner */
10998 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
10999 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
11000 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
11001 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
11002 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
11003 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
11004 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
11005 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
11006 }
11007 
11008 static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
11009 {
11010 	unsigned long *headers = config->headers;
11011 	unsigned long *fields = config->fields;
11012 
11013 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
11014 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
11015 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
11016 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
11017 }
11018 
11019 static void
11020 mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
11021 			      u32 hash_fields)
11022 {
11023 	unsigned long *inner_headers = config->inner_headers;
11024 	unsigned long *inner_fields = config->inner_fields;
11025 
11026 	/* IPv4 inner */
11027 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
11028 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
11029 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
11030 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
11031 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
11032 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
11033 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
11034 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
11035 	/* IPv6 inner */
11036 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
11037 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
11038 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
11039 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
11040 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
11041 	}
11042 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
11043 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
11044 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
11045 	}
11046 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
11047 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
11048 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
11049 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
11050 	/* L4 inner */
11051 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
11052 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
11053 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
11054 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
11055 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
11056 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
11057 }
11058 
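/* net.ipv4.fib_multipath_hash_policy selects what is fed into the hash:
 * 0 - outer L3, 1 - outer L4 (5-tuple), 2 - outer L3 plus inner L3 when
 * an encapsulated packet is present, 3 - the custom field set from
 * net.ipv4.fib_multipath_hash_fields. For example,
 * "sysctl -w net.ipv4.fib_multipath_hash_policy=1" exercises case 1
 * below.
 */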
11059 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
11060 				   struct mlxsw_sp_mp_hash_config *config)
11061 {
11062 	struct net *net = mlxsw_sp_net(mlxsw_sp);
11063 	unsigned long *headers = config->headers;
11064 	unsigned long *fields = config->fields;
11065 	u32 hash_fields;
11066 
11067 	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
11068 	case 0:
11069 		mlxsw_sp_mp4_hash_outer_addr(config);
11070 		break;
11071 	case 1:
11072 		mlxsw_sp_mp4_hash_outer_addr(config);
11073 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
11074 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
11075 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11076 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11077 		break;
11078 	case 2:
11079 		/* Outer */
11080 		mlxsw_sp_mp4_hash_outer_addr(config);
11081 		/* Inner */
11082 		mlxsw_sp_mp_hash_inner_l3(config);
11083 		break;
11084 	case 3:
11085 		hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
11086 		/* Outer */
11087 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
11088 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
11089 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
11090 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
11091 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
11092 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
11093 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
11094 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
11095 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
11096 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
11097 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11098 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
11099 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11100 		/* Inner */
11101 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
11102 		break;
11103 	}
11104 }
11105 
11106 static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
11107 {
11108 	unsigned long *headers = config->headers;
11109 	unsigned long *fields = config->fields;
11110 
11111 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
11112 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
11113 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
11114 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
11115 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
11116 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
11117 }
11118 
11119 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
11120 				   struct mlxsw_sp_mp_hash_config *config)
11121 {
11122 	u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
11123 	unsigned long *headers = config->headers;
11124 	unsigned long *fields = config->fields;
11125 
11126 	switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
11127 	case 0:
11128 		mlxsw_sp_mp6_hash_outer_addr(config);
11129 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11130 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
11131 		break;
11132 	case 1:
11133 		mlxsw_sp_mp6_hash_outer_addr(config);
11134 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
11135 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11136 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11137 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11138 		break;
11139 	case 2:
11140 		/* Outer */
11141 		mlxsw_sp_mp6_hash_outer_addr(config);
11142 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11143 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
11144 		/* Inner */
11145 		mlxsw_sp_mp_hash_inner_l3(config);
11146 		config->inc_parsing_depth = true;
11147 		break;
11148 	case 3:
11149 		/* Outer */
11150 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
11151 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
11152 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
11153 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
11154 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
11155 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
11156 		}
11157 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
11158 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
11159 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
11160 		}
11161 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
11162 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11163 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
11164 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
11165 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
11166 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11167 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
11168 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11169 		/* Inner */
11170 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
11171 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
11172 			config->inc_parsing_depth = true;
11173 		break;
11174 	}
11175 }
11176 
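/* Hashing on inner headers requires the HW parser to look deeper into
 * the packet than it does by default. The parsing depth is refcounted
 * by mlxsw_sp_parsing_depth_inc()/_dec(), so only transitions between
 * "inner fields used" and "inner fields unused" touch it.
 */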
11177 static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
11178 						 bool old_inc_parsing_depth,
11179 						 bool new_inc_parsing_depth)
11180 {
11181 	int err;
11182 
11183 	if (!old_inc_parsing_depth && new_inc_parsing_depth) {
11184 		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
11185 		if (err)
11186 			return err;
11187 		mlxsw_sp->router->inc_parsing_depth = true;
11188 	} else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
11189 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
11190 		mlxsw_sp->router->inc_parsing_depth = false;
11191 	}
11192 
11193 	return 0;
11194 }
11195 
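/* The hash seed is derived from the switch's base MAC, so it is stable
 * across reloads of a given device but differs between devices, which
 * presumably helps avoid hash polarization between identical switches
 * in a multi-tier topology.
 */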
11196 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
11197 {
11198 	bool old_inc_parsing_depth, new_inc_parsing_depth;
11199 	struct mlxsw_sp_mp_hash_config config = {};
11200 	char recr2_pl[MLXSW_REG_RECR2_LEN];
11201 	unsigned long bit;
11202 	u32 seed;
11203 	int err;
11204 
11205 	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
11206 	mlxsw_reg_recr2_pack(recr2_pl, seed);
11207 	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
11208 	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
11209 
11210 	old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
11211 	new_inc_parsing_depth = config.inc_parsing_depth;
11212 	err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
11213 						    old_inc_parsing_depth,
11214 						    new_inc_parsing_depth);
11215 	if (err)
11216 		return err;
11217 
11218 	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
11219 		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
11220 	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
11221 		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
11222 	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
11223 		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
11224 	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
11225 		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
11226 
11227 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
11228 	if (err)
11229 		goto err_reg_write;
11230 
11231 	return 0;
11232 
11233 err_reg_write:
11234 	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
11235 					      old_inc_parsing_depth);
11236 	return err;
11237 }
11238 
11239 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
11240 {
11241 	bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
11242 
11243 	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
11244 					      false);
11245 }
11246 #else
11247 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
11248 {
11249 	return 0;
11250 }
11251 
11252 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
11253 {
11254 }
11255 #endif
11256 
11257 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
11258 {
11259 	char rdpm_pl[MLXSW_REG_RDPM_LEN];
11260 	unsigned int i;
11261 
11262 	MLXSW_REG_ZERO(rdpm, rdpm_pl);
11263 
11264 	/* HW determines switch priority from the DSCP bits, but the kernel
11265 	 * still derives it from the full ToS byte. Since the bit layouts
11266 	 * differ, translate each DSCP value into the priority the kernel would
11267 	 * see for the matching ToS, skipping the 2 least-significant ECN bits.
11268 	 */
11269 	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
11270 		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
11271 
11272 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
11273 }
11274 
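/* RGCR enables the router in HW. Besides turning on IPv4 and IPv6
 * routing, it caps the number of RIFs and mirrors the kernel's
 * net.ipv4.ip_forward_update_priority setting into the usp ("update
 * switch priority") bit.
 */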
11275 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
11276 {
11277 	struct net *net = mlxsw_sp_net(mlxsw_sp);
11278 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
11279 	u64 max_rifs;
11280 	bool usp;
11281 
11282 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
11283 		return -EIO;
11284 	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
11285 	usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
11286 
11287 	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
11288 	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
11289 	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
11290 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
11291 }
11292 
11293 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
11294 {
11295 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
11296 
11297 	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
11298 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
11299 }
11300 
11301 static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp,
11302 				struct netlink_ext_ack *extack)
11303 {
11304 	struct mlxsw_sp_router *router = mlxsw_sp->router;
11305 	struct mlxsw_sp_rif *lb_rif;
11306 	int err;
11307 
11308 	router->lb_crif = mlxsw_sp_crif_alloc(NULL);
11309 	if (!router->lb_crif)
11310 		return -ENOMEM;
11311 
11312 	/* Create a generic loopback RIF associated with the main table
11313 	 * (default VRF). Any table can be used, but the main table exists
11314 	 * anyway, so we do not waste resources. Loopback RIFs are usually
11315 	 * created with a NULL CRIF, but this RIF is used as a fallback RIF
11316 	 * for blackhole nexthops, and nexthops expect to have a valid CRIF.
11317 	 */
11318 	lb_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN, router->lb_crif,
11319 				     extack);
11320 	if (IS_ERR(lb_rif)) {
11321 		err = PTR_ERR(lb_rif);
11322 		goto err_ul_rif_get;
11323 	}
11324 
11325 	return 0;
11326 
11327 err_ul_rif_get:
11328 	mlxsw_sp_crif_free(router->lb_crif);
11329 	return err;
11330 }
11331 
11332 static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
11333 {
11334 	mlxsw_sp_ul_rif_put(mlxsw_sp->router->lb_crif->rif);
11335 	mlxsw_sp_crif_free(mlxsw_sp->router->lb_crif);
11336 }
11337 
11338 static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
11339 {
11340 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
11341 
11342 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
11343 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
11344 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
11345 
11346 	return 0;
11347 }
11348 
11349 const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
11350 	.init = mlxsw_sp1_router_init,
11351 	.ipips_init = mlxsw_sp1_ipips_init,
11352 };
11353 
11354 static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
11355 {
11356 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
11357 
11358 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
11359 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
11360 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
11361 
11362 	return 0;
11363 }
11364 
11365 const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
11366 	.init = mlxsw_sp2_router_init,
11367 	.ipips_init = mlxsw_sp2_ipips_init,
11368 };
11369 
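/* The FIB notifier is registered last on purpose: registration replays
 * the existing FIB entries through mlxsw_sp_router_fib_event(), so
 * everything the handler depends on must already be initialized. The
 * error path unwinds strictly in reverse order.
 */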
11370 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
11371 			 struct netlink_ext_ack *extack)
11372 {
11373 	struct mlxsw_sp_router *router;
11374 	struct notifier_block *nb;
11375 	int err;
11376 
11377 	router = kzalloc(sizeof(*router), GFP_KERNEL);
11378 	if (!router)
11379 		return -ENOMEM;
11380 	mutex_init(&router->lock);
11381 	mlxsw_sp->router = router;
11382 	router->mlxsw_sp = mlxsw_sp;
11383 
11384 	err = mlxsw_sp->router_ops->init(mlxsw_sp);
11385 	if (err)
11386 		goto err_router_ops_init;
11387 
11388 	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
11389 	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
11390 			  mlxsw_sp_nh_grp_activity_work);
11391 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
11392 	err = __mlxsw_sp_router_init(mlxsw_sp);
11393 	if (err)
11394 		goto err_router_init;
11395 
11396 	err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
11397 	if (err)
11398 		goto err_ipips_init;
11399 
11400 	err = rhashtable_init(&mlxsw_sp->router->crif_ht,
11401 			      &mlxsw_sp_crif_ht_params);
11402 	if (err)
11403 		goto err_crif_ht_init;
11404 
11405 	err = mlxsw_sp_rifs_init(mlxsw_sp);
11406 	if (err)
11407 		goto err_rifs_init;
11408 
11409 	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
11410 			      &mlxsw_sp_nexthop_ht_params);
11411 	if (err)
11412 		goto err_nexthop_ht_init;
11413 
11414 	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
11415 			      &mlxsw_sp_nexthop_group_ht_params);
11416 	if (err)
11417 		goto err_nexthop_group_ht_init;
11418 
11419 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
11420 	err = mlxsw_sp_lpm_init(mlxsw_sp);
11421 	if (err)
11422 		goto err_lpm_init;
11423 
11424 	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
11425 	if (err)
11426 		goto err_mr_init;
11427 
11428 	err = mlxsw_sp_vrs_init(mlxsw_sp);
11429 	if (err)
11430 		goto err_vrs_init;
11431 
11432 	err = mlxsw_sp_lb_rif_init(mlxsw_sp, extack);
11433 	if (err)
11434 		goto err_lb_rif_init;
11435 
11436 	err = mlxsw_sp_neigh_init(mlxsw_sp);
11437 	if (err)
11438 		goto err_neigh_init;
11439 
11440 	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
11441 	if (err)
11442 		goto err_mp_hash_init;
11443 
11444 	err = mlxsw_sp_dscp_init(mlxsw_sp);
11445 	if (err)
11446 		goto err_dscp_init;
11447 
11448 	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
11449 	err = register_inetaddr_notifier(&router->inetaddr_nb);
11450 	if (err)
11451 		goto err_register_inetaddr_notifier;
11452 
11453 	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
11454 	err = register_inet6addr_notifier(&router->inet6addr_nb);
11455 	if (err)
11456 		goto err_register_inet6addr_notifier;
11457 
11458 	router->inetaddr_valid_nb.notifier_call = mlxsw_sp_inetaddr_valid_event;
11459 	err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
11460 	if (err)
11461 		goto err_register_inetaddr_valid_notifier;
11462 
11463 	nb = &router->inet6addr_valid_nb;
11464 	nb->notifier_call = mlxsw_sp_inet6addr_valid_event;
11465 	err = register_inet6addr_validator_notifier(nb);
11466 	if (err)
11467 		goto err_register_inet6addr_valid_notifier;
11468 
11469 	mlxsw_sp->router->netevent_nb.notifier_call =
11470 		mlxsw_sp_router_netevent_event;
11471 	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
11472 	if (err)
11473 		goto err_register_netevent_notifier;
11474 
11475 	mlxsw_sp->router->netdevice_nb.notifier_call =
11476 		mlxsw_sp_router_netdevice_event;
11477 	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
11478 					      &mlxsw_sp->router->netdevice_nb);
11479 	if (err)
11480 		goto err_register_netdev_notifier;
11481 
11482 	mlxsw_sp->router->nexthop_nb.notifier_call =
11483 		mlxsw_sp_nexthop_obj_event;
11484 	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
11485 					&mlxsw_sp->router->nexthop_nb,
11486 					extack);
11487 	if (err)
11488 		goto err_register_nexthop_notifier;
11489 
11490 	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
11491 	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
11492 				    &mlxsw_sp->router->fib_nb,
11493 				    mlxsw_sp_router_fib_dump_flush, extack);
11494 	if (err)
11495 		goto err_register_fib_notifier;
11496 
11497 	return 0;
11498 
11499 err_register_fib_notifier:
11500 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
11501 				    &mlxsw_sp->router->nexthop_nb);
11502 err_register_nexthop_notifier:
11503 	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
11504 					  &router->netdevice_nb);
11505 err_register_netdev_notifier:
11506 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
11507 err_register_netevent_notifier:
11508 	unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
11509 err_register_inet6addr_valid_notifier:
11510 	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
11511 err_register_inetaddr_valid_notifier:
11512 	unregister_inet6addr_notifier(&router->inet6addr_nb);
11513 err_register_inet6addr_notifier:
11514 	unregister_inetaddr_notifier(&router->inetaddr_nb);
11515 err_register_inetaddr_notifier:
11516 	mlxsw_core_flush_owq();
11517 err_dscp_init:
11518 	mlxsw_sp_mp_hash_fini(mlxsw_sp);
11519 err_mp_hash_init:
11520 	mlxsw_sp_neigh_fini(mlxsw_sp);
11521 err_neigh_init:
11522 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
11523 err_lb_rif_init:
11524 	mlxsw_sp_vrs_fini(mlxsw_sp);
11525 err_vrs_init:
11526 	mlxsw_sp_mr_fini(mlxsw_sp);
11527 err_mr_init:
11528 	mlxsw_sp_lpm_fini(mlxsw_sp);
11529 err_lpm_init:
11530 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
11531 err_nexthop_group_ht_init:
11532 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
11533 err_nexthop_ht_init:
11534 	mlxsw_sp_rifs_fini(mlxsw_sp);
11535 err_rifs_init:
11536 	rhashtable_destroy(&mlxsw_sp->router->crif_ht);
11537 err_crif_ht_init:
11538 	mlxsw_sp_ipips_fini(mlxsw_sp);
11539 err_ipips_init:
11540 	__mlxsw_sp_router_fini(mlxsw_sp);
11541 err_router_init:
11542 	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
11543 err_router_ops_init:
11544 	mutex_destroy(&mlxsw_sp->router->lock);
11545 	kfree(mlxsw_sp->router);
11546 	return err;
11547 }
11548 
11549 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
11550 {
11551 	struct mlxsw_sp_router *router = mlxsw_sp->router;
11552 
11553 	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &router->fib_nb);
11554 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
11555 				    &router->nexthop_nb);
11556 	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
11557 					  &router->netdevice_nb);
11558 	unregister_netevent_notifier(&router->netevent_nb);
11559 	unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
11560 	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
11561 	unregister_inet6addr_notifier(&router->inet6addr_nb);
11562 	unregister_inetaddr_notifier(&router->inetaddr_nb);
11563 	mlxsw_core_flush_owq();
11564 	mlxsw_sp_mp_hash_fini(mlxsw_sp);
11565 	mlxsw_sp_neigh_fini(mlxsw_sp);
11566 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
11567 	mlxsw_sp_vrs_fini(mlxsw_sp);
11568 	mlxsw_sp_mr_fini(mlxsw_sp);
11569 	mlxsw_sp_lpm_fini(mlxsw_sp);
11570 	rhashtable_destroy(&router->nexthop_group_ht);
11571 	rhashtable_destroy(&router->nexthop_ht);
11572 	mlxsw_sp_rifs_fini(mlxsw_sp);
11573 	rhashtable_destroy(&mlxsw_sp->router->crif_ht);
11574 	mlxsw_sp_ipips_fini(mlxsw_sp);
11575 	__mlxsw_sp_router_fini(mlxsw_sp);
11576 	cancel_delayed_work_sync(&router->nh_grp_activity_dw);
11577 	mutex_destroy(&router->lock);
11578 	kfree(router);
11579 }
11580